Compare commits

1 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | bd11b10298 |  |
@@ -363,11 +363,7 @@ if(ENABLE_QT)
        set(YUZU_QT_NO_CMAKE_SYSTEM_PATH "NO_CMAKE_SYSTEM_PATH")
    endif()
    if ((${CMAKE_SYSTEM_NAME} STREQUAL "Linux") AND YUZU_USE_BUNDLED_QT)
        find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets DBus ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
    else()
        find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
    endif()
    find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
    if (YUZU_USE_QT_WEB_ENGINE)
        find_package(Qt5 COMPONENTS WebEngineCore WebEngineWidgets)
    endif()
@@ -57,11 +57,4 @@ requires std::is_integral_v<T>
    return static_cast<T>(1ULL << ((8U * sizeof(T)) - std::countl_zero(value - 1U)));
}

template <size_t bit_index, typename T>
requires std::is_integral_v<T>
[[nodiscard]] constexpr bool Bit(const T value) {
    static_assert(bit_index < BitSize<T>(), "bit_index must be smaller than size of T");
    return ((value >> bit_index) & T(1)) == T(1);
}

} // namespace Common
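For orientation, the `Bit` helper in the hunk above tests a single bit at a compile-time index. A minimal, self-contained sketch of the same pattern, re-stated outside the `Common` namespace purely for illustration:

```cpp
#include <cstddef>

// Re-statement of the Bit helper from the hunk above, for illustration only.
template <std::size_t bit_index, typename T>
constexpr bool Bit(const T value) {
    static_assert(bit_index < 8 * sizeof(T), "bit_index must be smaller than size of T");
    return ((value >> bit_index) & T(1)) == T(1);
}

static_assert(Bit<0>(0b0101u));   // lowest bit is set
static_assert(!Bit<1>(0b0101u));  // second bit is clear
static_assert(Bit<2>(0b0101u));   // third bit is set
```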
@@ -4,8 +4,6 @@

#pragma once

#include "common/alignment.h"
#include "common/common_funcs.h"
#include "common/parent_of_member.h"
#include "common/tree.h"
@@ -17,33 +15,32 @@ class IntrusiveRedBlackTreeImpl;

}

#pragma pack(push, 4)
struct IntrusiveRedBlackTreeNode {
    YUZU_NON_COPYABLE(IntrusiveRedBlackTreeNode);

public:
    using RBEntry = freebsd::RBEntry<IntrusiveRedBlackTreeNode>;
    using EntryType = RBEntry<IntrusiveRedBlackTreeNode>;

    constexpr IntrusiveRedBlackTreeNode() = default;

    void SetEntry(const EntryType& new_entry) {
        entry = new_entry;
    }

    [[nodiscard]] EntryType& GetEntry() {
        return entry;
    }

    [[nodiscard]] const EntryType& GetEntry() const {
        return entry;
    }

private:
    RBEntry m_entry;
    EntryType entry{};

public:
    explicit IntrusiveRedBlackTreeNode() = default;
    friend class impl::IntrusiveRedBlackTreeImpl;

    [[nodiscard]] constexpr RBEntry& GetRBEntry() {
        return m_entry;
    }
    [[nodiscard]] constexpr const RBEntry& GetRBEntry() const {
        return m_entry;
    }

    constexpr void SetRBEntry(const RBEntry& entry) {
        m_entry = entry;
    }
    template <class, class, class>
    friend class IntrusiveRedBlackTree;
};
static_assert(sizeof(IntrusiveRedBlackTreeNode) ==
              3 * sizeof(void*) + std::max<size_t>(sizeof(freebsd::RBColor), 4));
#pragma pack(pop)

template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree;
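The node type above is intrusive: the tree's link storage lives inside the element itself, so insertion never allocates. A hedged sketch of how a client type would typically embed it; the `MyObject` name is hypothetical, not taken from this diff:

```cpp
// Hypothetical client type; the tree links live inside the object itself,
// so inserting it into the tree performs no heap allocation.
struct MyObject {
    int key{};
    Common::IntrusiveRedBlackTreeNode node{};
};
```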
@@ -51,17 +48,12 @@ class IntrusiveRedBlackTree;
namespace impl {

class IntrusiveRedBlackTreeImpl {
    YUZU_NON_COPYABLE(IntrusiveRedBlackTreeImpl);

private:
    template <class, class, class>
    friend class ::Common::IntrusiveRedBlackTree;

private:
    using RootType = freebsd::RBHead<IntrusiveRedBlackTreeNode>;

private:
    RootType m_root;
    using RootType = RBHead<IntrusiveRedBlackTreeNode>;
    RootType root;

public:
    template <bool Const>
@@ -89,150 +81,149 @@ public:
                                           IntrusiveRedBlackTreeImpl::reference>;

    private:
        pointer m_node;
        pointer node;

    public:
        constexpr explicit Iterator(pointer n) : m_node(n) {}
        explicit Iterator(pointer n) : node(n) {}

        constexpr bool operator==(const Iterator& rhs) const {
            return m_node == rhs.m_node;
        bool operator==(const Iterator& rhs) const {
            return this->node == rhs.node;
        }

        constexpr bool operator!=(const Iterator& rhs) const {
        bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
        }

        constexpr pointer operator->() const {
            return m_node;
        pointer operator->() const {
            return this->node;
        }

        constexpr reference operator*() const {
            return *m_node;
        reference operator*() const {
            return *this->node;
        }

        constexpr Iterator& operator++() {
            m_node = GetNext(m_node);
        Iterator& operator++() {
            this->node = GetNext(this->node);
            return *this;
        }

        constexpr Iterator& operator--() {
            m_node = GetPrev(m_node);
        Iterator& operator--() {
            this->node = GetPrev(this->node);
            return *this;
        }

        constexpr Iterator operator++(int) {
        Iterator operator++(int) {
            const Iterator it{*this};
            ++(*this);
            return it;
        }

        constexpr Iterator operator--(int) {
        Iterator operator--(int) {
            const Iterator it{*this};
            --(*this);
            return it;
        }

        constexpr operator Iterator<true>() const {
            return Iterator<true>(m_node);
        operator Iterator<true>() const {
            return Iterator<true>(this->node);
        }
    };

private:
    constexpr bool EmptyImpl() const {
        return m_root.IsEmpty();
    // Define accessors using RB_* functions.
    bool EmptyImpl() const {
        return root.IsEmpty();
    }

    constexpr IntrusiveRedBlackTreeNode* GetMinImpl() const {
        return freebsd::RB_MIN(const_cast<RootType&>(m_root));
    IntrusiveRedBlackTreeNode* GetMinImpl() const {
        return RB_MIN(const_cast<RootType*>(&root));
    }

    constexpr IntrusiveRedBlackTreeNode* GetMaxImpl() const {
        return freebsd::RB_MAX(const_cast<RootType&>(m_root));
    IntrusiveRedBlackTreeNode* GetMaxImpl() const {
        return RB_MAX(const_cast<RootType*>(&root));
    }

    constexpr IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
        return freebsd::RB_REMOVE(m_root, node);
    IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
        return RB_REMOVE(&root, node);
    }

public:
    static constexpr IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
        return freebsd::RB_NEXT(node);
    static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
        return RB_NEXT(node);
    }

    static constexpr IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
        return freebsd::RB_PREV(node);
    static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
        return RB_PREV(node);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNext(
        IntrusiveRedBlackTreeNode const* node) {
    static const IntrusiveRedBlackTreeNode* GetNext(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(
            GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node)));
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetPrev(
        IntrusiveRedBlackTreeNode const* node) {
    static const IntrusiveRedBlackTreeNode* GetPrev(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(
            GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node)));
    }

public:
    constexpr IntrusiveRedBlackTreeImpl() = default;
    constexpr IntrusiveRedBlackTreeImpl() {}

    // Iterator accessors.
    constexpr iterator begin() {
    iterator begin() {
        return iterator(this->GetMinImpl());
    }

    constexpr const_iterator begin() const {
    const_iterator begin() const {
        return const_iterator(this->GetMinImpl());
    }

    constexpr iterator end() {
    iterator end() {
        return iterator(static_cast<IntrusiveRedBlackTreeNode*>(nullptr));
    }

    constexpr const_iterator end() const {
    const_iterator end() const {
        return const_iterator(static_cast<const IntrusiveRedBlackTreeNode*>(nullptr));
    }

    constexpr const_iterator cbegin() const {
    const_iterator cbegin() const {
        return this->begin();
    }

    constexpr const_iterator cend() const {
    const_iterator cend() const {
        return this->end();
    }

    constexpr iterator iterator_to(reference ref) {
        return iterator(std::addressof(ref));
    iterator iterator_to(reference ref) {
        return iterator(&ref);
    }

    constexpr const_iterator iterator_to(const_reference ref) const {
        return const_iterator(std::addressof(ref));
    const_iterator iterator_to(const_reference ref) const {
        return const_iterator(&ref);
    }

    // Content management.
    constexpr bool empty() const {
    bool empty() const {
        return this->EmptyImpl();
    }

    constexpr reference back() {
    reference back() {
        return *this->GetMaxImpl();
    }

    constexpr const_reference back() const {
    const_reference back() const {
        return *this->GetMaxImpl();
    }

    constexpr reference front() {
    reference front() {
        return *this->GetMinImpl();
    }

    constexpr const_reference front() const {
    const_reference front() const {
        return *this->GetMinImpl();
    }

    constexpr iterator erase(iterator it) {
    iterator erase(iterator it) {
        auto cur = std::addressof(*it);
        auto next = GetNext(cur);
        this->RemoveImpl(cur);
@@ -243,16 +234,16 @@ public:
} // namespace impl

template <typename T>
concept HasRedBlackKeyType = requires {
    { std::is_same<typename T::RedBlackKeyType, void>::value } -> std::convertible_to<bool>;
concept HasLightCompareType = requires {
    { std::is_same<typename T::LightCompareType, void>::value } -> std::convertible_to<bool>;
};

namespace impl {

template <typename T, typename Default>
consteval auto* GetRedBlackKeyType() {
    if constexpr (HasRedBlackKeyType<T>) {
        return static_cast<typename T::RedBlackKeyType*>(nullptr);
consteval auto* GetLightCompareType() {
    if constexpr (HasLightCompareType<T>) {
        return static_cast<typename T::LightCompareType*>(nullptr);
    } else {
        return static_cast<Default*>(nullptr);
    }
@@ -261,17 +252,16 @@ namespace impl {
} // namespace impl

template <typename T, typename Default>
using RedBlackKeyType = std::remove_pointer_t<decltype(impl::GetRedBlackKeyType<T, Default>())>;
using LightCompareType = std::remove_pointer_t<decltype(impl::GetLightCompareType<T, Default>())>;

template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree {
    YUZU_NON_COPYABLE(IntrusiveRedBlackTree);

public:
    using ImplType = impl::IntrusiveRedBlackTreeImpl;

private:
    ImplType m_impl;
    ImplType impl{};

public:
    template <bool Const>
@@ -287,9 +277,9 @@ public:
    using iterator = Iterator<false>;
    using const_iterator = Iterator<true>;

    using key_type = RedBlackKeyType<Comparator, value_type>;
    using const_key_pointer = const key_type*;
    using const_key_reference = const key_type&;
    using light_value_type = LightCompareType<Comparator, value_type>;
    using const_light_pointer = const light_value_type*;
    using const_light_reference = const light_value_type&;

    template <bool Const>
    class Iterator {
@@ -308,201 +298,183 @@ public:
                                               IntrusiveRedBlackTree::reference>;

    private:
        ImplIterator m_impl;
        ImplIterator iterator;

    private:
        constexpr explicit Iterator(ImplIterator it) : m_impl(it) {}
        explicit Iterator(ImplIterator it) : iterator(it) {}

        constexpr explicit Iterator(typename ImplIterator::pointer p) : m_impl(p) {}
        explicit Iterator(typename std::conditional<Const, ImplType::const_iterator,
                                                    ImplType::iterator>::type::pointer ptr)
            : iterator(ptr) {}

        constexpr ImplIterator GetImplIterator() const {
            return m_impl;
        ImplIterator GetImplIterator() const {
            return this->iterator;
        }

    public:
        constexpr bool operator==(const Iterator& rhs) const {
            return m_impl == rhs.m_impl;
        bool operator==(const Iterator& rhs) const {
            return this->iterator == rhs.iterator;
        }

        constexpr bool operator!=(const Iterator& rhs) const {
        bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
        }

        constexpr pointer operator->() const {
            return Traits::GetParent(std::addressof(*m_impl));
        pointer operator->() const {
            return Traits::GetParent(std::addressof(*this->iterator));
        }

        constexpr reference operator*() const {
            return *Traits::GetParent(std::addressof(*m_impl));
        reference operator*() const {
            return *Traits::GetParent(std::addressof(*this->iterator));
        }

        constexpr Iterator& operator++() {
            ++m_impl;
        Iterator& operator++() {
            ++this->iterator;
            return *this;
        }

        constexpr Iterator& operator--() {
            --m_impl;
        Iterator& operator--() {
            --this->iterator;
            return *this;
        }

        constexpr Iterator operator++(int) {
        Iterator operator++(int) {
            const Iterator it{*this};
            ++m_impl;
            ++this->iterator;
            return it;
        }

        constexpr Iterator operator--(int) {
        Iterator operator--(int) {
            const Iterator it{*this};
            --m_impl;
            --this->iterator;
            return it;
        }

        constexpr operator Iterator<true>() const {
            return Iterator<true>(m_impl);
        operator Iterator<true>() const {
            return Iterator<true>(this->iterator);
        }
    };

private:
    static constexpr int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
                                     const IntrusiveRedBlackTreeNode* rhs) {
    static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
                           const IntrusiveRedBlackTreeNode* rhs) {
        return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs));
    }

    static constexpr int CompareKeyImpl(const_key_reference key,
                                        const IntrusiveRedBlackTreeNode* rhs) {
        return Comparator::Compare(key, *Traits::GetParent(rhs));
    static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) {
        return Comparator::Compare(*static_cast<const_light_pointer>(elm), *Traits::GetParent(rhs));
    }

    // Define accessors using RB_* functions.
    constexpr IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
        return freebsd::RB_INSERT(m_impl.m_root, node, CompareImpl);
    IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
        return RB_INSERT(&impl.root, node, CompareImpl);
    }

    constexpr IntrusiveRedBlackTreeNode* FindImpl(IntrusiveRedBlackTreeNode const* node) const {
        return freebsd::RB_FIND(const_cast<ImplType::RootType&>(m_impl.m_root),
                                const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
    IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const {
        return RB_FIND(const_cast<ImplType::RootType*>(&impl.root),
                       const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
    }

    constexpr IntrusiveRedBlackTreeNode* NFindImpl(IntrusiveRedBlackTreeNode const* node) const {
        return freebsd::RB_NFIND(const_cast<ImplType::RootType&>(m_impl.m_root),
                                 const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
    IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const {
        return RB_NFIND(const_cast<ImplType::RootType*>(&impl.root),
                        const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
    }

    constexpr IntrusiveRedBlackTreeNode* FindKeyImpl(const_key_reference key) const {
        return freebsd::RB_FIND_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
                                    CompareKeyImpl);
    IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const {
        return RB_FIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
                             static_cast<const void*>(lelm), LightCompareImpl);
    }

    constexpr IntrusiveRedBlackTreeNode* NFindKeyImpl(const_key_reference key) const {
        return freebsd::RB_NFIND_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
                                     CompareKeyImpl);
    }

    constexpr IntrusiveRedBlackTreeNode* FindExistingImpl(
        IntrusiveRedBlackTreeNode const* node) const {
        return freebsd::RB_FIND_EXISTING(const_cast<ImplType::RootType&>(m_impl.m_root),
                                         const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
    }

    constexpr IntrusiveRedBlackTreeNode* FindExistingKeyImpl(const_key_reference key) const {
        return freebsd::RB_FIND_EXISTING_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
                                             CompareKeyImpl);
    IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const {
        return RB_NFIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
                              static_cast<const void*>(lelm), LightCompareImpl);
    }

public:
    constexpr IntrusiveRedBlackTree() = default;

    // Iterator accessors.
    constexpr iterator begin() {
        return iterator(m_impl.begin());
    iterator begin() {
        return iterator(this->impl.begin());
    }

    constexpr const_iterator begin() const {
        return const_iterator(m_impl.begin());
    const_iterator begin() const {
        return const_iterator(this->impl.begin());
    }

    constexpr iterator end() {
        return iterator(m_impl.end());
    iterator end() {
        return iterator(this->impl.end());
    }

    constexpr const_iterator end() const {
        return const_iterator(m_impl.end());
    const_iterator end() const {
        return const_iterator(this->impl.end());
    }

    constexpr const_iterator cbegin() const {
    const_iterator cbegin() const {
        return this->begin();
    }

    constexpr const_iterator cend() const {
    const_iterator cend() const {
        return this->end();
    }

    constexpr iterator iterator_to(reference ref) {
        return iterator(m_impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
    iterator iterator_to(reference ref) {
        return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
    }

    constexpr const_iterator iterator_to(const_reference ref) const {
        return const_iterator(m_impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
    const_iterator iterator_to(const_reference ref) const {
        return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
    }

    // Content management.
    constexpr bool empty() const {
        return m_impl.empty();
    bool empty() const {
        return this->impl.empty();
    }

    constexpr reference back() {
        return *Traits::GetParent(std::addressof(m_impl.back()));
    reference back() {
        return *Traits::GetParent(std::addressof(this->impl.back()));
    }

    constexpr const_reference back() const {
        return *Traits::GetParent(std::addressof(m_impl.back()));
    const_reference back() const {
        return *Traits::GetParent(std::addressof(this->impl.back()));
    }

    constexpr reference front() {
        return *Traits::GetParent(std::addressof(m_impl.front()));
    reference front() {
        return *Traits::GetParent(std::addressof(this->impl.front()));
    }

    constexpr const_reference front() const {
        return *Traits::GetParent(std::addressof(m_impl.front()));
    const_reference front() const {
        return *Traits::GetParent(std::addressof(this->impl.front()));
    }

    constexpr iterator erase(iterator it) {
        return iterator(m_impl.erase(it.GetImplIterator()));
    iterator erase(iterator it) {
        return iterator(this->impl.erase(it.GetImplIterator()));
    }

    constexpr iterator insert(reference ref) {
    iterator insert(reference ref) {
        ImplType::pointer node = Traits::GetNode(std::addressof(ref));
        this->InsertImpl(node);
        return iterator(node);
    }

    constexpr iterator find(const_reference ref) const {
    iterator find(const_reference ref) const {
        return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref))));
    }

    constexpr iterator nfind(const_reference ref) const {
    iterator nfind(const_reference ref) const {
        return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref))));
    }

    constexpr iterator find_key(const_key_reference ref) const {
        return iterator(this->FindKeyImpl(ref));
    iterator find_light(const_light_reference ref) const {
        return iterator(this->FindLightImpl(std::addressof(ref)));
    }

    constexpr iterator nfind_key(const_key_reference ref) const {
        return iterator(this->NFindKeyImpl(ref));
    }

    constexpr iterator find_existing(const_reference ref) const {
        return iterator(this->FindExistingImpl(Traits::GetNode(std::addressof(ref))));
    }

    constexpr iterator find_existing_key(const_key_reference ref) const {
        return iterator(this->FindExistingKeyImpl(ref));
    iterator nfind_light(const_light_reference ref) const {
        return iterator(this->NFindLightImpl(std::addressof(ref)));
    }
};

template <auto T, class Derived = Common::impl::GetParentType<T>>
template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraits;

template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
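Tying the pieces together, a hedged usage sketch. It assumes the member-traits class exposes a `TreeType` alias like the one visible in the deferred-assert hunk below; the `Obj` names are illustrative, not from this diff:

```cpp
// Hedged sketch: wiring member traits and a comparator to the tree.
struct Obj {
    int key{};
    Common::IntrusiveRedBlackTreeNode node{};
};

struct ObjComparator {
    // Strict three-way compare, as CompareImpl above expects.
    static int Compare(const Obj& lhs, const Obj& rhs) {
        return (lhs.key < rhs.key) ? -1 : (lhs.key > rhs.key) ? 1 : 0;
    }
};

using ObjTraits = Common::IntrusiveRedBlackTreeMemberTraits<&Obj::node>;
using ObjTree = ObjTraits::TreeType<ObjComparator>;

// ObjTree tree; Obj a{.key = 1}; tree.insert(a);  // nodes are never copied
```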
@@ -526,16 +498,19 @@ private:
        return std::addressof(parent->*Member);
    }

    static Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return Common::GetParentPointer<Member, Derived>(node);
    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

    static Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
        return Common::GetParentPointer<Member, Derived>(node);
    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

private:
    static constexpr TypedStorage<Derived> DerivedStorage = {};
};

template <auto T, class Derived = Common::impl::GetParentType<T>>
template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert;

template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
@@ -546,6 +521,11 @@ public:
        IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraitsDeferredAssert, Comparator>;
    using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;

    static constexpr bool IsValid() {
        TypedStorage<Derived> DerivedStorage = {};
        return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage);
    }

private:
    template <class, class, class>
    friend class IntrusiveRedBlackTree;
@@ -560,36 +540,30 @@ private:
        return std::addressof(parent->*Member);
    }

    static Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return Common::GetParentPointer<Member, Derived>(node);
    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

    static Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
        return Common::GetParentPointer<Member, Derived>(node);
    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }
};

template <class Derived>
class alignas(void*) IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
public:
    using IntrusiveRedBlackTreeNode::IntrusiveRedBlackTreeNode;

    constexpr Derived* GetPrev() {
        return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode*>(
            impl::IntrusiveRedBlackTreeImpl::GetPrev(this)));
        return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
    }
    constexpr const Derived* GetPrev() const {
        return static_cast<const Derived*>(static_cast<const IntrusiveRedBlackTreeBaseNode*>(
            impl::IntrusiveRedBlackTreeImpl::GetPrev(this)));
        return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
    }

    constexpr Derived* GetNext() {
        return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode*>(
            impl::IntrusiveRedBlackTreeImpl::GetNext(this)));
        return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
    }
    constexpr const Derived* GetNext() const {
        return static_cast<const Derived*>(static_cast<const IntrusiveRedBlackTreeBaseNode*>(
            impl::IntrusiveRedBlackTreeImpl::GetNext(this)));
        return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
    }
};
@@ -607,22 +581,19 @@ private:
    friend class impl::IntrusiveRedBlackTreeImpl;

    static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
        return static_cast<IntrusiveRedBlackTreeNode*>(
            static_cast<IntrusiveRedBlackTreeBaseNode<Derived>*>(parent));
        return static_cast<IntrusiveRedBlackTreeNode*>(parent);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(
            static_cast<const IntrusiveRedBlackTreeBaseNode<Derived>*>(parent));
        return static_cast<const IntrusiveRedBlackTreeNode*>(parent);
    }

    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode<Derived>*>(node));
        return static_cast<Derived*>(node);
    }

    static constexpr Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
        return static_cast<const Derived*>(
            static_cast<const IntrusiveRedBlackTreeBaseNode<Derived>*>(node));
    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const Derived*>(node);
    }
};
@@ -276,9 +276,9 @@ private:
    ColorConsoleBackend color_console_backend{};
    FileBackend file_backend;

    std::jthread backend_thread;
    MPSCQueue<Entry, true> message_queue{};
    std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()};
    std::jthread backend_thread;
};
} // namespace
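The only change in this hunk is where `backend_thread` is declared. Member order matters here because destruction runs in reverse declaration order: declaring the `std::jthread` last means it is joined first, while the queue it drains still exists. A minimal illustration, with a placeholder standing in for the queue type:

```cpp
#include <thread>
#include <vector>

struct BackendSketch {
    std::vector<int> message_queue;  // placeholder for MPSCQueue<Entry, true>
    std::jthread backend_thread;     // declared last => destroyed (joined) first,
                                     // while message_queue is still alive
};
```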
@@ -55,50 +55,22 @@ void AppendBuildInfo(FieldCollection& fc) {

void AppendCPUInfo(FieldCollection& fc) {
#ifdef ARCHITECTURE_x86_64

    const auto& caps = Common::GetCPUCaps();
    const auto add_field = [&fc](std::string_view field_name, const auto& field_value) {
        fc.AddField(FieldType::UserSystem, field_name, field_value);
    };
    add_field("CPU_Model", caps.cpu_string);
    add_field("CPU_BrandString", caps.brand_string);

    add_field("CPU_Extension_x64_SSE", caps.sse);
    add_field("CPU_Extension_x64_SSE2", caps.sse2);
    add_field("CPU_Extension_x64_SSE3", caps.sse3);
    add_field("CPU_Extension_x64_SSSE3", caps.ssse3);
    add_field("CPU_Extension_x64_SSE41", caps.sse4_1);
    add_field("CPU_Extension_x64_SSE42", caps.sse4_2);

    add_field("CPU_Extension_x64_AVX", caps.avx);
    add_field("CPU_Extension_x64_AVX_VNNI", caps.avx_vnni);
    add_field("CPU_Extension_x64_AVX2", caps.avx2);

    // Skylake-X/SP level AVX512, for compatibility with the previous telemetry field
    add_field("CPU_Extension_x64_AVX512",
              caps.avx512f && caps.avx512cd && caps.avx512vl && caps.avx512dq && caps.avx512bw);

    add_field("CPU_Extension_x64_AVX512F", caps.avx512f);
    add_field("CPU_Extension_x64_AVX512CD", caps.avx512cd);
    add_field("CPU_Extension_x64_AVX512VL", caps.avx512vl);
    add_field("CPU_Extension_x64_AVX512DQ", caps.avx512dq);
    add_field("CPU_Extension_x64_AVX512BW", caps.avx512bw);
    add_field("CPU_Extension_x64_AVX512BITALG", caps.avx512bitalg);
    add_field("CPU_Extension_x64_AVX512VBMI", caps.avx512vbmi);

    add_field("CPU_Extension_x64_AES", caps.aes);
    add_field("CPU_Extension_x64_BMI1", caps.bmi1);
    add_field("CPU_Extension_x64_BMI2", caps.bmi2);
    add_field("CPU_Extension_x64_F16C", caps.f16c);
    add_field("CPU_Extension_x64_FMA", caps.fma);
    add_field("CPU_Extension_x64_FMA4", caps.fma4);
    add_field("CPU_Extension_x64_GFNI", caps.gfni);
    add_field("CPU_Extension_x64_INVARIANT_TSC", caps.invariant_tsc);
    add_field("CPU_Extension_x64_LZCNT", caps.lzcnt);
    add_field("CPU_Extension_x64_MOVBE", caps.movbe);
    add_field("CPU_Extension_x64_PCLMULQDQ", caps.pclmulqdq);
    add_field("CPU_Extension_x64_POPCNT", caps.popcnt);
    add_field("CPU_Extension_x64_SHA", caps.sha);
    fc.AddField(FieldType::UserSystem, "CPU_Model", Common::GetCPUCaps().cpu_string);
    fc.AddField(FieldType::UserSystem, "CPU_BrandString", Common::GetCPUCaps().brand_string);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AES", Common::GetCPUCaps().aes);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AVX", Common::GetCPUCaps().avx);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AVX2", Common::GetCPUCaps().avx2);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AVX512", Common::GetCPUCaps().avx512);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_BMI1", Common::GetCPUCaps().bmi1);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_BMI2", Common::GetCPUCaps().bmi2);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_FMA", Common::GetCPUCaps().fma);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_FMA4", Common::GetCPUCaps().fma4);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE", Common::GetCPUCaps().sse);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE2", Common::GetCPUCaps().sse2);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE3", Common::GetCPUCaps().sse3);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSSE3", Common::GetCPUCaps().ssse3);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE41", Common::GetCPUCaps().sse4_1);
    fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE42", Common::GetCPUCaps().sse4_2);
#else
    fc.AddField(FieldType::UserSystem, "CPU_Model", "Other");
#endif
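The `add_field` lambda above exists purely to factor out the repeated `(FieldType::UserSystem, name, value)` triple visible in the second half of the hunk. A standalone sketch of the same refactoring pattern; all names below are illustrative, not yuzu APIs:

```cpp
#include <string_view>
#include <utility>
#include <vector>

struct FieldCollectionSketch {
    std::vector<std::pair<std::string_view, bool>> fields;
    void AddField(std::string_view name, bool value) {
        fields.emplace_back(name, value);
    }
};

void AppendFlags(FieldCollectionSketch& fc) {
    // One lambda carries the boilerplate; each call site names only what varies.
    const auto add_field = [&fc](std::string_view name, bool value) { fc.AddField(name, value); };
    add_field("CPU_Extension_x64_SSE", true);
    add_field("CPU_Extension_x64_AVX", false);
}
```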
@@ -8,7 +8,6 @@
#include <map>
#include <memory>
#include <string>
#include <string_view>
#include "common/common_funcs.h"
#include "common/common_types.h"
@@ -56,8 +55,8 @@ class Field : public FieldInterface {
public:
    YUZU_NON_COPYABLE(Field);

    Field(FieldType type_, std::string_view name_, T value_)
        : name(name_), type(type_), value(std::move(value_)) {}
    Field(FieldType type_, std::string name_, T value_)
        : name(std::move(name_)), type(type_), value(std::move(value_)) {}

    ~Field() override = default;
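The constructor change above swaps a `std::string_view` parameter for an owning `std::string`. The difference is lifetime: a stored view dangles if the caller's buffer is destroyed first, while an owned string keeps its own copy. A small illustration with hypothetical types:

```cpp
#include <string>
#include <string_view>

struct ViewField { std::string_view name; };  // dangles if the source string dies
struct OwnedField { std::string name; };      // safe: keeps its own copy

OwnedField make_safe(std::string_view n) {
    return OwnedField{std::string(n)};  // copy happens once, at construction
}
```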
@@ -124,7 +123,7 @@ public:
     * @param value Value for the field to add.
     */
    template <typename T>
    void AddField(FieldType type, std::string_view name, T value) {
    void AddField(FieldType type, const char* name, T value) {
        return AddField(std::make_unique<Field<T>>(type, name, std::move(value)));
    }
@@ -43,445 +43,246 @@
 * The maximum height of a red-black tree is 2lg (n+1).
 */

namespace Common::freebsd {
#include "common/assert.h"

enum class RBColor {
    RB_BLACK = 0,
    RB_RED = 1,
namespace Common {
template <typename T>
class RBHead {
public:
    [[nodiscard]] T* Root() {
        return rbh_root;
    }

    [[nodiscard]] const T* Root() const {
        return rbh_root;
    }

    void SetRoot(T* root) {
        rbh_root = root;
    }

    [[nodiscard]] bool IsEmpty() const {
        return Root() == nullptr;
    }

private:
    T* rbh_root = nullptr;
};

enum class EntryColor {
    Black,
    Red,
};

#pragma pack(push, 4)
template <typename T>
class RBEntry {
public:
    constexpr RBEntry() = default;

    [[nodiscard]] constexpr T* Left() {
        return m_rbe_left;
    }
    [[nodiscard]] constexpr const T* Left() const {
        return m_rbe_left;
    [[nodiscard]] T* Left() {
        return rbe_left;
    }

    constexpr void SetLeft(T* e) {
        m_rbe_left = e;
    [[nodiscard]] const T* Left() const {
        return rbe_left;
    }

    [[nodiscard]] constexpr T* Right() {
        return m_rbe_right;
    }
    [[nodiscard]] constexpr const T* Right() const {
        return m_rbe_right;
    void SetLeft(T* left) {
        rbe_left = left;
    }

    constexpr void SetRight(T* e) {
        m_rbe_right = e;
    [[nodiscard]] T* Right() {
        return rbe_right;
    }

    [[nodiscard]] constexpr T* Parent() {
        return m_rbe_parent;
    }
    [[nodiscard]] constexpr const T* Parent() const {
        return m_rbe_parent;
    [[nodiscard]] const T* Right() const {
        return rbe_right;
    }

    constexpr void SetParent(T* e) {
        m_rbe_parent = e;
    void SetRight(T* right) {
        rbe_right = right;
    }

    [[nodiscard]] constexpr bool IsBlack() const {
        return m_rbe_color == RBColor::RB_BLACK;
    }
    [[nodiscard]] constexpr bool IsRed() const {
        return m_rbe_color == RBColor::RB_RED;
    }
    [[nodiscard]] constexpr RBColor Color() const {
        return m_rbe_color;
    [[nodiscard]] T* Parent() {
        return rbe_parent;
    }

    constexpr void SetColor(RBColor c) {
        m_rbe_color = c;
    [[nodiscard]] const T* Parent() const {
        return rbe_parent;
    }

    void SetParent(T* parent) {
        rbe_parent = parent;
    }

    [[nodiscard]] bool IsBlack() const {
        return rbe_color == EntryColor::Black;
    }

    [[nodiscard]] bool IsRed() const {
        return rbe_color == EntryColor::Red;
    }

    [[nodiscard]] EntryColor Color() const {
        return rbe_color;
    }

    void SetColor(EntryColor color) {
        rbe_color = color;
    }

private:
    T* m_rbe_left{};
    T* m_rbe_right{};
    T* m_rbe_parent{};
    RBColor m_rbe_color{RBColor::RB_BLACK};
};
#pragma pack(pop)

template <typename T>
struct CheckRBEntry {
    static constexpr bool value = false;
};
template <typename T>
struct CheckRBEntry<RBEntry<T>> {
    static constexpr bool value = true;
    T* rbe_left = nullptr;
    T* rbe_right = nullptr;
    T* rbe_parent = nullptr;
    EntryColor rbe_color{};
};

template <typename T>
concept IsRBEntry = CheckRBEntry<T>::value;

template <typename T>
concept HasRBEntry = requires(T& t, const T& ct) {
    { t.GetRBEntry() } -> std::same_as<RBEntry<T>&>;
    { ct.GetRBEntry() } -> std::same_as<const RBEntry<T>&>;
};

template <typename T>
requires HasRBEntry<T>
class RBHead {
private:
    T* m_rbh_root = nullptr;

public:
    [[nodiscard]] constexpr T* Root() {
        return m_rbh_root;
    }
    [[nodiscard]] constexpr const T* Root() const {
        return m_rbh_root;
    }
    constexpr void SetRoot(T* root) {
        m_rbh_root = root;
    }

    [[nodiscard]] constexpr bool IsEmpty() const {
        return this->Root() == nullptr;
    }
};
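Both sides of this diff require the node to hand out its embedded entry: one side via `GetEntry()`, the other via `GetRBEntry()` guarded by the `HasRBEntry` concept. A hedged sketch of the smallest type satisfying the `GetRBEntry()` flavour, assuming the `Common::freebsd` spelling used on that side:

```cpp
// Minimal node type the concept-constrained RB_* helpers can operate on.
class MyNode {
public:
    constexpr Common::freebsd::RBEntry<MyNode>& GetRBEntry() {
        return m_entry;
    }
    constexpr const Common::freebsd::RBEntry<MyNode>& GetRBEntry() const {
        return m_entry;
    }

private:
    Common::freebsd::RBEntry<MyNode> m_entry{};  // left/right/parent/color storage
};
```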
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr RBEntry<T>& RB_ENTRY(T* t) {
    return t->GetRBEntry();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const RBEntry<T>& RB_ENTRY(const T* t) {
    return t->GetRBEntry();
template <typename Node>
[[nodiscard]] RBEntry<Node>& RB_ENTRY(Node* node) {
    return node->GetEntry();
}

template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr T* RB_LEFT(T* t) {
    return RB_ENTRY(t).Left();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const T* RB_LEFT(const T* t) {
    return RB_ENTRY(t).Left();
template <typename Node>
[[nodiscard]] const RBEntry<Node>& RB_ENTRY(const Node* node) {
    return node->GetEntry();
}

template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr T* RB_RIGHT(T* t) {
    return RB_ENTRY(t).Right();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const T* RB_RIGHT(const T* t) {
    return RB_ENTRY(t).Right();
template <typename Node>
[[nodiscard]] Node* RB_PARENT(Node* node) {
    return RB_ENTRY(node).Parent();
}

template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr T* RB_PARENT(T* t) {
    return RB_ENTRY(t).Parent();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const T* RB_PARENT(const T* t) {
    return RB_ENTRY(t).Parent();
template <typename Node>
[[nodiscard]] const Node* RB_PARENT(const Node* node) {
    return RB_ENTRY(node).Parent();
}

template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET_LEFT(T* t, T* e) {
    RB_ENTRY(t).SetLeft(e);
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET_RIGHT(T* t, T* e) {
    RB_ENTRY(t).SetRight(e);
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET_PARENT(T* t, T* e) {
    RB_ENTRY(t).SetParent(e);
template <typename Node>
void RB_SET_PARENT(Node* node, Node* parent) {
    return RB_ENTRY(node).SetParent(parent);
}

template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr bool RB_IS_BLACK(const T* t) {
    return RB_ENTRY(t).IsBlack();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr bool RB_IS_RED(const T* t) {
    return RB_ENTRY(t).IsRed();
template <typename Node>
[[nodiscard]] Node* RB_LEFT(Node* node) {
    return RB_ENTRY(node).Left();
}

template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr RBColor RB_COLOR(const T* t) {
    return RB_ENTRY(t).Color();
template <typename Node>
[[nodiscard]] const Node* RB_LEFT(const Node* node) {
    return RB_ENTRY(node).Left();
}

template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET_COLOR(T* t, RBColor c) {
    RB_ENTRY(t).SetColor(c);
template <typename Node>
void RB_SET_LEFT(Node* node, Node* left) {
    return RB_ENTRY(node).SetLeft(left);
}

template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET(T* elm, T* parent) {
    auto& rb_entry = RB_ENTRY(elm);
    rb_entry.SetParent(parent);
    rb_entry.SetLeft(nullptr);
    rb_entry.SetRight(nullptr);
    rb_entry.SetColor(RBColor::RB_RED);
template <typename Node>
[[nodiscard]] Node* RB_RIGHT(Node* node) {
    return RB_ENTRY(node).Right();
}

template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET_BLACKRED(T* black, T* red) {
    RB_SET_COLOR(black, RBColor::RB_BLACK);
    RB_SET_COLOR(red, RBColor::RB_RED);
template <typename Node>
[[nodiscard]] const Node* RB_RIGHT(const Node* node) {
    return RB_ENTRY(node).Right();
}

template <typename T>
requires HasRBEntry<T>
constexpr void RB_ROTATE_LEFT(RBHead<T>& head, T* elm, T*& tmp) {
template <typename Node>
void RB_SET_RIGHT(Node* node, Node* right) {
    return RB_ENTRY(node).SetRight(right);
}

template <typename Node>
[[nodiscard]] bool RB_IS_BLACK(const Node* node) {
    return RB_ENTRY(node).IsBlack();
}

template <typename Node>
[[nodiscard]] bool RB_IS_RED(const Node* node) {
    return RB_ENTRY(node).IsRed();
}

template <typename Node>
[[nodiscard]] EntryColor RB_COLOR(const Node* node) {
    return RB_ENTRY(node).Color();
}

template <typename Node>
void RB_SET_COLOR(Node* node, EntryColor color) {
    return RB_ENTRY(node).SetColor(color);
}

template <typename Node>
void RB_SET(Node* node, Node* parent) {
    auto& entry = RB_ENTRY(node);
    entry.SetParent(parent);
    entry.SetLeft(nullptr);
    entry.SetRight(nullptr);
    entry.SetColor(EntryColor::Red);
}

template <typename Node>
void RB_SET_BLACKRED(Node* black, Node* red) {
    RB_SET_COLOR(black, EntryColor::Black);
    RB_SET_COLOR(red, EntryColor::Red);
}
template <typename Node>
void RB_ROTATE_LEFT(RBHead<Node>* head, Node* elm, Node*& tmp) {
    tmp = RB_RIGHT(elm);
    if (RB_SET_RIGHT(elm, RB_LEFT(tmp)); RB_RIGHT(elm) != nullptr) {
    RB_SET_RIGHT(elm, RB_LEFT(tmp));
    if (RB_RIGHT(elm) != nullptr) {
        RB_SET_PARENT(RB_LEFT(tmp), elm);
    }

    if (RB_SET_PARENT(tmp, RB_PARENT(elm)); RB_PARENT(tmp) != nullptr) {
    RB_SET_PARENT(tmp, RB_PARENT(elm));
    if (RB_PARENT(tmp) != nullptr) {
        if (elm == RB_LEFT(RB_PARENT(elm))) {
            RB_SET_LEFT(RB_PARENT(elm), tmp);
        } else {
            RB_SET_RIGHT(RB_PARENT(elm), tmp);
        }
    } else {
        head.SetRoot(tmp);
        head->SetRoot(tmp);
    }

    RB_SET_LEFT(tmp, elm);
    RB_SET_PARENT(elm, tmp);
}

template <typename T>
requires HasRBEntry<T>
constexpr void RB_ROTATE_RIGHT(RBHead<T>& head, T* elm, T*& tmp) {
template <typename Node>
void RB_ROTATE_RIGHT(RBHead<Node>* head, Node* elm, Node*& tmp) {
    tmp = RB_LEFT(elm);
    if (RB_SET_LEFT(elm, RB_RIGHT(tmp)); RB_LEFT(elm) != nullptr) {
    RB_SET_LEFT(elm, RB_RIGHT(tmp));
    if (RB_LEFT(elm) != nullptr) {
        RB_SET_PARENT(RB_RIGHT(tmp), elm);
    }

    if (RB_SET_PARENT(tmp, RB_PARENT(elm)); RB_PARENT(tmp) != nullptr) {
    RB_SET_PARENT(tmp, RB_PARENT(elm));
    if (RB_PARENT(tmp) != nullptr) {
        if (elm == RB_LEFT(RB_PARENT(elm))) {
            RB_SET_LEFT(RB_PARENT(elm), tmp);
        } else {
            RB_SET_RIGHT(RB_PARENT(elm), tmp);
        }
    } else {
        head.SetRoot(tmp);
        head->SetRoot(tmp);
    }

    RB_SET_RIGHT(tmp, elm);
    RB_SET_PARENT(elm, tmp);
}

template <typename T>
requires HasRBEntry<T>
constexpr void RB_REMOVE_COLOR(RBHead<T>& head, T* parent, T* elm) {
    T* tmp;
    while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head.Root()) {
        if (RB_LEFT(parent) == elm) {
            tmp = RB_RIGHT(parent);
            if (RB_IS_RED(tmp)) {
                RB_SET_BLACKRED(tmp, parent);
                RB_ROTATE_LEFT(head, parent, tmp);
                tmp = RB_RIGHT(parent);
            }
template <typename Node>
void RB_INSERT_COLOR(RBHead<Node>* head, Node* elm) {
    Node* parent = nullptr;
    Node* tmp = nullptr;

            if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
                (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
                RB_SET_COLOR(tmp, RBColor::RB_RED);
                elm = parent;
                parent = RB_PARENT(elm);
            } else {
                if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) {
                    T* oleft;
                    if ((oleft = RB_LEFT(tmp)) != nullptr) {
                        RB_SET_COLOR(oleft, RBColor::RB_BLACK);
                    }

                    RB_SET_COLOR(tmp, RBColor::RB_RED);
                    RB_ROTATE_RIGHT(head, tmp, oleft);
                    tmp = RB_RIGHT(parent);
                }

                RB_SET_COLOR(tmp, RB_COLOR(parent));
                RB_SET_COLOR(parent, RBColor::RB_BLACK);
                if (RB_RIGHT(tmp)) {
                    RB_SET_COLOR(RB_RIGHT(tmp), RBColor::RB_BLACK);
                }

                RB_ROTATE_LEFT(head, parent, tmp);
                elm = head.Root();
                break;
            }
        } else {
            tmp = RB_LEFT(parent);
            if (RB_IS_RED(tmp)) {
                RB_SET_BLACKRED(tmp, parent);
                RB_ROTATE_RIGHT(head, parent, tmp);
                tmp = RB_LEFT(parent);
            }

            if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
                (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
                RB_SET_COLOR(tmp, RBColor::RB_RED);
                elm = parent;
                parent = RB_PARENT(elm);
            } else {
                if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) {
                    T* oright;
                    if ((oright = RB_RIGHT(tmp)) != nullptr) {
                        RB_SET_COLOR(oright, RBColor::RB_BLACK);
                    }

                    RB_SET_COLOR(tmp, RBColor::RB_RED);
                    RB_ROTATE_LEFT(head, tmp, oright);
                    tmp = RB_LEFT(parent);
                }

                RB_SET_COLOR(tmp, RB_COLOR(parent));
                RB_SET_COLOR(parent, RBColor::RB_BLACK);

                if (RB_LEFT(tmp)) {
                    RB_SET_COLOR(RB_LEFT(tmp), RBColor::RB_BLACK);
                }

                RB_ROTATE_RIGHT(head, parent, tmp);
                elm = head.Root();
                break;
            }
        }
    }

    if (elm) {
        RB_SET_COLOR(elm, RBColor::RB_BLACK);
    }
}
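Both `RB_ROTATE_LEFT` variants above implement the textbook left rotation: the right child is lifted into the subtree root while the in-order sequence of keys is preserved. A stripped-down sketch, omitting the parent pointers and the tree head that the real helpers also update:

```cpp
// Simplified left rotation over a bare binary node (illustration only).
struct N {
    N* left{};
    N* right{};
};

// Rotate the subtree rooted at *root left around its right child.
void rotate_left(N*& root) {
    N* pivot = root->right;     // pivot moves up
    root->right = pivot->left;  // pivot's left subtree becomes root's right
    pivot->left = root;         // old root becomes pivot's left child
    root = pivot;               // pivot is the new subtree root
}
```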
template <typename T>
requires HasRBEntry<T>
constexpr T* RB_REMOVE(RBHead<T>& head, T* elm) {
    T* child = nullptr;
    T* parent = nullptr;
    T* old = elm;
    RBColor color = RBColor::RB_BLACK;

    if (RB_LEFT(elm) == nullptr) {
        child = RB_RIGHT(elm);
    } else if (RB_RIGHT(elm) == nullptr) {
        child = RB_LEFT(elm);
    } else {
        T* left;
        elm = RB_RIGHT(elm);
        while ((left = RB_LEFT(elm)) != nullptr) {
            elm = left;
        }

        child = RB_RIGHT(elm);
        parent = RB_PARENT(elm);
        color = RB_COLOR(elm);

        if (child) {
            RB_SET_PARENT(child, parent);
        }

        if (parent) {
            if (RB_LEFT(parent) == elm) {
                RB_SET_LEFT(parent, child);
            } else {
                RB_SET_RIGHT(parent, child);
            }
        } else {
            head.SetRoot(child);
        }

        if (RB_PARENT(elm) == old) {
            parent = elm;
        }

        elm->SetRBEntry(old->GetRBEntry());

        if (RB_PARENT(old)) {
            if (RB_LEFT(RB_PARENT(old)) == old) {
                RB_SET_LEFT(RB_PARENT(old), elm);
            } else {
                RB_SET_RIGHT(RB_PARENT(old), elm);
            }
        } else {
            head.SetRoot(elm);
        }

        RB_SET_PARENT(RB_LEFT(old), elm);

        if (RB_RIGHT(old)) {
            RB_SET_PARENT(RB_RIGHT(old), elm);
        }

        if (parent) {
            left = parent;
        }

        if (color == RBColor::RB_BLACK) {
            RB_REMOVE_COLOR(head, parent, child);
        }

        return old;
    }

    parent = RB_PARENT(elm);
    color = RB_COLOR(elm);

    if (child) {
        RB_SET_PARENT(child, parent);
    }
    if (parent) {
        if (RB_LEFT(parent) == elm) {
            RB_SET_LEFT(parent, child);
        } else {
            RB_SET_RIGHT(parent, child);
        }
    } else {
        head.SetRoot(child);
    }

    if (color == RBColor::RB_BLACK) {
        RB_REMOVE_COLOR(head, parent, child);
    }

    return old;
}

template <typename T>
requires HasRBEntry<T>
constexpr void RB_INSERT_COLOR(RBHead<T>& head, T* elm) {
    T *parent = nullptr, *tmp = nullptr;
    while ((parent = RB_PARENT(elm)) != nullptr && RB_IS_RED(parent)) {
        T* gparent = RB_PARENT(parent);
        Node* gparent = RB_PARENT(parent);
        if (parent == RB_LEFT(gparent)) {
            tmp = RB_RIGHT(gparent);
            if (tmp && RB_IS_RED(tmp)) {
                RB_SET_COLOR(tmp, RBColor::RB_BLACK);
                RB_SET_COLOR(tmp, EntryColor::Black);
                RB_SET_BLACKRED(parent, gparent);
                elm = gparent;
                continue;
@@ -499,7 +300,7 @@ constexpr void RB_INSERT_COLOR(RBHead<T>& head, T* elm) {
        } else {
            tmp = RB_LEFT(gparent);
            if (tmp && RB_IS_RED(tmp)) {
                RB_SET_COLOR(tmp, RBColor::RB_BLACK);
                RB_SET_COLOR(tmp, EntryColor::Black);
                RB_SET_BLACKRED(parent, gparent);
                elm = gparent;
                continue;
@@ -517,14 +318,194 @@ constexpr void RB_INSERT_COLOR(RBHead<T>& head, T* elm) {
        }
    }

    RB_SET_COLOR(head.Root(), RBColor::RB_BLACK);
    RB_SET_COLOR(head->Root(), EntryColor::Black);
}

template <typename T, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_INSERT(RBHead<T>& head, T* elm, Compare cmp) {
    T* parent = nullptr;
    T* tmp = head.Root();
template <typename Node>
void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
    Node* tmp;
    while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head->Root() && parent != nullptr) {
        if (RB_LEFT(parent) == elm) {
            tmp = RB_RIGHT(parent);
            if (!tmp) {
                ASSERT_MSG(false, "tmp is invalid!");
                break;
            }
            if (RB_IS_RED(tmp)) {
                RB_SET_BLACKRED(tmp, parent);
                RB_ROTATE_LEFT(head, parent, tmp);
                tmp = RB_RIGHT(parent);
            }

            if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
                (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
                RB_SET_COLOR(tmp, EntryColor::Red);
                elm = parent;
                parent = RB_PARENT(elm);
            } else {
                if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) {
                    Node* oleft;
                    if ((oleft = RB_LEFT(tmp)) != nullptr) {
                        RB_SET_COLOR(oleft, EntryColor::Black);
                    }

                    RB_SET_COLOR(tmp, EntryColor::Red);
                    RB_ROTATE_RIGHT(head, tmp, oleft);
                    tmp = RB_RIGHT(parent);
                }

                RB_SET_COLOR(tmp, RB_COLOR(parent));
                RB_SET_COLOR(parent, EntryColor::Black);
                if (RB_RIGHT(tmp)) {
                    RB_SET_COLOR(RB_RIGHT(tmp), EntryColor::Black);
                }

                RB_ROTATE_LEFT(head, parent, tmp);
                elm = head->Root();
                break;
            }
        } else {
            tmp = RB_LEFT(parent);
            if (RB_IS_RED(tmp)) {
                RB_SET_BLACKRED(tmp, parent);
                RB_ROTATE_RIGHT(head, parent, tmp);
                tmp = RB_LEFT(parent);
            }

            if (!tmp) {
                ASSERT_MSG(false, "tmp is invalid!");
                break;
            }

            if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
                (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
                RB_SET_COLOR(tmp, EntryColor::Red);
                elm = parent;
                parent = RB_PARENT(elm);
            } else {
                if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) {
                    Node* oright;
                    if ((oright = RB_RIGHT(tmp)) != nullptr) {
                        RB_SET_COLOR(oright, EntryColor::Black);
                    }

                    RB_SET_COLOR(tmp, EntryColor::Red);
                    RB_ROTATE_LEFT(head, tmp, oright);
                    tmp = RB_LEFT(parent);
                }

                RB_SET_COLOR(tmp, RB_COLOR(parent));
                RB_SET_COLOR(parent, EntryColor::Black);

                if (RB_LEFT(tmp)) {
                    RB_SET_COLOR(RB_LEFT(tmp), EntryColor::Black);
                }

                RB_ROTATE_RIGHT(head, parent, tmp);
                elm = head->Root();
                break;
            }
        }
    }

    if (elm) {
        RB_SET_COLOR(elm, EntryColor::Black);
    }
}
template <typename Node>
Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
    Node* child = nullptr;
    Node* parent = nullptr;
    Node* old = elm;
    EntryColor color{};

    const auto finalize = [&] {
        if (color == EntryColor::Black) {
            RB_REMOVE_COLOR(head, parent, child);
        }

        return old;
    };

    if (RB_LEFT(elm) == nullptr) {
        child = RB_RIGHT(elm);
    } else if (RB_RIGHT(elm) == nullptr) {
        child = RB_LEFT(elm);
    } else {
        Node* left;
        elm = RB_RIGHT(elm);
        while ((left = RB_LEFT(elm)) != nullptr) {
            elm = left;
        }

        child = RB_RIGHT(elm);
        parent = RB_PARENT(elm);
        color = RB_COLOR(elm);

        if (child) {
            RB_SET_PARENT(child, parent);
        }
        if (parent) {
            if (RB_LEFT(parent) == elm) {
                RB_SET_LEFT(parent, child);
            } else {
                RB_SET_RIGHT(parent, child);
            }
        } else {
            head->SetRoot(child);
        }

        if (RB_PARENT(elm) == old) {
            parent = elm;
        }

        elm->SetEntry(old->GetEntry());

        if (RB_PARENT(old)) {
            if (RB_LEFT(RB_PARENT(old)) == old) {
                RB_SET_LEFT(RB_PARENT(old), elm);
            } else {
                RB_SET_RIGHT(RB_PARENT(old), elm);
            }
        } else {
            head->SetRoot(elm);
        }
        RB_SET_PARENT(RB_LEFT(old), elm);
        if (RB_RIGHT(old)) {
            RB_SET_PARENT(RB_RIGHT(old), elm);
        }
        if (parent) {
            left = parent;
        }

        return finalize();
    }

    parent = RB_PARENT(elm);
    color = RB_COLOR(elm);

    if (child) {
        RB_SET_PARENT(child, parent);
    }
    if (parent) {
        if (RB_LEFT(parent) == elm) {
            RB_SET_LEFT(parent, child);
        } else {
            RB_SET_RIGHT(parent, child);
        }
    } else {
        head->SetRoot(child);
    }

    return finalize();
}

// Inserts a node into the RB tree
template <typename Node, typename CompareFunction>
Node* RB_INSERT(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
    Node* parent = nullptr;
    Node* tmp = head->Root();
    int comp = 0;

    while (tmp) {
@@ -548,17 +529,17 @@ constexpr T* RB_INSERT(RBHead<T>& head, T* elm, Compare cmp) {
            RB_SET_RIGHT(parent, elm);
        }
    } else {
        head.SetRoot(elm);
        head->SetRoot(elm);
    }

    RB_INSERT_COLOR(head, elm);
    return nullptr;
}

template <typename T, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_FIND(RBHead<T>& head, T* elm, Compare cmp) {
    T* tmp = head.Root();
// Finds the node with the same key as elm
template <typename Node, typename CompareFunction>
Node* RB_FIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
    Node* tmp = head->Root();

    while (tmp) {
        const int comp = cmp(elm, tmp);
@@ -574,11 +555,11 @@ constexpr T* RB_FIND(RBHead<T>& head, T* elm, Compare cmp) {
    return nullptr;
}

template <typename T, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_NFIND(RBHead<T>& head, T* elm, Compare cmp) {
    T* tmp = head.Root();
    T* res = nullptr;
// Finds the first node greater than or equal to the search key
template <typename Node, typename CompareFunction>
Node* RB_NFIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
    Node* tmp = head->Root();
    Node* res = nullptr;

    while (tmp) {
        const int comp = cmp(elm, tmp);
@@ -595,13 +576,13 @@ constexpr T* RB_NFIND(RBHead<T>& head, T* elm, Compare cmp) {
    return res;
}

template <typename T, typename U, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_FIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
    T* tmp = head.Root();
// Finds the node with the same key as lelm
template <typename Node, typename CompareFunction>
Node* RB_FIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
    Node* tmp = head->Root();

    while (tmp) {
        const int comp = cmp(key, tmp);
        const int comp = lcmp(lelm, tmp);
        if (comp < 0) {
            tmp = RB_LEFT(tmp);
        } else if (comp > 0) {
@@ -614,14 +595,14 @@ constexpr T* RB_FIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
    return nullptr;
}

template <typename T, typename U, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_NFIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
    T* tmp = head.Root();
    T* res = nullptr;
// Finds the first node greater than or equal to the search key
template <typename Node, typename CompareFunction>
Node* RB_NFIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
    Node* tmp = head->Root();
    Node* res = nullptr;

    while (tmp) {
        const int comp = cmp(key, tmp);
        const int comp = lcmp(lelm, tmp);
        if (comp < 0) {
            res = tmp;
            tmp = RB_LEFT(tmp);
@@ -635,43 +616,8 @@ constexpr T* RB_NFIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
    return res;
}

template <typename T, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_FIND_EXISTING(RBHead<T>& head, T* elm, Compare cmp) {
    T* tmp = head.Root();

    while (true) {
        const int comp = cmp(elm, tmp);
        if (comp < 0) {
            tmp = RB_LEFT(tmp);
        } else if (comp > 0) {
            tmp = RB_RIGHT(tmp);
        } else {
            return tmp;
        }
    }
}

template <typename T, typename U, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_FIND_EXISTING_KEY(RBHead<T>& head, const U& key, Compare cmp) {
    T* tmp = head.Root();

    while (true) {
        const int comp = cmp(key, tmp);
        if (comp < 0) {
            tmp = RB_LEFT(tmp);
        } else if (comp > 0) {
            tmp = RB_RIGHT(tmp);
        } else {
            return tmp;
        }
    }
}

template <typename T>
requires HasRBEntry<T>
constexpr T* RB_NEXT(T* elm) {
template <typename Node>
Node* RB_NEXT(Node* elm) {
    if (RB_RIGHT(elm)) {
        elm = RB_RIGHT(elm);
        while (RB_LEFT(elm)) {
@@ -690,9 +636,8 @@ constexpr T* RB_NEXT(T* elm) {
|
||||
return elm;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires HasRBEntry<T>
|
||||
constexpr T* RB_PREV(T* elm) {
|
||||
template <typename Node>
|
||||
Node* RB_PREV(Node* elm) {
|
||||
if (RB_LEFT(elm)) {
|
||||
elm = RB_LEFT(elm);
|
||||
while (RB_RIGHT(elm)) {
|
||||
@@ -711,32 +656,30 @@ constexpr T* RB_PREV(T* elm) {
|
||||
return elm;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires HasRBEntry<T>
|
||||
constexpr T* RB_MIN(RBHead<T>& head) {
|
||||
T* tmp = head.Root();
|
||||
T* parent = nullptr;
|
||||
template <typename Node>
|
||||
Node* RB_MINMAX(RBHead<Node>* head, bool is_min) {
|
||||
Node* tmp = head->Root();
|
||||
Node* parent = nullptr;
|
||||
|
||||
while (tmp) {
|
||||
parent = tmp;
|
||||
tmp = RB_LEFT(tmp);
|
||||
if (is_min) {
|
||||
tmp = RB_LEFT(tmp);
|
||||
} else {
|
||||
tmp = RB_RIGHT(tmp);
|
||||
}
|
||||
}
|
||||
|
||||
return parent;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires HasRBEntry<T>
|
||||
constexpr T* RB_MAX(RBHead<T>& head) {
|
||||
T* tmp = head.Root();
|
||||
T* parent = nullptr;
|
||||
|
||||
while (tmp) {
|
||||
parent = tmp;
|
||||
tmp = RB_RIGHT(tmp);
|
||||
}
|
||||
|
||||
return parent;
|
||||
template <typename Node>
|
||||
Node* RB_MIN(RBHead<Node>* head) {
|
||||
return RB_MINMAX(head, true);
|
||||
}
|
||||
|
||||
} // namespace Common::freebsd
|
||||
template <typename Node>
|
||||
Node* RB_MAX(RBHead<Node>* head) {
|
||||
return RB_MINMAX(head, false);
|
||||
}
|
||||
} // namespace Common
|
||||
|
||||
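As a reviewer's aside: the restored RB_MINMAX boils down to a plain child-chasing loop. A standalone sketch of that walk, with a hypothetical DemoNode type that is not part of this diff, makes the invariant easy to see:

// Illustrative only: follow the left or right child until the walk falls
// off the tree; the last node visited is the minimum or maximum.
struct DemoNode {
    int key;
    DemoNode* left = nullptr;
    DemoNode* right = nullptr;
};

DemoNode* MinMax(DemoNode* root, bool is_min) {
    DemoNode* parent = nullptr;
    for (DemoNode* tmp = root; tmp != nullptr;) {
        parent = tmp;
        tmp = is_min ? tmp->left : tmp->right;
    }
    return parent; // nullptr when the tree is empty
}
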
@@ -1,12 +1,8 @@
// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project / 2022 Yuzu Emulator
// Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <array>
#include <cstring>
#include <iterator>
#include <span>
#include <string_view>
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/x64/cpu_detect.h"

@@ -21,7 +17,7 @@
// clang-format on
#endif

static inline void __cpuidex(int info[4], u32 function_id, u32 subfunction_id) {
static inline void __cpuidex(int info[4], int function_id, int subfunction_id) {
#if defined(__DragonFly__) || defined(__FreeBSD__)
    // Despite the name, this is just do_cpuid() with ECX as second input.
    cpuid_count((u_int)function_id, (u_int)subfunction_id, (u_int*)info);
@@ -34,7 +30,7 @@ static inline void __cpuidex(int info[4], u32 function_id, u32 subfunction_id) {
#endif
}

static inline void __cpuid(int info[4], u32 function_id) {
static inline void __cpuid(int info[4], int function_id) {
    return __cpuidex(info, function_id, 0);
}

@@ -49,17 +45,6 @@ static inline u64 _xgetbv(u32 index) {

namespace Common {

CPUCaps::Manufacturer CPUCaps::ParseManufacturer(std::string_view brand_string) {
    if (brand_string == "GenuineIntel") {
        return Manufacturer::Intel;
    } else if (brand_string == "AuthenticAMD") {
        return Manufacturer::AMD;
    } else if (brand_string == "HygonGenuine") {
        return Manufacturer::Hygon;
    }
    return Manufacturer::Unknown;
}

// Detects the various CPU features
static CPUCaps Detect() {
    CPUCaps caps = {};
@@ -68,74 +53,75 @@ static CPUCaps Detect() {
    // yuzu at all anyway

    int cpu_id[4];
    memset(caps.brand_string, 0, sizeof(caps.brand_string));

    // Detect CPU's CPUID capabilities and grab manufacturer string
    // Detect CPU's CPUID capabilities and grab CPU string
    __cpuid(cpu_id, 0x00000000);
    const u32 max_std_fn = cpu_id[0]; // EAX
    u32 max_std_fn = cpu_id[0]; // EAX

    std::memset(caps.brand_string, 0, std::size(caps.brand_string));
    std::memcpy(&caps.brand_string[0], &cpu_id[1], sizeof(u32));
    std::memcpy(&caps.brand_string[4], &cpu_id[3], sizeof(u32));
    std::memcpy(&caps.brand_string[8], &cpu_id[2], sizeof(u32));

    caps.manufacturer = CPUCaps::ParseManufacturer(caps.brand_string);

    // Set reasonable default cpu string even if brand string not available
    std::strncpy(caps.cpu_string, caps.brand_string, std::size(caps.brand_string));
    std::memcpy(&caps.brand_string[0], &cpu_id[1], sizeof(int));
    std::memcpy(&caps.brand_string[4], &cpu_id[3], sizeof(int));
    std::memcpy(&caps.brand_string[8], &cpu_id[2], sizeof(int));
    if (cpu_id[1] == 0x756e6547 && cpu_id[2] == 0x6c65746e && cpu_id[3] == 0x49656e69)
        caps.manufacturer = Manufacturer::Intel;
    else if (cpu_id[1] == 0x68747541 && cpu_id[2] == 0x444d4163 && cpu_id[3] == 0x69746e65)
        caps.manufacturer = Manufacturer::AMD;
    else if (cpu_id[1] == 0x6f677948 && cpu_id[2] == 0x656e6975 && cpu_id[3] == 0x6e65476e)
        caps.manufacturer = Manufacturer::Hygon;
    else
        caps.manufacturer = Manufacturer::Unknown;

    __cpuid(cpu_id, 0x80000000);

    const u32 max_ex_fn = cpu_id[0];
    u32 max_ex_fn = cpu_id[0];

    // Set reasonable default brand string even if brand string not available
    strcpy(caps.cpu_string, caps.brand_string);

    // Detect family and other miscellaneous features
    if (max_std_fn >= 1) {
        __cpuid(cpu_id, 0x00000001);
        caps.sse = Common::Bit<25>(cpu_id[3]);
        caps.sse2 = Common::Bit<26>(cpu_id[3]);
        caps.sse3 = Common::Bit<0>(cpu_id[2]);
        caps.pclmulqdq = Common::Bit<1>(cpu_id[2]);
        caps.ssse3 = Common::Bit<9>(cpu_id[2]);
        caps.sse4_1 = Common::Bit<19>(cpu_id[2]);
        caps.sse4_2 = Common::Bit<20>(cpu_id[2]);
        caps.movbe = Common::Bit<22>(cpu_id[2]);
        caps.popcnt = Common::Bit<23>(cpu_id[2]);
        caps.aes = Common::Bit<25>(cpu_id[2]);
        caps.f16c = Common::Bit<29>(cpu_id[2]);
        if ((cpu_id[3] >> 25) & 1)
            caps.sse = true;
        if ((cpu_id[3] >> 26) & 1)
            caps.sse2 = true;
        if ((cpu_id[2]) & 1)
            caps.sse3 = true;
        if ((cpu_id[2] >> 9) & 1)
            caps.ssse3 = true;
        if ((cpu_id[2] >> 19) & 1)
            caps.sse4_1 = true;
        if ((cpu_id[2] >> 20) & 1)
            caps.sse4_2 = true;
        if ((cpu_id[2] >> 25) & 1)
            caps.aes = true;

        // AVX support requires 3 separate checks:
        //  - Is the AVX bit set in CPUID?
        //  - Is the XSAVE bit set in CPUID?
        //  - XGETBV result has the XCR bit set.
        if (Common::Bit<28>(cpu_id[2]) && Common::Bit<27>(cpu_id[2])) {
        if (((cpu_id[2] >> 28) & 1) && ((cpu_id[2] >> 27) & 1)) {
            if ((_xgetbv(_XCR_XFEATURE_ENABLED_MASK) & 0x6) == 0x6) {
                caps.avx = true;
                if (Common::Bit<12>(cpu_id[2]))
                if ((cpu_id[2] >> 12) & 1)
                    caps.fma = true;
            }
        }

        if (max_std_fn >= 7) {
            __cpuidex(cpu_id, 0x00000007, 0x00000000);
            // Can't enable AVX{2,512} unless the XSAVE/XGETBV checks above passed
            if (caps.avx) {
                caps.avx2 = Common::Bit<5>(cpu_id[1]);
                caps.avx512f = Common::Bit<16>(cpu_id[1]);
                caps.avx512dq = Common::Bit<17>(cpu_id[1]);
                caps.avx512cd = Common::Bit<28>(cpu_id[1]);
                caps.avx512bw = Common::Bit<30>(cpu_id[1]);
                caps.avx512vl = Common::Bit<31>(cpu_id[1]);
                caps.avx512vbmi = Common::Bit<1>(cpu_id[2]);
                caps.avx512bitalg = Common::Bit<12>(cpu_id[2]);
            // Can't enable AVX2 unless the XSAVE/XGETBV checks above passed
            if ((cpu_id[1] >> 5) & 1)
                caps.avx2 = caps.avx;
            if ((cpu_id[1] >> 3) & 1)
                caps.bmi1 = true;
            if ((cpu_id[1] >> 8) & 1)
                caps.bmi2 = true;
            // Checks for AVX512F, AVX512CD, AVX512VL, AVX512DQ, AVX512BW (Intel Skylake-X/SP)
            if ((cpu_id[1] >> 16) & 1 && (cpu_id[1] >> 28) & 1 && (cpu_id[1] >> 31) & 1 &&
                (cpu_id[1] >> 17) & 1 && (cpu_id[1] >> 30) & 1) {
                caps.avx512 = caps.avx2;
            }

            caps.bmi1 = Common::Bit<3>(cpu_id[1]);
            caps.bmi2 = Common::Bit<8>(cpu_id[1]);
            caps.sha = Common::Bit<29>(cpu_id[1]);

            caps.gfni = Common::Bit<8>(cpu_id[2]);

            __cpuidex(cpu_id, 0x00000007, 0x00000001);
            caps.avx_vnni = caps.avx && Common::Bit<4>(cpu_id[0]);
        }
    }

@@ -152,13 +138,15 @@ static CPUCaps Detect() {
    if (max_ex_fn >= 0x80000001) {
        // Check for more features
        __cpuid(cpu_id, 0x80000001);
        caps.lzcnt = Common::Bit<5>(cpu_id[2]);
        caps.fma4 = Common::Bit<16>(cpu_id[2]);
        if ((cpu_id[2] >> 16) & 1)
            caps.fma4 = true;
    }

    if (max_ex_fn >= 0x80000007) {
        __cpuid(cpu_id, 0x80000007);
        caps.invariant_tsc = Common::Bit<8>(cpu_id[3]);
        if (cpu_id[3] & (1 << 8)) {
            caps.invariant_tsc = true;
        }
    }

    if (max_std_fn >= 0x16) {

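Both styles in the hunks above test single CPUID feature bits; the removed code wrapped the shift-and-mask in a Bit<N>() template, while the restored code spells it out. A minimal standalone sketch of the equivalence (this Bit helper is a local stand-in, not the exact yuzu definition, and the ECX value is made up):

#include <cstddef>
#include <cstdint>

template <std::size_t bit_index>
constexpr bool Bit(std::uint32_t value) {
    return ((value >> bit_index) & 1U) == 1U;
}

constexpr std::uint32_t ecx = 0x02000001; // hypothetical CPUID ECX value
static_assert(Bit<0>(ecx));               // SSE3 bit, helper style
static_assert(((ecx >> 25) & 1) == 1);    // AES bit, shift-and-mask style
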
@@ -1,65 +1,42 @@
// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project / 2022 Yuzu Emulator
// Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <string_view>
#include "common/common_types.h"

namespace Common {

enum class Manufacturer : u32 {
    Intel = 0,
    AMD = 1,
    Hygon = 2,
    Unknown = 3,
};

/// x86/x64 CPU capabilities that may be detected by this module
struct CPUCaps {

    enum class Manufacturer : u8 {
        Unknown = 0,
        Intel = 1,
        AMD = 2,
        Hygon = 3,
    };

    static Manufacturer ParseManufacturer(std::string_view brand_string);

    Manufacturer manufacturer;
    char brand_string[13];

    char cpu_string[48];

    char cpu_string[0x21];
    char brand_string[0x41];
    bool sse;
    bool sse2;
    bool sse3;
    bool ssse3;
    bool sse4_1;
    bool sse4_2;
    bool lzcnt;
    bool avx;
    bool avx2;
    bool avx512;
    bool bmi1;
    bool bmi2;
    bool fma;
    bool fma4;
    bool aes;
    bool invariant_tsc;
    u32 base_frequency;
    u32 max_frequency;
    u32 bus_frequency;

    bool sse : 1;
    bool sse2 : 1;
    bool sse3 : 1;
    bool ssse3 : 1;
    bool sse4_1 : 1;
    bool sse4_2 : 1;

    bool avx : 1;
    bool avx_vnni : 1;
    bool avx2 : 1;
    bool avx512f : 1;
    bool avx512dq : 1;
    bool avx512cd : 1;
    bool avx512bw : 1;
    bool avx512vl : 1;
    bool avx512vbmi : 1;
    bool avx512bitalg : 1;

    bool aes : 1;
    bool bmi1 : 1;
    bool bmi2 : 1;
    bool f16c : 1;
    bool fma : 1;
    bool fma4 : 1;
    bool gfni : 1;
    bool invariant_tsc : 1;
    bool lzcnt : 1;
    bool movbe : 1;
    bool pclmulqdq : 1;
    bool popcnt : 1;
    bool sha : 1;
};

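One CPUCaps layout above stores each feature flag as a plain bool, the other packs them as one-bit bitfields; the substance is identical and only the struct size differs. A quick standalone illustration with hypothetical names (the exact sizes are ABI-dependent, so the comparison is hedged with <=):

struct PackedFlags { // bitfield style: the three flags typically share one byte
    bool sse : 1;
    bool sse2 : 1;
    bool avx : 1;
};

struct PlainFlags { // plain style: typically one byte per flag
    bool sse;
    bool sse2;
    bool avx;
};

static_assert(sizeof(PackedFlags) <= sizeof(PlainFlags));
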
/**

@@ -122,8 +122,6 @@ add_library(core STATIC
    frontend/applets/error.h
    frontend/applets/general_frontend.cpp
    frontend/applets/general_frontend.h
    frontend/applets/mii.cpp
    frontend/applets/mii.h
    frontend/applets/profile_select.cpp
    frontend/applets/profile_select.h
    frontend/applets/software_keyboard.cpp
@@ -209,7 +207,6 @@ add_library(core STATIC
    hle/kernel/k_memory_region.h
    hle/kernel/k_memory_region_type.h
    hle/kernel/k_page_bitmap.h
    hle/kernel/k_page_buffer.h
    hle/kernel/k_page_heap.cpp
    hle/kernel/k_page_heap.h
    hle/kernel/k_page_linked_list.h
@@ -247,8 +244,6 @@ add_library(core STATIC
    hle/kernel/k_system_control.h
    hle/kernel/k_thread.cpp
    hle/kernel/k_thread.h
    hle/kernel/k_thread_local_page.cpp
    hle/kernel/k_thread_local_page.h
    hle/kernel/k_thread_queue.cpp
    hle/kernel/k_thread_queue.h
    hle/kernel/k_trace.h
@@ -305,8 +300,6 @@ add_library(core STATIC
    hle/service/am/applets/applet_error.h
    hle/service/am/applets/applet_general_backend.cpp
    hle/service/am/applets/applet_general_backend.h
    hle/service/am/applets/applet_mii.cpp
    hle/service/am/applets/applet_mii.h
    hle/service/am/applets/applet_profile_select.cpp
    hle/service/am/applets/applet_profile_select.h
    hle/service/am/applets/applet_software_keyboard.cpp

@@ -148,8 +148,8 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
    config.wall_clock_cntpct = uses_wall_clock;

    // Code cache size
    config.code_cache_size = 128_MiB;
    config.far_code_offset = 100_MiB;
    config.code_cache_size = 512_MiB;
    config.far_code_offset = 400_MiB;

    // Safe optimizations
    if (Settings::values.cpu_debug_mode) {

@@ -208,8 +208,8 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
    config.wall_clock_cntpct = uses_wall_clock;

    // Code cache size
    config.code_cache_size = 128_MiB;
    config.far_code_offset = 100_MiB;
    config.code_cache_size = 512_MiB;
    config.far_code_offset = 400_MiB;

    // Safe optimizations
    if (Settings::values.cpu_debug_mode) {

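The cache sizes in these hunks are written with a binary-size user-defined literal. A minimal sketch of such a literal (assumed to be similar to, not necessarily identical to, the one yuzu defines for its _MiB suffix):

#include <cstddef>

constexpr std::size_t operator""_MiB(unsigned long long n) {
    return static_cast<std::size_t>(n) * 1024 * 1024;
}

static_assert(512_MiB == 512ull * 1024 * 1024);
static_assert(400_MiB < 512_MiB); // the far-code offset stays inside the cache
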
@@ -326,9 +326,7 @@ struct System::Impl {
        is_powered_on = false;
        exit_lock = false;

        if (gpu_core != nullptr) {
            gpu_core->NotifyShutdown();
        }
        gpu_core->NotifyShutdown();

        services.reset();
        service_manager.reset();

@@ -1,19 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/logging/log.h"
#include "core/frontend/applets/mii.h"

namespace Core::Frontend {

MiiApplet::~MiiApplet() = default;

void DefaultMiiApplet::ShowMii(
    const MiiParameters& parameters,
    const std::function<void(const Core::Frontend::MiiParameters& parameters)> callback) const {
    LOG_INFO(Service_HID, "(STUBBED) called");
    callback(parameters);
}

} // namespace Core::Frontend
@@ -1,35 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <functional>

#include "core/hle/result.h"
#include "core/hle/service/mii/mii_manager.h"

namespace Core::Frontend {

struct MiiParameters {
    bool is_editable;
    Service::Mii::MiiInfo mii_data{};
};

class MiiApplet {
public:
    virtual ~MiiApplet();

    virtual void ShowMii(const MiiParameters& parameters,
                         const std::function<void(const Core::Frontend::MiiParameters& parameters)>
                             callback) const = 0;
};

class DefaultMiiApplet final : public MiiApplet {
public:
    void ShowMii(const MiiParameters& parameters,
                 const std::function<void(const Core::Frontend::MiiParameters& parameters)>
                     callback) const override;
};

} // namespace Core::Frontend
@@ -42,20 +42,11 @@ public:
            context.MakeCurrent();
        }
        ~Scoped() {
            if (active) {
                context.DoneCurrent();
            }
        }

        /// In the event that context was destroyed before the Scoped is destroyed, this provides a
        /// mechanism to prevent calling a destroyed object's method during the deconstructor
        void Cancel() {
            active = false;
            context.DoneCurrent();
        }

    private:
        GraphicsContext& context;
        bool active{true};
    };

    /// Calls MakeCurrent on the context and calls DoneCurrent when the scope for the returned value

@@ -385,7 +385,7 @@
    T PopRaw();

    template <class T>
    std::weak_ptr<T> PopIpcInterface() {
    std::shared_ptr<T> PopIpcInterface() {
        ASSERT(context->Session()->IsDomain());
        ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
        return context->GetDomainHandler<T>(Pop<u32>() - 1);

@@ -45,7 +45,7 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
            LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
            return false;
        }
        return DomainHandler(object_id - 1).lock() != nullptr;
        return DomainHandler(object_id - 1) != nullptr;
    } else {
        return session_handler != nullptr;
    }
@@ -53,6 +53,9 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co

void SessionRequestHandler::ClientConnected(KServerSession* session) {
    session->ClientConnected(shared_from_this());

    // Ensure our server session is tracked globally.
    kernel.RegisterServerSession(session);
}

void SessionRequestHandler::ClientDisconnected(KServerSession* session) {

@@ -94,7 +94,6 @@ protected:
    std::weak_ptr<ServiceThread> service_thread;
};

using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>;
using SessionRequestHandlerPtr = std::shared_ptr<SessionRequestHandler>;

/**

@@ -140,7 +139,7 @@ public:
        }
    }

    SessionRequestHandlerWeakPtr DomainHandler(std::size_t index) const {
    SessionRequestHandlerPtr DomainHandler(std::size_t index) const {
        ASSERT_MSG(index < DomainHandlerCount(), "Unexpected handler index {}", index);
        return domain_handlers.at(index);
    }
@@ -329,10 +328,10 @@ public:

    template <typename T>
    std::shared_ptr<T> GetDomainHandler(std::size_t index) const {
        return std::static_pointer_cast<T>(manager.lock()->DomainHandler(index).lock());
        return std::static_pointer_cast<T>(manager->DomainHandler(index));
    }

    void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) {
    void SetSessionRequestManager(std::shared_ptr<SessionRequestManager> manager_) {
        manager = std::move(manager_);
    }

@@ -375,7 +374,7 @@ private:
    u32 handles_offset{};
    u32 domain_offset{};

    std::weak_ptr<SessionRequestManager> manager;
    std::shared_ptr<SessionRequestManager> manager;

    KernelCore& kernel;
    Core::Memory::Memory& memory;

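The common thread through these IPC hunks is an ownership change: the removed code held domain handlers through std::weak_ptr and had to lock() before every use, while the restored code stores std::shared_ptr and uses it directly. A standalone sketch of the two call-site patterns (Handler is a made-up type for illustration):

#include <memory>

struct Handler {
    int id = 7;
};

// weak_ptr style (removed): must lock() and handle expiry at every call site.
int UseWeak(const std::weak_ptr<Handler>& weak) {
    if (auto strong = weak.lock()) {
        return strong->id;
    }
    return -1; // handler already destroyed
}

// shared_ptr style (restored): holding the reference keeps the handler alive.
int UseShared(const std::shared_ptr<Handler>& handler) {
    return handler ? handler->id : -1;
}
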
@@ -7,23 +7,19 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"

namespace Kernel::Init {
@@ -36,13 +32,9 @@ namespace Kernel::Init {
    HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
    HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
    HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
    HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
    HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
    HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \
    HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
    HANDLER(KThreadLocalPage, \
            (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \
            ##__VA_ARGS__) \
    HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)

namespace {
@@ -58,46 +50,38 @@ enum KSlabType : u32 {
// Constexpr counts.
constexpr size_t SlabCountKProcess = 80;
constexpr size_t SlabCountKThread = 800;
constexpr size_t SlabCountKEvent = 900;
constexpr size_t SlabCountKEvent = 700;
constexpr size_t SlabCountKInterruptEvent = 100;
constexpr size_t SlabCountKPort = 384;
constexpr size_t SlabCountKPort = 256 + 0x20; // Extra 0x20 ports over Nintendo for homebrew.
constexpr size_t SlabCountKSharedMemory = 80;
constexpr size_t SlabCountKTransferMemory = 200;
constexpr size_t SlabCountKCodeMemory = 10;
constexpr size_t SlabCountKDeviceAddressSpace = 300;
constexpr size_t SlabCountKSession = 1133;
constexpr size_t SlabCountKSession = 933;
constexpr size_t SlabCountKLightSession = 100;
constexpr size_t SlabCountKObjectName = 7;
constexpr size_t SlabCountKResourceLimit = 5;
constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
constexpr size_t SlabCountKIoPool = 1;
constexpr size_t SlabCountKIoRegion = 6;
constexpr size_t SlabCountKAlpha = 1;
constexpr size_t SlabCountKBeta = 6;

constexpr size_t SlabCountExtraKThread = 160;

/// Helper function to translate from the slab virtual address to the reserved location in physical
/// memory.
static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) {
    slab_addr -= memory_layout.GetSlabRegionAddress();
    return slab_addr + Core::DramMemoryMap::SlabHeapBase;
}

template <typename T>
VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
                         size_t num_objects) {
    // TODO(bunnei): This is just a place holder. We should initialize the appropriate KSlabHeap for
    // kernel object type T with the backing kernel memory pointer once we emulate kernel memory.

    const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
    VAddr start = Common::AlignUp(address, alignof(T));

    // This should use the virtual memory address passed in, but currently, we do not setup the
    // kernel virtual memory layout. Instead, we simply map these at a region of physical memory
    // that we reserve for the slab heaps.
    // TODO(bunnei): Fix this once we support the kernel virtual memory layout.
    // This is intentionally empty. Once KSlabHeap is fully implemented, we can replace this with
    // the pointer to emulated memory to pass along. Until then, KSlabHeap will just allocate/free
    // host memory.
    void* backing_kernel_memory{};

    if (size > 0) {
        void* backing_kernel_memory{
            system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};

        const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
        ASSERT(region != nullptr);
        ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
@@ -107,12 +91,6 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
    return start + size;
}

size_t CalculateSlabHeapGapSize() {
    constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB;
    static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
    return KernelSlabHeapGapSize;
}

} // namespace

KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
@@ -131,8 +109,8 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
        .num_KObjectName = SlabCountKObjectName,
        .num_KResourceLimit = SlabCountKResourceLimit,
        .num_KDebug = SlabCountKDebug,
        .num_KIoPool = SlabCountKIoPool,
        .num_KIoRegion = SlabCountKIoRegion,
        .num_KAlpha = SlabCountKAlpha,
        .num_KBeta = SlabCountKBeta,
    };
}

@@ -158,34 +136,11 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
#undef ADD_SLAB_SIZE

    // Add the reserved size.
    size += CalculateSlabHeapGapSize();
    size += KernelSlabHeapGapsSize;

    return size;
}

void InitializeKPageBufferSlabHeap(Core::System& system) {
    auto& kernel = system.Kernel();

    const auto& counts = kernel.SlabResourceCounts();
    const size_t num_pages =
        counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
    const size_t slab_size = num_pages * PageSize;

    // Reserve memory from the system resource limit.
    ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));

    // Allocate memory for the slab.
    constexpr auto AllocateOption = KMemoryManager::EncodeOption(
        KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
    const PAddr slab_address =
        kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
    ASSERT(slab_address != 0);

    // Initialize the slabheap.
    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
                                    slab_size);
}

void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
    auto& kernel = system.Kernel();

@@ -205,13 +160,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
    }

    // Create an array to represent the gaps between the slabs.
    const size_t total_gap_size = CalculateSlabHeapGapSize();
    const size_t total_gap_size = KernelSlabHeapGapsSize;
    std::array<size_t, slab_types.size()> slab_gaps;
    for (auto& slab_gap : slab_gaps) {
    for (size_t i = 0; i < slab_gaps.size(); i++) {
        // Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange
        // is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we
        // will include it ourselves.
        slab_gap = KSystemControl::GenerateRandomRange(0, total_gap_size);
        slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size);
    }

    // Sort the array, so that we can treat differences between values as offsets to the starts of
@@ -222,21 +177,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
        }
    }

    // Track the gaps, so that we can free them to the unused slab tree.
    VAddr gap_start = address;
    size_t gap_size = 0;

    for (size_t i = 0; i < slab_gaps.size(); i++) {
    for (size_t i = 0; i < slab_types.size(); i++) {
        // Add the random gap to the address.
        const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
        address += cur_gap;
        gap_size += cur_gap;
        address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];

#define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \
    case KSlabType_##NAME: \
        if (COUNT > 0) { \
            address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \
        } \
        address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \
        break;

        // Initialize the slabheap.
@@ -245,13 +192,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
            FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
        // If we somehow get an invalid type, abort.
        default:
            UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]);
        }

        // If we've hit the end of a gap, free it.
        if (gap_start + gap_size != address) {
            gap_start = address;
            gap_size = 0;
            UNREACHABLE();
        }
    }
}

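The gap placement in InitializeSlabHeaps draws one random value per slab type in [0, total_gap_size], sorts them, and uses the differences between neighbours as the actual gaps, which guarantees the gaps sum to at most the reserved budget. A standalone sketch of that scheme (std::mt19937_64 stands in for KSystemControl::GenerateRandomRange, and the slab count is made up):

#include <algorithm>
#include <array>
#include <cstddef>
#include <random>

int main() {
    constexpr std::size_t total_gap_size = 2 * 1024 * 1024 - 64 * 1024;
    std::array<std::size_t, 8> gaps{}; // assume 8 slab types, for illustration
    std::mt19937_64 rng{0x1234};
    std::uniform_int_distribution<std::size_t> dist(0, total_gap_size);

    for (auto& g : gaps) {
        g = dist(rng);
    }
    std::sort(gaps.begin(), gaps.end());

    std::size_t address = 0;
    for (std::size_t i = 0; i < gaps.size(); i++) {
        // Difference between sorted neighbours is this slab's gap; the
        // differences telescope, so they can never exceed total_gap_size.
        address += (i == 0) ? gaps[0] : gaps[i] - gaps[i - 1];
        // ... place slab i at `address`, then advance past the slab itself ...
    }
    return 0;
}
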
@@ -32,13 +32,12 @@ struct KSlabResourceCounts {
    size_t num_KObjectName;
    size_t num_KResourceLimit;
    size_t num_KDebug;
    size_t num_KIoPool;
    size_t num_KIoRegion;
    size_t num_KAlpha;
    size_t num_KBeta;
};

void InitializeSlabResourceCounts(KernelCore& kernel);
size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
void InitializeKPageBufferSlabHeap(Core::System& system);
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);

} // namespace Kernel::Init

@@ -115,7 +115,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
    {
        KScopedSchedulerLock sl(kernel);

        auto it = thread_tree.nfind_key({addr, -1});
        auto it = thread_tree.nfind_light({addr, -1});
        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetAddressArbiterKey() == addr)) {
            // End the thread's wait.
@@ -148,7 +148,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
            return ResultInvalidState;
        }

        auto it = thread_tree.nfind_key({addr, -1});
        auto it = thread_tree.nfind_light({addr, -1});
        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetAddressArbiterKey() == addr)) {
            // End the thread's wait.
@@ -171,7 +171,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
    {
        [[maybe_unused]] const KScopedSchedulerLock sl(kernel);

        auto it = thread_tree.nfind_key({addr, -1});
        auto it = thread_tree.nfind_light({addr, -1});
        // Determine the updated value.
        s32 new_value{};
        if (count <= 0) {

@@ -244,7 +244,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
    {
        KScopedSchedulerLock sl(kernel);

        auto it = thread_tree.nfind_key({cv_key, -1});
        auto it = thread_tree.nfind_light({cv_key, -1});
        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetConditionVariableKey() == cv_key)) {
            KThread* target_thread = std::addressof(*it);

@@ -57,11 +57,11 @@ constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemor
constexpr std::size_t KernelInitialPageHeapSize = 128_KiB;

constexpr std::size_t KernelSlabHeapDataSize = 5_MiB;
constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB;
constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;
constexpr std::size_t KernelSlabHeapGapsSize = 2_MiB - 64_KiB;
constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize;

// NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000;
constexpr std::size_t KernelSlabHeapAdditionalSize = 416_KiB;

constexpr std::size_t KernelResourceSize =
    KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;

@@ -1,34 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hle/kernel/memory_types.h"

namespace Kernel {

class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
public:
    KPageBuffer() = default;

    static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
        ASSERT(Common::IsAligned(phys_addr, PageSize));
        return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
    }

private:
    [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{};
};

static_assert(sizeof(KPageBuffer) == PageSize);
static_assert(alignof(KPageBuffer) == PageSize);

} // namespace Kernel
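The deleted KPageBuffer header relies on alignas applied to the sole data member: that forces both sizeof and alignof of the wrapper to be exactly one page, which the static_asserts then pin down. A minimal standalone version of the same trick (PageSize is assumed here, matching the usual 4 KiB):

#include <array>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;

struct PageBuffer {
    // Over-aligning the member propagates to the enclosing type.
    alignas(PageSize) std::array<std::uint8_t, PageSize> buffer{};
};

static_assert(sizeof(PageBuffer) == PageSize);
static_assert(alignof(PageBuffer) == PageSize);
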
@@ -285,207 +285,76 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
    return ResultSuccess;
}

ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
    // Validate the mapping request.
    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
             ResultInvalidMemoryRegion);

    // Lock the table.
ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
    KScopedLightLock lk(general_lock);

    // Verify that the source memory is normal heap.
    KMemoryState src_state{};
    KMemoryPermission src_perm{};
    std::size_t num_src_allocator_blocks{};
    R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
                                 src_address, size, KMemoryState::All, KMemoryState::Normal,
                                 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                 KMemoryAttribute::All, KMemoryAttribute::None));
    const std::size_t num_pages{size / PageSize};

    // Verify that the destination memory is unmapped.
    std::size_t num_dst_allocator_blocks{};
    R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
                                 KMemoryState::Free, KMemoryPermission::None,
                                 KMemoryPermission::None, KMemoryAttribute::None,
                                 KMemoryAttribute::None));
    KMemoryState state{};
    KMemoryPermission perm{};
    CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, nullptr, src_addr, size,
                                  KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::Mask,
                                  KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));

    // Map the code memory.
    {
        // Determine the number of pages being operated on.
        const std::size_t num_pages = size / PageSize;

        // Create page groups for the memory being mapped.
        KPageLinkedList pg;
        AddRegionToPages(src_address, num_pages, pg);

        // Reprotect the source as kernel-read/not mapped.
        const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
                                                             KMemoryPermission::NotMapped);
        R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));

        // Ensure that we unprotect the source pages on failure.
        auto unprot_guard = SCOPE_GUARD({
            ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
                       .IsSuccess());
        });

        // Map the alias pages.
        R_TRY(MapPages(dst_address, pg, new_perm));

        // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
        // failure.
        unprot_guard.Cancel();

        // Apply the memory block updates.
        block_manager->Update(src_address, num_pages, src_state, new_perm,
                              KMemoryAttribute::Locked);
        block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm,
                              KMemoryAttribute::None);
    if (IsRegionMapped(dst_addr, size)) {
        return ResultInvalidCurrentMemory;
    }

    KPageLinkedList page_linked_list;
    AddRegionToPages(src_addr, num_pages, page_linked_list);

    {
        auto block_guard = detail::ScopeExit(
            [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); });

        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
                             OperationType::ChangePermissions));
        CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::None));

        block_guard.Cancel();
    }

    block_manager->Update(src_addr, num_pages, state, KMemoryPermission::None,
                          KMemoryAttribute::Locked);
    block_manager->Update(dst_addr, num_pages, KMemoryState::AliasCode);

    return ResultSuccess;
}

ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
    // Validate the mapping request.
    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
             ResultInvalidMemoryRegion);

    // Lock the table.
ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
    KScopedLightLock lk(general_lock);

    // Verify that the source memory is locked normal heap.
    std::size_t num_src_allocator_blocks{};
    R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
                                 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
                                 KMemoryPermission::None, KMemoryAttribute::All,
                                 KMemoryAttribute::Locked));
    if (!size) {
        return ResultSuccess;
    }

    // Verify that the destination memory is aliasable code.
    std::size_t num_dst_allocator_blocks{};
    R_TRY(this->CheckMemoryStateContiguous(
        std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
    const std::size_t num_pages{size / PageSize};

    CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, nullptr, src_addr, size,
                                  KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::Mask,
                                  KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));

    KMemoryState state{};
    CASCADE_CODE(CheckMemoryState(
        &state, nullptr, nullptr, nullptr, dst_addr, PageSize, KMemoryState::FlagCanCodeAlias,
        KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
        KMemoryAttribute::All, KMemoryAttribute::None));
        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
    CASCADE_CODE(CheckMemoryState(dst_addr, size, KMemoryState::All, state, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::Mask,
                                  KMemoryAttribute::None));
    CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));

    // Determine whether any pages being unmapped are code.
    bool any_code_pages = false;
    {
        KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address);
        while (true) {
            // Get the memory info.
            const KMemoryInfo info = it->GetMemoryInfo();
    block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
    block_manager->Update(src_addr, num_pages, KMemoryState::Normal,
                          KMemoryPermission::UserReadWrite);

            // Check if the memory has code flag.
            if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
                any_code_pages = true;
                break;
            }

            // Check if we're done.
            if (dst_address + size - 1 <= info.GetLastAddress()) {
                break;
            }

            // Advance.
            ++it;
        }
    }

    // Ensure that we maintain the instruction cache.
    bool reprotected_pages = false;
    SCOPE_EXIT({
        if (reprotected_pages && any_code_pages) {
            system.InvalidateCpuInstructionCacheRange(dst_address, size);
        }
    });

    // Unmap.
    {
        // Determine the number of pages being operated on.
        const std::size_t num_pages = size / PageSize;

        // Unmap the aliased copy of the pages.
        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));

        // Try to set the permissions for the source pages back to what they should be.
        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
                      OperationType::ChangePermissions));

        // Apply the memory block updates.
        block_manager->Update(dst_address, num_pages, KMemoryState::None);
        block_manager->Update(src_address, num_pages, KMemoryState::Normal,
                              KMemoryPermission::UserReadWrite);

        // Note that we reprotected pages.
        reprotected_pages = true;
    }
    system.InvalidateCpuInstructionCacheRange(dst_addr, size);

    return ResultSuccess;
}

VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
                               std::size_t num_pages, std::size_t alignment, std::size_t offset,
                               std::size_t guard_pages) {
    VAddr address = 0;

    if (num_pages <= region_num_pages) {
        if (this->IsAslrEnabled()) {
            // Try to directly find a free area up to 8 times.
            for (std::size_t i = 0; i < 8; i++) {
                const std::size_t random_offset =
                    KSystemControl::GenerateRandomRange(
                        0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
                    alignment;
                const VAddr candidate =
                    Common::AlignDown((region_start + random_offset), alignment) + offset;

                KMemoryInfo info = this->QueryInfoImpl(candidate);

                if (info.state != KMemoryState::Free) {
                    continue;
                }
                if (region_start > candidate) {
                    continue;
                }
                if (info.GetAddress() + guard_pages * PageSize > candidate) {
                    continue;
                }

                const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
                if (candidate_end > info.GetLastAddress()) {
                    continue;
                }
                if (candidate_end > region_start + region_num_pages * PageSize - 1) {
                    continue;
                }

                address = candidate;
                break;
            }
            // Fall back to finding the first free area with a random offset.
            if (address == 0) {
                // NOTE: Nintendo does not account for guard pages here.
                // This may theoretically cause an offset to be chosen that cannot be mapped. We
                // will account for guard pages.
                const std::size_t offset_pages = KSystemControl::GenerateRandomRange(
                    0, region_num_pages - num_pages - guard_pages);
                address = block_manager->FindFreeArea(region_start + offset_pages * PageSize,
                                                      region_num_pages - offset_pages, num_pages,
                                                      alignment, offset, guard_pages);
            }
        }

        // Find the first free area.
        if (address == 0) {
            address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages,
                                                  alignment, offset, guard_pages);
        }
    }

    return address;
}

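The ASLR path above picks an alignment-granular random offset, aligns the candidate down, and then rejects candidates that collide with occupied blocks or guard pages. The offset computation alone, as a standalone sketch (types, constants, and the `random` input are assumptions; alignment must be a power of two):

#include <cstdint>

using VAddr = std::uint64_t;

constexpr std::uint64_t PageSize = 0x1000;

// `random` stands in for KSystemControl::GenerateRandomRange.
VAddr PickCandidate(VAddr region_start, std::uint64_t region_num_pages,
                    std::uint64_t num_pages, std::uint64_t guard_pages,
                    std::uint64_t alignment, std::uint64_t offset, std::uint64_t random) {
    const std::uint64_t slots =
        (region_num_pages - num_pages - guard_pages) * PageSize / alignment;
    const std::uint64_t random_offset = (random % (slots + 1)) * alignment;
    const VAddr aligned = (region_start + random_offset) & ~(alignment - 1); // AlignDown
    return aligned + offset;
}
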
ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
                                          KPageTable& src_page_table, VAddr src_addr) {
    KScopedLightLock lk(general_lock);
@@ -1117,46 +986,6 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
    return ResultSuccess;
}

ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
                                PAddr phys_addr, bool is_pa_valid, VAddr region_start,
                                std::size_t region_num_pages, KMemoryState state,
                                KMemoryPermission perm) {
    ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);

    // Ensure this is a valid map request.
    R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
             ResultInvalidCurrentMemory);
    R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Find a random address to map at.
    VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
                                    this->GetNumGuardPages());
    R_UNLESS(addr != 0, ResultOutOfMemory);
    ASSERT(Common::IsAligned(addr, alignment));
    ASSERT(this->CanContain(addr, num_pages * PageSize, state));
    ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
                                  KMemoryPermission::None, KMemoryPermission::None,
                                  KMemoryAttribute::None, KMemoryAttribute::None)
               .IsSuccess());

    // Perform mapping operation.
    if (is_pa_valid) {
        R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
    } else {
        UNIMPLEMENTED();
    }

    // Update the blocks.
    block_manager->Update(addr, num_pages, state, perm);

    // We successfully mapped the pages.
    *out_addr = addr;
    return ResultSuccess;
}

ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
    ASSERT(this->IsLockedByCurrentThread());

@@ -1199,30 +1028,6 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
    return ResultSuccess;
}

ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
    // Check that the unmap is in range.
    const std::size_t size = num_pages * PageSize;
    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Check the memory state.
    std::size_t num_allocator_blocks{};
    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                 KMemoryState::All, state, KMemoryPermission::None,
                                 KMemoryPermission::None, KMemoryAttribute::All,
                                 KMemoryAttribute::None));

    // Perform the unmap.
    R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));

    // Update the blocks.
    block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None);

    return ResultSuccess;
}

ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
                                                  Svc::MemoryPermission svc_perm) {
    const size_t num_pages = size / PageSize;

@@ -36,8 +36,8 @@ public:
                              KMemoryManager::Pool pool);
    ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
                              KMemoryPermission perm);
    ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
    ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
    ResultCode MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
                                  VAddr src_addr);
    ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
@@ -46,14 +46,7 @@ public:
    ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
                        KMemoryPermission perm);
    ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
                        PAddr phys_addr, KMemoryState state, KMemoryPermission perm) {
        return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
                              this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
                              state, perm);
    }
    ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
    ResultCode UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
    ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size,
                                          Svc::MemoryPermission svc_perm);
    KMemoryInfo QueryInfo(VAddr addr);
@@ -98,9 +91,6 @@ private:
    ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
    ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
                        KMemoryPermission perm);
    ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
                        PAddr phys_addr, bool is_pa_valid, VAddr region_start,
                        std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
    ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
    bool IsRegionMapped(VAddr address, u64 size);
    bool IsRegionContiguous(VAddr addr, u64 size) const;
@@ -115,9 +105,6 @@ private:
    VAddr GetRegionAddress(KMemoryState state) const;
    std::size_t GetRegionSize(KMemoryState state) const;

    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
                       std::size_t alignment, std::size_t offset, std::size_t guard_pages);

    ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
                                          std::size_t size, KMemoryState state_mask,
                                          KMemoryState state, KMemoryPermission perm_mask,
@@ -150,7 +137,7 @@ private:
        return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
                                state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
    }
    ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
    ResultCode CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask,
                                KMemoryState state, KMemoryPermission perm_mask,
                                KMemoryPermission perm, KMemoryAttribute attr_mask,
                                KMemoryAttribute attr,
@@ -223,7 +210,7 @@ public:
    constexpr VAddr GetAliasCodeRegionSize() const {
        return alias_code_region_end - alias_code_region_start;
    }
    std::size_t GetNormalMemorySize() {
    size_t GetNormalMemorySize() {
        KScopedLightLock lk(general_lock);
        return GetHeapSize() + mapped_physical_memory_size;
    }
@@ -266,9 +253,7 @@ public:
    constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
        return !IsOutsideASLRRegion(address, size);
    }
    constexpr std::size_t GetNumGuardPages() const {
        return IsKernel() ? 1 : 4;
    }

    PAddr GetPhysicalAddr(VAddr addr) const {
        const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
        ASSERT(backing_addr);
@@ -290,6 +275,10 @@ private:
        return is_aslr_enabled;
    }

    constexpr std::size_t GetNumGuardPages() const {
        return IsKernel() ? 1 : 4;
    }

    constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
        return (address_space_start <= addr) &&
               (num_pages <= (address_space_end - address_space_start) / PageSize) &&

@@ -57,12 +57,7 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {
    R_UNLESS(state == State::Normal, ResultPortClosed);

    server.EnqueueSession(session);

    if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
        session_ptr->ClientConnected(server.AcceptSession());
    } else {
        UNREACHABLE();
    }
    server.GetSessionRequestHandler()->ClientConnected(server.AcceptSession());

    return ResultSuccess;
}

@@ -70,6 +70,58 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
}
} // Anonymous namespace

// Represents a page used for thread-local storage.
//
// Each TLS page contains slots that may be used by processes and threads.
// Every process and thread is created with a slot in some arbitrary page
// (whichever page happens to have an available slot).
class TLSPage {
public:
    static constexpr std::size_t num_slot_entries =
        Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE;

    explicit TLSPage(VAddr address) : base_address{address} {}

    bool HasAvailableSlots() const {
        return !is_slot_used.all();
    }

    VAddr GetBaseAddress() const {
        return base_address;
    }

    std::optional<VAddr> ReserveSlot() {
        for (std::size_t i = 0; i < is_slot_used.size(); i++) {
            if (is_slot_used[i]) {
                continue;
            }

            is_slot_used[i] = true;
            return base_address + (i * Core::Memory::TLS_ENTRY_SIZE);
        }

        return std::nullopt;
    }

    void ReleaseSlot(VAddr address) {
        // Ensure that all given addresses are consistent with how TLS pages
        // are intended to be used when releasing slots.
        ASSERT(IsWithinPage(address));
        ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0);

        const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE;
        is_slot_used[index] = false;
    }

private:
    bool IsWithinPage(VAddr address) const {
        return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE;
    }

    VAddr base_address;
    std::bitset<num_slot_entries> is_slot_used;
};
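
The restored TLSPage class tracks slot occupancy with a std::bitset. A brief usage sketch, assuming the PAGE_SIZE/TLS_ENTRY_SIZE constants referenced above and a made-up base address:

// Illustrative only; 0x20000000 is an arbitrary page-aligned address.
TLSPage page{0x20000000};
if (const auto slot = page.ReserveSlot()) {
    // *slot is base_address + index * TLS_ENTRY_SIZE; hand it to a thread.
    page.ReleaseSlot(*slot); // the slot becomes reusable afterwards
}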
ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
                                ProcessType type, KResourceLimit* res_limit) {
    auto& kernel = system.Kernel();
@@ -352,7 +404,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
    }

    // Create TLS region
    R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
    tls_region_address = CreateTLSRegion();
    memory_reservation.Commit();

    return handle_table.Initialize(capabilities.GetHandleTableSize());
@@ -392,7 +444,7 @@ void KProcess::PrepareForTermination() {

    stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());

    this->DeleteThreadLocalRegion(tls_region_address);
    FreeTLSRegion(tls_region_address);
    tls_region_address = 0;

    if (resource_limit) {
@@ -404,6 +456,9 @@ void KProcess::PrepareForTermination() {
}

void KProcess::Finalize() {
    // Finalize the handle table and close any open handles.
    handle_table.Finalize();

    // Free all shared memory infos.
    {
        auto it = shared_memory_list.begin();
@@ -428,110 +483,67 @@ void KProcess::Finalize() {
    resource_limit = nullptr;
    }

    // Finalize the page table.
    page_table.reset();

    // Perform inherited finalization.
    KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
}
ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
    KThreadLocalPage* tlp = nullptr;
    VAddr tlr = 0;

    // See if we can get a region from a partially used TLP.
    {
        KScopedSchedulerLock sl{kernel};

        if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
            tlr = it->Reserve();
            ASSERT(tlr != 0);

            if (it->IsAllUsed()) {
                tlp = std::addressof(*it);
                partially_used_tlp_tree.erase(it);
                fully_used_tlp_tree.insert(*tlp);
            }

            *out = tlr;
            return ResultSuccess;
        }
    }

    // Allocate a new page.
    tlp = KThreadLocalPage::Allocate(kernel);
    R_UNLESS(tlp != nullptr, ResultOutOfMemory);
    auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); });

    // Initialize the new page.
    R_TRY(tlp->Initialize(kernel, this));

    // Reserve a TLR.
    tlr = tlp->Reserve();
    ASSERT(tlr != 0);

    // Insert into our tree.
    {
        KScopedSchedulerLock sl{kernel};
        if (tlp->IsAllUsed()) {
            fully_used_tlp_tree.insert(*tlp);
        } else {
            partially_used_tlp_tree.insert(*tlp);
        }
    }

    // We succeeded!
    tlp_guard.Cancel();
    *out = tlr;
    return ResultSuccess;
/**
 * Attempts to find a TLS page that contains a free slot for
 * use by a thread.
 *
 * @returns If a page with an available slot is found, then an iterator
 *          pointing to the page is returned. Otherwise the end iterator
 *          is returned instead.
 */
static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
    return std::find_if(tls_pages.begin(), tls_pages.end(),
                        [](const auto& page) { return page.HasAvailableSlots(); });
}

ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) {
    KThreadLocalPage* page_to_free = nullptr;

    // Release the region.
    {
        KScopedSchedulerLock sl{kernel};

        // Try to find the page in the partially used list.
        auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
        if (it == partially_used_tlp_tree.end()) {
            // If we don't find it, it has to be in the fully used list.
            it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
            R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress);

            // Release the region.
            it->Release(addr);

            // Move the page out of the fully used list.
            KThreadLocalPage* tlp = std::addressof(*it);
            fully_used_tlp_tree.erase(it);
            if (tlp->IsAllFree()) {
                page_to_free = tlp;
            } else {
                partially_used_tlp_tree.insert(*tlp);
            }
        } else {
            // Release the region.
            it->Release(addr);

            // Handle the all-free case.
            KThreadLocalPage* tlp = std::addressof(*it);
            if (tlp->IsAllFree()) {
                partially_used_tlp_tree.erase(it);
                page_to_free = tlp;
            }
        }
    }
VAddr KProcess::CreateTLSRegion() {
    KScopedSchedulerLock lock(kernel);
    if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
        tls_page_iter != tls_pages.cend()) {
        return *tls_page_iter->ReserveSlot();
    }

    // If we should free the page it was in, do so.
    if (page_to_free != nullptr) {
        page_to_free->Finalize();
    Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
    ASSERT(tls_page_ptr);

        KThreadLocalPage::Free(kernel, page_to_free);
    }
    const VAddr start{page_table->GetKernelMapRegionStart()};
    const VAddr size{page_table->GetKernelMapRegionEnd() - start};
    const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
    const VAddr tls_page_addr{page_table
                                  ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
                                                         KMemoryState::ThreadLocal,
                                                         KMemoryPermission::UserReadWrite,
                                                         tls_map_addr)
                                  .ValueOr(0)};

    return ResultSuccess;
    ASSERT(tls_page_addr);

    std::memset(tls_page_ptr, 0, PageSize);
    tls_pages.emplace_back(tls_page_addr);

    const auto reserve_result{tls_pages.back().ReserveSlot()};
    ASSERT(reserve_result.has_value());

    return *reserve_result;
}

void KProcess::FreeTLSRegion(VAddr tls_address) {
    KScopedSchedulerLock lock(kernel);
    const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
    auto iter =
        std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
            return page.GetBaseAddress() == aligned_address;
        });

    // Something has gone very wrong if we're freeing a region
    // with no actual page available.
    ASSERT(iter != tls_pages.cend());

    iter->ReleaseSlot(tls_address);
}

void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
@@ -15,7 +15,6 @@
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/slab_helpers.h"
@@ -363,10 +362,10 @@ public:
    // Thread-local storage management

    // Marks the next available region as used and returns the address of the slot.
    [[nodiscard]] ResultCode CreateThreadLocalRegion(VAddr* out);
    [[nodiscard]] VAddr CreateTLSRegion();

    // Frees a used TLS slot identified by the given address
    ResultCode DeleteThreadLocalRegion(VAddr addr);
    void FreeTLSRegion(VAddr tls_address);

private:
    void PinThread(s32 core_id, KThread* thread) {
@@ -414,6 +413,13 @@ private:
    /// The ideal CPU core for this process, threads are scheduled on this core by default.
    u8 ideal_core = 0;

    /// The Thread Local Storage area is allocated as processes create threads,
    /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
    /// holds the TLS for a specific thread. This vector contains which parts are in use for each
    /// page as a bitmask.
    /// This vector will grow as more pages are allocated for new threads.
    std::vector<TLSPage> tls_pages;

    /// Contains the parsed process capability descriptors.
    ProcessCapabilities capabilities;
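
The doc comment above spells out the slot arithmetic: one 0x1000-byte page split into 0x200-byte TLS entries yields 8 slots. That arithmetic can be sanity-checked at compile time (an illustrative snippet, not part of the diff):

#include <cstddef>

constexpr std::size_t kPageSize = 0x1000;    // one TLS page
constexpr std::size_t kTlsEntrySize = 0x200; // one TLS slot
static_assert(kPageSize / kTlsEntrySize == 8, "each TLS page holds exactly 8 slots");

// Mapping a slot address back to its index within its page:
constexpr std::size_t SlotIndex(std::size_t address, std::size_t base) {
    return (address - base) / kTlsEntrySize;
}
static_assert(SlotIndex(0x20000400, 0x20000000) == 2);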
@@ -476,12 +482,6 @@ private:
    KThread* exception_thread{};

    KLightLock state_lock;

    using TLPTree =
        Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
    using TLPIterator = TLPTree::iterator;
    TLPTree fully_used_tlp_tree;
    TLPTree partially_used_tlp_tree;
};

} // namespace Kernel

@@ -30,11 +30,11 @@ public:

    /// Whether or not this server port has an HLE handler available.
    bool HasSessionRequestHandler() const {
        return !session_handler.expired();
        return session_handler != nullptr;
    }

    /// Gets the HLE handler for this port.
    SessionRequestHandlerWeakPtr GetSessionRequestHandler() const {
    SessionRequestHandlerPtr GetSessionRequestHandler() const {
        return session_handler;
    }

@@ -42,7 +42,7 @@ public:
     * Sets the HLE handler template for the port. ServerSessions created by connecting to this port
     * will inherit a reference to this handler.
     */
    void SetSessionHandler(SessionRequestHandlerWeakPtr&& handler) {
    void SetSessionHandler(SessionRequestHandlerPtr&& handler) {
        session_handler = std::move(handler);
    }

@@ -66,7 +66,7 @@ private:
    void CleanupSessions();

    SessionList session_list;
    SessionRequestHandlerWeakPtr session_handler;
    SessionRequestHandlerPtr session_handler;
    KPort* parent{};
};
@@ -27,7 +27,10 @@ namespace Kernel {

KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}

KServerSession::~KServerSession() = default;
KServerSession::~KServerSession() {
    // Ensure that the global list tracking server sessions does not hold on to a reference.
    kernel.UnregisterServerSession(this);
}

void KServerSession::Initialize(KSession* parent_session_, std::string&& name_,
                                std::shared_ptr<SessionRequestManager> manager_) {
@@ -46,9 +49,6 @@ void KServerSession::Destroy() {
    parent->OnServerClosed();

    parent->Close();

    // Release host emulation members.
    manager.reset();
}

void KServerSession::OnClientClosed() {
@@ -98,12 +98,7 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
        UNREACHABLE();
        return ResultSuccess; // Ignore error if asserts are off
    }
    if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
        return strong_ptr->HandleSyncRequest(*this, context);
    } else {
        UNREACHABLE();
        return ResultSuccess;
    }
    return manager->DomainHandler(object_id - 1)->HandleSyncRequest(*this, context);

    case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
        LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
@@ -16,34 +16,39 @@ class KernelCore;

namespace impl {

class KSlabHeapImpl {
class KSlabHeapImpl final {
public:
    YUZU_NON_COPYABLE(KSlabHeapImpl);
    YUZU_NON_MOVEABLE(KSlabHeapImpl);

public:
    struct Node {
        Node* next{};
    };

public:
    constexpr KSlabHeapImpl() = default;
    constexpr ~KSlabHeapImpl() = default;

    void Initialize() {
        ASSERT(m_head == nullptr);
    void Initialize(std::size_t size) {
        ASSERT(head == nullptr);
        obj_size = size;
    }

    constexpr std::size_t GetObjectSize() const {
        return obj_size;
    }

    Node* GetHead() const {
        return m_head;
        return head;
    }

    void* Allocate() {
        Node* ret = m_head.load();
        Node* ret = head.load();

        do {
            if (ret == nullptr) {
                break;
            }
        } while (!m_head.compare_exchange_weak(ret, ret->next));
        } while (!head.compare_exchange_weak(ret, ret->next));

        return ret;
    }
@@ -51,157 +56,170 @@ public:
    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);

        Node* cur_head = m_head.load();
        Node* cur_head = head.load();
        do {
            node->next = cur_head;
        } while (!m_head.compare_exchange_weak(cur_head, node));
        } while (!head.compare_exchange_weak(cur_head, node));
    }

private:
    std::atomic<Node*> m_head{};
    std::atomic<Node*> head{};
    std::size_t obj_size{};
};

} // namespace impl
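
Both versions of KSlabHeapImpl above implement the same lock-free intrusive free list, commonly known as a Treiber stack: Allocate pops the head node with a CAS loop and Free pushes one back. A self-contained sketch of the pattern follows; note that real slab allocators must also reason about the ABA problem, which is benign here because freed nodes are never unmapped:

#include <atomic>

struct FreeList {
    struct Node {
        Node* next{};
    };

    std::atomic<Node*> head{};

    // Pop: retry until head is swung from the observed node to its successor.
    void* Allocate() {
        Node* ret = head.load();
        do {
            if (ret == nullptr) {
                break; // list exhausted
            }
        } while (!head.compare_exchange_weak(ret, ret->next));
        return ret;
    }

    // Push: link the node ahead of the observed head, then CAS it in.
    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);
        Node* cur_head = head.load();
        do {
            node->next = cur_head;
        } while (!head.compare_exchange_weak(cur_head, node));
    }
};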
template <bool SupportDynamicExpansion>
class KSlabHeapBase : protected impl::KSlabHeapImpl {
class KSlabHeapBase {
public:
    YUZU_NON_COPYABLE(KSlabHeapBase);
    YUZU_NON_MOVEABLE(KSlabHeapBase);

private:
    size_t m_obj_size{};
    uintptr_t m_peak{};
    uintptr_t m_start{};
    uintptr_t m_end{};

private:
    void UpdatePeakImpl(uintptr_t obj) {
        static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
        std::atomic_ref<uintptr_t> peak_ref(m_peak);

        const uintptr_t alloc_peak = obj + this->GetObjectSize();
        uintptr_t cur_peak = m_peak;
        do {
            if (alloc_peak <= cur_peak) {
                break;
            }
        } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak));
    }
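
UpdatePeakImpl above is an atomic-maximum update: m_peak only advances when the new high-water mark exceeds the value currently stored, and a failed CAS simply reloads and retries. The same pattern in isolation, using std::atomic instead of std::atomic_ref (illustrative):

#include <atomic>
#include <cstdint>

// Raise `peak` to `candidate` if candidate is larger; back off gracefully
// when another thread has already recorded a higher value.
inline void UpdateMax(std::atomic<std::uintptr_t>& peak, std::uintptr_t candidate) {
    std::uintptr_t current = peak.load();
    do {
        if (candidate <= current) {
            break;
        }
    } while (!peak.compare_exchange_strong(current, candidate));
}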
public:
    constexpr KSlabHeapBase() = default;
    constexpr ~KSlabHeapBase() = default;

    bool Contains(uintptr_t address) const {
        return m_start <= address && address < m_end;
    constexpr bool Contains(uintptr_t addr) const {
        return start <= addr && addr < end;
    }

    void Initialize(size_t obj_size, void* memory, size_t memory_size) {
        // Ensure we don't initialize a slab using null memory.
    constexpr std::size_t GetSlabHeapSize() const {
        return (end - start) / GetObjectSize();
    }

    constexpr std::size_t GetObjectSize() const {
        return impl.GetObjectSize();
    }

    constexpr uintptr_t GetSlabHeapAddress() const {
        return start;
    }

    std::size_t GetObjectIndexImpl(const void* obj) const {
        return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
    }

    std::size_t GetPeakIndex() const {
        return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
    }

    void* AllocateImpl() {
        return impl.Allocate();
    }

    void FreeImpl(void* obj) {
        // Don't allow freeing an object that wasn't allocated from this heap
        ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));

        impl.Free(obj);
    }

    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
        // Ensure we don't initialize a slab using null memory
        ASSERT(memory != nullptr);

        // Set our object size.
        m_obj_size = obj_size;
        // Initialize the base allocator
        impl.Initialize(obj_size);

        // Initialize the base allocator.
        KSlabHeapImpl::Initialize();
        // Set our tracking variables
        const std::size_t num_obj = (memory_size / obj_size);
        start = reinterpret_cast<uintptr_t>(memory);
        end = start + num_obj * obj_size;
        peak = start;

        // Set our tracking variables.
        const size_t num_obj = (memory_size / obj_size);
        m_start = reinterpret_cast<uintptr_t>(memory);
        m_end = m_start + num_obj * obj_size;
        m_peak = m_start;
        // Free the objects
        u8* cur = reinterpret_cast<u8*>(end);

        // Free the objects.
        u8* cur = reinterpret_cast<u8*>(m_end);

        for (size_t i = 0; i < num_obj; i++) {
        for (std::size_t i{}; i < num_obj; i++) {
            cur -= obj_size;
            KSlabHeapImpl::Free(cur);
            impl.Free(cur);
        }
    }

    size_t GetSlabHeapSize() const {
        return (m_end - m_start) / this->GetObjectSize();
    }
private:
    using Impl = impl::KSlabHeapImpl;

    size_t GetObjectSize() const {
        return m_obj_size;
    }

    void* Allocate() {
        void* obj = KSlabHeapImpl::Allocate();

        return obj;
    }

    void Free(void* obj) {
        // Don't allow freeing an object that wasn't allocated from this heap.
        const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj));
        ASSERT(contained);
        KSlabHeapImpl::Free(obj);
    }

    size_t GetObjectIndex(const void* obj) const {
        if constexpr (SupportDynamicExpansion) {
            if (!this->Contains(reinterpret_cast<uintptr_t>(obj))) {
                return std::numeric_limits<size_t>::max();
            }
        }

        return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
    }

    size_t GetPeakIndex() const {
        return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak));
    }

    uintptr_t GetSlabHeapAddress() const {
        return m_start;
    }

    size_t GetNumRemaining() const {
        // Only calculate the number of remaining objects under debug configuration.
        return 0;
    }
    Impl impl;
    uintptr_t peak{};
    uintptr_t start{};
    uintptr_t end{};
};
template <typename T>
class KSlabHeap final : public KSlabHeapBase<false> {
private:
    using BaseHeap = KSlabHeapBase<false>;

class KSlabHeap final : public KSlabHeapBase {
public:
    constexpr KSlabHeap() = default;
    enum class AllocationType {
        Host,
        Guest,
    };

    void Initialize(void* memory, size_t memory_size) {
        BaseHeap::Initialize(sizeof(T), memory, memory_size);
    explicit constexpr KSlabHeap(AllocationType allocation_type_ = AllocationType::Host)
        : KSlabHeapBase(), allocation_type{allocation_type_} {}

    void Initialize(void* memory, std::size_t memory_size) {
        if (allocation_type == AllocationType::Guest) {
            InitializeImpl(sizeof(T), memory, memory_size);
        }
    }

    T* Allocate() {
        T* obj = static_cast<T*>(BaseHeap::Allocate());
        switch (allocation_type) {
        case AllocationType::Host:
            // Fallback for cases where we do not yet support allocating guest memory from the slab
            // heap, such as for kernel memory regions.
            return new T;

        if (obj != nullptr) [[likely]] {
            std::construct_at(obj);
        case AllocationType::Guest:
            T* obj = static_cast<T*>(AllocateImpl());
            if (obj != nullptr) {
                new (obj) T();
            }
            return obj;
        }
        return obj;

        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
        return nullptr;
    }

    T* Allocate(KernelCore& kernel) {
        T* obj = static_cast<T*>(BaseHeap::Allocate());
    T* AllocateWithKernel(KernelCore& kernel) {
        switch (allocation_type) {
        case AllocationType::Host:
            // Fallback for cases where we do not yet support allocating guest memory from the slab
            // heap, such as for kernel memory regions.
            return new T(kernel);

        if (obj != nullptr) [[likely]] {
            std::construct_at(obj, kernel);
        case AllocationType::Guest:
            T* obj = static_cast<T*>(AllocateImpl());
            if (obj != nullptr) {
                new (obj) T(kernel);
            }
            return obj;
        }
        return obj;

        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
        return nullptr;
    }

    void Free(T* obj) {
        BaseHeap::Free(obj);
        switch (allocation_type) {
        case AllocationType::Host:
            // Fallback for cases where we do not yet support allocating guest memory from the slab
            // heap, such as for kernel memory regions.
            delete obj;
            return;

        case AllocationType::Guest:
            FreeImpl(obj);
            return;
        }

        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
    }

    size_t GetObjectIndex(const T* obj) const {
        return BaseHeap::GetObjectIndex(obj);
    constexpr std::size_t GetObjectIndex(const T* obj) const {
        return GetObjectIndexImpl(obj);
    }

private:
    const AllocationType allocation_type;
};

} // namespace Kernel
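
The restored KSlabHeap distinguishes two allocation strategies: Guest heaps carve fixed-size objects out of a pre-mapped block of emulated memory, while Host heaps fall back to plain new/delete. A hedged usage sketch (Page, the backing buffer, and its size are stand-ins):

// Illustrative only.
KSlabHeap<Page> heap{KSlabHeap<Page>::AllocationType::Guest};
heap.Initialize(backing_memory, backing_memory_size); // splits the block into Page slots

Page* page = heap.Allocate(); // pops a slot from the lock-free free list
heap.Free(page);              // pushes the slot back for reuse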
@@ -210,7 +210,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
    if (owner != nullptr) {
        // Setup the TLS, if needed.
        if (type == ThreadType::User) {
            R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address)));
            tls_address = owner->CreateTLSRegion();
        }

        parent = owner;
@@ -305,7 +305,7 @@ void KThread::Finalize() {

    // If the thread has a local region, delete it.
    if (tls_address != 0) {
        ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess());
        parent->FreeTLSRegion(tls_address);
    }

    // Release any waiters.
@@ -326,9 +326,6 @@ void KThread::Finalize() {
        }
    }

    // Release host emulation members.
    host_context.reset();

    // Perform inherited finalization.
    KSynchronizationObject::Finalize();
}

@@ -656,7 +656,7 @@ private:
    static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));

    struct ConditionVariableComparator {
        struct RedBlackKeyType {
        struct LightCompareType {
            u64 cv_key{};
            s32 priority{};

@@ -672,8 +672,8 @@ private:
        template <typename T>
        requires(
            std::same_as<T, KThread> ||
            std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
                                                                           const KThread& rhs) {
            std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
                                                                            const KThread& rhs) {
            const u64 l_key = lhs.GetConditionVariableKey();
            const u64 r_key = rhs.GetConditionVariableKey();
@@ -1,65 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/scope_exit.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/kernel.h"

namespace Kernel {

ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
    // Set that this process owns us.
    m_owner = process;
    m_kernel = &kernel;

    // Allocate a new page.
    KPageBuffer* page_buf = KPageBuffer::Allocate(kernel);
    R_UNLESS(page_buf != nullptr, ResultOutOfMemory);
    auto page_buf_guard = SCOPE_GUARD({ KPageBuffer::Free(kernel, page_buf); });

    // Map the address in.
    const auto phys_addr = kernel.System().DeviceMemory().GetPhysicalAddr(page_buf);
    R_TRY(m_owner->PageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, phys_addr,
                                        KMemoryState::ThreadLocal,
                                        KMemoryPermission::UserReadWrite));

    // We succeeded.
    page_buf_guard.Cancel();

    return ResultSuccess;
}

ResultCode KThreadLocalPage::Finalize() {
    // Get the physical address of the page.
    const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
    ASSERT(phys_addr);

    // Unmap the page.
    R_TRY(m_owner->PageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal));

    // Free the page.
    KPageBuffer::Free(*m_kernel, KPageBuffer::FromPhysicalAddress(m_kernel->System(), phys_addr));

    return ResultSuccess;
}

VAddr KThreadLocalPage::Reserve() {
    for (size_t i = 0; i < m_is_region_free.size(); i++) {
        if (m_is_region_free[i]) {
            m_is_region_free[i] = false;
            return this->GetRegionAddress(i);
        }
    }

    return 0;
}

void KThreadLocalPage::Release(VAddr addr) {
    m_is_region_free[this->GetRegionIndex(addr)] = true;
}

} // namespace Kernel
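
KThreadLocalPage::Initialize in the removed file pairs allocation with a cancellable scope guard: the fresh page is freed automatically on any early error return, and the guard is cancelled once every fallible step succeeds. The idea in plain C++ (a minimal sketch; yuzu's actual SCOPE_GUARD macro lives in common/scope_exit.h):

#include <utility>

template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F func) : func_{std::move(func)} {}
    ~ScopeGuard() {
        if (active_) {
            func_(); // runs on early return or exception
        }
    }
    void Cancel() {
        active_ = false; // success path: skip the cleanup
    }

private:
    F func_;
    bool active_ = true;
};

// Usage mirroring the diff: free the page unless initialization succeeds.
// auto guard = ScopeGuard{[&] { FreePage(page); }};
// ...fallible mapping steps...
// guard.Cancel();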
@@ -1,112 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <algorithm>
#include <array>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"

namespace Kernel {

class KernelCore;
class KProcess;

class KThreadLocalPage final : public Common::IntrusiveRedBlackTreeBaseNode<KThreadLocalPage>,
                               public KSlabAllocated<KThreadLocalPage> {
public:
    static constexpr size_t RegionsPerPage = PageSize / Svc::ThreadLocalRegionSize;
    static_assert(RegionsPerPage > 0);

public:
    constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) {
        m_is_region_free.fill(true);
    }

    constexpr VAddr GetAddress() const {
        return m_virt_addr;
    }

    ResultCode Initialize(KernelCore& kernel, KProcess* process);
    ResultCode Finalize();

    VAddr Reserve();
    void Release(VAddr addr);

    bool IsAllUsed() const {
        return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(),
                                   [](bool is_free) { return !is_free; });
    }

    bool IsAllFree() const {
        return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(),
                                   [](bool is_free) { return is_free; });
    }

    bool IsAnyUsed() const {
        return !this->IsAllFree();
    }

    bool IsAnyFree() const {
        return !this->IsAllUsed();
    }

public:
    using RedBlackKeyType = VAddr;

    static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) {
        return v;
    }
    static constexpr RedBlackKeyType GetRedBlackKey(const KThreadLocalPage& v) {
        return v.GetAddress();
    }

    template <typename T>
    requires(std::same_as<T, KThreadLocalPage> ||
             std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
                                                                            const KThreadLocalPage&
                                                                                rhs) {
        const VAddr lval = GetRedBlackKey(lhs);
        const VAddr rval = GetRedBlackKey(rhs);

        if (lval < rval) {
            return -1;
        } else if (lval == rval) {
            return 0;
        } else {
            return 1;
        }
    }

private:
    constexpr VAddr GetRegionAddress(size_t i) const {
        return this->GetAddress() + i * Svc::ThreadLocalRegionSize;
    }

    constexpr bool Contains(VAddr addr) const {
        return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
    }

    constexpr size_t GetRegionIndex(VAddr addr) const {
        ASSERT(Common::IsAligned(addr, Svc::ThreadLocalRegionSize));
        ASSERT(this->Contains(addr));
        return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize;
    }

private:
    VAddr m_virt_addr{};
    KProcess* m_owner{};
    KernelCore* m_kernel{};
    std::array<bool, RegionsPerPage> m_is_region_free{};
};

} // namespace Kernel
@@ -52,7 +52,7 @@ namespace Kernel {

struct KernelCore::Impl {
    explicit Impl(Core::System& system_, KernelCore& kernel_)
        : time_manager{system_},
        : time_manager{system_}, object_list_container{kernel_},
          service_threads_manager{1, "yuzu:ServiceThreadsManager"}, system{system_} {}

    void SetMulticore(bool is_multi) {
@@ -60,7 +60,6 @@ struct KernelCore::Impl {
    }

    void Initialize(KernelCore& kernel) {
        global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(kernel);
        global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
        global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
        global_handle_table->Initialize(KHandleTable::MaxTableSize);
@@ -77,7 +76,7 @@ struct KernelCore::Impl {
        // Initialize kernel memory and resources.
        InitializeSystemResourceLimit(kernel, system.CoreTiming());
        InitializeMemoryLayout();
        Init::InitializeKPageBufferSlabHeap(system);
        InitializePageSlab();
        InitializeSchedulers();
        InitializeSuspendThreads();
        InitializePreemption(kernel);
@@ -108,6 +107,19 @@ struct KernelCore::Impl {
        for (auto* server_port : server_ports_) {
            server_port->Close();
        }
        // Close all open server sessions.
        std::unordered_set<KServerSession*> server_sessions_;
        {
            std::lock_guard lk(server_sessions_lock);
            server_sessions_ = server_sessions;
            server_sessions.clear();
        }
        for (auto* server_session : server_sessions_) {
            server_session->Close();
        }

        // Ensure that the object list container is finalized and properly shutdown.
        object_list_container.Finalize();

        // Ensures all service threads gracefully shutdown.
        ClearServiceThreads();
@@ -182,15 +194,11 @@ struct KernelCore::Impl {
        {
            std::lock_guard lk(registered_objects_lock);
            if (registered_objects.size()) {
                LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!",
                          registered_objects.size());
                LOG_WARNING(Kernel, "{} kernel objects were dangling on shutdown!",
                            registered_objects.size());
                registered_objects.clear();
            }
        }

        // Ensure that the object list container is finalized and properly shutdown.
        global_object_list_container->Finalize();
        global_object_list_container.reset();
    }

    void InitializePhysicalCores() {
@@ -283,16 +291,15 @@ struct KernelCore::Impl {

    // Gets the dummy KThread for the caller, allocating a new one if this is the first time
    KThread* GetHostDummyThread() {
        auto initialize = [this](KThread* thread) {
        auto make_thread = [this]() {
            KThread* thread = KThread::Create(system.Kernel());
            ASSERT(KThread::InitializeDummyThread(thread).IsSuccess());
            thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
            return thread;
        };

        thread_local auto raw_thread = KThread(system.Kernel());
        thread_local auto thread = initialize(&raw_thread);

        return thread;
        thread_local KThread* saved_thread = make_thread();
        return saved_thread;
    }
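
Both versions of GetHostDummyThread rely on the same language guarantee: a thread_local variable with a dynamic initializer is constructed once per thread, on that thread's first call. A standalone sketch of the pattern (names are illustrative; ownership and cleanup are elided, as in the diff):

#include <string>

struct Worker {
    std::string name;
};

Worker* GetPerThreadWorker() {
    auto make_worker = [] {
        return new Worker{"worker"}; // runs once per calling thread
    };

    thread_local Worker* saved_worker = make_worker();
    return saved_worker; // later calls on this thread reuse the cached pointer
}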
    /// Registers a CPU core thread by allocating a host thread ID for it
@@ -653,6 +660,22 @@ struct KernelCore::Impl {
                                                     time_phys_addr, time_size, "Time:SharedMemory");
    }

    void InitializePageSlab() {
        // Allocate slab heaps
        user_slab_heap_pages =
            std::make_unique<KSlabHeap<Page>>(KSlabHeap<Page>::AllocationType::Guest);

        // TODO(ameerj): This should be derived, not hardcoded within the kernel
        constexpr u64 user_slab_heap_size{0x3de000};
        // Reserve slab heaps
        ASSERT(
            system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
        // Initialize slab heap
        user_slab_heap_pages->Initialize(
            system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
            user_slab_heap_size);
    }

    KClientPort* CreateNamedServicePort(std::string name) {
        auto search = service_interface_factory.find(name);
        if (search == service_interface_factory.end()) {
@@ -690,6 +713,7 @@ struct KernelCore::Impl {
    }

    std::mutex server_ports_lock;
    std::mutex server_sessions_lock;
    std::mutex registered_objects_lock;
    std::mutex registered_in_use_objects_lock;

@@ -713,13 +737,14 @@ struct KernelCore::Impl {
    // stores all the objects in place.
    std::unique_ptr<KHandleTable> global_handle_table;

    std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container;
    KAutoObjectWithListContainer object_list_container;

    /// Map of named ports managed by the kernel, which can be retrieved using
    /// the ConnectToPort SVC.
    std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
    NamedPortTable named_ports;
    std::unordered_set<KServerPort*> server_ports;
    std::unordered_set<KServerSession*> server_sessions;
    std::unordered_set<KAutoObject*> registered_objects;
    std::unordered_set<KAutoObject*> registered_in_use_objects;

@@ -731,6 +756,7 @@ struct KernelCore::Impl {

    // Kernel memory management
    std::unique_ptr<KMemoryManager> memory_manager;
    std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;

    // Shared memory for services
    Kernel::KSharedMemory* hid_shared_mem{};
@@ -889,11 +915,11 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
}

KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
    return *impl->global_object_list_container;
    return impl->object_list_container;
}

const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
    return *impl->global_object_list_container;
    return impl->object_list_container;
}

void KernelCore::InvalidateAllInstructionCaches() {
@@ -923,6 +949,16 @@ KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
    return impl->CreateNamedServicePort(std::move(name));
}

void KernelCore::RegisterServerSession(KServerSession* server_session) {
    std::lock_guard lk(impl->server_sessions_lock);
    impl->server_sessions.insert(server_session);
}

void KernelCore::UnregisterServerSession(KServerSession* server_session) {
    std::lock_guard lk(impl->server_sessions_lock);
    impl->server_sessions.erase(server_session);
}

void KernelCore::RegisterKernelObject(KAutoObject* object) {
    std::lock_guard lk(impl->registered_objects_lock);
    impl->registered_objects.insert(object);
@@ -995,6 +1031,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
    return *impl->memory_manager;
}

KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() {
    return *impl->user_slab_heap_pages;
}

const KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() const {
    return *impl->user_slab_heap_pages;
}

Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
    return *impl->hid_shared_mem;
}
@@ -43,7 +43,6 @@ class KHandleTable;
class KLinkedListNode;
class KMemoryLayout;
class KMemoryManager;
class KPageBuffer;
class KPort;
class KProcess;
class KResourceLimit;
@@ -53,7 +52,6 @@ class KSession;
class KSharedMemory;
class KSharedMemoryInfo;
class KThread;
class KThreadLocalPage;
class KTransferMemory;
class KWorkerTaskManager;
class KWritableEvent;
@@ -196,6 +194,14 @@ public:
    /// Opens a port to a service previously registered with RegisterNamedService.
    KClientPort* CreateNamedServicePort(std::string name);

    /// Registers a server session with the global emulation state, to be freed on shutdown. This is
    /// necessary because we do not emulate processes for HLE sessions.
    void RegisterServerSession(KServerSession* server_session);

    /// Unregisters a server session previously registered with RegisterServerSession when it was
    /// destroyed during the current emulation session.
    void UnregisterServerSession(KServerSession* server_session);

    /// Registers all kernel objects with the global emulation state, this is purely for tracking
    /// leaks after emulation has been shut down.
    void RegisterKernelObject(KAutoObject* object);
@@ -233,6 +239,12 @@ public:
    /// Gets the virtual memory manager for the kernel.
    const KMemoryManager& MemoryManager() const;

    /// Gets the slab heap allocated for user space pages.
    KSlabHeap<Page>& GetUserSlabHeapPages();

    /// Gets the slab heap allocated for user space pages.
    const KSlabHeap<Page>& GetUserSlabHeapPages() const;

    /// Gets the shared memory object for HID services.
    Kernel::KSharedMemory& GetHidSharedMem();

@@ -324,10 +336,6 @@ public:
            return slab_heap_container->writeable_event;
        } else if constexpr (std::is_same_v<T, KCodeMemory>) {
            return slab_heap_container->code_memory;
        } else if constexpr (std::is_same_v<T, KPageBuffer>) {
            return slab_heap_container->page_buffer;
        } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
            return slab_heap_container->thread_local_page;
        }
    }

@@ -389,8 +397,6 @@ private:
    KSlabHeap<KTransferMemory> transfer_memory;
    KSlabHeap<KWritableEvent> writeable_event;
    KSlabHeap<KCodeMemory> code_memory;
    KSlabHeap<KPageBuffer> page_buffer;
    KSlabHeap<KThreadLocalPage> thread_local_page;
};

std::unique_ptr<SlabHeapContainer> slab_heap_container;

@@ -49,9 +49,12 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
            return;
        }

        // Allocate a dummy guest thread for this host thread.
        kernel.RegisterHostThread();

        // Ensure the dummy thread allocated for this host thread is closed on exit.
        auto* dummy_thread = kernel.GetCurrentEmuThread();
        SCOPE_EXIT({ dummy_thread->Close(); });

        while (true) {
            std::function<void()> task;

@@ -59,7 +59,7 @@ class KAutoObjectWithSlabHeapAndContainer : public Base {

private:
    static Derived* Allocate(KernelCore& kernel) {
        return kernel.SlabHeap<Derived>().Allocate(kernel);
        return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel);
    }

    static void Free(KernelCore& kernel, Derived* obj) {

@@ -96,6 +96,4 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
constexpr inline s32 LowestThreadPriority = 63;
constexpr inline s32 HighestThreadPriority = 0;

constexpr inline size_t ThreadLocalRegionSize = 0x200;

} // namespace Kernel::Svc
@@ -980,7 +980,7 @@ private:
        LOG_DEBUG(Service_AM, "called");

        IPC::RequestParser rp{ctx};
        applet->GetBroker().PushNormalDataFromGame(rp.PopIpcInterface<IStorage>().lock());
        applet->GetBroker().PushNormalDataFromGame(rp.PopIpcInterface<IStorage>());

        IPC::ResponseBuilder rb{ctx, 2};
        rb.Push(ResultSuccess);
@@ -1007,7 +1007,7 @@ private:
        LOG_DEBUG(Service_AM, "called");

        IPC::RequestParser rp{ctx};
        applet->GetBroker().PushInteractiveDataFromGame(rp.PopIpcInterface<IStorage>().lock());
        applet->GetBroker().PushInteractiveDataFromGame(rp.PopIpcInterface<IStorage>());

        ASSERT(applet->IsInitialized());
        applet->ExecuteInteractive();
@@ -1,101 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/frontend/applets/mii.h"
#include "core/hle/service/am/am.h"
#include "core/hle/service/am/applets/applet_mii.h"
#include "core/reporter.h"

namespace Service::AM::Applets {

Mii::Mii(Core::System& system_, LibraryAppletMode applet_mode_,
         const Core::Frontend::MiiApplet& frontend_)
    : Applet{system_, applet_mode_}, frontend{frontend_}, system{system_} {}

Mii::~Mii() = default;

void Mii::Initialize() {
    is_complete = false;

    const auto storage = broker.PopNormalDataToApplet();
    ASSERT(storage != nullptr);

    const auto data = storage->GetData();
    ASSERT(data.size() == sizeof(MiiAppletInput));

    std::memcpy(&input_data, data.data(), sizeof(MiiAppletInput));
}

bool Mii::TransactionComplete() const {
    return is_complete;
}

ResultCode Mii::GetStatus() const {
    return ResultSuccess;
}

void Mii::ExecuteInteractive() {
    UNREACHABLE_MSG("Unexpected interactive applet data!");
}

void Mii::Execute() {
    if (is_complete) {
        return;
    }

    const auto callback = [this](const Core::Frontend::MiiParameters& parameters) {
        DisplayCompleted(parameters);
    };

    switch (input_data.applet_mode) {
    case MiiAppletMode::ShowMiiEdit: {
        Service::Mii::MiiManager manager;
        Core::Frontend::MiiParameters params{
            .is_editable = false,
            .mii_data = input_data.mii_char_info.mii_data,
        };
        frontend.ShowMii(params, callback);
        break;
    }
    case MiiAppletMode::EditMii: {
        Service::Mii::MiiManager manager;
        Core::Frontend::MiiParameters params{
            .is_editable = true,
            .mii_data = input_data.mii_char_info.mii_data,
        };
        frontend.ShowMii(params, callback);
        break;
    }
    case MiiAppletMode::CreateMii: {
        Service::Mii::MiiManager manager;
        Core::Frontend::MiiParameters params{
            .is_editable = true,
            .mii_data = manager.BuildDefault(0),
        };
        frontend.ShowMii(params, callback);
        break;
    }
    default:
        UNIMPLEMENTED_MSG("Unimplemented LibAppletMiiEdit mode={:02X}!", input_data.applet_mode);
    }
}

void Mii::DisplayCompleted(const Core::Frontend::MiiParameters& parameters) {
    is_complete = true;

    std::vector<u8> reply(sizeof(AppletOutputForCharInfoEditing));
    output_data = {
        .result = ResultSuccess,
        .mii_data = parameters.mii_data,
    };

    std::memcpy(reply.data(), &output_data, sizeof(AppletOutputForCharInfoEditing));
    broker.PushNormalDataFromApplet(std::make_shared<IStorage>(system, std::move(reply)));
    broker.SignalStateChanged();
}

} // namespace Service::AM::Applets
@@ -1,90 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>

#include "core/hle/result.h"
#include "core/hle/service/am/applets/applets.h"
#include "core/hle/service/mii/mii_manager.h"

namespace Core {
class System;
}

namespace Service::AM::Applets {

// This is nn::mii::AppletMode
enum class MiiAppletMode : u32 {
    ShowMiiEdit = 0,
    AppendMii = 1,
    AppendMiiImage = 2,
    UpdateMiiImage = 3,
    CreateMii = 4,
    EditMii = 5,
};

struct MiiCharInfo {
    Service::Mii::MiiInfo mii_data{};
    INSERT_PADDING_BYTES(0x28);
};
static_assert(sizeof(MiiCharInfo) == 0x80, "MiiCharInfo has incorrect size.");

// This is nn::mii::AppletInput
struct MiiAppletInput {
    s32 version{};
    MiiAppletMode applet_mode{};
    u32 special_mii_key_code{};
    union {
        std::array<Common::UUID, 8> valid_uuid;
        MiiCharInfo mii_char_info;
    };
    Common::UUID used_uuid;
    INSERT_PADDING_BYTES(0x64);
};
static_assert(sizeof(MiiAppletInput) == 0x100, "MiiAppletInput has incorrect size.");

// This is nn::mii::AppletOutput
struct MiiAppletOutput {
    ResultCode result{ResultSuccess};
    s32 index{};
    INSERT_PADDING_BYTES(0x18);
};
static_assert(sizeof(MiiAppletOutput) == 0x20, "MiiAppletOutput has incorrect size.");

// This is nn::mii::AppletOutputForCharInfoEditing
struct AppletOutputForCharInfoEditing {
    ResultCode result{ResultSuccess};
    Service::Mii::MiiInfo mii_data{};
    INSERT_PADDING_BYTES(0x24);
};
static_assert(sizeof(AppletOutputForCharInfoEditing) == 0x80,
              "AppletOutputForCharInfoEditing has incorrect size.");

class Mii final : public Applet {
public:
    explicit Mii(Core::System& system_, LibraryAppletMode applet_mode_,
                 const Core::Frontend::MiiApplet& frontend_);
    ~Mii() override;

    void Initialize() override;

    bool TransactionComplete() const override;
    ResultCode GetStatus() const override;
    void ExecuteInteractive() override;
    void Execute() override;

    void DisplayCompleted(const Core::Frontend::MiiParameters& parameters);

private:
    const Core::Frontend::MiiApplet& frontend;
    MiiAppletInput input_data{};
    AppletOutputForCharInfoEditing output_data{};

    bool is_complete = false;
    Core::System& system;
};

} // namespace Service::AM::Applets
@@ -9,7 +9,6 @@
#include "core/frontend/applets/controller.h"
#include "core/frontend/applets/error.h"
#include "core/frontend/applets/general_frontend.h"
#include "core/frontend/applets/mii.h"
#include "core/frontend/applets/profile_select.h"
#include "core/frontend/applets/software_keyboard.h"
#include "core/frontend/applets/web_browser.h"
@@ -20,7 +19,6 @@
#include "core/hle/service/am/applets/applet_controller.h"
#include "core/hle/service/am/applets/applet_error.h"
#include "core/hle/service/am/applets/applet_general_backend.h"
#include "core/hle/service/am/applets/applet_mii.h"
#include "core/hle/service/am/applets/applet_profile_select.h"
#include "core/hle/service/am/applets/applet_software_keyboard.h"
#include "core/hle/service/am/applets/applet_web_browser.h"
@@ -174,11 +172,10 @@ AppletFrontendSet::AppletFrontendSet() = default;

AppletFrontendSet::AppletFrontendSet(ControllerApplet controller_applet, ErrorApplet error_applet,
                                     ParentalControlsApplet parental_controls_applet,
                                     MiiApplet mii_applet, PhotoViewer photo_viewer_,
                                     ProfileSelect profile_select_,
                                     PhotoViewer photo_viewer_, ProfileSelect profile_select_,
                                     SoftwareKeyboard software_keyboard_, WebBrowser web_browser_)
    : controller{std::move(controller_applet)}, error{std::move(error_applet)},
      parental_controls{std::move(parental_controls_applet)}, mii{std::move(mii_applet)},
      parental_controls{std::move(parental_controls_applet)},
      photo_viewer{std::move(photo_viewer_)}, profile_select{std::move(profile_select_)},
      software_keyboard{std::move(software_keyboard_)}, web_browser{std::move(web_browser_)} {}

@@ -209,10 +206,6 @@ void AppletManager::SetAppletFrontendSet(AppletFrontendSet set) {
        frontend.parental_controls = std::move(set.parental_controls);
    }

    if (set.mii != nullptr) {
        frontend.mii = std::move(set.mii);
    }

    if (set.photo_viewer != nullptr) {
        frontend.photo_viewer = std::move(set.photo_viewer);
    }
@@ -250,10 +243,6 @@ void AppletManager::SetDefaultAppletsIfMissing() {
            std::make_unique<Core::Frontend::DefaultParentalControlsApplet>();
    }

    if (frontend.mii == nullptr) {
        frontend.mii = std::make_unique<Core::Frontend::DefaultMiiApplet>();
    }

    if (frontend.photo_viewer == nullptr) {
        frontend.photo_viewer = std::make_unique<Core::Frontend::DefaultPhotoViewerApplet>();
    }
@@ -288,8 +277,6 @@ std::shared_ptr<Applet> AppletManager::GetApplet(AppletId id, LibraryAppletMode
        return std::make_shared<ProfileSelect>(system, mode, *frontend.profile_select);
    case AppletId::SoftwareKeyboard:
        return std::make_shared<SoftwareKeyboard>(system, mode, *frontend.software_keyboard);
    case AppletId::MiiEdit:
        return std::make_shared<Mii>(system, mode, *frontend.mii);
    case AppletId::Web:
    case AppletId::Shop:
    case AppletId::OfflineWeb:

@@ -21,7 +21,6 @@ class ControllerApplet;
class ECommerceApplet;
class ErrorApplet;
class ParentalControlsApplet;
class MiiApplet;
class PhotoViewerApplet;
class ProfileSelectApplet;
class SoftwareKeyboardApplet;
@@ -180,7 +179,6 @@ struct AppletFrontendSet {
    using ControllerApplet = std::unique_ptr<Core::Frontend::ControllerApplet>;
    using ErrorApplet = std::unique_ptr<Core::Frontend::ErrorApplet>;
    using ParentalControlsApplet = std::unique_ptr<Core::Frontend::ParentalControlsApplet>;
    using MiiApplet = std::unique_ptr<Core::Frontend::MiiApplet>;
    using PhotoViewer = std::unique_ptr<Core::Frontend::PhotoViewerApplet>;
    using ProfileSelect = std::unique_ptr<Core::Frontend::ProfileSelectApplet>;
    using SoftwareKeyboard = std::unique_ptr<Core::Frontend::SoftwareKeyboardApplet>;
@@ -188,9 +186,9 @@ struct AppletFrontendSet {

    AppletFrontendSet();
    AppletFrontendSet(ControllerApplet controller_applet, ErrorApplet error_applet,
                      ParentalControlsApplet parental_controls_applet, MiiApplet mii_applet,
                      PhotoViewer photo_viewer_, ProfileSelect profile_select_,
                      SoftwareKeyboard software_keyboard_, WebBrowser web_browser_);
                      ParentalControlsApplet parental_controls_applet, PhotoViewer photo_viewer_,
                      ProfileSelect profile_select_, SoftwareKeyboard software_keyboard_,
                      WebBrowser web_browser_);
    ~AppletFrontendSet();

    AppletFrontendSet(const AppletFrontendSet&) = delete;
@@ -202,7 +200,6 @@ struct AppletFrontendSet {
    ControllerApplet controller;
    ErrorApplet error;
    ParentalControlsApplet parental_controls;
    MiiApplet mii;
    PhotoViewer photo_viewer;
    ProfileSelect profile_select;
    SoftwareKeyboard software_keyboard;
@@ -17,12 +17,21 @@ namespace Service::KernelHelpers {

ServiceContext::ServiceContext(Core::System& system_, std::string name_)
    : kernel(system_.Kernel()) {

    // Create a resource limit for the process.
    const auto physical_memory_size =
        kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::System);
    auto* resource_limit = Kernel::CreateResourceLimitForProcess(system_, physical_memory_size);

    // Create the process.
    process = Kernel::KProcess::Create(kernel);
    ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_),
                                        Kernel::KProcess::ProcessType::KernelInternal,
                                        kernel.GetSystemResourceLimit())
                                        resource_limit)
               .IsSuccess());

    // Close reference to our resource limit, as the process opens one.
    resource_limit->Close();
}

ServiceContext::~ServiceContext() {
@@ -288,7 +288,7 @@ public:
    }

    bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const {
        const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
        constexpr std::size_t padding_size{4 * Kernel::PageSize};
        const auto start_info{page_table.QueryInfo(start - 1)};

        if (start_info.state != Kernel::KMemoryState::Free) {
@@ -308,69 +308,31 @@ public:
        return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
    }

    ResultCode GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) {
        size = Common::AlignUp(size, Kernel::PageSize);
        size += page_table.GetNumGuardPages() * Kernel::PageSize * 4;

        const auto is_region_available = [&](VAddr addr) {
            const auto end_addr = addr + size;
            while (addr < end_addr) {
                if (system.Memory().IsValidVirtualAddress(addr)) {
                    return false;
                }

                if (!page_table.IsInsideAddressSpace(out_addr, size)) {
                    return false;
                }

                if (page_table.IsInsideHeapRegion(out_addr, size)) {
                    return false;
                }

                if (page_table.IsInsideAliasRegion(out_addr, size)) {
                    return false;
                }

                addr += Kernel::PageSize;
            }
            return true;
        };

        bool succeeded = false;
        const auto map_region_end =
            page_table.GetAliasCodeRegionStart() + page_table.GetAliasCodeRegionSize();
        while (current_map_addr < map_region_end) {
            if (is_region_available(current_map_addr)) {
                succeeded = true;
                break;
            }
            current_map_addr += 0x100000;
        }

        if (!succeeded) {
            UNREACHABLE_MSG("Out of address space!");
            return Kernel::ResultOutOfMemory;
        }

        out_addr = current_map_addr;
        current_map_addr += size;

        return ResultSuccess;
    VAddr GetRandomMapRegion(const Kernel::KPageTable& page_table, std::size_t size) const {
        VAddr addr{};
        const std::size_t end_pages{(page_table.GetAliasCodeRegionSize() - size) >>
                                    Kernel::PageBits};
        do {
            addr = page_table.GetAliasCodeRegionStart() +
                   (Kernel::KSystemControl::GenerateRandomRange(0, end_pages) << Kernel::PageBits);
        } while (!page_table.IsInsideAddressSpace(addr, size) ||
                 page_table.IsInsideHeapRegion(addr, size) ||
                 page_table.IsInsideAliasRegion(addr, size));
        return addr;
    }
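
GetRandomMapRegion above picks a page-aligned candidate uniformly from the alias-code region and retries until the candidate clears the address-space, heap, and alias checks. The core address computation in isolation (std::mt19937_64 stands in for the kernel's KSystemControl::GenerateRandomRange):

#include <cstdint>
#include <random>

constexpr std::uint64_t kPageBits = 12; // 4 KiB pages

// Pick a random page-aligned address in [region_start, region_start + region_size - size].
std::uint64_t RandomMapAddress(std::uint64_t region_start, std::uint64_t region_size,
                               std::uint64_t size) {
    static std::mt19937_64 rng{std::random_device{}()};
    const std::uint64_t end_pages = (region_size - size) >> kPageBits;
    std::uniform_int_distribution<std::uint64_t> dist(0, end_pages);
    return region_start + (dist(rng) << kPageBits);
    // A full implementation would loop, rejecting candidates that overlap
    // reserved regions, exactly as the diff's do/while does.
}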
    ResultVal<VAddr> MapProcessCodeMemory(Kernel::KProcess* process, VAddr base_addr, u64 size) {
        auto& page_table{process->PageTable()};
        VAddr addr{};

    ResultVal<VAddr> MapProcessCodeMemory(Kernel::KProcess* process, VAddr baseAddress,
                                          u64 size) const {
        for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
            R_TRY(GetAvailableMapRegion(page_table, size, addr));
            auto& page_table{process->PageTable()};
            const VAddr addr{GetRandomMapRegion(page_table, size)};
            const ResultCode result{page_table.MapCodeMemory(addr, baseAddress, size)};

            const ResultCode result{page_table.MapCodeMemory(addr, base_addr, size)};
            if (result == Kernel::ResultInvalidCurrentMemory) {
                continue;
            }

            R_TRY(result);
            CASCADE_CODE(result);

            if (ValidateRegionForMap(page_table, addr, size)) {
                return addr;
@@ -381,7 +343,7 @@ public:
    }

    ResultVal<VAddr> MapNro(Kernel::KProcess* process, VAddr nro_addr, std::size_t nro_size,
                            VAddr bss_addr, std::size_t bss_size, std::size_t size) {
                            VAddr bss_addr, std::size_t bss_size, std::size_t size) const {
        for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
            auto& page_table{process->PageTable()};
            VAddr addr{};
@@ -635,7 +597,6 @@ public:
        LOG_WARNING(Service_LDR, "(STUBBED) called");

        initialized = true;
        current_map_addr = system.CurrentProcess()->PageTable().GetAliasCodeRegionStart();

        IPC::ResponseBuilder rb{ctx, 2};
        rb.Push(ResultSuccess);
@@ -646,7 +607,6 @@ private:

    std::map<VAddr, NROInfo> nro;
    std::map<VAddr, std::vector<SHA256Hash>> nrr;
    VAddr current_map_addr{};

    bool IsValidNROHash(const SHA256Hash& hash) const {
        return std::any_of(nrr.begin(), nrr.end(), [&hash](const auto& p) {

@@ -81,8 +81,6 @@ ResultVal<Kernel::KPort*> ServiceManager::GetServicePort(const std::string& name
    }

    auto* port = Kernel::KPort::Create(kernel);
    SCOPE_EXIT({ port->Close(); });

    port->Initialize(ServerSessionCountMax, false, name);
    auto handler = it->second;
    port->GetServerPort().SetSessionHandler(std::move(handler));
@@ -22,7 +22,7 @@ constexpr u32 NUM_TEXTURE_AND_IMAGE_SCALING_WORDS =
|
||||
struct RescalingLayout {
|
||||
alignas(16) std::array<u32, NUM_TEXTURE_SCALING_WORDS> rescaling_textures;
|
||||
alignas(16) std::array<u32, NUM_IMAGE_SCALING_WORDS> rescaling_images;
|
||||
u32 down_factor;
|
||||
alignas(16) u32 down_factor;
|
||||
};
|
||||
constexpr u32 RESCALING_LAYOUT_WORDS_OFFSET = offsetof(RescalingLayout, rescaling_textures);
|
||||
constexpr u32 RESCALING_LAYOUT_DOWN_FACTOR_OFFSET = offsetof(RescalingLayout, down_factor);
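The alignas(16) change matters because RescalingLayout is mirrored into shader uniform/push-constant memory, where a scalar following a vec4-aligned array must itself sit on a 16-byte boundary. A hypothetical compile-time guard one could place next to the constants above (offsetof requires <cstddef>):

// Assumed check, not part of the original file: alignas(16) forces the member
// offset to a multiple of 16, matching the std140-style layout the shaders expect.
static_assert(offsetof(RescalingLayout, down_factor) % 16 == 0,
              "down_factor must be 16-byte aligned to match the shader-side layout");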

@@ -2,6 +2,30 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file contains code from Ryujinx
// A copy of the code can be obtained from https://github.com/Ryujinx/Ryujinx
// The sections using code from Ryujinx are marked with a link to the original version

// MIT License
//
// Copyright (c) Ryujinx Team and Contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
// associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
// NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//

#include "common/bit_field.h"
#include "common/common_types.h"
#include "shader_recompiler/frontend/maxwell/translate/impl/common_funcs.h"
@@ -13,535 +37,87 @@ namespace {
// Emulate GPU's LOP3.LUT (three-input logic op with 8-bit truth table)
IR::U32 ApplyLUT(IR::IREmitter& ir, const IR::U32& a, const IR::U32& b, const IR::U32& c,
u64 ttbl) {
switch (ttbl) {
// generated code, do not edit manually
case 0:
return ir.Imm32(0);
case 1:
return ir.BitwiseNot(ir.BitwiseOr(a, ir.BitwiseOr(b, c)));
case 2:
return ir.BitwiseAnd(c, ir.BitwiseNot(ir.BitwiseOr(a, b)));
case 3:
return ir.BitwiseNot(ir.BitwiseOr(a, b));
case 4:
return ir.BitwiseAnd(b, ir.BitwiseNot(ir.BitwiseOr(a, c)));
case 5:
return ir.BitwiseNot(ir.BitwiseOr(a, c));
case 6:
return ir.BitwiseAnd(ir.BitwiseNot(a), ir.BitwiseXor(b, c));
case 7:
return ir.BitwiseNot(ir.BitwiseOr(a, ir.BitwiseAnd(b, c)));
case 8:
return ir.BitwiseAnd(ir.BitwiseAnd(b, c), ir.BitwiseNot(a));
case 9:
return ir.BitwiseNot(ir.BitwiseOr(a, ir.BitwiseXor(b, c)));
case 10:
return ir.BitwiseAnd(c, ir.BitwiseNot(a));
case 11:
return ir.BitwiseAnd(ir.BitwiseNot(a), ir.BitwiseOr(c, ir.BitwiseNot(b)));
case 12:
return ir.BitwiseAnd(b, ir.BitwiseNot(a));
case 13:
return ir.BitwiseAnd(ir.BitwiseNot(a), ir.BitwiseOr(b, ir.BitwiseNot(c)));
case 14:
return ir.BitwiseAnd(ir.BitwiseNot(a), ir.BitwiseOr(b, c));
case 15:
return ir.BitwiseNot(a);
case 16:
return ir.BitwiseAnd(a, ir.BitwiseNot(ir.BitwiseOr(b, c)));
case 17:
return ir.BitwiseNot(ir.BitwiseOr(b, c));
case 18:
return ir.BitwiseAnd(ir.BitwiseNot(b), ir.BitwiseXor(a, c));
case 19:
return ir.BitwiseNot(ir.BitwiseOr(b, ir.BitwiseAnd(a, c)));
case 20:
return ir.BitwiseAnd(ir.BitwiseNot(c), ir.BitwiseXor(a, b));
case 21:
return ir.BitwiseNot(ir.BitwiseOr(c, ir.BitwiseAnd(a, b)));
case 22:
return ir.BitwiseXor(ir.BitwiseOr(a, b), ir.BitwiseOr(c, ir.BitwiseAnd(a, b)));
case 23:
return ir.BitwiseXor(ir.BitwiseAnd(ir.BitwiseXor(a, b), ir.BitwiseXor(a, c)),
ir.BitwiseNot(a));
case 24:
return ir.BitwiseAnd(ir.BitwiseXor(a, b), ir.BitwiseXor(a, c));
case 25:
return ir.BitwiseNot(ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseXor(b, c)));
case 26:
return ir.BitwiseAnd(ir.BitwiseOr(c, ir.BitwiseNot(b)), ir.BitwiseXor(a, c));
case 27:
return ir.BitwiseXor(ir.BitwiseOr(a, ir.BitwiseNot(c)), ir.BitwiseOr(b, c));
case 28:
return ir.BitwiseAnd(ir.BitwiseOr(b, ir.BitwiseNot(c)), ir.BitwiseXor(a, b));
case 29:
return ir.BitwiseXor(ir.BitwiseOr(a, ir.BitwiseNot(b)), ir.BitwiseOr(b, c));
case 30:
return ir.BitwiseXor(a, ir.BitwiseOr(b, c));
case 31:
return ir.BitwiseNot(ir.BitwiseAnd(a, ir.BitwiseOr(b, c)));
case 32:
return ir.BitwiseAnd(ir.BitwiseAnd(a, c), ir.BitwiseNot(b));
case 33:
return ir.BitwiseNot(ir.BitwiseOr(b, ir.BitwiseXor(a, c)));
case 34:
return ir.BitwiseAnd(c, ir.BitwiseNot(b));
case 35:
return ir.BitwiseAnd(ir.BitwiseNot(b), ir.BitwiseOr(c, ir.BitwiseNot(a)));
case 36:
return ir.BitwiseAnd(ir.BitwiseXor(a, b), ir.BitwiseXor(b, c));
case 37:
return ir.BitwiseNot(ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseXor(a, c)));
case 38:
return ir.BitwiseAnd(ir.BitwiseOr(c, ir.BitwiseNot(a)), ir.BitwiseXor(b, c));
case 39:
return ir.BitwiseXor(ir.BitwiseOr(a, c), ir.BitwiseOr(b, ir.BitwiseNot(c)));
case 40:
return ir.BitwiseAnd(c, ir.BitwiseXor(a, b));
case 41:
return ir.BitwiseXor(ir.BitwiseOr(a, b),
ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseNot(c)));
case 42:
return ir.BitwiseAnd(c, ir.BitwiseNot(ir.BitwiseAnd(a, b)));
case 43:
return ir.BitwiseXor(ir.BitwiseOr(a, ir.BitwiseNot(c)),
ir.BitwiseOr(b, ir.BitwiseXor(a, c)));
case 44:
return ir.BitwiseAnd(ir.BitwiseOr(b, c), ir.BitwiseXor(a, b));
case 45:
return ir.BitwiseXor(a, ir.BitwiseOr(b, ir.BitwiseNot(c)));
case 46:
return ir.BitwiseXor(ir.BitwiseAnd(a, b), ir.BitwiseOr(b, c));
case 47:
return ir.BitwiseOr(ir.BitwiseAnd(c, ir.BitwiseNot(b)), ir.BitwiseNot(a));
case 48:
return ir.BitwiseAnd(a, ir.BitwiseNot(b));
case 49:
return ir.BitwiseAnd(ir.BitwiseNot(b), ir.BitwiseOr(a, ir.BitwiseNot(c)));
case 50:
return ir.BitwiseAnd(ir.BitwiseNot(b), ir.BitwiseOr(a, c));
case 51:
return ir.BitwiseNot(b);
case 52:
return ir.BitwiseAnd(ir.BitwiseOr(a, ir.BitwiseNot(c)), ir.BitwiseXor(a, b));
case 53:
return ir.BitwiseXor(ir.BitwiseOr(a, c), ir.BitwiseOr(b, ir.BitwiseNot(a)));
case 54:
return ir.BitwiseXor(b, ir.BitwiseOr(a, c));
case 55:
return ir.BitwiseNot(ir.BitwiseAnd(b, ir.BitwiseOr(a, c)));
case 56:
return ir.BitwiseAnd(ir.BitwiseOr(a, c), ir.BitwiseXor(a, b));
case 57:
return ir.BitwiseXor(b, ir.BitwiseOr(a, ir.BitwiseNot(c)));
case 58:
return ir.BitwiseXor(ir.BitwiseAnd(a, b), ir.BitwiseOr(a, c));
case 59:
return ir.BitwiseOr(ir.BitwiseAnd(c, ir.BitwiseNot(a)), ir.BitwiseNot(b));
case 60:
return ir.BitwiseXor(a, b);
case 61:
return ir.BitwiseOr(ir.BitwiseNot(ir.BitwiseOr(a, c)), ir.BitwiseXor(a, b));
case 62:
return ir.BitwiseOr(ir.BitwiseAnd(c, ir.BitwiseNot(a)), ir.BitwiseXor(a, b));
case 63:
return ir.BitwiseNot(ir.BitwiseAnd(a, b));
case 64:
return ir.BitwiseAnd(ir.BitwiseAnd(a, b), ir.BitwiseNot(c));
case 65:
return ir.BitwiseNot(ir.BitwiseOr(c, ir.BitwiseXor(a, b)));
case 66:
return ir.BitwiseAnd(ir.BitwiseXor(a, c), ir.BitwiseXor(b, c));
case 67:
return ir.BitwiseNot(ir.BitwiseOr(ir.BitwiseAnd(a, c), ir.BitwiseXor(a, b)));
case 68:
return ir.BitwiseAnd(b, ir.BitwiseNot(c));
case 69:
return ir.BitwiseAnd(ir.BitwiseNot(c), ir.BitwiseOr(b, ir.BitwiseNot(a)));
case 70:
return ir.BitwiseAnd(ir.BitwiseOr(b, ir.BitwiseNot(a)), ir.BitwiseXor(b, c));
case 71:
return ir.BitwiseXor(ir.BitwiseOr(a, b), ir.BitwiseOr(c, ir.BitwiseNot(b)));
case 72:
return ir.BitwiseAnd(b, ir.BitwiseXor(a, c));
case 73:
return ir.BitwiseXor(ir.BitwiseOr(a, c),
ir.BitwiseOr(ir.BitwiseAnd(a, c), ir.BitwiseNot(b)));
case 74:
return ir.BitwiseAnd(ir.BitwiseOr(b, c), ir.BitwiseXor(a, c));
case 75:
return ir.BitwiseXor(a, ir.BitwiseOr(c, ir.BitwiseNot(b)));
case 76:
return ir.BitwiseAnd(b, ir.BitwiseNot(ir.BitwiseAnd(a, c)));
case 77:
return ir.BitwiseXor(ir.BitwiseOr(a, ir.BitwiseNot(b)),
ir.BitwiseOr(c, ir.BitwiseXor(a, b)));
case 78:
return ir.BitwiseXor(ir.BitwiseAnd(a, c), ir.BitwiseOr(b, c));
case 79:
return ir.BitwiseOr(ir.BitwiseAnd(b, ir.BitwiseNot(c)), ir.BitwiseNot(a));
case 80:
return ir.BitwiseAnd(a, ir.BitwiseNot(c));
case 81:
return ir.BitwiseAnd(ir.BitwiseNot(c), ir.BitwiseOr(a, ir.BitwiseNot(b)));
case 82:
return ir.BitwiseAnd(ir.BitwiseOr(a, ir.BitwiseNot(b)), ir.BitwiseXor(a, c));
case 83:
return ir.BitwiseXor(ir.BitwiseOr(a, b), ir.BitwiseOr(c, ir.BitwiseNot(a)));
case 84:
return ir.BitwiseAnd(ir.BitwiseNot(c), ir.BitwiseOr(a, b));
case 85:
return ir.BitwiseNot(c);
case 86:
return ir.BitwiseXor(c, ir.BitwiseOr(a, b));
case 87:
return ir.BitwiseNot(ir.BitwiseAnd(c, ir.BitwiseOr(a, b)));
case 88:
return ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseXor(a, c));
case 89:
return ir.BitwiseXor(c, ir.BitwiseOr(a, ir.BitwiseNot(b)));
case 90:
return ir.BitwiseXor(a, c);
case 91:
return ir.BitwiseOr(ir.BitwiseNot(ir.BitwiseOr(a, b)), ir.BitwiseXor(a, c));
case 92:
return ir.BitwiseXor(ir.BitwiseAnd(a, c), ir.BitwiseOr(a, b));
case 93:
return ir.BitwiseOr(ir.BitwiseAnd(b, ir.BitwiseNot(a)), ir.BitwiseNot(c));
case 94:
return ir.BitwiseOr(ir.BitwiseAnd(b, ir.BitwiseNot(a)), ir.BitwiseXor(a, c));
case 95:
return ir.BitwiseNot(ir.BitwiseAnd(a, c));
case 96:
return ir.BitwiseAnd(a, ir.BitwiseXor(b, c));
case 97:
return ir.BitwiseXor(ir.BitwiseOr(b, c),
ir.BitwiseOr(ir.BitwiseAnd(b, c), ir.BitwiseNot(a)));
case 98:
return ir.BitwiseAnd(ir.BitwiseOr(a, c), ir.BitwiseXor(b, c));
case 99:
return ir.BitwiseXor(b, ir.BitwiseOr(c, ir.BitwiseNot(a)));
case 100:
return ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseXor(b, c));
case 101:
return ir.BitwiseXor(c, ir.BitwiseOr(b, ir.BitwiseNot(a)));
case 102:
return ir.BitwiseXor(b, c);
case 103:
return ir.BitwiseOr(ir.BitwiseNot(ir.BitwiseOr(a, b)), ir.BitwiseXor(b, c));
case 104:
return ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseXor(c, ir.BitwiseAnd(a, b)));
case 105:
return ir.BitwiseXor(ir.BitwiseNot(a), ir.BitwiseXor(b, c));
case 106:
return ir.BitwiseXor(c, ir.BitwiseAnd(a, b));
case 107:
return ir.BitwiseXor(ir.BitwiseAnd(c, ir.BitwiseOr(a, b)),
ir.BitwiseXor(a, ir.BitwiseNot(b)));
case 108:
return ir.BitwiseXor(b, ir.BitwiseAnd(a, c));
case 109:
return ir.BitwiseXor(ir.BitwiseAnd(b, ir.BitwiseOr(a, c)),
ir.BitwiseXor(a, ir.BitwiseNot(c)));
case 110:
return ir.BitwiseOr(ir.BitwiseAnd(b, ir.BitwiseNot(a)), ir.BitwiseXor(b, c));
case 111:
return ir.BitwiseOr(ir.BitwiseNot(a), ir.BitwiseXor(b, c));
case 112:
return ir.BitwiseAnd(a, ir.BitwiseNot(ir.BitwiseAnd(b, c)));
case 113:
return ir.BitwiseXor(ir.BitwiseOr(b, ir.BitwiseNot(a)),
ir.BitwiseOr(c, ir.BitwiseXor(a, b)));
case 114:
return ir.BitwiseXor(ir.BitwiseAnd(b, c), ir.BitwiseOr(a, c));
case 115:
return ir.BitwiseOr(ir.BitwiseAnd(a, ir.BitwiseNot(c)), ir.BitwiseNot(b));
case 116:
return ir.BitwiseXor(ir.BitwiseAnd(b, c), ir.BitwiseOr(a, b));
case 117:
return ir.BitwiseOr(ir.BitwiseAnd(a, ir.BitwiseNot(b)), ir.BitwiseNot(c));
case 118:
return ir.BitwiseOr(ir.BitwiseAnd(a, ir.BitwiseNot(b)), ir.BitwiseXor(b, c));
case 119:
return ir.BitwiseNot(ir.BitwiseAnd(b, c));
case 120:
return ir.BitwiseXor(a, ir.BitwiseAnd(b, c));
case 121:
return ir.BitwiseXor(ir.BitwiseAnd(a, ir.BitwiseOr(b, c)),
ir.BitwiseXor(b, ir.BitwiseNot(c)));
case 122:
return ir.BitwiseOr(ir.BitwiseAnd(a, ir.BitwiseNot(b)), ir.BitwiseXor(a, c));
case 123:
return ir.BitwiseOr(ir.BitwiseNot(b), ir.BitwiseXor(a, c));
case 124:
return ir.BitwiseOr(ir.BitwiseAnd(a, ir.BitwiseNot(c)), ir.BitwiseXor(a, b));
case 125:
return ir.BitwiseOr(ir.BitwiseNot(c), ir.BitwiseXor(a, b));
case 126:
return ir.BitwiseOr(ir.BitwiseXor(a, b), ir.BitwiseXor(a, c));
case 127:
return ir.BitwiseNot(ir.BitwiseAnd(a, ir.BitwiseAnd(b, c)));
case 128:
return ir.BitwiseAnd(a, ir.BitwiseAnd(b, c));
case 129:
return ir.BitwiseNot(ir.BitwiseOr(ir.BitwiseXor(a, b), ir.BitwiseXor(a, c)));
case 130:
return ir.BitwiseAnd(c, ir.BitwiseXor(a, ir.BitwiseNot(b)));
case 131:
return ir.BitwiseAnd(ir.BitwiseOr(c, ir.BitwiseNot(a)), ir.BitwiseXor(a, ir.BitwiseNot(b)));
case 132:
return ir.BitwiseAnd(b, ir.BitwiseXor(a, ir.BitwiseNot(c)));
case 133:
return ir.BitwiseAnd(ir.BitwiseOr(b, ir.BitwiseNot(a)), ir.BitwiseXor(a, ir.BitwiseNot(c)));
case 134:
return ir.BitwiseAnd(ir.BitwiseOr(b, c), ir.BitwiseXor(a, ir.BitwiseXor(b, c)));
case 135:
return ir.BitwiseXor(ir.BitwiseAnd(b, c), ir.BitwiseNot(a));
case 136:
return ir.BitwiseAnd(b, c);
case 137:
return ir.BitwiseAnd(ir.BitwiseOr(b, ir.BitwiseNot(a)), ir.BitwiseXor(b, ir.BitwiseNot(c)));
case 138:
return ir.BitwiseAnd(c, ir.BitwiseOr(b, ir.BitwiseNot(a)));
case 139:
return ir.BitwiseOr(ir.BitwiseAnd(b, c), ir.BitwiseNot(ir.BitwiseOr(a, b)));
case 140:
return ir.BitwiseAnd(b, ir.BitwiseOr(c, ir.BitwiseNot(a)));
case 141:
return ir.BitwiseOr(ir.BitwiseAnd(b, c), ir.BitwiseNot(ir.BitwiseOr(a, c)));
case 142:
return ir.BitwiseXor(a, ir.BitwiseOr(ir.BitwiseXor(a, b), ir.BitwiseXor(a, c)));
case 143:
return ir.BitwiseOr(ir.BitwiseAnd(b, c), ir.BitwiseNot(a));
case 144:
return ir.BitwiseAnd(a, ir.BitwiseXor(b, ir.BitwiseNot(c)));
case 145:
return ir.BitwiseAnd(ir.BitwiseOr(a, ir.BitwiseNot(b)), ir.BitwiseXor(b, ir.BitwiseNot(c)));
case 146:
return ir.BitwiseAnd(ir.BitwiseOr(a, c), ir.BitwiseXor(a, ir.BitwiseXor(b, c)));
case 147:
return ir.BitwiseXor(ir.BitwiseAnd(a, c), ir.BitwiseNot(b));
case 148:
return ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseXor(a, ir.BitwiseXor(b, c)));
case 149:
return ir.BitwiseXor(ir.BitwiseAnd(a, b), ir.BitwiseNot(c));
case 150:
return ir.BitwiseXor(a, ir.BitwiseXor(b, c));
case 151:
return ir.BitwiseOr(ir.BitwiseNot(ir.BitwiseOr(a, b)),
ir.BitwiseXor(a, ir.BitwiseXor(b, c)));
case 152:
return ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseXor(b, ir.BitwiseNot(c)));
case 153:
return ir.BitwiseXor(b, ir.BitwiseNot(c));
case 154:
return ir.BitwiseXor(c, ir.BitwiseAnd(a, ir.BitwiseNot(b)));
case 155:
return ir.BitwiseNot(ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseXor(b, c)));
case 156:
return ir.BitwiseXor(b, ir.BitwiseAnd(a, ir.BitwiseNot(c)));
case 157:
return ir.BitwiseNot(ir.BitwiseAnd(ir.BitwiseOr(a, c), ir.BitwiseXor(b, c)));
case 158:
return ir.BitwiseOr(ir.BitwiseAnd(b, c), ir.BitwiseXor(a, ir.BitwiseOr(b, c)));
case 159:
return ir.BitwiseNot(ir.BitwiseAnd(a, ir.BitwiseXor(b, c)));
case 160:
return ir.BitwiseAnd(a, c);
case 161:
return ir.BitwiseAnd(ir.BitwiseOr(a, ir.BitwiseNot(b)), ir.BitwiseXor(a, ir.BitwiseNot(c)));
case 162:
return ir.BitwiseAnd(c, ir.BitwiseOr(a, ir.BitwiseNot(b)));
case 163:
return ir.BitwiseOr(ir.BitwiseAnd(a, c), ir.BitwiseNot(ir.BitwiseOr(a, b)));
case 164:
return ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseXor(a, ir.BitwiseNot(c)));
case 165:
return ir.BitwiseXor(a, ir.BitwiseNot(c));
case 166:
return ir.BitwiseXor(c, ir.BitwiseAnd(b, ir.BitwiseNot(a)));
case 167:
return ir.BitwiseNot(ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseXor(a, c)));
case 168:
return ir.BitwiseAnd(c, ir.BitwiseOr(a, b));
case 169:
return ir.BitwiseXor(ir.BitwiseNot(c), ir.BitwiseOr(a, b));
case 170:
return c;
case 171:
return ir.BitwiseOr(c, ir.BitwiseNot(ir.BitwiseOr(a, b)));
case 172:
return ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseOr(c, ir.BitwiseNot(a)));
case 173:
return ir.BitwiseOr(ir.BitwiseAnd(b, c), ir.BitwiseXor(a, ir.BitwiseNot(c)));
case 174:
return ir.BitwiseOr(c, ir.BitwiseAnd(b, ir.BitwiseNot(a)));
case 175:
return ir.BitwiseOr(c, ir.BitwiseNot(a));
case 176:
return ir.BitwiseAnd(a, ir.BitwiseOr(c, ir.BitwiseNot(b)));
case 177:
return ir.BitwiseOr(ir.BitwiseAnd(a, c), ir.BitwiseNot(ir.BitwiseOr(b, c)));
case 178:
return ir.BitwiseXor(b, ir.BitwiseOr(ir.BitwiseXor(a, b), ir.BitwiseXor(a, c)));
case 179:
return ir.BitwiseOr(ir.BitwiseAnd(a, c), ir.BitwiseNot(b));
case 180:
return ir.BitwiseXor(a, ir.BitwiseAnd(b, ir.BitwiseNot(c)));
case 181:
return ir.BitwiseNot(ir.BitwiseAnd(ir.BitwiseOr(b, c), ir.BitwiseXor(a, c)));
case 182:
return ir.BitwiseOr(ir.BitwiseAnd(a, c), ir.BitwiseXor(b, ir.BitwiseOr(a, c)));
case 183:
return ir.BitwiseNot(ir.BitwiseAnd(b, ir.BitwiseXor(a, c)));
case 184:
return ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseOr(c, ir.BitwiseNot(b)));
case 185:
return ir.BitwiseOr(ir.BitwiseAnd(a, c), ir.BitwiseXor(b, ir.BitwiseNot(c)));
case 186:
return ir.BitwiseOr(c, ir.BitwiseAnd(a, ir.BitwiseNot(b)));
case 187:
return ir.BitwiseOr(c, ir.BitwiseNot(b));
case 188:
return ir.BitwiseOr(ir.BitwiseAnd(a, c), ir.BitwiseXor(a, b));
case 189:
return ir.BitwiseOr(ir.BitwiseXor(a, b), ir.BitwiseXor(a, ir.BitwiseNot(c)));
case 190:
return ir.BitwiseOr(c, ir.BitwiseXor(a, b));
case 191:
return ir.BitwiseOr(c, ir.BitwiseNot(ir.BitwiseAnd(a, b)));
case 192:
return ir.BitwiseAnd(a, b);
case 193:
return ir.BitwiseAnd(ir.BitwiseOr(a, ir.BitwiseNot(c)), ir.BitwiseXor(a, ir.BitwiseNot(b)));
case 194:
return ir.BitwiseAnd(ir.BitwiseOr(a, c), ir.BitwiseXor(a, ir.BitwiseNot(b)));
case 195:
return ir.BitwiseXor(a, ir.BitwiseNot(b));
case 196:
return ir.BitwiseAnd(b, ir.BitwiseOr(a, ir.BitwiseNot(c)));
case 197:
return ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseNot(ir.BitwiseOr(a, c)));
case 198:
return ir.BitwiseXor(b, ir.BitwiseAnd(c, ir.BitwiseNot(a)));
case 199:
return ir.BitwiseNot(ir.BitwiseAnd(ir.BitwiseOr(a, c), ir.BitwiseXor(a, b)));
case 200:
return ir.BitwiseAnd(b, ir.BitwiseOr(a, c));
case 201:
return ir.BitwiseXor(ir.BitwiseNot(b), ir.BitwiseOr(a, c));
case 202:
return ir.BitwiseAnd(ir.BitwiseOr(a, c), ir.BitwiseOr(b, ir.BitwiseNot(a)));
case 203:
return ir.BitwiseOr(ir.BitwiseAnd(b, c), ir.BitwiseXor(a, ir.BitwiseNot(b)));
case 204:
return b;
case 205:
return ir.BitwiseOr(b, ir.BitwiseNot(ir.BitwiseOr(a, c)));
case 206:
return ir.BitwiseOr(b, ir.BitwiseAnd(c, ir.BitwiseNot(a)));
case 207:
return ir.BitwiseOr(b, ir.BitwiseNot(a));
case 208:
return ir.BitwiseAnd(a, ir.BitwiseOr(b, ir.BitwiseNot(c)));
case 209:
return ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseNot(ir.BitwiseOr(b, c)));
case 210:
return ir.BitwiseXor(a, ir.BitwiseAnd(c, ir.BitwiseNot(b)));
case 211:
return ir.BitwiseNot(ir.BitwiseAnd(ir.BitwiseOr(b, c), ir.BitwiseXor(a, b)));
case 212:
return ir.BitwiseXor(c, ir.BitwiseOr(ir.BitwiseXor(a, b), ir.BitwiseXor(a, c)));
case 213:
return ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseNot(c));
case 214:
return ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseXor(c, ir.BitwiseOr(a, b)));
case 215:
return ir.BitwiseNot(ir.BitwiseAnd(c, ir.BitwiseXor(a, b)));
case 216:
return ir.BitwiseAnd(ir.BitwiseOr(a, c), ir.BitwiseOr(b, ir.BitwiseNot(c)));
case 217:
return ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseXor(b, ir.BitwiseNot(c)));
case 218:
return ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseXor(a, c));
case 219:
return ir.BitwiseOr(ir.BitwiseXor(a, c), ir.BitwiseXor(a, ir.BitwiseNot(b)));
case 220:
return ir.BitwiseOr(b, ir.BitwiseAnd(a, ir.BitwiseNot(c)));
case 221:
return ir.BitwiseOr(b, ir.BitwiseNot(c));
case 222:
return ir.BitwiseOr(b, ir.BitwiseXor(a, c));
case 223:
return ir.BitwiseOr(b, ir.BitwiseNot(ir.BitwiseAnd(a, c)));
case 224:
return ir.BitwiseAnd(a, ir.BitwiseOr(b, c));
case 225:
return ir.BitwiseXor(ir.BitwiseNot(a), ir.BitwiseOr(b, c));
case 226:
return ir.BitwiseAnd(ir.BitwiseOr(a, ir.BitwiseNot(b)), ir.BitwiseOr(b, c));
case 227:
return ir.BitwiseOr(ir.BitwiseAnd(a, c), ir.BitwiseXor(a, ir.BitwiseNot(b)));
case 228:
return ir.BitwiseAnd(ir.BitwiseOr(a, ir.BitwiseNot(c)), ir.BitwiseOr(b, c));
case 229:
return ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseXor(a, ir.BitwiseNot(c)));
case 230:
return ir.BitwiseOr(ir.BitwiseAnd(a, b), ir.BitwiseXor(b, c));
case 231:
return ir.BitwiseOr(ir.BitwiseXor(a, ir.BitwiseNot(b)), ir.BitwiseXor(b, c));
case 232:
return ir.BitwiseAnd(ir.BitwiseOr(a, b), ir.BitwiseOr(c, ir.BitwiseAnd(a, b)));
case 233:
return ir.BitwiseOr(ir.BitwiseAnd(a, b),
ir.BitwiseXor(ir.BitwiseNot(c), ir.BitwiseOr(a, b)));
case 234:
return ir.BitwiseOr(c, ir.BitwiseAnd(a, b));
case 235:
return ir.BitwiseOr(c, ir.BitwiseXor(a, ir.BitwiseNot(b)));
case 236:
return ir.BitwiseOr(b, ir.BitwiseAnd(a, c));
case 237:
return ir.BitwiseOr(b, ir.BitwiseXor(a, ir.BitwiseNot(c)));
case 238:
return ir.BitwiseOr(b, c);
case 239:
return ir.BitwiseOr(ir.BitwiseNot(a), ir.BitwiseOr(b, c));
case 240:
return a;
case 241:
return ir.BitwiseOr(a, ir.BitwiseNot(ir.BitwiseOr(b, c)));
case 242:
return ir.BitwiseOr(a, ir.BitwiseAnd(c, ir.BitwiseNot(b)));
case 243:
return ir.BitwiseOr(a, ir.BitwiseNot(b));
case 244:
return ir.BitwiseOr(a, ir.BitwiseAnd(b, ir.BitwiseNot(c)));
case 245:
return ir.BitwiseOr(a, ir.BitwiseNot(c));
case 246:
return ir.BitwiseOr(a, ir.BitwiseXor(b, c));
case 247:
return ir.BitwiseOr(a, ir.BitwiseNot(ir.BitwiseAnd(b, c)));
case 248:
return ir.BitwiseOr(a, ir.BitwiseAnd(b, c));
case 249:
return ir.BitwiseOr(a, ir.BitwiseXor(b, ir.BitwiseNot(c)));
case 250:
return ir.BitwiseOr(a, c);
case 251:
return ir.BitwiseOr(ir.BitwiseNot(b), ir.BitwiseOr(a, c));
case 252:
return ir.BitwiseOr(a, b);
case 253:
return ir.BitwiseOr(ir.BitwiseNot(c), ir.BitwiseOr(a, b));
case 254:
return ir.BitwiseOr(a, ir.BitwiseOr(b, c));
case 255:
return ir.Imm32(0xFFFFFFFF);
// end of generated code
std::optional<IR::U32> value;

// Encode into gray code.
u32 map = ttbl & 1;
map |= ((ttbl >> 1) & 1) << 4;
map |= ((ttbl >> 2) & 1) << 1;
map |= ((ttbl >> 3) & 1) << 5;
map |= ((ttbl >> 4) & 1) << 3;
map |= ((ttbl >> 5) & 1) << 7;
map |= ((ttbl >> 6) & 1) << 2;
map |= ((ttbl >> 7) & 1) << 6;
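// The permutation above lays the eight truth-table bits out as a 2x4 Karnaugh
// map: the low nibble of `map` holds the c=0 row and the high nibble the c=1
// row, with the (a, b) columns in Gray-code order (00, 01, 11, 10). Adjacent
// cells then differ in exactly one input, so the grouping loop below can find
// 1-, 2- and 4-cell implicants with simple 4-bit rotations.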

u32 visited = 0;
for (u32 index = 0; index < 8 && visited != 0xff; index++) {
if ((map & (1 << index)) == 0) {
continue;
}

const auto RotateLeft4 = [](u32 value, u32 shift) {
return ((value << shift) | (value >> (4 - shift))) & 0xf;
};

u32 mask = 0;
for (u32 size = 4; size != 0; size >>= 1) {
mask = RotateLeft4((1 << size) - 1, index & 3) << (index & 4);

if ((map & mask) == mask) {
break;
}
}

// The mask should wrap; if we are on the high row, shift to low etc.
const u32 mask2 = (index & 4) != 0 ? mask >> 4 : mask << 4;

if ((map & mask2) == mask2) {
mask |= mask2;
}

if ((mask & visited) == mask) {
continue;
}

const bool not_a = (mask & 0x33) != 0;
const bool not_b = (mask & 0x99) != 0;
const bool not_c = (mask & 0x0f) != 0;

const bool a_changes = (mask & 0xcc) != 0 && not_a;
const bool b_changes = (mask & 0x66) != 0 && not_b;
const bool c_changes = (mask & 0xf0) != 0 && not_c;

std::optional<IR::U32> local_value;

const auto And = [&](const IR::U32& source, bool inverted) {
IR::U32 result = inverted ? ir.BitwiseNot(source) : source;
if (local_value) {
local_value = ir.BitwiseAnd(*local_value, result);
} else {
local_value = result;
}
};

if (!a_changes) {
And(a, not_a);
}

if (!b_changes) {
And(b, not_b);
}

if (!c_changes) {
And(c, not_c);
}

if (value) {
value = ir.BitwiseOr(*value, *local_value);
} else {
value = local_value;
}
visited |= mask;
}
throw NotImplementedException("LOP3 with out of range ttbl");
return *value;
}
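For reference, the truth-table convention both halves of this function rely on: bit i of ttbl is the function value at input row i, so evaluating an expression bitwise over the masks a = 0xF0, b = 0xCC, c = 0xAA reproduces the immediate. A minimal self-check sketch using plain integers rather than the IR types (names here are illustrative, not part of the codebase):

#include <cstdint>

// Evaluate a three-input boolean function bitwise over all eight input rows;
// the 8-bit result equals the LOP3 truth-table immediate for that function.
template <typename F>
constexpr std::uint8_t TruthTable(F f) {
    return static_cast<std::uint8_t>(f(0xF0u, 0xCCu, 0xAAu));
}

static_assert(TruthTable([](auto a, auto b, auto c) { return a & b & c; }) == 128);  // case 128
static_assert(TruthTable([](auto a, auto b, auto c) { return a ^ (b | c); }) == 30); // case 30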

IR::U32 LOP3(TranslatorVisitor& v, u64 insn, const IR::U32& op_b, const IR::U32& op_c, u64 lut) {

@@ -1,92 +0,0 @@
# Copyright © 2022 degasus <markus@selfnet.de>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See http://www.wtfpl.net/ for more details.

from itertools import product

# The primitive instructions
OPS = {
    'ir.BitwiseAnd({}, {})' : (2, 1, lambda a,b: a&b),
    'ir.BitwiseOr({}, {})' : (2, 1, lambda a,b: a|b),
    'ir.BitwiseXor({}, {})' : (2, 1, lambda a,b: a^b),
    'ir.BitwiseNot({})' : (1, 0.1, lambda a: (~a) & 255), # Only tiny cost, as this can often be inlined in other instructions
}

# Our database of combinations of instructions
optimized_calls = {}
def cmp(lhs, rhs):
    if lhs is None: # new entry
        return True
    if lhs[3] > rhs[3]: # costs
        return True
    if lhs[3] < rhs[3]: # costs
        return False
    if len(lhs[0]) > len(rhs[0]): # string len
        return True
    if len(lhs[0]) < len(rhs[0]): # string len
        return False
    if lhs[0] > rhs[0]: # string sorting
        return True
    if lhs[0] < rhs[0]: # string sorting
        return False
    assert lhs == rhs, "redundant instruction, bug in brute force"
    return False

def register(imm, instruction, count, latency):
    # Use the sum of instruction count and latency as costs to evaluate which combination is best
    costs = count + latency

    old = optimized_calls.get(imm, None)
    new = (instruction, count, latency, costs)

    # Update if new or better
    if cmp(old, new):
        optimized_calls[imm] = new
        return True

    return False

# Constants: 0, 1 (for free)
register(0, 'ir.Imm32(0)', 0, 0)
register(255, 'ir.Imm32(0xFFFFFFFF)', 0, 0)

# Inputs: a, b, c (for free)
ta = 0xF0
tb = 0xCC
tc = 0xAA
inputs = {
    ta : 'a',
    tb : 'b',
    tc : 'c',
}
for imm, instruction in inputs.items():
    register(imm, instruction, 0, 0)
    register((~imm) & 255, 'ir.BitwiseNot({})'.format(instruction), 0.099, 0.099) # slightly cheaper NEG on inputs

# Try to combine two values from the db with an instruction.
# If it is better than the old method, update it.
while True:
    registered = 0
    calls_copy = optimized_calls.copy()
    for OP, (argc, cost, f) in OPS.items():
        for args in product(calls_copy.items(), repeat=argc):
            # unpack (transpose) the arrays
            imm = [arg[0] for arg in args]
            value = [arg[1][0] for arg in args]
            count = [arg[1][1] for arg in args]
            latency = [arg[1][2] for arg in args]

            registered += register(
                f(*imm),
                OP.format(*value),
                sum(count) + cost,
                max(latency) + cost)
    if registered == 0:
        # No update at all? So terminate
        break

# Hacky output. Please improve me to output valid C++ instead.
s = """ case {imm}:
        return {op};"""
for imm in range(256):
    print(s.format(imm=imm, op=optimized_calls[imm][0]))
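Since the script only prints the table, a quick way to gain confidence in its output is to re-evaluate a few emitted cases against the same ta/tb/tc truth-table constants; a correct expression reproduces its own case number. A hypothetical spot check in C++:

#include <cstdint>

// Mirror the script's input truth tables and spot-check three generated entries.
constexpr std::uint32_t ta = 0xF0, tb = 0xCC, tc = 0xAA;
static_assert((ta & (tb ^ tc)) == 96);                // case 96: And(a, Xor(b, c))
static_assert(((ta | tb) ^ (tc | (ta & tb))) == 22);  // case 22
static_assert(((ta | tb) & (tc | (ta & tb))) == 232); // case 232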
@@ -212,11 +212,11 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
}
Optimization::SsaRewritePass(program);

Optimization::ConstantPropagationPass(program);

Optimization::GlobalMemoryToStorageBufferPass(program);
Optimization::TexturePass(env, program);

Optimization::ConstantPropagationPass(program);

if (Settings::values.resolution_info.active) {
Optimization::RescalingPass(program);
}

@@ -334,8 +334,7 @@ std::optional<LowAddrInfo> TrackLowAddress(IR::Inst* inst) {
/// Tries to track the storage buffer address used by a global memory instruction
std::optional<StorageBufferAddr> Track(const IR::Value& value, const Bias* bias) {
const auto pred{[bias](const IR::Inst* inst) -> std::optional<StorageBufferAddr> {
if (inst->GetOpcode() != IR::Opcode::GetCbufU32 &&
inst->GetOpcode() != IR::Opcode::GetCbufU32x2) {
if (inst->GetOpcode() != IR::Opcode::GetCbufU32) {
return std::nullopt;
}
const IR::Value index{inst->Arg(0)};

@@ -183,31 +183,6 @@ void ScaleIntegerComposite(IR::IREmitter& ir, IR::Inst& inst, const IR::U1& is_s
}
}

void ScaleIntegerOffsetComposite(IR::IREmitter& ir, IR::Inst& inst, const IR::U1& is_scaled,
size_t index) {
const IR::Value composite{inst.Arg(index)};
if (composite.IsEmpty()) {
return;
}
const auto info{inst.Flags<IR::TextureInstInfo>()};
const IR::U32 x{Scale(ir, is_scaled, IR::U32{ir.CompositeExtract(composite, 0)})};
const IR::U32 y{Scale(ir, is_scaled, IR::U32{ir.CompositeExtract(composite, 1)})};
switch (info.type) {
case TextureType::ColorArray2D:
case TextureType::Color2D:
inst.SetArg(index, ir.CompositeConstruct(x, y));
break;
case TextureType::Color1D:
case TextureType::ColorArray1D:
case TextureType::Color3D:
case TextureType::ColorCube:
case TextureType::ColorArrayCube:
case TextureType::Buffer:
// Nothing to patch here
break;
}
}

void SubScaleCoord(IR::IREmitter& ir, IR::Inst& inst, const IR::U1& is_scaled) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const IR::Value coord{inst.Arg(1)};
@@ -245,7 +220,7 @@ void SubScaleImageFetch(IR::Block& block, IR::Inst& inst) {
const IR::U1 is_scaled{ir.IsTextureScaled(ir.Imm32(info.descriptor_index))};
SubScaleCoord(ir, inst, is_scaled);
// Scale ImageFetch offset
ScaleIntegerOffsetComposite(ir, inst, is_scaled, 2);
ScaleIntegerComposite(ir, inst, is_scaled, 2);
}

void SubScaleImageRead(IR::Block& block, IR::Inst& inst) {
@@ -267,7 +242,7 @@ void PatchImageFetch(IR::Block& block, IR::Inst& inst) {
const IR::U1 is_scaled{ir.IsTextureScaled(ir.Imm32(info.descriptor_index))};
ScaleIntegerComposite(ir, inst, is_scaled, 1);
// Scale ImageFetch offset
ScaleIntegerOffsetComposite(ir, inst, is_scaled, 2);
ScaleIntegerComposite(ir, inst, is_scaled, 2);
}

void PatchImageRead(IR::Block& block, IR::Inst& inst) {

@@ -7,7 +7,6 @@
#include "common/assert.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
@@ -196,7 +195,7 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 13:
case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 14:
case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15:
return ProcessCBData(argument);
return StartCBData(method);
case MAXWELL3D_REG_INDEX(cb_bind[0]):
return ProcessCBBind(0);
case MAXWELL3D_REG_INDEX(cb_bind[1]):
@@ -209,14 +208,6 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
return ProcessCBBind(4);
case MAXWELL3D_REG_INDEX(draw.vertex_end_gl):
return DrawArrays();
case MAXWELL3D_REG_INDEX(small_index):
regs.index_array.count = regs.small_index.count;
regs.index_array.first = regs.small_index.first;
dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
return DrawArrays();
case MAXWELL3D_REG_INDEX(topology_override):
use_topology_override = true;
return;
case MAXWELL3D_REG_INDEX(clear_buffers):
return ProcessClearBuffers();
case MAXWELL3D_REG_INDEX(query.query_get):
@@ -257,6 +248,14 @@ void Maxwell3D::CallMacroMethod(u32 method, const std::vector<u32>& parameters)
}

void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
if (method == cb_data_state.current) {
regs.reg_array[method] = method_argument;
ProcessCBData(method_argument);
return;
} else if (cb_data_state.current != null_cb_data) {
FinishCBData();
}

// It is an error to write to a register other than the current macro's ARG register before it
// has finished execution.
if (executing_macro != 0) {
@@ -303,7 +302,7 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 13:
case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 14:
case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15:
ProcessCBMultiData(base_start, amount);
ProcessCBMultiData(method, base_start, amount);
break;
default:
for (std::size_t i = 0; i < amount; i++) {
@@ -361,35 +360,6 @@ void Maxwell3D::CallMethodFromMME(u32 method, u32 method_argument) {
}
}

void Maxwell3D::ProcessTopologyOverride() {
using PrimitiveTopology = Maxwell3D::Regs::PrimitiveTopology;
using PrimitiveTopologyOverride = Maxwell3D::Regs::PrimitiveTopologyOverride;

PrimitiveTopology topology{};

switch (regs.topology_override) {
case PrimitiveTopologyOverride::None:
topology = regs.draw.topology;
break;
case PrimitiveTopologyOverride::Points:
topology = PrimitiveTopology::Points;
break;
case PrimitiveTopologyOverride::Lines:
topology = PrimitiveTopology::Lines;
break;
case PrimitiveTopologyOverride::LineStrip:
topology = PrimitiveTopology::LineStrip;
break;
default:
topology = static_cast<PrimitiveTopology>(regs.topology_override);
break;
}

if (use_topology_override) {
regs.draw.topology.Assign(topology);
}
}

void Maxwell3D::FlushMMEInlineDraw() {
LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(),
regs.vertex_buffer.count);
@@ -400,8 +370,6 @@ void Maxwell3D::FlushMMEInlineDraw() {
ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont,
"Illegal combination of instancing parameters");

ProcessTopologyOverride();

const bool is_indexed = mme_draw.current_mode == MMEDrawMode::Indexed;
if (ShouldExecute()) {
rasterizer->Draw(is_indexed, true);
@@ -561,8 +529,6 @@ void Maxwell3D::DrawArrays() {
ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont,
"Illegal combination of instancing parameters");

ProcessTopologyOverride();

if (regs.draw.instance_next) {
// Increment the current instance *before* drawing.
state.current_instance += 1;
@@ -621,7 +587,46 @@ void Maxwell3D::ProcessCBBind(size_t stage_index) {
rasterizer->BindGraphicsUniformBuffer(stage_index, bind_data.index, gpu_addr, size);
}

void Maxwell3D::ProcessCBMultiData(const u32* start_base, u32 amount) {
void Maxwell3D::ProcessCBData(u32 value) {
const u32 id = cb_data_state.id;
cb_data_state.buffer[id][cb_data_state.counter] = value;
// Increment the current buffer position.
regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4;
cb_data_state.counter++;
}

void Maxwell3D::StartCBData(u32 method) {
constexpr u32 first_cb_data = MAXWELL3D_REG_INDEX(const_buffer.cb_data);
cb_data_state.start_pos = regs.const_buffer.cb_pos;
cb_data_state.id = method - first_cb_data;
cb_data_state.current = method;
cb_data_state.counter = 0;
ProcessCBData(regs.const_buffer.cb_data[cb_data_state.id]);
}

void Maxwell3D::ProcessCBMultiData(u32 method, const u32* start_base, u32 amount) {
if (cb_data_state.current != method) {
if (cb_data_state.current != null_cb_data) {
FinishCBData();
}
constexpr u32 first_cb_data = MAXWELL3D_REG_INDEX(const_buffer.cb_data);
cb_data_state.start_pos = regs.const_buffer.cb_pos;
cb_data_state.id = method - first_cb_data;
cb_data_state.current = method;
cb_data_state.counter = 0;
}
const std::size_t id = cb_data_state.id;
const std::size_t size = amount;
std::size_t i = 0;
for (; i < size; i++) {
cb_data_state.buffer[id][cb_data_state.counter] = start_base[i];
cb_data_state.counter++;
}
// Increment the current buffer position.
regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4 * amount;
}

void Maxwell3D::FinishCBData() {
// Write the input value to the current const buffer at the current position.
const GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
ASSERT(buffer_address != 0);
@@ -629,16 +634,14 @@ void Maxwell3D::ProcessCBMultiData(const u32* start_base, u32 amount) {
// Don't allow writing past the end of the buffer.
ASSERT(regs.const_buffer.cb_pos <= regs.const_buffer.cb_size);

const GPUVAddr address{buffer_address + regs.const_buffer.cb_pos};
const size_t copy_size = amount * sizeof(u32);
memory_manager.WriteBlock(address, start_base, copy_size);
const GPUVAddr address{buffer_address + cb_data_state.start_pos};
const std::size_t size = regs.const_buffer.cb_pos - cb_data_state.start_pos;

// Increment the current buffer position.
regs.const_buffer.cb_pos += static_cast<u32>(copy_size);
}
const u32 id = cb_data_state.id;
memory_manager.WriteBlock(address, cb_data_state.buffer[id].data(), size);

void Maxwell3D::ProcessCBData(u32 value) {
ProcessCBMultiData(&value, 1);
cb_data_state.id = null_cb_data;
cb_data_state.current = null_cb_data;
}
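Stripped of the register plumbing, the hunk above restores a simple batching state machine: inline CB_DATA writes accumulate in a CPU-side staging buffer and are flushed to GPU memory as a single block once the method stream moves on. A condensed sketch of the same pattern (names and the commented-out WriteBlock call are illustrative, not the engine's API):

#include <cstdint>
#include <vector>

// Accumulate per-method inline writes; flush them as one block when the
// incoming method changes, mirroring StartCBData/ProcessCBData/FinishCBData.
struct InlineWriteBatcher {
    static constexpr std::uint32_t kNoMethod = 0xFFFFFFFF;
    std::uint32_t current_method{kNoMethod};
    std::uint64_t flush_base{};
    std::vector<std::uint32_t> pending;

    void Push(std::uint32_t method, std::uint64_t dest, std::uint32_t value) {
        if (method != current_method) {
            Flush();
            current_method = method;
            flush_base = dest;
        }
        pending.push_back(value);
    }

    void Flush() {
        if (current_method == kNoMethod || pending.empty()) {
            return;
        }
        // WriteBlock(flush_base, pending.data(), pending.size() * sizeof(std::uint32_t));
        pending.clear();
        current_method = kNoMethod;
    }
};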

Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {

@@ -367,22 +367,6 @@ public:
Patches = 0xe,
};

// Constants as from NVC0_3D_UNK1970_D3D
// https://gitlab.freedesktop.org/mesa/mesa/-/blob/main/src/gallium/drivers/nouveau/nvc0/nvc0_3d.xml.h#L1598
enum class PrimitiveTopologyOverride : u32 {
None = 0x0,
Points = 0x1,
Lines = 0x2,
LineStrip = 0x3,
Triangles = 0x4,
TriangleStrip = 0x5,
LinesAdjacency = 0xa,
LineStripAdjacency = 0xb,
TrianglesAdjacency = 0xc,
TriangleStripAdjacency = 0xd,
Patches = 0xe,
};

enum class IndexFormat : u32 {
UnsignedByte = 0x0,
UnsignedShort = 0x1,
@@ -1216,12 +1200,7 @@ public:
}
} index_array;

union {
BitField<0, 16, u32> first;
BitField<16, 16, u32> count;
} small_index;
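With the BitField layout above, a single 32-bit register write carries both draw parameters: bits 0-15 are the first index and bits 16-31 the index count. A hypothetical decode of one such word, with the value chosen purely for illustration:

// word = 0x00040010 -> first = 0x10, count = 4
constexpr std::uint32_t word = 0x00040010;
static_assert((word & 0xFFFFu) == 0x10u); // first, bits 0-15
static_assert((word >> 16) == 0x4u);      // count, bits 16-31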

INSERT_PADDING_WORDS_NOINIT(0x6);
INSERT_PADDING_WORDS_NOINIT(0x7);

INSERT_PADDING_WORDS_NOINIT(0x1F);

@@ -1265,11 +1244,7 @@ public:
BitField<11, 1, u32> depth_clamp_disabled;
} view_volume_clip_control;

INSERT_PADDING_WORDS_NOINIT(0xC);

PrimitiveTopologyOverride topology_override;

INSERT_PADDING_WORDS_NOINIT(0x12);
INSERT_PADDING_WORDS_NOINIT(0x1F);

u32 depth_bounds_enable;

@@ -1545,8 +1520,10 @@ private:
void ProcessSyncPoint();

/// Handles a write to the CB_DATA[i] register.
void StartCBData(u32 method);
void ProcessCBData(u32 value);
void ProcessCBMultiData(const u32* start_base, u32 amount);
void ProcessCBMultiData(u32 method, const u32* start_base, u32 amount);
void FinishCBData();

/// Handles a write to the CB_BIND register.
void ProcessCBBind(size_t stage_index);
@@ -1554,9 +1531,6 @@ private:
/// Handles a write to the VERTEX_END_GL register, triggering a draw.
void DrawArrays();

/// Handles use of topology overrides (e.g., to avoid using a topology assigned from a macro)
void ProcessTopologyOverride();

// Handles an instance drawcall from MME
void StepInstance(MMEDrawMode expected_mode, u32 count);

@@ -1581,10 +1555,20 @@ private:
/// Interpreter for the macro codes uploaded to the GPU.
std::unique_ptr<MacroEngine> macro_engine;

static constexpr u32 null_cb_data = 0xFFFFFFFF;
struct CBDataState {
static constexpr size_t inline_size = 0x4000;
std::array<std::array<u32, inline_size>, 16> buffer;
u32 current{null_cb_data};
u32 id{null_cb_data};
u32 start_pos{};
u32 counter{};
};
CBDataState cb_data_state;

Upload::State upload_state;

bool execute_on{true};
bool use_topology_override{false};
};

#define ASSERT_REG_POSITION(field_name, position) \
@@ -1701,7 +1685,6 @@ ASSERT_REG_POSITION(draw, 0x585);
ASSERT_REG_POSITION(primitive_restart, 0x591);
ASSERT_REG_POSITION(provoking_vertex_last, 0x5A1);
ASSERT_REG_POSITION(index_array, 0x5F2);
ASSERT_REG_POSITION(small_index, 0x5F9);
ASSERT_REG_POSITION(polygon_offset_clamp, 0x61F);
ASSERT_REG_POSITION(instanced_arrays, 0x620);
ASSERT_REG_POSITION(vp_point_size, 0x644);
@@ -1711,7 +1694,6 @@ ASSERT_REG_POSITION(cull_face, 0x648);
ASSERT_REG_POSITION(pixel_center_integer, 0x649);
ASSERT_REG_POSITION(viewport_transform_enabled, 0x64B);
ASSERT_REG_POSITION(view_volume_clip_control, 0x64F);
ASSERT_REG_POSITION(topology_override, 0x65C);
ASSERT_REG_POSITION(depth_bounds_enable, 0x66F);
ASSERT_REG_POSITION(logic_op, 0x671);
ASSERT_REG_POSITION(clear_buffers, 0x674);

@@ -53,6 +53,7 @@ void MaxwellDMA::Launch() {

// TODO(Subv): Perform more research and implement all features of this engine.
const LaunchDMA& launch = regs.launch_dma;
ASSERT(launch.semaphore_type == LaunchDMA::SemaphoreType::NONE);
ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
ASSERT(regs.dst_params.origin.x == 0);
@@ -78,7 +79,6 @@ void MaxwellDMA::Launch() {
CopyPitchToBlockLinear();
}
}
ReleaseSemaphore();
}

void MaxwellDMA::CopyPitchToPitch() {
@@ -244,22 +244,4 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
}

void MaxwellDMA::ReleaseSemaphore() {
const auto type = regs.launch_dma.semaphore_type;
const GPUVAddr address = regs.semaphore.address;
switch (type) {
case LaunchDMA::SemaphoreType::NONE:
break;
case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE:
memory_manager.Write<u32>(address, regs.semaphore.payload);
break;
case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE:
memory_manager.Write<u64>(address, static_cast<u64>(regs.semaphore.payload));
memory_manager.Write<u64>(address + 8, system.GPU().GetTicks());
break;
default:
UNREACHABLE_MSG("Unknown semaphore type: {}", static_cast<u32>(type.Value()));
}
}

} // namespace Tegra::Engines

@@ -224,8 +224,6 @@ private:

void FastCopyBlockLinearToPitch();

void ReleaseSemaphore();

Core::System& system;

MemoryManager& memory_manager;

@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <array>
#include <cstring>
#include <memory>
#include <optional>
@@ -293,7 +292,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_INDEX_READ_BIT,
};
const std::array<u32, 2> push_constants{base_vertex, index_shift};
const std::array push_constants{base_vertex, index_shift};
const VkDescriptorSet set = descriptor_allocator.Commit();
device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

@@ -1067,8 +1067,7 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
}
break;
case PixelFormat::A8B8G8R8_UNORM:
if (src_view.format == PixelFormat::S8_UINT_D24_UNORM ||
src_view.format == PixelFormat::D24_UNORM_S8_UINT) {
if (src_view.format == PixelFormat::S8_UINT_D24_UNORM) {
return blit_image_helper.ConvertD24S8ToABGR8(dst, src_view);
}
break;

@@ -50,7 +50,6 @@ std::unique_ptr<Tegra::GPU> CreateGPU(Core::Frontend::EmuWindow& emu_window, Cor
gpu->BindRenderer(std::move(renderer));
return gpu;
} catch (const std::runtime_error& exception) {
scope.Cancel();
LOG_ERROR(HW_GPU, "Failed to initialize GPU: {}", exception.what());
return nullptr;
}

@@ -775,7 +775,6 @@ void Config::ReadUIValues() {
ReadBasicSetting(UISettings::values.pause_when_in_background);
ReadBasicSetting(UISettings::values.mute_when_in_background);
ReadBasicSetting(UISettings::values.hide_mouse);
ReadBasicSetting(UISettings::values.disable_web_applet);

qt_config->endGroup();
}
@@ -1156,8 +1155,6 @@ void Config::SaveCpuValues() {
WriteBasicSetting(Settings::values.cpuopt_misc_ir);
WriteBasicSetting(Settings::values.cpuopt_reduce_misalign_checks);
WriteBasicSetting(Settings::values.cpuopt_fastmem);
WriteBasicSetting(Settings::values.cpuopt_fastmem_exclusives);
WriteBasicSetting(Settings::values.cpuopt_recompile_exclusives);
}

qt_config->endGroup();
@@ -1309,7 +1306,6 @@ void Config::SaveUIValues() {
WriteBasicSetting(UISettings::values.pause_when_in_background);
WriteBasicSetting(UISettings::values.mute_when_in_background);
WriteBasicSetting(UISettings::values.hide_mouse);
WriteBasicSetting(UISettings::values.disable_web_applet);

qt_config->endGroup();
}

@@ -59,13 +59,6 @@ void ConfigureDebug::SetConfiguration() {
ui->disable_loop_safety_checks->setChecked(
Settings::values.disable_shader_loop_safety_checks.GetValue());
ui->extended_logging->setChecked(Settings::values.extended_logging.GetValue());

#ifdef YUZU_USE_QT_WEB_ENGINE
ui->disable_web_applet->setChecked(UISettings::values.disable_web_applet.GetValue());
#else
ui->disable_web_applet->setEnabled(false);
ui->disable_web_applet->setText(QString::fromUtf8("Web applet not compiled"));
#endif
}

void ConfigureDebug::ApplyConfiguration() {
@@ -87,7 +80,6 @@ void ConfigureDebug::ApplyConfiguration() {
ui->disable_loop_safety_checks->isChecked();
Settings::values.disable_macro_jit = ui->disable_macro_jit->isChecked();
Settings::values.extended_logging = ui->extended_logging->isChecked();
UISettings::values.disable_web_applet = ui->disable_web_applet->isChecked();
Debugger::ToggleConsole();
Common::Log::Filter filter;
filter.ParseFilterString(Settings::values.log_filter.GetValue());

@@ -8,49 +8,49 @@
<property name="title">
<string>Logging</string>
</property>
<layout class="QGridLayout" name="gridLayout_1">
<item row="0" column="0" colspan="2">
<layout class="QHBoxLayout" name="horizontalLayout_1">
<item>
<widget class="QLabel" name="label_1">
<property name="text">
<string>Global Log Filter</string>
<layout class="QGridLayout" name="gridLayout_1">
<item row="0" column="0" colspan="2">
<layout class="QHBoxLayout" name="horizontalLayout_1">
<item>
<widget class="QLabel" name="label_1">
<property name="text">
<string>Global Log Filter</string>
</property>
</widget>
</item>
<item>
<widget class="QLineEdit" name="log_filter_edit"/>
</item>
</layout>
</item>
<item row="1" column="0">
<widget class="QCheckBox" name="toggle_console">
<property name="text">
<string>Show Log in Console</string>
</property>
</widget>
</item>
<item row="1" column="1">
<widget class="QPushButton" name="open_log_button">
<property name="text">
<string>Open Log Location</string>
</property>
</widget>
</item>
<item row="2" column="0">
<widget class="QCheckBox" name="extended_logging">
<property name="enabled">
<bool>true</bool>
</property>
</widget>
</item>
<item>
<widget class="QLineEdit" name="log_filter_edit"/>
</item>
</layout>
</item>
<item row="1" column="0">
<widget class="QCheckBox" name="toggle_console">
<property name="text">
<string>Show Log in Console</string>
</property>
</widget>
</item>
<item row="1" column="1">
<widget class="QPushButton" name="open_log_button">
<property name="text">
<string>Open Log Location</string>
</property>
</widget>
</item>
<item row="2" column="0">
<widget class="QCheckBox" name="extended_logging">
<property name="enabled">
<bool>true</bool>
</property>
<property name="toolTip">
<string>When checked, the max size of the log increases from 100 MB to 1 GB</string>
</property>
<property name="text">
<string>Enable Extended Logging**</string>
</property>
</widget>
</item>
</layout>
<property name="toolTip">
<string>When checked, the max size of the log increases from 100 MB to 1 GB</string>
</property>
<property name="text">
<string>Enable Extended Logging**</string>
</property>
</widget>
</item>
</layout>
</widget>
</item>
<item>
@@ -183,7 +183,7 @@
<string>Advanced</string>
</property>
<layout class="QGridLayout" name="gridLayout_4">
<item row="0" column="0">
<item row="0" column="0">
|
||||
<widget class="QCheckBox" name="quest_flag">
|
||||
<property name="text">
|
||||
<string>Kiosk (Quest) Mode</string>
|
||||
@@ -218,13 +218,6 @@
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="2" column="1">
|
||||
<widget class="QCheckBox" name="disable_web_applet">
|
||||
<property name="text">
|
||||
<string>Disable Web Applet**</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
</layout>
|
||||
</widget>
|
||||
</item>
|
||||
|
||||
@@ -25,7 +25,6 @@
#include "core/file_sys/vfs_real.h"
#include "core/frontend/applets/controller.h"
#include "core/frontend/applets/general_frontend.h"
#include "core/frontend/applets/mii.h"
#include "core/frontend/applets/software_keyboard.h"
#include "core/hid/emulated_controller.h"
#include "core/hid/hid_core.h"
@@ -250,9 +249,9 @@ GMainWindow::GMainWindow()
#ifdef ARCHITECTURE_x86_64
    const auto& caps = Common::GetCPUCaps();
    std::string cpu_string = caps.cpu_string;
    if (caps.avx || caps.avx2 || caps.avx512f) {
    if (caps.avx || caps.avx2 || caps.avx512) {
        cpu_string += " | AVX";
        if (caps.avx512f) {
        if (caps.avx512) {
            cpu_string += "512";
        } else if (caps.avx2) {
            cpu_string += '2';
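The hunk above only renames the avx512f capability flag to avx512; the suffix logic itself is unchanged. Restoring the braces the hunk truncates, the behavior can be shown as a self-contained sketch with a stand-in CpuCaps struct (hypothetical; yuzu's real struct comes from Common::GetCPUCaps()):

// Sketch of the CPU-string suffix logic from the hunk above.
#include <iostream>
#include <string>

struct CpuCaps { // hypothetical stand-in for the real capability struct
    std::string cpu_string;
    bool avx, avx2, avx512;
};

std::string BuildCpuString(const CpuCaps& caps) {
    std::string cpu_string = caps.cpu_string;
    if (caps.avx || caps.avx2 || caps.avx512) {
        cpu_string += " | AVX";
        if (caps.avx512) {
            cpu_string += "512";      // AVX-512 implies the strongest suffix
        } else if (caps.avx2) {
            cpu_string += '2';        // otherwise fall back to AVX2
        }
    }
    return cpu_string;
}

int main() {
    // Prints: Ryzen 7 3700X | AVX2
    std::cout << BuildCpuString({"Ryzen 7 3700X", true, true, false}) << '\n';
}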
@@ -587,7 +586,7 @@ void GMainWindow::WebBrowserOpenWebPage(const std::string& main_url,
#ifdef YUZU_USE_QT_WEB_ENGINE

    // Raw input breaks with the web applet; disable web applets if enabled
    if (UISettings::values.disable_web_applet || Settings::values.enable_raw_input) {
    if (disable_web_applet || Settings::values.enable_raw_input) {
        emit WebBrowserClosed(Service::AM::Applets::WebExitReason::WindowClosed,
                              "http://localhost/");
        return;
@@ -652,12 +651,12 @@ void GMainWindow::WebBrowserOpenWebPage(const std::string& main_url,
    connect(exit_action, &QAction::triggered, this, [this, &web_browser_view] {
        const auto result = QMessageBox::warning(
            this, tr("Disable Web Applet"),
            tr("Disabling the web applet can lead to undefined behavior and should only be used "
               "with Super Mario 3D All-Stars. Are you sure you want to disable the web "
               "applet?\n(This can be re-enabled in the Debug settings.)"),
            tr("Disabling the web applet will cause it to not be shown again for the rest of the "
               "emulated session. This can lead to undefined behavior and should only be used with "
               "Super Mario 3D All-Stars. Are you sure you want to disable the web applet?"),
            QMessageBox::Yes | QMessageBox::No);
        if (result == QMessageBox::Yes) {
            UISettings::values.disable_web_applet = true;
            disable_web_applet = true;
            web_browser_view.SetFinished(true);
        }
    });
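The confirmation above is the standard QMessageBox::warning Yes/No pattern. A minimal sketch of that API in isolation, with the dialog text abbreviated:

// Minimal sketch of a Yes/No confirmation via QMessageBox::warning.
#include <QApplication>
#include <QMessageBox>

int main(int argc, char* argv[]) {
    QApplication app(argc, argv);
    const auto result = QMessageBox::warning(
        nullptr, QStringLiteral("Disable Web Applet"),
        QStringLiteral("Are you sure you want to disable the web applet?"),
        QMessageBox::Yes | QMessageBox::No);
    // Proceed only on explicit confirmation.
    return result == QMessageBox::Yes ? 0 : 1;
}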
@@ -1286,7 +1285,6 @@ bool GMainWindow::LoadROM(const QString& filename, u64 program_id, std::size_t p
        std::make_unique<QtControllerSelector>(*this), // Controller Selector
        std::make_unique<QtErrorDisplay>(*this),       // Error Display
        nullptr,                                       // Parental Controls
        nullptr,                                       // Mii editor
        nullptr,                                       // Photo Viewer
        std::make_unique<QtProfileSelector>(*this),    // Profile Selector
        std::make_unique<QtSoftwareKeyboard>(*this),   // Software Keyboard

@@ -400,6 +400,9 @@ private:
    // Last game booted, used for multi-process apps
    QString last_filename_booted;

    // Disables the web applet for the rest of the emulated session
    bool disable_web_applet{};

    // Applets
    QtSoftwareKeyboardDialog* software_keyboard = nullptr;

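This is the core design change of the commit: the persisted BasicSetting (removed in the next hunk) is replaced by a zero-initialized member, so opting out of the web applet now lasts only for the current emulated session and resets on the next launch. A small sketch of the difference, with hypothetical stand-in types:

// Sketch: persisted setting vs. per-session member flag.
#include <cassert>

struct PersistedSettings {
    // Old style: serialized to the config file, survives restarts.
    bool disable_web_applet = true;
};

struct MainWindow {
    // New style: value-initialized to false on every construction.
    bool disable_web_applet{};
};

int main() {
    MainWindow first_session;
    first_session.disable_web_applet = true; // user opts out mid-session

    MainWindow second_session;               // relaunch: flag resets
    assert(!second_session.disable_web_applet);
}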
@@ -114,7 +114,6 @@ struct Values {

    bool configuration_applied;
    bool reset_to_defaults;
    Settings::BasicSetting<bool> disable_web_applet{true, "disable_web_applet"};
};

extern Values values;

@@ -124,11 +124,7 @@ keyboard_enabled =
[Core]
# Whether to use multi-core for CPU emulation
# 0: Disabled, 1 (default): Enabled
use_multi_core =

# Enable extended guest system memory layout (6GB DRAM)
# 0 (default): Disabled, 1: Enabled
use_extended_memory_layout =
use_multi_core=

[Cpu]
# Adjusts various optimizations.
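As the comments in this template note, a key left blank (e.g. "use_multi_core =") falls back to its documented default. A tiny sketch of that fallback rule; the helper here is hypothetical and is not yuzu's actual ini reader:

// Sketch: blank ini values keep the documented default.
#include <iostream>
#include <string>

bool ReadBoolOr(const std::string& raw_value, bool default_value) {
    if (raw_value.empty()) {
        return default_value; // blank key in the ini keeps the default
    }
    return raw_value != "0";  // "0" disables, anything else enables
}

int main() {
    std::cout << ReadBoolOr("", true) << '\n';  // use_multi_core =   -> 1
    std::cout << ReadBoolOr("0", true) << '\n'; // use_multi_core = 0 -> 0
}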