Compare commits


1 Commit

Author:  Lody
SHA1:    bd11b10298
Message: shader: rewrite LOP3.LUT
         shader: opt
Date:    2022-03-07 12:15:34 +08:00
335 changed files with 2232 additions and 3514 deletions

View File

@@ -8,7 +8,7 @@ steps:
displayName: 'Install vulkan-sdk'
- script: python -m pip install --upgrade pip conan
displayName: 'Install conan'
- script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_BUNDLED_SDL2=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DYUZU_TESTS=OFF -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} -DCMAKE_BUILD_TYPE=Release .. && cd ..
- script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_BUNDLED_SDL2=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} -DCMAKE_BUILD_TYPE=Release .. && cd ..
displayName: 'Configure CMake'
- task: MSBuild@1
displayName: 'Build'

View File

@@ -363,11 +363,7 @@ if(ENABLE_QT)
set(YUZU_QT_NO_CMAKE_SYSTEM_PATH "NO_CMAKE_SYSTEM_PATH")
endif()
if ((${CMAKE_SYSTEM_NAME} STREQUAL "Linux") AND YUZU_USE_BUNDLED_QT)
find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets DBus ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
else()
find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
endif()
find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
if (YUZU_USE_QT_WEB_ENGINE)
find_package(Qt5 COMPONENTS WebEngineCore WebEngineWidgets)
endif()

View File

@@ -4,12 +4,13 @@
#pragma once
#include <cstring>
#include <memory>
#include "common/common_types.h"
#if _MSC_VER
#include <intrin.h>
#else
#include <cstring>
#endif
namespace Common {

View File

@@ -33,6 +33,7 @@
#include <cstddef>
#include <limits>
#include <type_traits>
#include "common/common_funcs.h"
#include "common/swap.h"
/*

View File

@@ -57,11 +57,4 @@ requires std::is_integral_v<T>
return static_cast<T>(1ULL << ((8U * sizeof(T)) - std::countl_zero(value - 1U)));
}
template <size_t bit_index, typename T>
requires std::is_integral_v<T>
[[nodiscard]] constexpr bool Bit(const T value) {
static_assert(bit_index < BitSize<T>(), "bit_index must be smaller than size of T");
return ((value >> bit_index) & T(1)) == T(1);
}
} // namespace Common
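
Note: the hunk above deletes the generic single-bit test helper from bit_util.h; callers such as cpu_detect.cpp (later in this diff) revert to manual shift-and-mask. For reference, the removed helper reduces to this self-contained sketch (BitSize<T>() from the original is replaced here by sizeof(T) * 8):

#include <cstddef>
#include <type_traits>

// Tests whether bit `bit_index` of an integral value is set.
template <std::size_t bit_index, typename T>
    requires std::is_integral_v<T>
[[nodiscard]] constexpr bool Bit(const T value) {
    static_assert(bit_index < sizeof(T) * 8U, "bit_index must be smaller than size of T");
    return ((value >> bit_index) & T(1)) == T(1);
}

// Bit<25>(x) is equivalent to the reverted style ((x >> 25) & 1) != 0.
static_assert(Bit<0>(1U) && !Bit<1>(1U));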

View File

@@ -2,6 +2,7 @@
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <cstring>
#include <string>
#include <utility>

View File

@@ -4,6 +4,7 @@
#include "common/fs/file.h"
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#ifdef _WIN32

View File

@@ -6,8 +6,10 @@
#include <cstdio>
#include <filesystem>
#include <fstream>
#include <span>
#include <type_traits>
#include <vector>
#include "common/concepts.h"
#include "common/fs/fs_types.h"

View File

@@ -7,6 +7,7 @@
#include <functional>
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace Common::FS {

View File

@@ -8,6 +8,7 @@
#include <filesystem>
#include <span>
#include <string>
#include <string_view>
#include "common/common_types.h"

View File

@@ -7,6 +7,7 @@
#include <array>
#include <cstddef>
#include <string>
#include <type_traits>
#include <vector>
#include <fmt/format.h>
#include "common/common_types.h"

View File

@@ -18,7 +18,6 @@
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "common/scope_exit.h"
#endif // ^^^ Linux ^^^
@@ -28,6 +27,7 @@
#include "common/assert.h"
#include "common/host_memory.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
namespace Common {

View File

@@ -4,7 +4,6 @@
#pragma once
#include "common/common_funcs.h"
#include "common/parent_of_member.h"
#include "common/tree.h"
@@ -16,33 +15,32 @@ class IntrusiveRedBlackTreeImpl;
}
#pragma pack(push, 4)
struct IntrusiveRedBlackTreeNode {
YUZU_NON_COPYABLE(IntrusiveRedBlackTreeNode);
public:
using RBEntry = freebsd::RBEntry<IntrusiveRedBlackTreeNode>;
using EntryType = RBEntry<IntrusiveRedBlackTreeNode>;
constexpr IntrusiveRedBlackTreeNode() = default;
void SetEntry(const EntryType& new_entry) {
entry = new_entry;
}
[[nodiscard]] EntryType& GetEntry() {
return entry;
}
[[nodiscard]] const EntryType& GetEntry() const {
return entry;
}
private:
RBEntry m_entry;
EntryType entry{};
public:
explicit IntrusiveRedBlackTreeNode() = default;
friend class impl::IntrusiveRedBlackTreeImpl;
[[nodiscard]] constexpr RBEntry& GetRBEntry() {
return m_entry;
}
[[nodiscard]] constexpr const RBEntry& GetRBEntry() const {
return m_entry;
}
constexpr void SetRBEntry(const RBEntry& entry) {
m_entry = entry;
}
template <class, class, class>
friend class IntrusiveRedBlackTree;
};
static_assert(sizeof(IntrusiveRedBlackTreeNode) ==
3 * sizeof(void*) + std::max<size_t>(sizeof(freebsd::RBColor), 4));
#pragma pack(pop)
template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree;
@@ -50,17 +48,12 @@ class IntrusiveRedBlackTree;
namespace impl {
class IntrusiveRedBlackTreeImpl {
YUZU_NON_COPYABLE(IntrusiveRedBlackTreeImpl);
private:
template <class, class, class>
friend class ::Common::IntrusiveRedBlackTree;
private:
using RootType = freebsd::RBHead<IntrusiveRedBlackTreeNode>;
private:
RootType m_root;
using RootType = RBHead<IntrusiveRedBlackTreeNode>;
RootType root;
public:
template <bool Const>
@@ -88,150 +81,149 @@ public:
IntrusiveRedBlackTreeImpl::reference>;
private:
pointer m_node;
pointer node;
public:
constexpr explicit Iterator(pointer n) : m_node(n) {}
explicit Iterator(pointer n) : node(n) {}
constexpr bool operator==(const Iterator& rhs) const {
return m_node == rhs.m_node;
bool operator==(const Iterator& rhs) const {
return this->node == rhs.node;
}
constexpr bool operator!=(const Iterator& rhs) const {
bool operator!=(const Iterator& rhs) const {
return !(*this == rhs);
}
constexpr pointer operator->() const {
return m_node;
pointer operator->() const {
return this->node;
}
constexpr reference operator*() const {
return *m_node;
reference operator*() const {
return *this->node;
}
constexpr Iterator& operator++() {
m_node = GetNext(m_node);
Iterator& operator++() {
this->node = GetNext(this->node);
return *this;
}
constexpr Iterator& operator--() {
m_node = GetPrev(m_node);
Iterator& operator--() {
this->node = GetPrev(this->node);
return *this;
}
constexpr Iterator operator++(int) {
Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}
constexpr Iterator operator--(int) {
Iterator operator--(int) {
const Iterator it{*this};
--(*this);
return it;
}
constexpr operator Iterator<true>() const {
return Iterator<true>(m_node);
operator Iterator<true>() const {
return Iterator<true>(this->node);
}
};
private:
constexpr bool EmptyImpl() const {
return m_root.IsEmpty();
// Define accessors using RB_* functions.
bool EmptyImpl() const {
return root.IsEmpty();
}
constexpr IntrusiveRedBlackTreeNode* GetMinImpl() const {
return freebsd::RB_MIN(const_cast<RootType&>(m_root));
IntrusiveRedBlackTreeNode* GetMinImpl() const {
return RB_MIN(const_cast<RootType*>(&root));
}
constexpr IntrusiveRedBlackTreeNode* GetMaxImpl() const {
return freebsd::RB_MAX(const_cast<RootType&>(m_root));
IntrusiveRedBlackTreeNode* GetMaxImpl() const {
return RB_MAX(const_cast<RootType*>(&root));
}
constexpr IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
return freebsd::RB_REMOVE(m_root, node);
IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
return RB_REMOVE(&root, node);
}
public:
static constexpr IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
return freebsd::RB_NEXT(node);
static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
return RB_NEXT(node);
}
static constexpr IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
return freebsd::RB_PREV(node);
static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
return RB_PREV(node);
}
static constexpr IntrusiveRedBlackTreeNode const* GetNext(
IntrusiveRedBlackTreeNode const* node) {
static const IntrusiveRedBlackTreeNode* GetNext(const IntrusiveRedBlackTreeNode* node) {
return static_cast<const IntrusiveRedBlackTreeNode*>(
GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node)));
}
static constexpr IntrusiveRedBlackTreeNode const* GetPrev(
IntrusiveRedBlackTreeNode const* node) {
static const IntrusiveRedBlackTreeNode* GetPrev(const IntrusiveRedBlackTreeNode* node) {
return static_cast<const IntrusiveRedBlackTreeNode*>(
GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node)));
}
public:
constexpr IntrusiveRedBlackTreeImpl() = default;
constexpr IntrusiveRedBlackTreeImpl() {}
// Iterator accessors.
constexpr iterator begin() {
iterator begin() {
return iterator(this->GetMinImpl());
}
constexpr const_iterator begin() const {
const_iterator begin() const {
return const_iterator(this->GetMinImpl());
}
constexpr iterator end() {
iterator end() {
return iterator(static_cast<IntrusiveRedBlackTreeNode*>(nullptr));
}
constexpr const_iterator end() const {
const_iterator end() const {
return const_iterator(static_cast<const IntrusiveRedBlackTreeNode*>(nullptr));
}
constexpr const_iterator cbegin() const {
const_iterator cbegin() const {
return this->begin();
}
constexpr const_iterator cend() const {
const_iterator cend() const {
return this->end();
}
constexpr iterator iterator_to(reference ref) {
return iterator(std::addressof(ref));
iterator iterator_to(reference ref) {
return iterator(&ref);
}
constexpr const_iterator iterator_to(const_reference ref) const {
return const_iterator(std::addressof(ref));
const_iterator iterator_to(const_reference ref) const {
return const_iterator(&ref);
}
// Content management.
constexpr bool empty() const {
bool empty() const {
return this->EmptyImpl();
}
constexpr reference back() {
reference back() {
return *this->GetMaxImpl();
}
constexpr const_reference back() const {
const_reference back() const {
return *this->GetMaxImpl();
}
constexpr reference front() {
reference front() {
return *this->GetMinImpl();
}
constexpr const_reference front() const {
const_reference front() const {
return *this->GetMinImpl();
}
constexpr iterator erase(iterator it) {
iterator erase(iterator it) {
auto cur = std::addressof(*it);
auto next = GetNext(cur);
this->RemoveImpl(cur);
@@ -242,16 +234,16 @@ public:
} // namespace impl
template <typename T>
concept HasRedBlackKeyType = requires {
{ std::is_same<typename T::RedBlackKeyType, void>::value } -> std::convertible_to<bool>;
concept HasLightCompareType = requires {
{ std::is_same<typename T::LightCompareType, void>::value } -> std::convertible_to<bool>;
};
namespace impl {
template <typename T, typename Default>
consteval auto* GetRedBlackKeyType() {
if constexpr (HasRedBlackKeyType<T>) {
return static_cast<typename T::RedBlackKeyType*>(nullptr);
consteval auto* GetLightCompareType() {
if constexpr (HasLightCompareType<T>) {
return static_cast<typename T::LightCompareType*>(nullptr);
} else {
return static_cast<Default*>(nullptr);
}
@@ -260,17 +252,16 @@ namespace impl {
} // namespace impl
template <typename T, typename Default>
using RedBlackKeyType = std::remove_pointer_t<decltype(impl::GetRedBlackKeyType<T, Default>())>;
using LightCompareType = std::remove_pointer_t<decltype(impl::GetLightCompareType<T, Default>())>;
template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree {
YUZU_NON_COPYABLE(IntrusiveRedBlackTree);
public:
using ImplType = impl::IntrusiveRedBlackTreeImpl;
private:
ImplType m_impl;
ImplType impl{};
public:
template <bool Const>
@@ -286,9 +277,9 @@ public:
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using key_type = RedBlackKeyType<Comparator, value_type>;
using const_key_pointer = const key_type*;
using const_key_reference = const key_type&;
using light_value_type = LightCompareType<Comparator, value_type>;
using const_light_pointer = const light_value_type*;
using const_light_reference = const light_value_type&;
template <bool Const>
class Iterator {
@@ -307,201 +298,183 @@ public:
IntrusiveRedBlackTree::reference>;
private:
ImplIterator m_impl;
ImplIterator iterator;
private:
constexpr explicit Iterator(ImplIterator it) : m_impl(it) {}
explicit Iterator(ImplIterator it) : iterator(it) {}
constexpr explicit Iterator(typename ImplIterator::pointer p) : m_impl(p) {}
explicit Iterator(typename std::conditional<Const, ImplType::const_iterator,
ImplType::iterator>::type::pointer ptr)
: iterator(ptr) {}
constexpr ImplIterator GetImplIterator() const {
return m_impl;
ImplIterator GetImplIterator() const {
return this->iterator;
}
public:
constexpr bool operator==(const Iterator& rhs) const {
return m_impl == rhs.m_impl;
bool operator==(const Iterator& rhs) const {
return this->iterator == rhs.iterator;
}
constexpr bool operator!=(const Iterator& rhs) const {
bool operator!=(const Iterator& rhs) const {
return !(*this == rhs);
}
constexpr pointer operator->() const {
return Traits::GetParent(std::addressof(*m_impl));
pointer operator->() const {
return Traits::GetParent(std::addressof(*this->iterator));
}
constexpr reference operator*() const {
return *Traits::GetParent(std::addressof(*m_impl));
reference operator*() const {
return *Traits::GetParent(std::addressof(*this->iterator));
}
constexpr Iterator& operator++() {
++m_impl;
Iterator& operator++() {
++this->iterator;
return *this;
}
constexpr Iterator& operator--() {
--m_impl;
Iterator& operator--() {
--this->iterator;
return *this;
}
constexpr Iterator operator++(int) {
Iterator operator++(int) {
const Iterator it{*this};
++m_impl;
++this->iterator;
return it;
}
constexpr Iterator operator--(int) {
Iterator operator--(int) {
const Iterator it{*this};
--m_impl;
--this->iterator;
return it;
}
constexpr operator Iterator<true>() const {
return Iterator<true>(m_impl);
operator Iterator<true>() const {
return Iterator<true>(this->iterator);
}
};
private:
static constexpr int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
const IntrusiveRedBlackTreeNode* rhs) {
static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
const IntrusiveRedBlackTreeNode* rhs) {
return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs));
}
static constexpr int CompareKeyImpl(const_key_reference key,
const IntrusiveRedBlackTreeNode* rhs) {
return Comparator::Compare(key, *Traits::GetParent(rhs));
static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) {
return Comparator::Compare(*static_cast<const_light_pointer>(elm), *Traits::GetParent(rhs));
}
// Define accessors using RB_* functions.
constexpr IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
return freebsd::RB_INSERT(m_impl.m_root, node, CompareImpl);
IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
return RB_INSERT(&impl.root, node, CompareImpl);
}
constexpr IntrusiveRedBlackTreeNode* FindImpl(IntrusiveRedBlackTreeNode const* node) const {
return freebsd::RB_FIND(const_cast<ImplType::RootType&>(m_impl.m_root),
const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const {
return RB_FIND(const_cast<ImplType::RootType*>(&impl.root),
const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
}
constexpr IntrusiveRedBlackTreeNode* NFindImpl(IntrusiveRedBlackTreeNode const* node) const {
return freebsd::RB_NFIND(const_cast<ImplType::RootType&>(m_impl.m_root),
const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const {
return RB_NFIND(const_cast<ImplType::RootType*>(&impl.root),
const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
}
constexpr IntrusiveRedBlackTreeNode* FindKeyImpl(const_key_reference key) const {
return freebsd::RB_FIND_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
CompareKeyImpl);
IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const {
return RB_FIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
static_cast<const void*>(lelm), LightCompareImpl);
}
constexpr IntrusiveRedBlackTreeNode* NFindKeyImpl(const_key_reference key) const {
return freebsd::RB_NFIND_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
CompareKeyImpl);
}
constexpr IntrusiveRedBlackTreeNode* FindExistingImpl(
IntrusiveRedBlackTreeNode const* node) const {
return freebsd::RB_FIND_EXISTING(const_cast<ImplType::RootType&>(m_impl.m_root),
const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
}
constexpr IntrusiveRedBlackTreeNode* FindExistingKeyImpl(const_key_reference key) const {
return freebsd::RB_FIND_EXISTING_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
CompareKeyImpl);
IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const {
return RB_NFIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
static_cast<const void*>(lelm), LightCompareImpl);
}
public:
constexpr IntrusiveRedBlackTree() = default;
// Iterator accessors.
constexpr iterator begin() {
return iterator(m_impl.begin());
iterator begin() {
return iterator(this->impl.begin());
}
constexpr const_iterator begin() const {
return const_iterator(m_impl.begin());
const_iterator begin() const {
return const_iterator(this->impl.begin());
}
constexpr iterator end() {
return iterator(m_impl.end());
iterator end() {
return iterator(this->impl.end());
}
constexpr const_iterator end() const {
return const_iterator(m_impl.end());
const_iterator end() const {
return const_iterator(this->impl.end());
}
constexpr const_iterator cbegin() const {
const_iterator cbegin() const {
return this->begin();
}
constexpr const_iterator cend() const {
const_iterator cend() const {
return this->end();
}
constexpr iterator iterator_to(reference ref) {
return iterator(m_impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
iterator iterator_to(reference ref) {
return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
}
constexpr const_iterator iterator_to(const_reference ref) const {
return const_iterator(m_impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
const_iterator iterator_to(const_reference ref) const {
return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
}
// Content management.
constexpr bool empty() const {
return m_impl.empty();
bool empty() const {
return this->impl.empty();
}
constexpr reference back() {
return *Traits::GetParent(std::addressof(m_impl.back()));
reference back() {
return *Traits::GetParent(std::addressof(this->impl.back()));
}
constexpr const_reference back() const {
return *Traits::GetParent(std::addressof(m_impl.back()));
const_reference back() const {
return *Traits::GetParent(std::addressof(this->impl.back()));
}
constexpr reference front() {
return *Traits::GetParent(std::addressof(m_impl.front()));
reference front() {
return *Traits::GetParent(std::addressof(this->impl.front()));
}
constexpr const_reference front() const {
return *Traits::GetParent(std::addressof(m_impl.front()));
const_reference front() const {
return *Traits::GetParent(std::addressof(this->impl.front()));
}
constexpr iterator erase(iterator it) {
return iterator(m_impl.erase(it.GetImplIterator()));
iterator erase(iterator it) {
return iterator(this->impl.erase(it.GetImplIterator()));
}
constexpr iterator insert(reference ref) {
iterator insert(reference ref) {
ImplType::pointer node = Traits::GetNode(std::addressof(ref));
this->InsertImpl(node);
return iterator(node);
}
constexpr iterator find(const_reference ref) const {
iterator find(const_reference ref) const {
return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref))));
}
constexpr iterator nfind(const_reference ref) const {
iterator nfind(const_reference ref) const {
return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref))));
}
constexpr iterator find_key(const_key_reference ref) const {
return iterator(this->FindKeyImpl(ref));
iterator find_light(const_light_reference ref) const {
return iterator(this->FindLightImpl(std::addressof(ref)));
}
constexpr iterator nfind_key(const_key_reference ref) const {
return iterator(this->NFindKeyImpl(ref));
}
constexpr iterator find_existing(const_reference ref) const {
return iterator(this->FindExistingImpl(Traits::GetNode(std::addressof(ref))));
}
constexpr iterator find_existing_key(const_key_reference ref) const {
return iterator(this->FindExistingKeyImpl(ref));
iterator nfind_light(const_light_reference ref) const {
return iterator(this->NFindLightImpl(std::addressof(ref)));
}
};
template <auto T, class Derived = Common::impl::GetParentType<T>>
template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraits;
template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
@@ -525,16 +498,19 @@ private:
return std::addressof(parent->*Member);
}
static Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
return Common::GetParentPointer<Member, Derived>(node);
static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
return GetParentPointer<Member, Derived>(node);
}
static Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
return Common::GetParentPointer<Member, Derived>(node);
static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
return GetParentPointer<Member, Derived>(node);
}
private:
static constexpr TypedStorage<Derived> DerivedStorage = {};
};
template <auto T, class Derived = Common::impl::GetParentType<T>>
template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert;
template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
@@ -545,6 +521,11 @@ public:
IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraitsDeferredAssert, Comparator>;
using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
static constexpr bool IsValid() {
TypedStorage<Derived> DerivedStorage = {};
return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage);
}
private:
template <class, class, class>
friend class IntrusiveRedBlackTree;
@@ -559,36 +540,30 @@ private:
return std::addressof(parent->*Member);
}
static Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
return Common::GetParentPointer<Member, Derived>(node);
static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
return GetParentPointer<Member, Derived>(node);
}
static Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
return Common::GetParentPointer<Member, Derived>(node);
static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
return GetParentPointer<Member, Derived>(node);
}
};
template <class Derived>
class alignas(void*) IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
public:
using IntrusiveRedBlackTreeNode::IntrusiveRedBlackTreeNode;
constexpr Derived* GetPrev() {
return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode*>(
impl::IntrusiveRedBlackTreeImpl::GetPrev(this)));
return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
}
constexpr const Derived* GetPrev() const {
return static_cast<const Derived*>(static_cast<const IntrusiveRedBlackTreeBaseNode*>(
impl::IntrusiveRedBlackTreeImpl::GetPrev(this)));
return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
}
constexpr Derived* GetNext() {
return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode*>(
impl::IntrusiveRedBlackTreeImpl::GetNext(this)));
return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
}
constexpr const Derived* GetNext() const {
return static_cast<const Derived*>(static_cast<const IntrusiveRedBlackTreeBaseNode*>(
impl::IntrusiveRedBlackTreeImpl::GetNext(this)));
return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
}
};
@@ -606,22 +581,19 @@ private:
friend class impl::IntrusiveRedBlackTreeImpl;
static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
return static_cast<IntrusiveRedBlackTreeNode*>(
static_cast<IntrusiveRedBlackTreeBaseNode<Derived>*>(parent));
return static_cast<IntrusiveRedBlackTreeNode*>(parent);
}
static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
return static_cast<const IntrusiveRedBlackTreeNode*>(
static_cast<const IntrusiveRedBlackTreeBaseNode<Derived>*>(parent));
return static_cast<const IntrusiveRedBlackTreeNode*>(parent);
}
static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode<Derived>*>(node));
return static_cast<Derived*>(node);
}
static constexpr Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
return static_cast<const Derived*>(
static_cast<const IntrusiveRedBlackTreeBaseNode<Derived>*>(node));
static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
return static_cast<const Derived*>(node);
}
};
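
For orientation, a hedged usage sketch of the member-traits API defined in this file (MyObject and MyComparator are illustrative; the TreeType alias is assumed to match the one shown for the deferred-assert traits above):

struct MyObject {
    int key{};
    Common::IntrusiveRedBlackTreeNode node{};
};

struct MyComparator {
    // Matches the Comparator::Compare(lhs, rhs) contract used by CompareImpl above.
    static int Compare(const MyObject& lhs, const MyObject& rhs) {
        return (lhs.key < rhs.key) ? -1 : (lhs.key > rhs.key) ? 1 : 0;
    }
};

using Traits = Common::IntrusiveRedBlackTreeMemberTraits<&MyObject::node>;
using Tree = Traits::TreeType<MyComparator>;

void Demo() {
    Tree tree;
    MyObject a{.key = 1};
    MyObject b{.key = 2};
    tree.insert(a);
    tree.insert(b);
    const MyObject probe{.key = 2};
    auto it = tree.find(probe);  // iterator to b, or tree.end() if absent
}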

View File

@@ -5,8 +5,10 @@
#include <atomic>
#include <chrono>
#include <climits>
#include <exception>
#include <stop_token>
#include <thread>
#include <vector>
#include <fmt/format.h>
@@ -274,9 +276,9 @@ private:
ColorConsoleBackend color_console_backend{};
FileBackend file_backend;
std::jthread backend_thread;
MPSCQueue<Entry, true> message_queue{};
std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()};
std::jthread backend_thread;
};
} // namespace
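
Note: the hunk above moves backend_thread below message_queue and time_origin. Non-static data members are destroyed in reverse declaration order, so the jthread declared last is destroyed (stopped and joined) first, before the queue it drains goes away; that is presumably the motivation. A minimal standalone sketch of the idiom (Worker is illustrative):

#include <stop_token>
#include <thread>
#include <vector>

struct Worker {
    std::vector<int> queue;  // declared first, destroyed last
    std::jthread thread{[this](std::stop_token st) {
        while (!st.stop_requested()) {
            // drain `queue` here
        }
    }};  // declared last, destroyed first: ~jthread requests stop, then joins
};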

View File

@@ -4,6 +4,7 @@
#pragma once
#include <filesystem>
#include "common/logging/filter.h"
namespace Common::Log {

View File

@@ -7,6 +7,7 @@
#include <array>
#include <chrono>
#include <cstddef>
#include <string_view>
#include "common/logging/log.h"
namespace Common::Log {

View File

@@ -10,10 +10,12 @@
#endif
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/logging/filter.h"
#include "common/logging/log.h"
#include "common/logging/log_entry.h"
#include "common/logging/text_formatter.h"
#include "common/string_util.h"
namespace Common::Log {

View File

@@ -4,6 +4,7 @@
#pragma once
#include <cstddef>
#include <string>
namespace Common::Log {

View File

@@ -70,4 +70,4 @@ const MemoryInfo& GetMemInfo() {
return mem_info;
}
} // namespace Common
} // namespace Common

View File

@@ -6,6 +6,7 @@
#include <fmt/format.h>
#include "common/fs/file.h"
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
#include "common/nvidia_flags.h"

View File

@@ -5,6 +5,7 @@
#pragma once
#include <atomic>
#include <tuple>
#include "common/common_types.h"
#include "common/virtual_buffer.h"

View File

@@ -7,6 +7,7 @@
#include <type_traits>
#include "common/assert.h"
#include "common/common_types.h"
namespace Common {
namespace detail {

View File

@@ -12,6 +12,7 @@
#include <new>
#include <type_traits>
#include <vector>
#include "common/common_types.h"
namespace Common {

View File

@@ -5,9 +5,11 @@
#include <algorithm>
#include <cctype>
#include <codecvt>
#include <cstdlib>
#include <locale>
#include <sstream>
#include "common/logging/log.h"
#include "common/string_util.h"
#ifdef _WIN32

View File

@@ -4,6 +4,7 @@
#include <algorithm>
#include <cstring>
#include "common/assert.h"
#include "common/scm_rev.h"
#include "common/telemetry.h"
@@ -54,50 +55,22 @@ void AppendBuildInfo(FieldCollection& fc) {
void AppendCPUInfo(FieldCollection& fc) {
#ifdef ARCHITECTURE_x86_64
const auto& caps = Common::GetCPUCaps();
const auto add_field = [&fc](std::string_view field_name, const auto& field_value) {
fc.AddField(FieldType::UserSystem, field_name, field_value);
};
add_field("CPU_Model", caps.cpu_string);
add_field("CPU_BrandString", caps.brand_string);
add_field("CPU_Extension_x64_SSE", caps.sse);
add_field("CPU_Extension_x64_SSE2", caps.sse2);
add_field("CPU_Extension_x64_SSE3", caps.sse3);
add_field("CPU_Extension_x64_SSSE3", caps.ssse3);
add_field("CPU_Extension_x64_SSE41", caps.sse4_1);
add_field("CPU_Extension_x64_SSE42", caps.sse4_2);
add_field("CPU_Extension_x64_AVX", caps.avx);
add_field("CPU_Extension_x64_AVX_VNNI", caps.avx_vnni);
add_field("CPU_Extension_x64_AVX2", caps.avx2);
// Skylake-X/SP level AVX512, for compatibility with the previous telemetry field
add_field("CPU_Extension_x64_AVX512",
caps.avx512f && caps.avx512cd && caps.avx512vl && caps.avx512dq && caps.avx512bw);
add_field("CPU_Extension_x64_AVX512F", caps.avx512f);
add_field("CPU_Extension_x64_AVX512CD", caps.avx512cd);
add_field("CPU_Extension_x64_AVX512VL", caps.avx512vl);
add_field("CPU_Extension_x64_AVX512DQ", caps.avx512dq);
add_field("CPU_Extension_x64_AVX512BW", caps.avx512bw);
add_field("CPU_Extension_x64_AVX512BITALG", caps.avx512bitalg);
add_field("CPU_Extension_x64_AVX512VBMI", caps.avx512vbmi);
add_field("CPU_Extension_x64_AES", caps.aes);
add_field("CPU_Extension_x64_BMI1", caps.bmi1);
add_field("CPU_Extension_x64_BMI2", caps.bmi2);
add_field("CPU_Extension_x64_F16C", caps.f16c);
add_field("CPU_Extension_x64_FMA", caps.fma);
add_field("CPU_Extension_x64_FMA4", caps.fma4);
add_field("CPU_Extension_x64_GFNI", caps.gfni);
add_field("CPU_Extension_x64_INVARIANT_TSC", caps.invariant_tsc);
add_field("CPU_Extension_x64_LZCNT", caps.lzcnt);
add_field("CPU_Extension_x64_MOVBE", caps.movbe);
add_field("CPU_Extension_x64_PCLMULQDQ", caps.pclmulqdq);
add_field("CPU_Extension_x64_POPCNT", caps.popcnt);
add_field("CPU_Extension_x64_SHA", caps.sha);
fc.AddField(FieldType::UserSystem, "CPU_Model", Common::GetCPUCaps().cpu_string);
fc.AddField(FieldType::UserSystem, "CPU_BrandString", Common::GetCPUCaps().brand_string);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AES", Common::GetCPUCaps().aes);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AVX", Common::GetCPUCaps().avx);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AVX2", Common::GetCPUCaps().avx2);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AVX512", Common::GetCPUCaps().avx512);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_BMI1", Common::GetCPUCaps().bmi1);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_BMI2", Common::GetCPUCaps().bmi2);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_FMA", Common::GetCPUCaps().fma);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_FMA4", Common::GetCPUCaps().fma4);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE", Common::GetCPUCaps().sse);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE2", Common::GetCPUCaps().sse2);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE3", Common::GetCPUCaps().sse3);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSSE3", Common::GetCPUCaps().ssse3);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE41", Common::GetCPUCaps().sse4_1);
fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE42", Common::GetCPUCaps().sse4_2);
#else
fc.AddField(FieldType::UserSystem, "CPU_Model", "Other");
#endif

View File

@@ -55,8 +55,8 @@ class Field : public FieldInterface {
public:
YUZU_NON_COPYABLE(Field);
Field(FieldType type_, std::string_view name_, T value_)
: name(name_), type(type_), value(std::move(value_)) {}
Field(FieldType type_, std::string name_, T value_)
: name(std::move(name_)), type(type_), value(std::move(value_)) {}
~Field() override = default;
@@ -123,7 +123,7 @@ public:
* @param value Value for the field to add.
*/
template <typename T>
void AddField(FieldType type, std::string_view name, T value) {
void AddField(FieldType type, const char* name, T value) {
return AddField(std::make_unique<Field<T>>(type, name, std::move(value)));
}
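
Note: the Field constructor above switches from std::string_view to a by-value std::string that is moved into the member, which lets rvalue arguments transfer their buffer instead of being copied. A standalone sketch of the sink-parameter idiom (NamedValue is illustrative):

#include <string>
#include <utility>

struct NamedValue {
    // One overload serves string literals, lvalues (one copy), and rvalues (a move).
    NamedValue(std::string name_, int value_) : name(std::move(name_)), value(value_) {}

    std::string name;
    int value;
};

// NamedValue a{"CPU_Model", 1};   // constructs from a literal
// std::string n = "CPU_Model";
// NamedValue b{n, 2};             // copies n once
// NamedValue c{std::move(n), 3};  // moves n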

View File

@@ -43,445 +43,246 @@
* The maximum height of a red-black tree is 2lg (n+1).
*/
namespace Common::freebsd {
#include "common/assert.h"
enum class RBColor {
RB_BLACK = 0,
RB_RED = 1,
namespace Common {
template <typename T>
class RBHead {
public:
[[nodiscard]] T* Root() {
return rbh_root;
}
[[nodiscard]] const T* Root() const {
return rbh_root;
}
void SetRoot(T* root) {
rbh_root = root;
}
[[nodiscard]] bool IsEmpty() const {
return Root() == nullptr;
}
private:
T* rbh_root = nullptr;
};
enum class EntryColor {
Black,
Red,
};
#pragma pack(push, 4)
template <typename T>
class RBEntry {
public:
constexpr RBEntry() = default;
[[nodiscard]] constexpr T* Left() {
return m_rbe_left;
}
[[nodiscard]] constexpr const T* Left() const {
return m_rbe_left;
[[nodiscard]] T* Left() {
return rbe_left;
}
constexpr void SetLeft(T* e) {
m_rbe_left = e;
[[nodiscard]] const T* Left() const {
return rbe_left;
}
[[nodiscard]] constexpr T* Right() {
return m_rbe_right;
}
[[nodiscard]] constexpr const T* Right() const {
return m_rbe_right;
void SetLeft(T* left) {
rbe_left = left;
}
constexpr void SetRight(T* e) {
m_rbe_right = e;
[[nodiscard]] T* Right() {
return rbe_right;
}
[[nodiscard]] constexpr T* Parent() {
return m_rbe_parent;
}
[[nodiscard]] constexpr const T* Parent() const {
return m_rbe_parent;
[[nodiscard]] const T* Right() const {
return rbe_right;
}
constexpr void SetParent(T* e) {
m_rbe_parent = e;
void SetRight(T* right) {
rbe_right = right;
}
[[nodiscard]] constexpr bool IsBlack() const {
return m_rbe_color == RBColor::RB_BLACK;
}
[[nodiscard]] constexpr bool IsRed() const {
return m_rbe_color == RBColor::RB_RED;
}
[[nodiscard]] constexpr RBColor Color() const {
return m_rbe_color;
[[nodiscard]] T* Parent() {
return rbe_parent;
}
constexpr void SetColor(RBColor c) {
m_rbe_color = c;
[[nodiscard]] const T* Parent() const {
return rbe_parent;
}
void SetParent(T* parent) {
rbe_parent = parent;
}
[[nodiscard]] bool IsBlack() const {
return rbe_color == EntryColor::Black;
}
[[nodiscard]] bool IsRed() const {
return rbe_color == EntryColor::Red;
}
[[nodiscard]] EntryColor Color() const {
return rbe_color;
}
void SetColor(EntryColor color) {
rbe_color = color;
}
private:
T* m_rbe_left{};
T* m_rbe_right{};
T* m_rbe_parent{};
RBColor m_rbe_color{RBColor::RB_BLACK};
};
#pragma pack(pop)
template <typename T>
struct CheckRBEntry {
static constexpr bool value = false;
};
template <typename T>
struct CheckRBEntry<RBEntry<T>> {
static constexpr bool value = true;
T* rbe_left = nullptr;
T* rbe_right = nullptr;
T* rbe_parent = nullptr;
EntryColor rbe_color{};
};
template <typename T>
concept IsRBEntry = CheckRBEntry<T>::value;
template <typename T>
concept HasRBEntry = requires(T& t, const T& ct) {
{ t.GetRBEntry() } -> std::same_as<RBEntry<T>&>;
{ ct.GetRBEntry() } -> std::same_as<const RBEntry<T>&>;
};
template <typename T>
requires HasRBEntry<T>
class RBHead {
private:
T* m_rbh_root = nullptr;
public:
[[nodiscard]] constexpr T* Root() {
return m_rbh_root;
}
[[nodiscard]] constexpr const T* Root() const {
return m_rbh_root;
}
constexpr void SetRoot(T* root) {
m_rbh_root = root;
}
[[nodiscard]] constexpr bool IsEmpty() const {
return this->Root() == nullptr;
}
};
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr RBEntry<T>& RB_ENTRY(T* t) {
return t->GetRBEntry();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const RBEntry<T>& RB_ENTRY(const T* t) {
return t->GetRBEntry();
template <typename Node>
[[nodiscard]] RBEntry<Node>& RB_ENTRY(Node* node) {
return node->GetEntry();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr T* RB_LEFT(T* t) {
return RB_ENTRY(t).Left();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const T* RB_LEFT(const T* t) {
return RB_ENTRY(t).Left();
template <typename Node>
[[nodiscard]] const RBEntry<Node>& RB_ENTRY(const Node* node) {
return node->GetEntry();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr T* RB_RIGHT(T* t) {
return RB_ENTRY(t).Right();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const T* RB_RIGHT(const T* t) {
return RB_ENTRY(t).Right();
template <typename Node>
[[nodiscard]] Node* RB_PARENT(Node* node) {
return RB_ENTRY(node).Parent();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr T* RB_PARENT(T* t) {
return RB_ENTRY(t).Parent();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const T* RB_PARENT(const T* t) {
return RB_ENTRY(t).Parent();
template <typename Node>
[[nodiscard]] const Node* RB_PARENT(const Node* node) {
return RB_ENTRY(node).Parent();
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET_LEFT(T* t, T* e) {
RB_ENTRY(t).SetLeft(e);
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET_RIGHT(T* t, T* e) {
RB_ENTRY(t).SetRight(e);
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET_PARENT(T* t, T* e) {
RB_ENTRY(t).SetParent(e);
template <typename Node>
void RB_SET_PARENT(Node* node, Node* parent) {
return RB_ENTRY(node).SetParent(parent);
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr bool RB_IS_BLACK(const T* t) {
return RB_ENTRY(t).IsBlack();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr bool RB_IS_RED(const T* t) {
return RB_ENTRY(t).IsRed();
template <typename Node>
[[nodiscard]] Node* RB_LEFT(Node* node) {
return RB_ENTRY(node).Left();
}
template <typename T>
requires HasRBEntry<T>
[[nodiscard]] constexpr RBColor RB_COLOR(const T* t) {
return RB_ENTRY(t).Color();
template <typename Node>
[[nodiscard]] const Node* RB_LEFT(const Node* node) {
return RB_ENTRY(node).Left();
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET_COLOR(T* t, RBColor c) {
RB_ENTRY(t).SetColor(c);
template <typename Node>
void RB_SET_LEFT(Node* node, Node* left) {
return RB_ENTRY(node).SetLeft(left);
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET(T* elm, T* parent) {
auto& rb_entry = RB_ENTRY(elm);
rb_entry.SetParent(parent);
rb_entry.SetLeft(nullptr);
rb_entry.SetRight(nullptr);
rb_entry.SetColor(RBColor::RB_RED);
template <typename Node>
[[nodiscard]] Node* RB_RIGHT(Node* node) {
return RB_ENTRY(node).Right();
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_SET_BLACKRED(T* black, T* red) {
RB_SET_COLOR(black, RBColor::RB_BLACK);
RB_SET_COLOR(red, RBColor::RB_RED);
template <typename Node>
[[nodiscard]] const Node* RB_RIGHT(const Node* node) {
return RB_ENTRY(node).Right();
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_ROTATE_LEFT(RBHead<T>& head, T* elm, T*& tmp) {
template <typename Node>
void RB_SET_RIGHT(Node* node, Node* right) {
return RB_ENTRY(node).SetRight(right);
}
template <typename Node>
[[nodiscard]] bool RB_IS_BLACK(const Node* node) {
return RB_ENTRY(node).IsBlack();
}
template <typename Node>
[[nodiscard]] bool RB_IS_RED(const Node* node) {
return RB_ENTRY(node).IsRed();
}
template <typename Node>
[[nodiscard]] EntryColor RB_COLOR(const Node* node) {
return RB_ENTRY(node).Color();
}
template <typename Node>
void RB_SET_COLOR(Node* node, EntryColor color) {
return RB_ENTRY(node).SetColor(color);
}
template <typename Node>
void RB_SET(Node* node, Node* parent) {
auto& entry = RB_ENTRY(node);
entry.SetParent(parent);
entry.SetLeft(nullptr);
entry.SetRight(nullptr);
entry.SetColor(EntryColor::Red);
}
template <typename Node>
void RB_SET_BLACKRED(Node* black, Node* red) {
RB_SET_COLOR(black, EntryColor::Black);
RB_SET_COLOR(red, EntryColor::Red);
}
template <typename Node>
void RB_ROTATE_LEFT(RBHead<Node>* head, Node* elm, Node*& tmp) {
tmp = RB_RIGHT(elm);
if (RB_SET_RIGHT(elm, RB_LEFT(tmp)); RB_RIGHT(elm) != nullptr) {
RB_SET_RIGHT(elm, RB_LEFT(tmp));
if (RB_RIGHT(elm) != nullptr) {
RB_SET_PARENT(RB_LEFT(tmp), elm);
}
if (RB_SET_PARENT(tmp, RB_PARENT(elm)); RB_PARENT(tmp) != nullptr) {
RB_SET_PARENT(tmp, RB_PARENT(elm));
if (RB_PARENT(tmp) != nullptr) {
if (elm == RB_LEFT(RB_PARENT(elm))) {
RB_SET_LEFT(RB_PARENT(elm), tmp);
} else {
RB_SET_RIGHT(RB_PARENT(elm), tmp);
}
} else {
head.SetRoot(tmp);
head->SetRoot(tmp);
}
RB_SET_LEFT(tmp, elm);
RB_SET_PARENT(elm, tmp);
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_ROTATE_RIGHT(RBHead<T>& head, T* elm, T*& tmp) {
template <typename Node>
void RB_ROTATE_RIGHT(RBHead<Node>* head, Node* elm, Node*& tmp) {
tmp = RB_LEFT(elm);
if (RB_SET_LEFT(elm, RB_RIGHT(tmp)); RB_LEFT(elm) != nullptr) {
RB_SET_LEFT(elm, RB_RIGHT(tmp));
if (RB_LEFT(elm) != nullptr) {
RB_SET_PARENT(RB_RIGHT(tmp), elm);
}
if (RB_SET_PARENT(tmp, RB_PARENT(elm)); RB_PARENT(tmp) != nullptr) {
RB_SET_PARENT(tmp, RB_PARENT(elm));
if (RB_PARENT(tmp) != nullptr) {
if (elm == RB_LEFT(RB_PARENT(elm))) {
RB_SET_LEFT(RB_PARENT(elm), tmp);
} else {
RB_SET_RIGHT(RB_PARENT(elm), tmp);
}
} else {
head.SetRoot(tmp);
head->SetRoot(tmp);
}
RB_SET_RIGHT(tmp, elm);
RB_SET_PARENT(elm, tmp);
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_REMOVE_COLOR(RBHead<T>& head, T* parent, T* elm) {
T* tmp;
while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head.Root()) {
if (RB_LEFT(parent) == elm) {
tmp = RB_RIGHT(parent);
if (RB_IS_RED(tmp)) {
RB_SET_BLACKRED(tmp, parent);
RB_ROTATE_LEFT(head, parent, tmp);
tmp = RB_RIGHT(parent);
}
template <typename Node>
void RB_INSERT_COLOR(RBHead<Node>* head, Node* elm) {
Node* parent = nullptr;
Node* tmp = nullptr;
if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
(RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
RB_SET_COLOR(tmp, RBColor::RB_RED);
elm = parent;
parent = RB_PARENT(elm);
} else {
if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) {
T* oleft;
if ((oleft = RB_LEFT(tmp)) != nullptr) {
RB_SET_COLOR(oleft, RBColor::RB_BLACK);
}
RB_SET_COLOR(tmp, RBColor::RB_RED);
RB_ROTATE_RIGHT(head, tmp, oleft);
tmp = RB_RIGHT(parent);
}
RB_SET_COLOR(tmp, RB_COLOR(parent));
RB_SET_COLOR(parent, RBColor::RB_BLACK);
if (RB_RIGHT(tmp)) {
RB_SET_COLOR(RB_RIGHT(tmp), RBColor::RB_BLACK);
}
RB_ROTATE_LEFT(head, parent, tmp);
elm = head.Root();
break;
}
} else {
tmp = RB_LEFT(parent);
if (RB_IS_RED(tmp)) {
RB_SET_BLACKRED(tmp, parent);
RB_ROTATE_RIGHT(head, parent, tmp);
tmp = RB_LEFT(parent);
}
if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
(RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
RB_SET_COLOR(tmp, RBColor::RB_RED);
elm = parent;
parent = RB_PARENT(elm);
} else {
if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) {
T* oright;
if ((oright = RB_RIGHT(tmp)) != nullptr) {
RB_SET_COLOR(oright, RBColor::RB_BLACK);
}
RB_SET_COLOR(tmp, RBColor::RB_RED);
RB_ROTATE_LEFT(head, tmp, oright);
tmp = RB_LEFT(parent);
}
RB_SET_COLOR(tmp, RB_COLOR(parent));
RB_SET_COLOR(parent, RBColor::RB_BLACK);
if (RB_LEFT(tmp)) {
RB_SET_COLOR(RB_LEFT(tmp), RBColor::RB_BLACK);
}
RB_ROTATE_RIGHT(head, parent, tmp);
elm = head.Root();
break;
}
}
}
if (elm) {
RB_SET_COLOR(elm, RBColor::RB_BLACK);
}
}
template <typename T>
requires HasRBEntry<T>
constexpr T* RB_REMOVE(RBHead<T>& head, T* elm) {
T* child = nullptr;
T* parent = nullptr;
T* old = elm;
RBColor color = RBColor::RB_BLACK;
if (RB_LEFT(elm) == nullptr) {
child = RB_RIGHT(elm);
} else if (RB_RIGHT(elm) == nullptr) {
child = RB_LEFT(elm);
} else {
T* left;
elm = RB_RIGHT(elm);
while ((left = RB_LEFT(elm)) != nullptr) {
elm = left;
}
child = RB_RIGHT(elm);
parent = RB_PARENT(elm);
color = RB_COLOR(elm);
if (child) {
RB_SET_PARENT(child, parent);
}
if (parent) {
if (RB_LEFT(parent) == elm) {
RB_SET_LEFT(parent, child);
} else {
RB_SET_RIGHT(parent, child);
}
} else {
head.SetRoot(child);
}
if (RB_PARENT(elm) == old) {
parent = elm;
}
elm->SetRBEntry(old->GetRBEntry());
if (RB_PARENT(old)) {
if (RB_LEFT(RB_PARENT(old)) == old) {
RB_SET_LEFT(RB_PARENT(old), elm);
} else {
RB_SET_RIGHT(RB_PARENT(old), elm);
}
} else {
head.SetRoot(elm);
}
RB_SET_PARENT(RB_LEFT(old), elm);
if (RB_RIGHT(old)) {
RB_SET_PARENT(RB_RIGHT(old), elm);
}
if (parent) {
left = parent;
}
if (color == RBColor::RB_BLACK) {
RB_REMOVE_COLOR(head, parent, child);
}
return old;
}
parent = RB_PARENT(elm);
color = RB_COLOR(elm);
if (child) {
RB_SET_PARENT(child, parent);
}
if (parent) {
if (RB_LEFT(parent) == elm) {
RB_SET_LEFT(parent, child);
} else {
RB_SET_RIGHT(parent, child);
}
} else {
head.SetRoot(child);
}
if (color == RBColor::RB_BLACK) {
RB_REMOVE_COLOR(head, parent, child);
}
return old;
}
template <typename T>
requires HasRBEntry<T>
constexpr void RB_INSERT_COLOR(RBHead<T>& head, T* elm) {
T *parent = nullptr, *tmp = nullptr;
while ((parent = RB_PARENT(elm)) != nullptr && RB_IS_RED(parent)) {
T* gparent = RB_PARENT(parent);
Node* gparent = RB_PARENT(parent);
if (parent == RB_LEFT(gparent)) {
tmp = RB_RIGHT(gparent);
if (tmp && RB_IS_RED(tmp)) {
RB_SET_COLOR(tmp, RBColor::RB_BLACK);
RB_SET_COLOR(tmp, EntryColor::Black);
RB_SET_BLACKRED(parent, gparent);
elm = gparent;
continue;
@@ -499,7 +300,7 @@ constexpr void RB_INSERT_COLOR(RBHead<T>& head, T* elm) {
} else {
tmp = RB_LEFT(gparent);
if (tmp && RB_IS_RED(tmp)) {
RB_SET_COLOR(tmp, RBColor::RB_BLACK);
RB_SET_COLOR(tmp, EntryColor::Black);
RB_SET_BLACKRED(parent, gparent);
elm = gparent;
continue;
@@ -517,14 +318,194 @@ constexpr void RB_INSERT_COLOR(RBHead<T>& head, T* elm) {
}
}
RB_SET_COLOR(head.Root(), RBColor::RB_BLACK);
RB_SET_COLOR(head->Root(), EntryColor::Black);
}
template <typename T, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_INSERT(RBHead<T>& head, T* elm, Compare cmp) {
T* parent = nullptr;
T* tmp = head.Root();
template <typename Node>
void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
Node* tmp;
while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head->Root() && parent != nullptr) {
if (RB_LEFT(parent) == elm) {
tmp = RB_RIGHT(parent);
if (!tmp) {
ASSERT_MSG(false, "tmp is invalid!");
break;
}
if (RB_IS_RED(tmp)) {
RB_SET_BLACKRED(tmp, parent);
RB_ROTATE_LEFT(head, parent, tmp);
tmp = RB_RIGHT(parent);
}
if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
(RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
RB_SET_COLOR(tmp, EntryColor::Red);
elm = parent;
parent = RB_PARENT(elm);
} else {
if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) {
Node* oleft;
if ((oleft = RB_LEFT(tmp)) != nullptr) {
RB_SET_COLOR(oleft, EntryColor::Black);
}
RB_SET_COLOR(tmp, EntryColor::Red);
RB_ROTATE_RIGHT(head, tmp, oleft);
tmp = RB_RIGHT(parent);
}
RB_SET_COLOR(tmp, RB_COLOR(parent));
RB_SET_COLOR(parent, EntryColor::Black);
if (RB_RIGHT(tmp)) {
RB_SET_COLOR(RB_RIGHT(tmp), EntryColor::Black);
}
RB_ROTATE_LEFT(head, parent, tmp);
elm = head->Root();
break;
}
} else {
tmp = RB_LEFT(parent);
if (RB_IS_RED(tmp)) {
RB_SET_BLACKRED(tmp, parent);
RB_ROTATE_RIGHT(head, parent, tmp);
tmp = RB_LEFT(parent);
}
if (!tmp) {
ASSERT_MSG(false, "tmp is invalid!");
break;
}
if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
(RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
RB_SET_COLOR(tmp, EntryColor::Red);
elm = parent;
parent = RB_PARENT(elm);
} else {
if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) {
Node* oright;
if ((oright = RB_RIGHT(tmp)) != nullptr) {
RB_SET_COLOR(oright, EntryColor::Black);
}
RB_SET_COLOR(tmp, EntryColor::Red);
RB_ROTATE_LEFT(head, tmp, oright);
tmp = RB_LEFT(parent);
}
RB_SET_COLOR(tmp, RB_COLOR(parent));
RB_SET_COLOR(parent, EntryColor::Black);
if (RB_LEFT(tmp)) {
RB_SET_COLOR(RB_LEFT(tmp), EntryColor::Black);
}
RB_ROTATE_RIGHT(head, parent, tmp);
elm = head->Root();
break;
}
}
}
if (elm) {
RB_SET_COLOR(elm, EntryColor::Black);
}
}
template <typename Node>
Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
Node* child = nullptr;
Node* parent = nullptr;
Node* old = elm;
EntryColor color{};
const auto finalize = [&] {
if (color == EntryColor::Black) {
RB_REMOVE_COLOR(head, parent, child);
}
return old;
};
if (RB_LEFT(elm) == nullptr) {
child = RB_RIGHT(elm);
} else if (RB_RIGHT(elm) == nullptr) {
child = RB_LEFT(elm);
} else {
Node* left;
elm = RB_RIGHT(elm);
while ((left = RB_LEFT(elm)) != nullptr) {
elm = left;
}
child = RB_RIGHT(elm);
parent = RB_PARENT(elm);
color = RB_COLOR(elm);
if (child) {
RB_SET_PARENT(child, parent);
}
if (parent) {
if (RB_LEFT(parent) == elm) {
RB_SET_LEFT(parent, child);
} else {
RB_SET_RIGHT(parent, child);
}
} else {
head->SetRoot(child);
}
if (RB_PARENT(elm) == old) {
parent = elm;
}
elm->SetEntry(old->GetEntry());
if (RB_PARENT(old)) {
if (RB_LEFT(RB_PARENT(old)) == old) {
RB_SET_LEFT(RB_PARENT(old), elm);
} else {
RB_SET_RIGHT(RB_PARENT(old), elm);
}
} else {
head->SetRoot(elm);
}
RB_SET_PARENT(RB_LEFT(old), elm);
if (RB_RIGHT(old)) {
RB_SET_PARENT(RB_RIGHT(old), elm);
}
if (parent) {
left = parent;
}
return finalize();
}
parent = RB_PARENT(elm);
color = RB_COLOR(elm);
if (child) {
RB_SET_PARENT(child, parent);
}
if (parent) {
if (RB_LEFT(parent) == elm) {
RB_SET_LEFT(parent, child);
} else {
RB_SET_RIGHT(parent, child);
}
} else {
head->SetRoot(child);
}
return finalize();
}
// Inserts a node into the RB tree
template <typename Node, typename CompareFunction>
Node* RB_INSERT(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
Node* parent = nullptr;
Node* tmp = head->Root();
int comp = 0;
while (tmp) {
@@ -548,17 +529,17 @@ constexpr T* RB_INSERT(RBHead<T>& head, T* elm, Compare cmp) {
RB_SET_RIGHT(parent, elm);
}
} else {
head.SetRoot(elm);
head->SetRoot(elm);
}
RB_INSERT_COLOR(head, elm);
return nullptr;
}
template <typename T, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_FIND(RBHead<T>& head, T* elm, Compare cmp) {
T* tmp = head.Root();
// Finds the node with the same key as elm
template <typename Node, typename CompareFunction>
Node* RB_FIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
Node* tmp = head->Root();
while (tmp) {
const int comp = cmp(elm, tmp);
@@ -574,11 +555,11 @@ constexpr T* RB_FIND(RBHead<T>& head, T* elm, Compare cmp) {
return nullptr;
}
template <typename T, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_NFIND(RBHead<T>& head, T* elm, Compare cmp) {
T* tmp = head.Root();
T* res = nullptr;
// Finds the first node greater than or equal to the search key
template <typename Node, typename CompareFunction>
Node* RB_NFIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
Node* tmp = head->Root();
Node* res = nullptr;
while (tmp) {
const int comp = cmp(elm, tmp);
@@ -595,13 +576,13 @@ constexpr T* RB_NFIND(RBHead<T>& head, T* elm, Compare cmp) {
return res;
}
template <typename T, typename U, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_FIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
T* tmp = head.Root();
// Finds the node with the same key as lelm
template <typename Node, typename CompareFunction>
Node* RB_FIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
Node* tmp = head->Root();
while (tmp) {
const int comp = cmp(key, tmp);
const int comp = lcmp(lelm, tmp);
if (comp < 0) {
tmp = RB_LEFT(tmp);
} else if (comp > 0) {
@@ -614,14 +595,14 @@ constexpr T* RB_FIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
return nullptr;
}
template <typename T, typename U, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_NFIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
T* tmp = head.Root();
T* res = nullptr;
// Finds the first node greater than or equal to the search key
template <typename Node, typename CompareFunction>
Node* RB_NFIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
Node* tmp = head->Root();
Node* res = nullptr;
while (tmp) {
const int comp = cmp(key, tmp);
const int comp = lcmp(lelm, tmp);
if (comp < 0) {
res = tmp;
tmp = RB_LEFT(tmp);
@@ -635,43 +616,8 @@ constexpr T* RB_NFIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
return res;
}
template <typename T, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_FIND_EXISTING(RBHead<T>& head, T* elm, Compare cmp) {
T* tmp = head.Root();
while (true) {
const int comp = cmp(elm, tmp);
if (comp < 0) {
tmp = RB_LEFT(tmp);
} else if (comp > 0) {
tmp = RB_RIGHT(tmp);
} else {
return tmp;
}
}
}
template <typename T, typename U, typename Compare>
requires HasRBEntry<T>
constexpr T* RB_FIND_EXISTING_KEY(RBHead<T>& head, const U& key, Compare cmp) {
T* tmp = head.Root();
while (true) {
const int comp = cmp(key, tmp);
if (comp < 0) {
tmp = RB_LEFT(tmp);
} else if (comp > 0) {
tmp = RB_RIGHT(tmp);
} else {
return tmp;
}
}
}
template <typename T>
requires HasRBEntry<T>
constexpr T* RB_NEXT(T* elm) {
template <typename Node>
Node* RB_NEXT(Node* elm) {
if (RB_RIGHT(elm)) {
elm = RB_RIGHT(elm);
while (RB_LEFT(elm)) {
@@ -690,9 +636,8 @@ constexpr T* RB_NEXT(T* elm) {
return elm;
}
template <typename T>
requires HasRBEntry<T>
constexpr T* RB_PREV(T* elm) {
template <typename Node>
Node* RB_PREV(Node* elm) {
if (RB_LEFT(elm)) {
elm = RB_LEFT(elm);
while (RB_RIGHT(elm)) {
@@ -711,32 +656,30 @@ constexpr T* RB_PREV(T* elm) {
return elm;
}
template <typename T>
requires HasRBEntry<T>
constexpr T* RB_MIN(RBHead<T>& head) {
T* tmp = head.Root();
T* parent = nullptr;
template <typename Node>
Node* RB_MINMAX(RBHead<Node>* head, bool is_min) {
Node* tmp = head->Root();
Node* parent = nullptr;
while (tmp) {
parent = tmp;
tmp = RB_LEFT(tmp);
if (is_min) {
tmp = RB_LEFT(tmp);
} else {
tmp = RB_RIGHT(tmp);
}
}
return parent;
}
template <typename T>
requires HasRBEntry<T>
constexpr T* RB_MAX(RBHead<T>& head) {
T* tmp = head.Root();
T* parent = nullptr;
while (tmp) {
parent = tmp;
tmp = RB_RIGHT(tmp);
}
return parent;
template <typename Node>
Node* RB_MIN(RBHead<Node>* head) {
return RB_MINMAX(head, true);
}
} // namespace Common::freebsd
template <typename Node>
Node* RB_MAX(RBHead<Node>* head) {
return RB_MINMAX(head, false);
}
} // namespace Common
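
For orientation, a hedged sketch of driving the rewritten free functions directly with a hand-rolled node (Node and Cmp are illustrative; the GetEntry/SetEntry surface is what RB_ENTRY and RB_REMOVE expect above):

struct Node {
    int key{};
    Common::RBEntry<Node> entry{};

    Common::RBEntry<Node>& GetEntry() { return entry; }
    const Common::RBEntry<Node>& GetEntry() const { return entry; }
    void SetEntry(const Common::RBEntry<Node>& e) { entry = e; }  // used by RB_REMOVE
};

static int Cmp(const Node* lhs, const Node* rhs) {
    return (lhs->key < rhs->key) ? -1 : (lhs->key > rhs->key) ? 1 : 0;
}

void Demo() {
    Common::RBHead<Node> head;
    Node a{.key = 1};
    Node b{.key = 2};
    Common::RB_INSERT(&head, &a, Cmp);
    Common::RB_INSERT(&head, &b, Cmp);
    Node probe{.key = 2};
    Node* found = Common::RB_FIND(&head, &probe, Cmp);  // &b
    Common::RB_REMOVE(&head, found);
}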

View File

@@ -4,6 +4,7 @@
#pragma once
#include <cstring>
#include <utility>
#ifdef _MSC_VER
@@ -12,7 +13,6 @@
#pragma intrinsic(_umul128)
#pragma intrinsic(_udiv128)
#else
#include <cstring>
#include <x86intrin.h>
#endif

View File

@@ -7,6 +7,7 @@
#include <array>
#include <functional>
#include <string>
#include <string_view>
#include "common/common_types.h"

View File

@@ -4,6 +4,7 @@
#pragma once
#include <type_traits>
#include <utility>
namespace Common {

View File

@@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <cstdint>
#include "common/uint128.h"
#include "common/wall_clock.h"

View File

@@ -1,11 +1,8 @@
// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project / 2022 Yuzu Emulator
// Project Licensed under GPLv2 or any later version Refer to the license.txt file included.
// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <array>
#include <cstring>
#include <iterator>
#include <string_view>
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/x64/cpu_detect.h"
@@ -20,7 +17,7 @@
// clang-format on
#endif
static inline void __cpuidex(int info[4], u32 function_id, u32 subfunction_id) {
static inline void __cpuidex(int info[4], int function_id, int subfunction_id) {
#if defined(__DragonFly__) || defined(__FreeBSD__)
// Despite the name, this is just do_cpuid() with ECX as second input.
cpuid_count((u_int)function_id, (u_int)subfunction_id, (u_int*)info);
@@ -33,7 +30,7 @@ static inline void __cpuidex(int info[4], u32 function_id, u32 subfunction_id) {
#endif
}
static inline void __cpuid(int info[4], u32 function_id) {
static inline void __cpuid(int info[4], int function_id) {
return __cpuidex(info, function_id, 0);
}
@@ -48,17 +45,6 @@ static inline u64 _xgetbv(u32 index) {
namespace Common {
CPUCaps::Manufacturer CPUCaps::ParseManufacturer(std::string_view brand_string) {
if (brand_string == "GenuineIntel") {
return Manufacturer::Intel;
} else if (brand_string == "AuthenticAMD") {
return Manufacturer::AMD;
} else if (brand_string == "HygonGenuine") {
return Manufacturer::Hygon;
}
return Manufacturer::Unknown;
}
// Detects the various CPU features
static CPUCaps Detect() {
CPUCaps caps = {};
@@ -67,74 +53,75 @@ static CPUCaps Detect() {
// yuzu at all anyway
int cpu_id[4];
memset(caps.brand_string, 0, sizeof(caps.brand_string));
// Detect CPU's CPUID capabilities and grab manufacturer string
// Detect CPU's CPUID capabilities and grab CPU string
__cpuid(cpu_id, 0x00000000);
const u32 max_std_fn = cpu_id[0]; // EAX
u32 max_std_fn = cpu_id[0]; // EAX
std::memset(caps.brand_string, 0, std::size(caps.brand_string));
std::memcpy(&caps.brand_string[0], &cpu_id[1], sizeof(u32));
std::memcpy(&caps.brand_string[4], &cpu_id[3], sizeof(u32));
std::memcpy(&caps.brand_string[8], &cpu_id[2], sizeof(u32));
caps.manufacturer = CPUCaps::ParseManufacturer(caps.brand_string);
// Set reasonable default cpu string even if brand string not available
std::strncpy(caps.cpu_string, caps.brand_string, std::size(caps.brand_string));
std::memcpy(&caps.brand_string[0], &cpu_id[1], sizeof(int));
std::memcpy(&caps.brand_string[4], &cpu_id[3], sizeof(int));
std::memcpy(&caps.brand_string[8], &cpu_id[2], sizeof(int));
if (cpu_id[1] == 0x756e6547 && cpu_id[2] == 0x6c65746e && cpu_id[3] == 0x49656e69)
caps.manufacturer = Manufacturer::Intel;
else if (cpu_id[1] == 0x68747541 && cpu_id[2] == 0x444d4163 && cpu_id[3] == 0x69746e65)
caps.manufacturer = Manufacturer::AMD;
else if (cpu_id[1] == 0x6f677948 && cpu_id[2] == 0x656e6975 && cpu_id[3] == 0x6e65476e)
caps.manufacturer = Manufacturer::Hygon;
else
caps.manufacturer = Manufacturer::Unknown;
__cpuid(cpu_id, 0x80000000);
const u32 max_ex_fn = cpu_id[0];
u32 max_ex_fn = cpu_id[0];
// Set reasonable default brand string even if brand string not available
strcpy(caps.cpu_string, caps.brand_string);
// Detect family and other miscellaneous features
if (max_std_fn >= 1) {
__cpuid(cpu_id, 0x00000001);
caps.sse = Common::Bit<25>(cpu_id[3]);
caps.sse2 = Common::Bit<26>(cpu_id[3]);
caps.sse3 = Common::Bit<0>(cpu_id[2]);
caps.pclmulqdq = Common::Bit<1>(cpu_id[2]);
caps.ssse3 = Common::Bit<9>(cpu_id[2]);
caps.sse4_1 = Common::Bit<19>(cpu_id[2]);
caps.sse4_2 = Common::Bit<20>(cpu_id[2]);
caps.movbe = Common::Bit<22>(cpu_id[2]);
caps.popcnt = Common::Bit<23>(cpu_id[2]);
caps.aes = Common::Bit<25>(cpu_id[2]);
caps.f16c = Common::Bit<29>(cpu_id[2]);
if ((cpu_id[3] >> 25) & 1)
caps.sse = true;
if ((cpu_id[3] >> 26) & 1)
caps.sse2 = true;
if ((cpu_id[2]) & 1)
caps.sse3 = true;
if ((cpu_id[2] >> 9) & 1)
caps.ssse3 = true;
if ((cpu_id[2] >> 19) & 1)
caps.sse4_1 = true;
if ((cpu_id[2] >> 20) & 1)
caps.sse4_2 = true;
if ((cpu_id[2] >> 25) & 1)
caps.aes = true;
// AVX support requires 3 separate checks:
// - Is the AVX bit set in CPUID?
// - Is the XSAVE bit set in CPUID?
// - XGETBV result has the XCR bit set.
if (Common::Bit<28>(cpu_id[2]) && Common::Bit<27>(cpu_id[2])) {
if (((cpu_id[2] >> 28) & 1) && ((cpu_id[2] >> 27) & 1)) {
if ((_xgetbv(_XCR_XFEATURE_ENABLED_MASK) & 0x6) == 0x6) {
caps.avx = true;
if (Common::Bit<12>(cpu_id[2]))
if ((cpu_id[2] >> 12) & 1)
caps.fma = true;
}
}
if (max_std_fn >= 7) {
__cpuidex(cpu_id, 0x00000007, 0x00000000);
// Can't enable AVX{2,512} unless the XSAVE/XGETBV checks above passed
if (caps.avx) {
caps.avx2 = Common::Bit<5>(cpu_id[1]);
caps.avx512f = Common::Bit<16>(cpu_id[1]);
caps.avx512dq = Common::Bit<17>(cpu_id[1]);
caps.avx512cd = Common::Bit<28>(cpu_id[1]);
caps.avx512bw = Common::Bit<30>(cpu_id[1]);
caps.avx512vl = Common::Bit<31>(cpu_id[1]);
caps.avx512vbmi = Common::Bit<1>(cpu_id[2]);
caps.avx512bitalg = Common::Bit<12>(cpu_id[2]);
// Can't enable AVX2 unless the XSAVE/XGETBV checks above passed
if ((cpu_id[1] >> 5) & 1)
caps.avx2 = caps.avx;
if ((cpu_id[1] >> 3) & 1)
caps.bmi1 = true;
if ((cpu_id[1] >> 8) & 1)
caps.bmi2 = true;
// Checks for AVX512F, AVX512CD, AVX512VL, AVX512DQ, AVX512BW (Intel Skylake-X/SP)
if ((cpu_id[1] >> 16) & 1 && (cpu_id[1] >> 28) & 1 && (cpu_id[1] >> 31) & 1 &&
(cpu_id[1] >> 17) & 1 && (cpu_id[1] >> 30) & 1) {
caps.avx512 = caps.avx2;
}
caps.bmi1 = Common::Bit<3>(cpu_id[1]);
caps.bmi2 = Common::Bit<8>(cpu_id[1]);
caps.sha = Common::Bit<29>(cpu_id[1]);
caps.gfni = Common::Bit<8>(cpu_id[2]);
__cpuidex(cpu_id, 0x00000007, 0x00000001);
caps.avx_vnni = caps.avx && Common::Bit<4>(cpu_id[0]);
}
}
@@ -151,13 +138,15 @@ static CPUCaps Detect() {
if (max_ex_fn >= 0x80000001) {
// Check for more features
__cpuid(cpu_id, 0x80000001);
caps.lzcnt = Common::Bit<5>(cpu_id[2]);
caps.fma4 = Common::Bit<16>(cpu_id[2]);
if ((cpu_id[2] >> 16) & 1)
caps.fma4 = true;
}
if (max_ex_fn >= 0x80000007) {
__cpuid(cpu_id, 0x80000007);
caps.invariant_tsc = Common::Bit<8>(cpu_id[3]);
if (cpu_id[3] & (1 << 8)) {
caps.invariant_tsc = true;
}
}
if (max_std_fn >= 0x16) {

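For readers decoding the hunk above: the magic constants the reverted code compares against are just the vendor string bytes viewed as little-endian u32s, and Common::Bit<N> is the shift-and-mask test written once. A small standalone sketch (TestBit is illustrative, not yuzu's helper):

#include <cstdint>

// "GenuineIntel" is split across EBX, EDX, ECX after CPUID leaf 0:
// EBX = "Genu" = 0x756e6547, EDX = "ineI" = 0x49656e69, ECX = "ntel" = 0x6c65746e.
static_assert(('u' << 24 | 'n' << 16 | 'e' << 8 | 'G') == 0x756e6547);

// Bit<N> is equivalent to the explicit shift-and-mask form used on the other side:
template <unsigned bit>
constexpr bool TestBit(uint32_t value) {
    return ((value >> bit) & 1u) != 0; // e.g. TestBit<25>(edx) -> SSE support
}
static_assert(TestBit<25>(1u << 25));
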
View File

@@ -1,65 +1,42 @@
// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project / 2022 Yuzu Emulator
// Project Licensed under GPLv2 or any later version Refer to the license.txt file included.
// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string_view>
#include "common/common_types.h"
namespace Common {
enum class Manufacturer : u32 {
Intel = 0,
AMD = 1,
Hygon = 2,
Unknown = 3,
};
/// x86/x64 CPU capabilities that may be detected by this module
struct CPUCaps {
enum class Manufacturer : u8 {
Unknown = 0,
Intel = 1,
AMD = 2,
Hygon = 3,
};
static Manufacturer ParseManufacturer(std::string_view brand_string);
Manufacturer manufacturer;
char brand_string[13];
char cpu_string[48];
char cpu_string[0x21];
char brand_string[0x41];
bool sse;
bool sse2;
bool sse3;
bool ssse3;
bool sse4_1;
bool sse4_2;
bool lzcnt;
bool avx;
bool avx2;
bool avx512;
bool bmi1;
bool bmi2;
bool fma;
bool fma4;
bool aes;
bool invariant_tsc;
u32 base_frequency;
u32 max_frequency;
u32 bus_frequency;
bool sse : 1;
bool sse2 : 1;
bool sse3 : 1;
bool ssse3 : 1;
bool sse4_1 : 1;
bool sse4_2 : 1;
bool avx : 1;
bool avx_vnni : 1;
bool avx2 : 1;
bool avx512f : 1;
bool avx512dq : 1;
bool avx512cd : 1;
bool avx512bw : 1;
bool avx512vl : 1;
bool avx512vbmi : 1;
bool avx512bitalg : 1;
bool aes : 1;
bool bmi1 : 1;
bool bmi2 : 1;
bool f16c : 1;
bool fma : 1;
bool fma4 : 1;
bool gfni : 1;
bool invariant_tsc : 1;
bool lzcnt : 1;
bool movbe : 1;
bool pclmulqdq : 1;
bool popcnt : 1;
bool sha : 1;
};
/**

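The struct rewrite above swaps plain bool members for 1-bit bitfields; the observable difference is size. A hedged illustration, since exact bitfield layout is implementation-defined (hence <= rather than == below):

struct FlagsAsBools {
    bool sse, sse2, sse3, ssse3, sse4_1, sse4_2, avx, avx2; // one byte each on common ABIs
};
struct FlagsAsBits {
    bool sse : 1, sse2 : 1, sse3 : 1, ssse3 : 1;
    bool sse4_1 : 1, sse4_2 : 1, avx : 1, avx2 : 1; // typically packed into a single byte
};
static_assert(sizeof(FlagsAsBits) <= sizeof(FlagsAsBools));
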
View File

@@ -4,6 +4,8 @@
#include <array>
#include <chrono>
#include <limits>
#include <mutex>
#include <thread>
#include "common/atomic_ops.h"

View File

@@ -4,6 +4,8 @@
#pragma once
#include <optional>
#include "common/wall_clock.h"
namespace Common {

View File

@@ -122,8 +122,6 @@ add_library(core STATIC
frontend/applets/error.h
frontend/applets/general_frontend.cpp
frontend/applets/general_frontend.h
frontend/applets/mii_edit.cpp
frontend/applets/mii_edit.h
frontend/applets/profile_select.cpp
frontend/applets/profile_select.h
frontend/applets/software_keyboard.cpp
@@ -209,8 +207,6 @@ add_library(core STATIC
hle/kernel/k_memory_region.h
hle/kernel/k_memory_region_type.h
hle/kernel/k_page_bitmap.h
hle/kernel/k_page_buffer.cpp
hle/kernel/k_page_buffer.h
hle/kernel/k_page_heap.cpp
hle/kernel/k_page_heap.h
hle/kernel/k_page_linked_list.h
@@ -248,8 +244,6 @@ add_library(core STATIC
hle/kernel/k_system_control.h
hle/kernel/k_thread.cpp
hle/kernel/k_thread.h
hle/kernel/k_thread_local_page.cpp
hle/kernel/k_thread_local_page.h
hle/kernel/k_thread_queue.cpp
hle/kernel/k_thread_queue.h
hle/kernel/k_trace.h
@@ -306,9 +300,6 @@ add_library(core STATIC
hle/service/am/applets/applet_error.h
hle/service/am/applets/applet_general_backend.cpp
hle/service/am/applets/applet_general_backend.h
hle/service/am/applets/applet_mii_edit.cpp
hle/service/am/applets/applet_mii_edit.h
hle/service/am/applets/applet_mii_edit_types.h
hle/service/am/applets/applet_profile_select.cpp
hle/service/am/applets/applet_profile_select.h
hle/service/am/applets/applet_software_keyboard.cpp

View File

@@ -4,6 +4,8 @@
#pragma once
#include <unordered_map>
#include <dynarmic/interface/exclusive_monitor.h>
#include "common/common_types.h"

View File

@@ -38,6 +38,7 @@
#include "core/hle/service/apm/apm_controller.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/hle/service/glue/glue_manager.h"
#include "core/hle/service/hid/hid.h"
#include "core/hle/service/service.h"
#include "core/hle/service/sm/sm.h"
#include "core/hle/service/time/time_manager.h"
@@ -325,9 +326,7 @@ struct System::Impl {
is_powered_on = false;
exit_lock = false;
if (gpu_core != nullptr) {
gpu_core->NotifyShutdown();
}
gpu_core->NotifyShutdown();
services.reset();
service_manager.reset();

View File

@@ -5,6 +5,7 @@
#pragma once
#include <cstddef>
#include <iterator>
#include "common/common_funcs.h"
#include "common/common_types.h"

View File

@@ -10,10 +10,7 @@
#include "common/hex_util.h"
#include "common/logging/log.h"
#include "common/settings.h"
#ifndef _WIN32
#include "common/string_util.h"
#endif
#include "core/core.h"
#include "core/file_sys/common_funcs.h"
#include "core/file_sys/content_archive.h"

View File

@@ -6,9 +6,7 @@
#include <array>
#include <vector>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/file_sys/vfs_types.h"

View File

@@ -1,18 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/logging/log.h"
#include "core/frontend/applets/mii_edit.h"
namespace Core::Frontend {
MiiEditApplet::~MiiEditApplet() = default;
void DefaultMiiEditApplet::ShowMiiEdit(const std::function<void()>& callback) const {
LOG_WARNING(Service_AM, "(STUBBED) called");
callback();
}
} // namespace Core::Frontend

View File

@@ -1,23 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <functional>
namespace Core::Frontend {
class MiiEditApplet {
public:
virtual ~MiiEditApplet();
virtual void ShowMiiEdit(const std::function<void()>& callback) const = 0;
};
class DefaultMiiEditApplet final : public MiiEditApplet {
public:
void ShowMiiEdit(const std::function<void()>& callback) const override;
};
} // namespace Core::Frontend

View File

@@ -42,20 +42,11 @@ public:
context.MakeCurrent();
}
~Scoped() {
if (active) {
context.DoneCurrent();
}
}
/// In the event that context was destroyed before the Scoped is destroyed, this provides a
/// mechanism to prevent calling a destroyed object's method during the destructor
void Cancel() {
active = false;
context.DoneCurrent();
}
private:
GraphicsContext& context;
bool active{true};
};
/// Calls MakeCurrent on the context and calls DoneCurrent when the scope for the returned value

View File
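The Scoped type being stripped above is a plain RAII guard plus an escape hatch: Cancel() exists so the destructor can skip DoneCurrent() when the context died first. A minimal standalone sketch of the same pattern:

class ScopedGuard {
public:
    explicit ScopedGuard(void (*on_exit)()) : exit_fn{on_exit} {}
    ~ScopedGuard() {
        if (active && exit_fn) {
            exit_fn(); // e.g. context.DoneCurrent()
        }
    }
    // Call when the guarded object may no longer be alive.
    void Cancel() {
        active = false;
    }

private:
    void (*exit_fn)();
    bool active{true};
};

Callers that destroy the context before the guard goes out of scope call Cancel() first, which is exactly the contract the removed comment describes.
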

@@ -385,7 +385,7 @@ public:
T PopRaw();
template <class T>
std::weak_ptr<T> PopIpcInterface() {
std::shared_ptr<T> PopIpcInterface() {
ASSERT(context->Session()->IsDomain());
ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
return context->GetDomainHandler<T>(Pop<u32>() - 1);

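Several hunks in this diff move the HLE session plumbing between std::weak_ptr and std::shared_ptr. The practical difference is that a weak_ptr must be promoted before use and the promotion can fail; a short sketch:

#include <cassert>
#include <memory>

struct Handler {
    int value = 42;
};

int main() {
    auto strong = std::make_shared<Handler>();
    std::weak_ptr<Handler> weak = strong;

    // weak_ptr: promote with lock(), which yields nullptr if the object died.
    if (auto p = weak.lock()) {
        assert(p->value == 42);
    }

    strong.reset();
    assert(weak.lock() == nullptr); // a shared_ptr member needs no such check
}
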
View File

@@ -17,6 +17,7 @@
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
@@ -44,7 +45,7 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
return false;
}
return DomainHandler(object_id - 1).lock() != nullptr;
return DomainHandler(object_id - 1) != nullptr;
} else {
return session_handler != nullptr;
}
@@ -52,6 +53,9 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
void SessionRequestHandler::ClientConnected(KServerSession* session) {
session->ClientConnected(shared_from_this());
// Ensure our server session is tracked globally.
kernel.RegisterServerSession(session);
}
void SessionRequestHandler::ClientDisconnected(KServerSession* session) {

View File

@@ -94,7 +94,6 @@ protected:
std::weak_ptr<ServiceThread> service_thread;
};
using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>;
using SessionRequestHandlerPtr = std::shared_ptr<SessionRequestHandler>;
/**
@@ -140,7 +139,7 @@ public:
}
}
SessionRequestHandlerWeakPtr DomainHandler(std::size_t index) const {
SessionRequestHandlerPtr DomainHandler(std::size_t index) const {
ASSERT_MSG(index < DomainHandlerCount(), "Unexpected handler index {}", index);
return domain_handlers.at(index);
}
@@ -329,10 +328,10 @@ public:
template <typename T>
std::shared_ptr<T> GetDomainHandler(std::size_t index) const {
return std::static_pointer_cast<T>(manager.lock()->DomainHandler(index).lock());
return std::static_pointer_cast<T>(manager->DomainHandler(index));
}
void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) {
void SetSessionRequestManager(std::shared_ptr<SessionRequestManager> manager_) {
manager = std::move(manager_);
}
@@ -375,7 +374,7 @@ private:
u32 handles_offset{};
u32 domain_offset{};
std::weak_ptr<SessionRequestManager> manager;
std::shared_ptr<SessionRequestManager> manager;
KernelCore& kernel;
Core::Memory::Memory& memory;

View File

@@ -7,23 +7,19 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"
namespace Kernel::Init {
@@ -36,13 +32,9 @@ namespace Kernel::Init {
HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \
HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
HANDLER(KThreadLocalPage, \
(SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \
##__VA_ARGS__) \
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
namespace {
@@ -58,46 +50,38 @@ enum KSlabType : u32 {
// Constexpr counts.
constexpr size_t SlabCountKProcess = 80;
constexpr size_t SlabCountKThread = 800;
constexpr size_t SlabCountKEvent = 900;
constexpr size_t SlabCountKEvent = 700;
constexpr size_t SlabCountKInterruptEvent = 100;
constexpr size_t SlabCountKPort = 384;
constexpr size_t SlabCountKPort = 256 + 0x20; // Extra 0x20 ports over Nintendo for homebrew.
constexpr size_t SlabCountKSharedMemory = 80;
constexpr size_t SlabCountKTransferMemory = 200;
constexpr size_t SlabCountKCodeMemory = 10;
constexpr size_t SlabCountKDeviceAddressSpace = 300;
constexpr size_t SlabCountKSession = 1133;
constexpr size_t SlabCountKSession = 933;
constexpr size_t SlabCountKLightSession = 100;
constexpr size_t SlabCountKObjectName = 7;
constexpr size_t SlabCountKResourceLimit = 5;
constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
constexpr size_t SlabCountKIoPool = 1;
constexpr size_t SlabCountKIoRegion = 6;
constexpr size_t SlabCountKAlpha = 1;
constexpr size_t SlabCountKBeta = 6;
constexpr size_t SlabCountExtraKThread = 160;
/// Helper function to translate from the slab virtual address to the reserved location in physical
/// memory.
static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) {
slab_addr -= memory_layout.GetSlabRegionAddress();
return slab_addr + Core::DramMemoryMap::SlabHeapBase;
}
template <typename T>
VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
size_t num_objects) {
// TODO(bunnei): This is just a placeholder. We should initialize the appropriate KSlabHeap for
// kernel object type T with the backing kernel memory pointer once we emulate kernel memory.
const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
VAddr start = Common::AlignUp(address, alignof(T));
// This should use the virtual memory address passed in, but currently, we do not setup the
// kernel virtual memory layout. Instead, we simply map these at a region of physical memory
// that we reserve for the slab heaps.
// TODO(bunnei): Fix this once we support the kernel virtual memory layout.
// This is intentionally empty. Once KSlabHeap is fully implemented, we can replace this with
// the pointer to emulated memory to pass along. Until then, KSlabHeap will just allocate/free
// host memory.
void* backing_kernel_memory{};
if (size > 0) {
void* backing_kernel_memory{
system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
ASSERT(region != nullptr);
ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
@@ -107,12 +91,6 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
return start + size;
}
size_t CalculateSlabHeapGapSize() {
constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB;
static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
return KernelSlabHeapGapSize;
}
} // namespace
KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
@@ -131,8 +109,8 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
.num_KObjectName = SlabCountKObjectName,
.num_KResourceLimit = SlabCountKResourceLimit,
.num_KDebug = SlabCountKDebug,
.num_KIoPool = SlabCountKIoPool,
.num_KIoRegion = SlabCountKIoRegion,
.num_KAlpha = SlabCountKAlpha,
.num_KBeta = SlabCountKBeta,
};
}
@@ -158,34 +136,11 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
#undef ADD_SLAB_SIZE
// Add the reserved size.
size += CalculateSlabHeapGapSize();
size += KernelSlabHeapGapsSize;
return size;
}
void InitializeKPageBufferSlabHeap(Core::System& system) {
auto& kernel = system.Kernel();
const auto& counts = kernel.SlabResourceCounts();
const size_t num_pages =
counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
const size_t slab_size = num_pages * PageSize;
// Reserve memory from the system resource limit.
ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
// Allocate memory for the slab.
constexpr auto AllocateOption = KMemoryManager::EncodeOption(
KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
const PAddr slab_address =
kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
ASSERT(slab_address != 0);
// Initialize the slabheap.
KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
slab_size);
}
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
@@ -205,13 +160,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}
// Create an array to represent the gaps between the slabs.
const size_t total_gap_size = CalculateSlabHeapGapSize();
const size_t total_gap_size = KernelSlabHeapGapsSize;
std::array<size_t, slab_types.size()> slab_gaps;
for (auto& slab_gap : slab_gaps) {
for (size_t i = 0; i < slab_gaps.size(); i++) {
// Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange
// is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we
// will include it ourselves.
slab_gap = KSystemControl::GenerateRandomRange(0, total_gap_size);
slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size);
}
// Sort the array, so that we can treat differences between values as offsets to the starts of
@@ -222,21 +177,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}
}
// Track the gaps, so that we can free them to the unused slab tree.
VAddr gap_start = address;
size_t gap_size = 0;
for (size_t i = 0; i < slab_gaps.size(); i++) {
for (size_t i = 0; i < slab_types.size(); i++) {
// Add the random gap to the address.
const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
address += cur_gap;
gap_size += cur_gap;
address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
#define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \
case KSlabType_##NAME: \
if (COUNT > 0) { \
address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \
} \
address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \
break;
// Initialize the slabheap.
@@ -245,13 +192,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
// If we somehow get an invalid type, abort.
default:
UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]);
}
// If we've hit the end of a gap, free it.
if (gap_start + gap_size != address) {
gap_start = address;
gap_size = 0;
UNREACHABLE();
}
}
}

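The gap logic above (in both versions) uses a classic trick: draw N random values in [0, total], sort them, and treat adjacent differences as the gap sizes, so the gaps sum to at most total. A hedged sketch with std::mt19937_64 standing in for KSystemControl::GenerateRandomRange (which, per the comment above, is inclusive on both ends):

#include <algorithm>
#include <array>
#include <cstddef>
#include <random>

int main() {
    constexpr std::size_t total_gap_size = 1024;
    std::array<std::size_t, 4> points{};

    std::mt19937_64 rng{0}; // stand-in for KSystemControl::GenerateRandomRange
    std::uniform_int_distribution<std::size_t> dist(0, total_gap_size); // inclusive
    for (auto& p : points) {
        p = dist(rng);
    }
    std::sort(points.begin(), points.end());

    // Adjacent differences become the per-slab gaps; they sum to points.back().
    std::size_t sum = 0;
    for (std::size_t i = 0; i < points.size(); i++) {
        sum += (i == 0) ? points[0] : points[i] - points[i - 1];
    }
    return sum <= total_gap_size ? 0 : 1; // always 0
}
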
View File

@@ -32,13 +32,12 @@ struct KSlabResourceCounts {
size_t num_KObjectName;
size_t num_KResourceLimit;
size_t num_KDebug;
size_t num_KIoPool;
size_t num_KIoRegion;
size_t num_KAlpha;
size_t num_KBeta;
};
void InitializeSlabResourceCounts(KernelCore& kernel);
size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
void InitializeKPageBufferSlabHeap(Core::System& system);
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);
} // namespace Kernel::Init

View File

@@ -115,7 +115,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
{
KScopedSchedulerLock sl(kernel);
auto it = thread_tree.nfind_key({addr, -1});
auto it = thread_tree.nfind_light({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
@@ -148,7 +148,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
return ResultInvalidState;
}
auto it = thread_tree.nfind_key({addr, -1});
auto it = thread_tree.nfind_light({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
@@ -171,7 +171,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
{
[[maybe_unused]] const KScopedSchedulerLock sl(kernel);
auto it = thread_tree.nfind_key({addr, -1});
auto it = thread_tree.nfind_light({addr, -1});
// Determine the updated value.
s32 new_value{};
if (count <= 0) {

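nfind_key and nfind_light above both answer the same question: the first tree node not less than a key, i.e. a lower_bound. Passing {addr, -1} makes the probe sort before every real waiter on addr, so iteration starts at the first entry for that address. A model using std::set (the real tree is intrusive, but the ordering idea is the same):

#include <cassert>
#include <set>
#include <utility>

int main() {
    // Model: waiters keyed by (address, priority), visited in order.
    std::set<std::pair<unsigned long, int>> tree{{0x1000, 3}, {0x1000, 7}, {0x2000, 1}};

    // {addr, -1} compares below every real waiter at addr, so lower_bound
    // lands on the first entry for that address (what nfind does here).
    auto it = tree.lower_bound({0x1000, -1});
    assert(it->first == 0x1000 && it->second == 3);

    for (; it != tree.end() && it->first == 0x1000; ++it) {
        // visit each waiter on 0x1000, lowest key first
    }
}
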
View File

@@ -4,6 +4,8 @@
#pragma once
#include <atomic>
#include "common/bit_util.h"
#include "common/common_types.h"

View File

@@ -5,6 +5,7 @@
#include "common/alignment.h"
#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"

View File

@@ -9,6 +9,7 @@
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
@@ -243,7 +244,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
{
KScopedSchedulerLock sl(kernel);
auto it = thread_tree.nfind_key({cv_key, -1});
auto it = thread_tree.nfind_light({cv_key, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetConditionVariableKey() == cv_key)) {
KThread* target_thread = std::addressof(*it);

View File

@@ -63,7 +63,7 @@ bool KHandleTable::Remove(Handle handle) {
return true;
}
ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
@@ -75,7 +75,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
const auto linear_id = this->AllocateLinearId();
const auto index = this->AllocateEntry();
m_entry_infos[index].linear_id = linear_id;
m_entry_infos[index].info = {.linear_id = linear_id, .type = type};
m_objects[index] = obj;
obj->Open();
@@ -116,7 +116,7 @@ void KHandleTable::Unreserve(Handle handle) {
}
}
void KHandleTable::Register(Handle handle, KAutoObject* obj) {
void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
@@ -132,7 +132,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj) {
// Set the entry.
ASSERT(m_objects[index] == nullptr);
m_entry_infos[index].linear_id = static_cast<u16>(linear_id);
m_entry_infos[index].info = {.linear_id = static_cast<u16>(linear_id), .type = type};
m_objects[index] = obj;
obj->Open();

View File

@@ -42,7 +42,7 @@ public:
m_free_head_index = -1;
// Free all entries.
for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) {
for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
m_objects[i] = nullptr;
m_entry_infos[i].next_free_index = i - 1;
m_free_head_index = i;
@@ -104,8 +104,17 @@ public:
ResultCode Reserve(Handle* out_handle);
void Unreserve(Handle handle);
ResultCode Add(Handle* out_handle, KAutoObject* obj);
void Register(Handle handle, KAutoObject* obj);
template <typename T>
ResultCode Add(Handle* out_handle, T* obj) {
static_assert(std::is_base_of_v<KAutoObject, T>);
return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken());
}
template <typename T>
void Register(Handle handle, T* obj) {
static_assert(std::is_base_of_v<KAutoObject, T>);
return this->Register(handle, obj, obj->GetTypeObj().GetClassToken());
}
template <typename T>
bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const {
@@ -151,6 +160,9 @@ public:
}
private:
ResultCode Add(Handle* out_handle, KAutoObject* obj, u16 type);
void Register(Handle handle, KAutoObject* obj, u16 type);
s32 AllocateEntry() {
ASSERT(m_count < m_table_size);
@@ -167,7 +179,7 @@ private:
ASSERT(m_count > 0);
m_objects[index] = nullptr;
m_entry_infos[index].next_free_index = static_cast<s16>(m_free_head_index);
m_entry_infos[index].next_free_index = m_free_head_index;
m_free_head_index = index;
@@ -266,13 +278,19 @@ private:
}
union EntryInfo {
u16 linear_id;
s16 next_free_index;
struct {
u16 linear_id;
u16 type;
} info;
s32 next_free_index;
constexpr u16 GetLinearId() const {
return linear_id;
return info.linear_id;
}
constexpr s16 GetNextFreeIndex() const {
constexpr u16 GetType() const {
return info.type;
}
constexpr s32 GetNextFreeIndex() const {
return next_free_index;
}
};

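The EntryInfo union above overlays two roles on the same four bytes: while a slot is free it stores the free-list link, and once occupied it stores handle metadata. A small standalone model of that layout (field meanings inferred from the hunk, not from yuzu documentation):

#include <cstdint>

union EntryInfo {
    struct {
        uint16_t linear_id; // occupied: generation counter for the handle
        uint16_t type;      // occupied: object type token
    } info;
    int32_t next_free_index; // free: index of the next free slot, -1 at the tail

    constexpr uint16_t GetLinearId() const {
        return info.linear_id;
    }
    constexpr int32_t GetNextFreeIndex() const {
        return next_free_index;
    }
};
static_assert(sizeof(EntryInfo) == 4); // both roles fit in the same storage
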
View File

@@ -57,11 +57,11 @@ constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemor
constexpr std::size_t KernelInitialPageHeapSize = 128_KiB;
constexpr std::size_t KernelSlabHeapDataSize = 5_MiB;
constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB;
constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;
constexpr std::size_t KernelSlabHeapGapsSize = 2_MiB - 64_KiB;
constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize;
// NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000;
constexpr std::size_t KernelSlabHeapAdditionalSize = 416_KiB;
constexpr std::size_t KernelResourceSize =
KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;

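One-line sanity check for the constant swap above: the two spellings denote the same quantity.

static_assert(0x68000 == 416 * 1024); // 416_KiB == 0x68000 == 425984 bytes
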
View File

@@ -15,6 +15,7 @@
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"
namespace Kernel {

View File

@@ -1,19 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/alignment.h"
#include "common/assert.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/memory_types.h"
namespace Kernel {
KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
ASSERT(Common::IsAligned(phys_addr, PageSize));
return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
}
} // namespace Kernel

View File

@@ -1,28 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "common/common_types.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
public:
KPageBuffer() = default;
static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);
private:
[[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{};
};
static_assert(sizeof(KPageBuffer) == PageSize);
static_assert(alignof(KPageBuffer) == PageSize);
} // namespace Kernel

View File

@@ -285,207 +285,76 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
return ResultSuccess;
}
ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
// Lock the table.
ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KScopedLightLock lk(general_lock);
// Verify that the source memory is normal heap.
KMemoryState src_state{};
KMemoryPermission src_perm{};
std::size_t num_src_allocator_blocks{};
R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
src_address, size, KMemoryState::All, KMemoryState::Normal,
KMemoryPermission::All, KMemoryPermission::UserReadWrite,
KMemoryAttribute::All, KMemoryAttribute::None));
const std::size_t num_pages{size / PageSize};
// Verify that the destination memory is unmapped.
std::size_t num_dst_allocator_blocks{};
R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
KMemoryState::Free, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::None,
KMemoryAttribute::None));
KMemoryState state{};
KMemoryPermission perm{};
CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, nullptr, src_addr, size,
KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
KMemoryPermission::UserReadWrite, KMemoryAttribute::Mask,
KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
// Map the code memory.
{
// Determine the number of pages being operated on.
const std::size_t num_pages = size / PageSize;
// Create page groups for the memory being mapped.
KPageLinkedList pg;
AddRegionToPages(src_address, num_pages, pg);
// Reprotect the source as kernel-read/not mapped.
const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
KMemoryPermission::NotMapped);
R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));
// Ensure that we unprotect the source pages on failure.
auto unprot_guard = SCOPE_GUARD({
ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
.IsSuccess());
});
// Map the alias pages.
R_TRY(MapPages(dst_address, pg, new_perm));
// We successfully mapped the alias pages, so we don't need to unprotect the src pages on
// failure.
unprot_guard.Cancel();
// Apply the memory block updates.
block_manager->Update(src_address, num_pages, src_state, new_perm,
KMemoryAttribute::Locked);
block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm,
KMemoryAttribute::None);
if (IsRegionMapped(dst_addr, size)) {
return ResultInvalidCurrentMemory;
}
KPageLinkedList page_linked_list;
AddRegionToPages(src_addr, num_pages, page_linked_list);
{
auto block_guard = detail::ScopeExit(
[&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); });
CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
OperationType::ChangePermissions));
CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::None));
block_guard.Cancel();
}
block_manager->Update(src_addr, num_pages, state, KMemoryPermission::None,
KMemoryAttribute::Locked);
block_manager->Update(dst_addr, num_pages, KMemoryState::AliasCode);
return ResultSuccess;
}
ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
// Lock the table.
ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KScopedLightLock lk(general_lock);
// Verify that the source memory is locked normal heap.
std::size_t num_src_allocator_blocks{};
R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked));
if (!size) {
return ResultSuccess;
}
// Verify that the destination memory is aliasable code.
std::size_t num_dst_allocator_blocks{};
R_TRY(this->CheckMemoryStateContiguous(
std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
const std::size_t num_pages{size / PageSize};
CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, nullptr, src_addr, size,
KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::Mask,
KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
KMemoryState state{};
CASCADE_CODE(CheckMemoryState(
&state, nullptr, nullptr, nullptr, dst_addr, PageSize, KMemoryState::FlagCanCodeAlias,
KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::All, KMemoryAttribute::None));
KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
CASCADE_CODE(CheckMemoryState(dst_addr, size, KMemoryState::All, state, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::Mask,
KMemoryAttribute::None));
CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
// Determine whether any pages being unmapped are code.
bool any_code_pages = false;
{
KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address);
while (true) {
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
block_manager->Update(src_addr, num_pages, KMemoryState::Normal,
KMemoryPermission::UserReadWrite);
// Check if the memory has code flag.
if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
any_code_pages = true;
break;
}
// Check if we're done.
if (dst_address + size - 1 <= info.GetLastAddress()) {
break;
}
// Advance.
++it;
}
}
// Ensure that we maintain the instruction cache.
bool reprotected_pages = false;
SCOPE_EXIT({
if (reprotected_pages && any_code_pages) {
system.InvalidateCpuInstructionCacheRange(dst_address, size);
}
});
// Unmap.
{
// Determine the number of pages being operated on.
const std::size_t num_pages = size / PageSize;
// Unmap the aliased copy of the pages.
R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
// Try to set the permissions for the source pages back to what they should be.
R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
OperationType::ChangePermissions));
// Apply the memory block updates.
block_manager->Update(dst_address, num_pages, KMemoryState::None);
block_manager->Update(src_address, num_pages, KMemoryState::Normal,
KMemoryPermission::UserReadWrite);
// Note that we reprotected pages.
reprotected_pages = true;
}
system.InvalidateCpuInstructionCacheRange(dst_addr, size);
return ResultSuccess;
}
VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
std::size_t num_pages, std::size_t alignment, std::size_t offset,
std::size_t guard_pages) {
VAddr address = 0;
if (num_pages <= region_num_pages) {
if (this->IsAslrEnabled()) {
// Try to directly find a free area up to 8 times.
for (std::size_t i = 0; i < 8; i++) {
const std::size_t random_offset =
KSystemControl::GenerateRandomRange(
0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
alignment;
const VAddr candidate =
Common::AlignDown((region_start + random_offset), alignment) + offset;
KMemoryInfo info = this->QueryInfoImpl(candidate);
if (info.state != KMemoryState::Free) {
continue;
}
if (region_start > candidate) {
continue;
}
if (info.GetAddress() + guard_pages * PageSize > candidate) {
continue;
}
const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
if (candidate_end > info.GetLastAddress()) {
continue;
}
if (candidate_end > region_start + region_num_pages * PageSize - 1) {
continue;
}
address = candidate;
break;
}
// Fall back to finding the first free area with a random offset.
if (address == 0) {
// NOTE: Nintendo does not account for guard pages here.
// This may theoretically cause an offset to be chosen that cannot be mapped. We
// will account for guard pages.
const std::size_t offset_pages = KSystemControl::GenerateRandomRange(
0, region_num_pages - num_pages - guard_pages);
address = block_manager->FindFreeArea(region_start + offset_pages * PageSize,
region_num_pages - offset_pages, num_pages,
alignment, offset, guard_pages);
}
}
// Find the first free area.
if (address == 0) {
address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages,
alignment, offset, guard_pages);
}
}
return address;
}
ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
KPageTable& src_page_table, VAddr src_addr) {
KScopedLightLock lk(general_lock);
@@ -1117,46 +986,6 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
return ResultSuccess;
}
ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
PAddr phys_addr, bool is_pa_valid, VAddr region_start,
std::size_t region_num_pages, KMemoryState state,
KMemoryPermission perm) {
ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
// Ensure this is a valid map request.
R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
ResultInvalidCurrentMemory);
R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
// Lock the table.
KScopedLightLock lk(general_lock);
// Find a random address to map at.
VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
this->GetNumGuardPages());
R_UNLESS(addr != 0, ResultOutOfMemory);
ASSERT(Common::IsAligned(addr, alignment));
ASSERT(this->CanContain(addr, num_pages * PageSize, state));
ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None)
.IsSuccess());
// Perform mapping operation.
if (is_pa_valid) {
R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
} else {
UNIMPLEMENTED();
}
// Update the blocks.
block_manager->Update(addr, num_pages, state, perm);
// We successfully mapped the pages.
*out_addr = addr;
return ResultSuccess;
}
ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
ASSERT(this->IsLockedByCurrentThread());
@@ -1199,30 +1028,6 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
return ResultSuccess;
}
ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
// Check that the unmap is in range.
const std::size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Lock the table.
KScopedLightLock lk(general_lock);
// Check the memory state.
std::size_t num_allocator_blocks{};
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
KMemoryState::All, state, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::None));
// Perform the unmap.
R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
// Update the blocks.
block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None);
return ResultSuccess;
}
ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;

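The FindFreeArea body removed above follows a three-stage pattern: try up to 8 fully random, aligned candidates; if none fit, pick a random starting offset and scan forward; finally scan the whole region. A compressed sketch of that control flow, with trivial stubs standing in for the memory-block queries (everything here is illustrative, and guard pages are omitted):

#include <cstddef>
#include <cstdint>
#include <random>

using VAddr = uint64_t;

// Stubs for the page-table queries used by the real code.
static bool IsFreeRange(VAddr, std::size_t) {
    return false; // stub for the sketch
}
static VAddr ScanForFreeArea(VAddr, std::size_t) {
    return 0; // stub: 0 means "nothing found"
}

VAddr FindFreeArea(VAddr region_start, std::size_t region_size, std::size_t bytes,
                   std::size_t alignment, std::mt19937_64& rng) {
    std::uniform_int_distribution<VAddr> dist(0, (region_size - bytes) / alignment);
    // Stage 1: up to 8 direct random probes.
    for (int i = 0; i < 8; i++) {
        const VAddr candidate = region_start + dist(rng) * alignment;
        if (IsFreeRange(candidate, bytes)) {
            return candidate;
        }
    }
    // Stage 2: random starting offset, then a linear scan.
    const VAddr offset_start = region_start + dist(rng) * alignment;
    if (const VAddr found = ScanForFreeArea(offset_start, bytes)) {
        return found;
    }
    // Stage 3: scan the whole region from the beginning.
    return ScanForFreeArea(region_start, bytes);
}
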
View File

@@ -36,8 +36,8 @@ public:
KMemoryManager::Pool pool);
ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
KMemoryPermission perm);
ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
ResultCode MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
VAddr src_addr);
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
@@ -46,14 +46,7 @@ public:
ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
KMemoryPermission perm);
ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
PAddr phys_addr, KMemoryState state, KMemoryPermission perm) {
return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
state, perm);
}
ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
ResultCode UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size,
Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(VAddr addr);
@@ -98,9 +91,6 @@ private:
ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
KMemoryPermission perm);
ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
PAddr phys_addr, bool is_pa_valid, VAddr region_start,
std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
@@ -115,9 +105,6 @@ private:
VAddr GetRegionAddress(KMemoryState state) const;
std::size_t GetRegionSize(KMemoryState state) const;
VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
std::size_t alignment, std::size_t offset, std::size_t guard_pages);
ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
@@ -150,7 +137,7 @@ private:
return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
}
ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
ResultCode CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr,
@@ -223,7 +210,7 @@ public:
constexpr VAddr GetAliasCodeRegionSize() const {
return alias_code_region_end - alias_code_region_start;
}
std::size_t GetNormalMemorySize() {
size_t GetNormalMemorySize() {
KScopedLightLock lk(general_lock);
return GetHeapSize() + mapped_physical_memory_size;
}
@@ -266,9 +253,7 @@ public:
constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
return !IsOutsideASLRRegion(address, size);
}
constexpr std::size_t GetNumGuardPages() const {
return IsKernel() ? 1 : 4;
}
PAddr GetPhysicalAddr(VAddr addr) const {
const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
ASSERT(backing_addr);
@@ -290,6 +275,10 @@ private:
return is_aslr_enabled;
}
constexpr std::size_t GetNumGuardPages() const {
return IsKernel() ? 1 : 4;
}
constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
return (address_space_start <= addr) &&
(num_pages <= (address_space_end - address_space_start) / PageSize) &&

View File

@@ -57,12 +57,7 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {
R_UNLESS(state == State::Normal, ResultPortClosed);
server.EnqueueSession(session);
if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
session_ptr->ClientConnected(server.AcceptSession());
} else {
UNREACHABLE();
}
server.GetSessionRequestHandler()->ClientConnected(server.AcceptSession());
return ResultSuccess;
}

View File

@@ -13,6 +13,7 @@
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_memory_block_manager.h"
@@ -23,6 +24,7 @@
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
@@ -68,6 +70,58 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
}
} // Anonymous namespace
// Represents a page used for thread-local storage.
//
// Each TLS page contains slots that may be used by processes and threads.
// Every process and thread is created with a slot in some arbitrary page
// (whichever page happens to have an available slot).
class TLSPage {
public:
static constexpr std::size_t num_slot_entries =
Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE;
explicit TLSPage(VAddr address) : base_address{address} {}
bool HasAvailableSlots() const {
return !is_slot_used.all();
}
VAddr GetBaseAddress() const {
return base_address;
}
std::optional<VAddr> ReserveSlot() {
for (std::size_t i = 0; i < is_slot_used.size(); i++) {
if (is_slot_used[i]) {
continue;
}
is_slot_used[i] = true;
return base_address + (i * Core::Memory::TLS_ENTRY_SIZE);
}
return std::nullopt;
}
void ReleaseSlot(VAddr address) {
// Ensure that all given addresses are consistent with how TLS pages
// are intended to be used when releasing slots.
ASSERT(IsWithinPage(address));
ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0);
const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE;
is_slot_used[index] = false;
}
private:
bool IsWithinPage(VAddr address) const {
return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE;
}
VAddr base_address;
std::bitset<num_slot_entries> is_slot_used;
};
ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
ProcessType type, KResourceLimit* res_limit) {
auto& kernel = system.Kernel();
@@ -350,7 +404,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
}
// Create TLS region
R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
tls_region_address = CreateTLSRegion();
memory_reservation.Commit();
return handle_table.Initialize(capabilities.GetHandleTableSize());
@@ -390,7 +444,7 @@ void KProcess::PrepareForTermination() {
stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
this->DeleteThreadLocalRegion(tls_region_address);
FreeTLSRegion(tls_region_address);
tls_region_address = 0;
if (resource_limit) {
@@ -402,6 +456,9 @@ void KProcess::PrepareForTermination() {
}
void KProcess::Finalize() {
// Finalize the handle table and close any open handles.
handle_table.Finalize();
// Free all shared memory infos.
{
auto it = shared_memory_list.begin();
@@ -426,110 +483,67 @@ void KProcess::Finalize() {
resource_limit = nullptr;
}
// Finalize the page table.
page_table.reset();
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
}
ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
KThreadLocalPage* tlp = nullptr;
VAddr tlr = 0;
// See if we can get a region from a partially used TLP.
{
KScopedSchedulerLock sl{kernel};
if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
tlr = it->Reserve();
ASSERT(tlr != 0);
if (it->IsAllUsed()) {
tlp = std::addressof(*it);
partially_used_tlp_tree.erase(it);
fully_used_tlp_tree.insert(*tlp);
}
*out = tlr;
return ResultSuccess;
}
}
// Allocate a new page.
tlp = KThreadLocalPage::Allocate(kernel);
R_UNLESS(tlp != nullptr, ResultOutOfMemory);
auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); });
// Initialize the new page.
R_TRY(tlp->Initialize(kernel, this));
// Reserve a TLR.
tlr = tlp->Reserve();
ASSERT(tlr != 0);
// Insert into our tree.
{
KScopedSchedulerLock sl{kernel};
if (tlp->IsAllUsed()) {
fully_used_tlp_tree.insert(*tlp);
} else {
partially_used_tlp_tree.insert(*tlp);
}
}
// We succeeded!
tlp_guard.Cancel();
*out = tlr;
return ResultSuccess;
/**
* Attempts to find a TLS page that contains a free slot for
* use by a thread.
*
* @returns If a page with an available slot is found, then an iterator
* pointing to the page is returned. Otherwise the end iterator
* is returned instead.
*/
static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
return std::find_if(tls_pages.begin(), tls_pages.end(),
[](const auto& page) { return page.HasAvailableSlots(); });
}
ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) {
KThreadLocalPage* page_to_free = nullptr;
// Release the region.
{
KScopedSchedulerLock sl{kernel};
// Try to find the page in the partially used list.
auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
if (it == partially_used_tlp_tree.end()) {
// If we don't find it, it has to be in the fully used list.
it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress);
// Release the region.
it->Release(addr);
// Move the page out of the fully used list.
KThreadLocalPage* tlp = std::addressof(*it);
fully_used_tlp_tree.erase(it);
if (tlp->IsAllFree()) {
page_to_free = tlp;
} else {
partially_used_tlp_tree.insert(*tlp);
}
} else {
// Release the region.
it->Release(addr);
// Handle the all-free case.
KThreadLocalPage* tlp = std::addressof(*it);
if (tlp->IsAllFree()) {
partially_used_tlp_tree.erase(it);
page_to_free = tlp;
}
}
VAddr KProcess::CreateTLSRegion() {
KScopedSchedulerLock lock(kernel);
if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
tls_page_iter != tls_pages.cend()) {
return *tls_page_iter->ReserveSlot();
}
// If we should free the page it was in, do so.
if (page_to_free != nullptr) {
page_to_free->Finalize();
Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
ASSERT(tls_page_ptr);
KThreadLocalPage::Free(kernel, page_to_free);
}
const VAddr start{page_table->GetKernelMapRegionStart()};
const VAddr size{page_table->GetKernelMapRegionEnd() - start};
const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
const VAddr tls_page_addr{page_table
->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
KMemoryState::ThreadLocal,
KMemoryPermission::UserReadWrite,
tls_map_addr)
.ValueOr(0)};
return ResultSuccess;
ASSERT(tls_page_addr);
std::memset(tls_page_ptr, 0, PageSize);
tls_pages.emplace_back(tls_page_addr);
const auto reserve_result{tls_pages.back().ReserveSlot()};
ASSERT(reserve_result.has_value());
return *reserve_result;
}
void KProcess::FreeTLSRegion(VAddr tls_address) {
KScopedSchedulerLock lock(kernel);
const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
auto iter =
std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
return page.GetBaseAddress() == aligned_address;
});
// Something has gone very wrong if we're freeing a region
// with no actual page available.
ASSERT(iter != tls_pages.cend());
iter->ReleaseSlot(tls_address);
}
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {

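The TLSPage class restored above divides each 0x1000-byte page into 0x200-byte TLS slots, and the bookkeeping is just a bitset scan. A self-contained version of the slot math:

#include <bitset>
#include <cstddef>
#include <cstdint>
#include <optional>

constexpr std::size_t PAGE_SIZE = 0x1000;
constexpr std::size_t TLS_ENTRY_SIZE = 0x200;
constexpr std::size_t SLOTS_PER_PAGE = PAGE_SIZE / TLS_ENTRY_SIZE;
static_assert(SLOTS_PER_PAGE == 8); // one page hosts eight TLS regions

// Reserve the first free slot in the page at base, or nullopt if the page is full.
std::optional<uint64_t> ReserveSlot(uint64_t base, std::bitset<SLOTS_PER_PAGE>& used) {
    for (std::size_t i = 0; i < used.size(); i++) {
        if (!used[i]) {
            used[i] = true;
            return base + i * TLS_ENTRY_SIZE;
        }
    }
    return std::nullopt;
}
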
View File

@@ -8,13 +8,13 @@
#include <cstddef>
#include <list>
#include <string>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/slab_helpers.h"
@@ -362,10 +362,10 @@ public:
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
[[nodiscard]] ResultCode CreateThreadLocalRegion(VAddr* out);
[[nodiscard]] VAddr CreateTLSRegion();
// Frees a used TLS slot identified by the given address
ResultCode DeleteThreadLocalRegion(VAddr addr);
void FreeTLSRegion(VAddr tls_address);
private:
void PinThread(s32 core_id, KThread* thread) {
@@ -413,6 +413,13 @@ private:
/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0;
/// The Thread Local Storage area is allocated as processes create threads,
/// each TLS area is 0x200 bytes, so one page (0x1000) is split up into 8 parts, and each part
/// holds the TLS for a specific thread. This vector contains which parts are in use for each
/// page as a bitmask.
/// This vector will grow as more pages are allocated for new threads.
std::vector<TLSPage> tls_pages;
/// Contains the parsed process capability descriptors.
ProcessCapabilities capabilities;
@@ -475,12 +482,6 @@ private:
KThread* exception_thread{};
KLightLock state_lock;
using TLPTree =
Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
using TLPIterator = TLPTree::iterator;
TLPTree fully_used_tlp_tree;
TLPTree partially_used_tlp_tree;
};
} // namespace Kernel

View File

@@ -22,6 +22,7 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {

View File

@@ -30,11 +30,11 @@ public:
/// Whether or not this server port has an HLE handler available.
bool HasSessionRequestHandler() const {
return !session_handler.expired();
return session_handler != nullptr;
}
/// Gets the HLE handler for this port.
SessionRequestHandlerWeakPtr GetSessionRequestHandler() const {
SessionRequestHandlerPtr GetSessionRequestHandler() const {
return session_handler;
}
@@ -42,7 +42,7 @@ public:
* Sets the HLE handler template for the port. ServerSessions created by connecting to this port
* will inherit a reference to this handler.
*/
void SetSessionHandler(SessionRequestHandlerWeakPtr&& handler) {
void SetSessionHandler(SessionRequestHandlerPtr&& handler) {
session_handler = std::move(handler);
}
@@ -66,7 +66,7 @@ private:
void CleanupSessions();
SessionList session_list;
SessionRequestHandlerWeakPtr session_handler;
SessionRequestHandlerPtr session_handler;
KPort* parent{};
};

View File

@@ -27,7 +27,10 @@ namespace Kernel {
KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
KServerSession::~KServerSession() = default;
KServerSession::~KServerSession() {
// Ensure that the global list tracking server sessions does not hold on to a reference.
kernel.UnregisterServerSession(this);
}
void KServerSession::Initialize(KSession* parent_session_, std::string&& name_,
std::shared_ptr<SessionRequestManager> manager_) {
@@ -46,9 +49,6 @@ void KServerSession::Destroy() {
parent->OnServerClosed();
parent->Close();
// Release host emulation members.
manager.reset();
}
void KServerSession::OnClientClosed() {
@@ -98,12 +98,7 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
UNREACHABLE();
return ResultSuccess; // Ignore error if asserts are off
}
if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
return strong_ptr->HandleSyncRequest(*this, context);
} else {
UNREACHABLE();
return ResultSuccess;
}
return manager->DomainHandler(object_id - 1)->HandleSyncRequest(*this, context);
case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);

View File

@@ -16,34 +16,39 @@ class KernelCore;
namespace impl {
class KSlabHeapImpl {
class KSlabHeapImpl final {
public:
YUZU_NON_COPYABLE(KSlabHeapImpl);
YUZU_NON_MOVEABLE(KSlabHeapImpl);
public:
struct Node {
Node* next{};
};
public:
constexpr KSlabHeapImpl() = default;
constexpr ~KSlabHeapImpl() = default;
void Initialize() {
ASSERT(m_head == nullptr);
void Initialize(std::size_t size) {
ASSERT(head == nullptr);
obj_size = size;
}
constexpr std::size_t GetObjectSize() const {
return obj_size;
}
Node* GetHead() const {
return m_head;
return head;
}
void* Allocate() {
Node* ret = m_head.load();
Node* ret = head.load();
do {
if (ret == nullptr) {
break;
}
} while (!m_head.compare_exchange_weak(ret, ret->next));
} while (!head.compare_exchange_weak(ret, ret->next));
return ret;
}
@@ -51,157 +56,170 @@ public:
void Free(void* obj) {
Node* node = static_cast<Node*>(obj);
Node* cur_head = m_head.load();
Node* cur_head = head.load();
do {
node->next = cur_head;
} while (!m_head.compare_exchange_weak(cur_head, node));
} while (!head.compare_exchange_weak(cur_head, node));
}
private:
std::atomic<Node*> m_head{};
std::atomic<Node*> head{};
std::size_t obj_size{};
};
} // namespace impl
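Allocate and Free above implement a lock-free free list (a Treiber stack): the head pointer is advanced with compare_exchange_weak so concurrent threads can pop and push nodes without a mutex. The same pattern, standalone:

#include <atomic>

class FreeList {
public:
    struct Node {
        Node* next{};
    };

    // Pop the head node; retry while other threads race us on head.
    void* Allocate() {
        Node* ret = head.load();
        do {
            if (ret == nullptr) {
                break; // list exhausted
            }
        } while (!head.compare_exchange_weak(ret, ret->next));
        return ret;
    }

    // Push a node back in front of the current head.
    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);
        Node* cur_head = head.load();
        do {
            node->next = cur_head;
        } while (!head.compare_exchange_weak(cur_head, node));
    }

private:
    std::atomic<Node*> head{};
};

As with any Treiber stack, the pop path is ABA-prone in principle; production implementations typically mitigate this with tagged pointers or by constraining when nodes may be recycled.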
template <bool SupportDynamicExpansion>
class KSlabHeapBase : protected impl::KSlabHeapImpl {
class KSlabHeapBase {
public:
YUZU_NON_COPYABLE(KSlabHeapBase);
YUZU_NON_MOVEABLE(KSlabHeapBase);
private:
size_t m_obj_size{};
uintptr_t m_peak{};
uintptr_t m_start{};
uintptr_t m_end{};
private:
void UpdatePeakImpl(uintptr_t obj) {
static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
std::atomic_ref<uintptr_t> peak_ref(m_peak);
const uintptr_t alloc_peak = obj + this->GetObjectSize();
uintptr_t cur_peak = m_peak;
do {
if (alloc_peak <= cur_peak) {
break;
}
} while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak));
}
public:
constexpr KSlabHeapBase() = default;
constexpr ~KSlabHeapBase() = default;
bool Contains(uintptr_t address) const {
return m_start <= address && address < m_end;
constexpr bool Contains(uintptr_t addr) const {
return start <= addr && addr < end;
}
void Initialize(size_t obj_size, void* memory, size_t memory_size) {
// Ensure we don't initialize a slab using null memory.
constexpr std::size_t GetSlabHeapSize() const {
return (end - start) / GetObjectSize();
}
constexpr std::size_t GetObjectSize() const {
return impl.GetObjectSize();
}
constexpr uintptr_t GetSlabHeapAddress() const {
return start;
}
std::size_t GetObjectIndexImpl(const void* obj) const {
return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
}
std::size_t GetPeakIndex() const {
return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
}
void* AllocateImpl() {
return impl.Allocate();
}
void FreeImpl(void* obj) {
// Don't allow freeing an object that wasn't allocated from this heap
ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
impl.Free(obj);
}
void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
// Ensure we don't initialize a slab using null memory
ASSERT(memory != nullptr);
// Set our object size.
m_obj_size = obj_size;
// Initialize the base allocator
impl.Initialize(obj_size);
// Initialize the base allocator.
KSlabHeapImpl::Initialize();
// Set our tracking variables
const std::size_t num_obj = (memory_size / obj_size);
start = reinterpret_cast<uintptr_t>(memory);
end = start + num_obj * obj_size;
peak = start;
// Set our tracking variables.
const size_t num_obj = (memory_size / obj_size);
m_start = reinterpret_cast<uintptr_t>(memory);
m_end = m_start + num_obj * obj_size;
m_peak = m_start;
// Free the objects
u8* cur = reinterpret_cast<u8*>(end);
// Free the objects.
u8* cur = reinterpret_cast<u8*>(m_end);
for (size_t i = 0; i < num_obj; i++) {
for (std::size_t i{}; i < num_obj; i++) {
cur -= obj_size;
KSlabHeapImpl::Free(cur);
impl.Free(cur);
}
}
size_t GetSlabHeapSize() const {
return (m_end - m_start) / this->GetObjectSize();
}
private:
using Impl = impl::KSlabHeapImpl;
size_t GetObjectSize() const {
return m_obj_size;
}
void* Allocate() {
void* obj = KSlabHeapImpl::Allocate();
return obj;
}
void Free(void* obj) {
// Don't allow freeing an object that wasn't allocated from this heap.
const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj));
ASSERT(contained);
KSlabHeapImpl::Free(obj);
}
size_t GetObjectIndex(const void* obj) const {
if constexpr (SupportDynamicExpansion) {
if (!this->Contains(reinterpret_cast<uintptr_t>(obj))) {
return std::numeric_limits<size_t>::max();
}
}
return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
}
size_t GetPeakIndex() const {
return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak));
}
uintptr_t GetSlabHeapAddress() const {
return m_start;
}
size_t GetNumRemaining() const {
// Only calculate the number of remaining objects under debug configuration.
return 0;
}
Impl impl;
uintptr_t peak{};
uintptr_t start{};
uintptr_t end{};
};
template <typename T>
class KSlabHeap final : public KSlabHeapBase<false> {
private:
using BaseHeap = KSlabHeapBase<false>;
class KSlabHeap final : public KSlabHeapBase {
public:
constexpr KSlabHeap() = default;
enum class AllocationType {
Host,
Guest,
};
void Initialize(void* memory, size_t memory_size) {
BaseHeap::Initialize(sizeof(T), memory, memory_size);
explicit constexpr KSlabHeap(AllocationType allocation_type_ = AllocationType::Host)
: KSlabHeapBase(), allocation_type{allocation_type_} {}
void Initialize(void* memory, std::size_t memory_size) {
if (allocation_type == AllocationType::Guest) {
InitializeImpl(sizeof(T), memory, memory_size);
}
}
T* Allocate() {
T* obj = static_cast<T*>(BaseHeap::Allocate());
switch (allocation_type) {
case AllocationType::Host:
// Fallback for cases where we do not yet support allocating guest memory from the slab
// heap, such as for kernel memory regions.
return new T;
if (obj != nullptr) [[likely]] {
std::construct_at(obj);
case AllocationType::Guest:
T* obj = static_cast<T*>(AllocateImpl());
if (obj != nullptr) {
new (obj) T();
}
return obj;
}
return obj;
UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
return nullptr;
}
T* Allocate(KernelCore& kernel) {
T* obj = static_cast<T*>(BaseHeap::Allocate());
T* AllocateWithKernel(KernelCore& kernel) {
switch (allocation_type) {
case AllocationType::Host:
// Fallback for cases where we do not yet support allocating guest memory from the slab
// heap, such as for kernel memory regions.
return new T(kernel);
if (obj != nullptr) [[likely]] {
std::construct_at(obj, kernel);
case AllocationType::Guest:
T* obj = static_cast<T*>(AllocateImpl());
if (obj != nullptr) {
new (obj) T(kernel);
}
return obj;
}
return obj;
UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
return nullptr;
}
void Free(T* obj) {
BaseHeap::Free(obj);
switch (allocation_type) {
case AllocationType::Host:
// Fallback for cases where we do not yet support allocating guest memory from the slab
// heap, such as for kernel memory regions.
delete obj;
return;
case AllocationType::Guest:
FreeImpl(obj);
return;
}
UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
}
size_t GetObjectIndex(const T* obj) const {
return BaseHeap::GetObjectIndex(obj);
constexpr std::size_t GetObjectIndex(const T* obj) const {
return GetObjectIndexImpl(obj);
}
private:
const AllocationType allocation_type;
};
} // namespace Kernel
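The Guest allocation path above constructs objects in place inside memory the slab already owns (std::construct_at, i.e. placement new), while the Host path falls back to ordinary new. The distinction in miniature:

#include <cstddef>
#include <memory>

struct Object {
    int value{42};
};

void PlacementDemo() {
    alignas(Object) std::byte storage[sizeof(Object)]; // pre-owned memory (the "slab")

    // Guest-style: construct in place; no heap allocation happens here.
    Object* in_place = std::construct_at(reinterpret_cast<Object*>(storage));
    std::destroy_at(in_place); // run the destructor; the storage itself can be reused

    // Host-style fallback: ordinary heap allocation.
    Object* heap_obj = new Object();
    delete heap_obj;
}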

View File

@@ -14,7 +14,9 @@
#include "common/common_types.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "common/thread_queue_list.h"
#include "core/core.h"
#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
@@ -31,6 +33,7 @@
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -207,7 +210,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
if (owner != nullptr) {
// Setup the TLS, if needed.
if (type == ThreadType::User) {
R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address)));
tls_address = owner->CreateTLSRegion();
}
parent = owner;
@@ -302,7 +305,7 @@ void KThread::Finalize() {
// If the thread has a local region, delete it.
if (tls_address != 0) {
ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess());
parent->FreeTLSRegion(tls_address);
}
// Release any waiters.
@@ -323,9 +326,6 @@ void KThread::Finalize() {
}
}
// Release host emulation members.
host_context.reset();
// Perform inherited finalization.
KSynchronizationObject::Finalize();
}

View File

@@ -656,7 +656,7 @@ private:
static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
struct ConditionVariableComparator {
struct RedBlackKeyType {
struct LightCompareType {
u64 cv_key{};
s32 priority{};
@@ -672,8 +672,8 @@ private:
template <typename T>
requires(
std::same_as<T, KThread> ||
std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
const KThread& rhs) {
std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
const KThread& rhs) {
const u64 l_key = lhs.GetConditionVariableKey();
const u64 r_key = rhs.GetConditionVariableKey();

View File

@@ -1,68 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
// Set that this process owns us.
m_owner = process;
m_kernel = &kernel;
// Allocate a new page.
KPageBuffer* page_buf = KPageBuffer::Allocate(kernel);
R_UNLESS(page_buf != nullptr, ResultOutOfMemory);
auto page_buf_guard = SCOPE_GUARD({ KPageBuffer::Free(kernel, page_buf); });
// Map the address in.
const auto phys_addr = kernel.System().DeviceMemory().GetPhysicalAddr(page_buf);
R_TRY(m_owner->PageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, phys_addr,
KMemoryState::ThreadLocal,
KMemoryPermission::UserReadWrite));
// We succeeded.
page_buf_guard.Cancel();
return ResultSuccess;
}
ResultCode KThreadLocalPage::Finalize() {
// Get the physical address of the page.
const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
ASSERT(phys_addr);
// Unmap the page.
R_TRY(m_owner->PageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal));
// Free the page.
KPageBuffer::Free(*m_kernel, KPageBuffer::FromPhysicalAddress(m_kernel->System(), phys_addr));
return ResultSuccess;
}
VAddr KThreadLocalPage::Reserve() {
for (size_t i = 0; i < m_is_region_free.size(); i++) {
if (m_is_region_free[i]) {
m_is_region_free[i] = false;
return this->GetRegionAddress(i);
}
}
return 0;
}
void KThreadLocalPage::Release(VAddr addr) {
m_is_region_free[this->GetRegionIndex(addr)] = true;
}
} // namespace Kernel
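Reserve above is a first-fit scan over the page's free flags, and Release simply flips the flag back. A compact equivalent over a std::bitset (illustrative only, not the kernel's types):

#include <bitset>
#include <cstddef>
#include <optional>

constexpr std::size_t RegionsPerPage = 8;

struct TlsPageSlots {
    std::bitset<RegionsPerPage> is_free{0xFF}; // all regions start free

    // First-fit: claim the lowest free slot, or nullopt if the page is full.
    std::optional<std::size_t> Reserve() {
        for (std::size_t i = 0; i < RegionsPerPage; ++i) {
            if (is_free[i]) {
                is_free[i] = false;
                return i;
            }
        }
        return std::nullopt;
    }

    void Release(std::size_t index) {
        is_free[index] = true;
    }
};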

View File

@@ -1,111 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <algorithm>
#include <array>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KernelCore;
class KProcess;
class KThreadLocalPage final : public Common::IntrusiveRedBlackTreeBaseNode<KThreadLocalPage>,
public KSlabAllocated<KThreadLocalPage> {
public:
static constexpr size_t RegionsPerPage = PageSize / Svc::ThreadLocalRegionSize;
static_assert(RegionsPerPage > 0);
public:
constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) {
m_is_region_free.fill(true);
}
constexpr VAddr GetAddress() const {
return m_virt_addr;
}
ResultCode Initialize(KernelCore& kernel, KProcess* process);
ResultCode Finalize();
VAddr Reserve();
void Release(VAddr addr);
bool IsAllUsed() const {
return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(),
[](bool is_free) { return !is_free; });
}
bool IsAllFree() const {
return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(),
[](bool is_free) { return is_free; });
}
bool IsAnyUsed() const {
return !this->IsAllFree();
}
bool IsAnyFree() const {
return !this->IsAllUsed();
}
public:
using RedBlackKeyType = VAddr;
static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) {
return v;
}
static constexpr RedBlackKeyType GetRedBlackKey(const KThreadLocalPage& v) {
return v.GetAddress();
}
template <typename T>
requires(std::same_as<T, KThreadLocalPage> ||
std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
const KThreadLocalPage&
rhs) {
const VAddr lval = GetRedBlackKey(lhs);
const VAddr rval = GetRedBlackKey(rhs);
if (lval < rval) {
return -1;
} else if (lval == rval) {
return 0;
} else {
return 1;
}
}
private:
constexpr VAddr GetRegionAddress(size_t i) const {
return this->GetAddress() + i * Svc::ThreadLocalRegionSize;
}
constexpr bool Contains(VAddr addr) const {
return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
}
constexpr size_t GetRegionIndex(VAddr addr) const {
ASSERT(Common::IsAligned(addr, Svc::ThreadLocalRegionSize));
ASSERT(this->Contains(addr));
return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize;
}
private:
VAddr m_virt_addr{};
KProcess* m_owner{};
KernelCore* m_kernel{};
std::array<bool, RegionsPerPage> m_is_region_free{};
};
} // namespace Kernel

View File

@@ -22,7 +22,9 @@
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/cpu_manager.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_client_port.h"
@@ -33,6 +35,7 @@
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
@@ -49,7 +52,7 @@ namespace Kernel {
struct KernelCore::Impl {
explicit Impl(Core::System& system_, KernelCore& kernel_)
: time_manager{system_},
: time_manager{system_}, object_list_container{kernel_},
service_threads_manager{1, "yuzu:ServiceThreadsManager"}, system{system_} {}
void SetMulticore(bool is_multi) {
@@ -57,7 +60,6 @@ struct KernelCore::Impl {
}
void Initialize(KernelCore& kernel) {
global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(kernel);
global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
global_handle_table->Initialize(KHandleTable::MaxTableSize);
@@ -74,7 +76,7 @@ struct KernelCore::Impl {
// Initialize kernel memory and resources.
InitializeSystemResourceLimit(kernel, system.CoreTiming());
InitializeMemoryLayout();
Init::InitializeKPageBufferSlabHeap(system);
InitializePageSlab();
InitializeSchedulers();
InitializeSuspendThreads();
InitializePreemption(kernel);
@@ -105,6 +107,19 @@ struct KernelCore::Impl {
for (auto* server_port : server_ports_) {
server_port->Close();
}
// Close all open server sessions.
std::unordered_set<KServerSession*> server_sessions_;
{
std::lock_guard lk(server_sessions_lock);
server_sessions_ = server_sessions;
server_sessions.clear();
}
for (auto* server_session : server_sessions_) {
server_session->Close();
}
// Ensure that the object list container is finalized and properly shut down.
object_list_container.Finalize();
// Ensures all service threads shut down gracefully.
ClearServiceThreads();
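Copying the session set under server_sessions_lock and only then closing the sessions avoids holding the lock across Close(), which can re-enter the kernel and try to unregister the session (taking the same lock). A sketch of the pattern with hypothetical names:

#include <mutex>
#include <unordered_set>

struct Session {
    void Close() {}
};

std::mutex sessions_lock;
std::unordered_set<Session*> sessions;

void CloseAllSessions() {
    // Take a snapshot under the lock; swap plays the role of copy-and-clear.
    std::unordered_set<Session*> snapshot;
    {
        std::lock_guard lk(sessions_lock);
        snapshot.swap(sessions);
    }
    // Close() may acquire sessions_lock itself (e.g. to unregister);
    // since we no longer hold it, that cannot deadlock.
    for (auto* session : snapshot) {
        session->Close();
    }
}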
@@ -179,15 +194,11 @@ struct KernelCore::Impl {
{
std::lock_guard lk(registered_objects_lock);
if (registered_objects.size()) {
LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!",
registered_objects.size());
LOG_WARNING(Kernel, "{} kernel objects were dangling on shutdown!",
registered_objects.size());
registered_objects.clear();
}
}
// Ensure that the object list container is finalized and properly shut down.
global_object_list_container->Finalize();
global_object_list_container.reset();
}
void InitializePhysicalCores() {
@@ -280,16 +291,15 @@ struct KernelCore::Impl {
// Gets the dummy KThread for the caller, allocating a new one if this is the first time
KThread* GetHostDummyThread() {
auto initialize = [this](KThread* thread) {
auto make_thread = [this]() {
KThread* thread = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeDummyThread(thread).IsSuccess());
thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
return thread;
};
thread_local auto raw_thread = KThread(system.Kernel());
thread_local auto thread = initialize(&raw_thread);
return thread;
thread_local KThread* saved_thread = make_thread();
return saved_thread;
}
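The rewritten GetHostDummyThread leans on a function-scope thread_local: the initializer runs once per host thread, on that thread's first call, and the result is cached for the thread's lifetime. The bare idiom:

#include <cstdio>

struct PerThreadState {
    PerThreadState() {
        std::puts("constructed once per calling thread");
    }
    int counter{};
};

PerThreadState& GetPerThreadState() {
    // Lazily constructed on the first call from each thread, then reused.
    thread_local PerThreadState state;
    return state;
}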
/// Registers a CPU core thread by allocating a host thread ID for it
@@ -650,6 +660,22 @@ struct KernelCore::Impl {
time_phys_addr, time_size, "Time:SharedMemory");
}
void InitializePageSlab() {
// Allocate slab heaps
user_slab_heap_pages =
std::make_unique<KSlabHeap<Page>>(KSlabHeap<Page>::AllocationType::Guest);
// TODO(ameerj): This should be derived, not hardcoded within the kernel
constexpr u64 user_slab_heap_size{0x3de000};
// Reserve slab heaps
ASSERT(
system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
// Initialize slab heap
user_slab_heap_pages->Initialize(
system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
user_slab_heap_size);
}
KClientPort* CreateNamedServicePort(std::string name) {
auto search = service_interface_factory.find(name);
if (search == service_interface_factory.end()) {
@@ -687,6 +713,7 @@ struct KernelCore::Impl {
}
std::mutex server_ports_lock;
std::mutex server_sessions_lock;
std::mutex registered_objects_lock;
std::mutex registered_in_use_objects_lock;
@@ -710,13 +737,14 @@ struct KernelCore::Impl {
// stores all the objects in place.
std::unique_ptr<KHandleTable> global_handle_table;
std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container;
KAutoObjectWithListContainer object_list_container;
/// Map of named ports managed by the kernel, which can be retrieved using
/// the ConnectToPort SVC.
std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
NamedPortTable named_ports;
std::unordered_set<KServerPort*> server_ports;
std::unordered_set<KServerSession*> server_sessions;
std::unordered_set<KAutoObject*> registered_objects;
std::unordered_set<KAutoObject*> registered_in_use_objects;
@@ -728,6 +756,7 @@ struct KernelCore::Impl {
// Kernel memory management
std::unique_ptr<KMemoryManager> memory_manager;
std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;
// Shared memory for services
Kernel::KSharedMemory* hid_shared_mem{};
@@ -886,11 +915,11 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
}
KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
return *impl->global_object_list_container;
return impl->object_list_container;
}
const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
return *impl->global_object_list_container;
return impl->object_list_container;
}
void KernelCore::InvalidateAllInstructionCaches() {
@@ -920,6 +949,16 @@ KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
return impl->CreateNamedServicePort(std::move(name));
}
void KernelCore::RegisterServerSession(KServerSession* server_session) {
std::lock_guard lk(impl->server_sessions_lock);
impl->server_sessions.insert(server_session);
}
void KernelCore::UnregisterServerSession(KServerSession* server_session) {
std::lock_guard lk(impl->server_sessions_lock);
impl->server_sessions.erase(server_session);
}
void KernelCore::RegisterKernelObject(KAutoObject* object) {
std::lock_guard lk(impl->registered_objects_lock);
impl->registered_objects.insert(object);
@@ -992,6 +1031,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
return *impl->memory_manager;
}
KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() {
return *impl->user_slab_heap_pages;
}
const KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() const {
return *impl->user_slab_heap_pages;
}
Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
return *impl->hid_shared_mem;
}

View File

@@ -14,6 +14,7 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_common.h"
namespace Core {
@@ -42,7 +43,6 @@ class KHandleTable;
class KLinkedListNode;
class KMemoryLayout;
class KMemoryManager;
class KPageBuffer;
class KPort;
class KProcess;
class KResourceLimit;
@@ -52,7 +52,6 @@ class KSession;
class KSharedMemory;
class KSharedMemoryInfo;
class KThread;
class KThreadLocalPage;
class KTransferMemory;
class KWorkerTaskManager;
class KWritableEvent;
@@ -195,6 +194,14 @@ public:
/// Opens a port to a service previously registered with RegisterNamedService.
KClientPort* CreateNamedServicePort(std::string name);
/// Registers a server session with the global emulation state, to be freed on shutdown. This is
/// necessary because we do not emulate processes for HLE sessions.
void RegisterServerSession(KServerSession* server_session);
/// Unregisters a server session previously registered with RegisterServerSession when it was
/// destroyed during the current emulation session.
void UnregisterServerSession(KServerSession* server_session);
/// Registers all kernel objects with the global emulation state; this is purely for tracking
/// leaks after emulation has been shut down.
void RegisterKernelObject(KAutoObject* object);
@@ -232,6 +239,12 @@ public:
/// Gets the virtual memory manager for the kernel.
const KMemoryManager& MemoryManager() const;
/// Gets the slab heap allocated for user space pages.
KSlabHeap<Page>& GetUserSlabHeapPages();
/// Gets the slab heap allocated for user space pages.
const KSlabHeap<Page>& GetUserSlabHeapPages() const;
/// Gets the shared memory object for HID services.
Kernel::KSharedMemory& GetHidSharedMem();
@@ -323,10 +336,6 @@ public:
return slab_heap_container->writeable_event;
} else if constexpr (std::is_same_v<T, KCodeMemory>) {
return slab_heap_container->code_memory;
} else if constexpr (std::is_same_v<T, KPageBuffer>) {
return slab_heap_container->page_buffer;
} else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
return slab_heap_container->thread_local_page;
}
}
@@ -388,8 +397,6 @@ private:
KSlabHeap<KTransferMemory> transfer_memory;
KSlabHeap<KWritableEvent> writeable_event;
KSlabHeap<KCodeMemory> code_memory;
KSlabHeap<KPageBuffer> page_buffer;
KSlabHeap<KThreadLocalPage> thread_local_page;
};
std::unique_ptr<SlabHeapContainer> slab_heap_container;

View File

@@ -49,9 +49,12 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
return;
}
// Allocate a dummy guest thread for this host thread.
kernel.RegisterHostThread();
// Ensure the dummy thread allocated for this host thread is closed on exit.
auto* dummy_thread = kernel.GetCurrentEmuThread();
SCOPE_EXIT({ dummy_thread->Close(); });
while (true) {
std::function<void()> task;

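SCOPE_EXIT above registers cleanup that runs when the enclosing scope unwinds, so the dummy thread is closed on every exit path of the worker loop. A bare-bones RAII guard in the same spirit (a sketch, not yuzu's actual macro):

#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& func) : func_{std::forward<F>(func)} {}
    ~ScopeExit() {
        func_(); // runs on return, exception, or fall-through
    }
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F func_;
};

// Usage:
//   ScopeExit guard{[&] { dummy_thread->Close(); }};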
View File

@@ -59,7 +59,7 @@ class KAutoObjectWithSlabHeapAndContainer : public Base {
private:
static Derived* Allocate(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().Allocate(kernel);
return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel);
}
static void Free(KernelCore& kernel, Derived* obj) {

View File

@@ -96,6 +96,4 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
constexpr inline s32 LowestThreadPriority = 63;
constexpr inline s32 HighestThreadPriority = 0;
constexpr inline size_t ThreadLocalRegionSize = 0x200;
} // namespace Kernel::Svc

View File

@@ -16,6 +16,7 @@
#include "core/file_sys/control_metadata.h"
#include "core/file_sys/patch_manager.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/service/acc/acc.h"
#include "core/hle/service/acc/acc_aa.h"
#include "core/hle/service/acc/acc_su.h"

View File

@@ -7,7 +7,6 @@
#include <array>
#include <optional>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "common/uuid.h"

View File

@@ -980,7 +980,7 @@ private:
LOG_DEBUG(Service_AM, "called");
IPC::RequestParser rp{ctx};
applet->GetBroker().PushNormalDataFromGame(rp.PopIpcInterface<IStorage>().lock());
applet->GetBroker().PushNormalDataFromGame(rp.PopIpcInterface<IStorage>());
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
@@ -1007,7 +1007,7 @@ private:
LOG_DEBUG(Service_AM, "called");
IPC::RequestParser rp{ctx};
applet->GetBroker().PushInteractiveDataFromGame(rp.PopIpcInterface<IStorage>().lock());
applet->GetBroker().PushInteractiveDataFromGame(rp.PopIpcInterface<IStorage>());
ASSERT(applet->IsInitialized());
applet->ExecuteInteractive();

View File

@@ -1,139 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/frontend/applets/mii_edit.h"
#include "core/hle/service/am/am.h"
#include "core/hle/service/am/applets/applet_mii_edit.h"
#include "core/hle/service/mii/mii_manager.h"
namespace Service::AM::Applets {
MiiEdit::MiiEdit(Core::System& system_, LibraryAppletMode applet_mode_,
const Core::Frontend::MiiEditApplet& frontend_)
: Applet{system_, applet_mode_}, frontend{frontend_}, system{system_} {}
MiiEdit::~MiiEdit() = default;
void MiiEdit::Initialize() {
// Note: MiiEdit is not initialized with common arguments.
// Instead, it is initialized by an AppletInput storage with size 0x100 bytes.
// Do NOT call Applet::Initialize() here.
const auto storage = broker.PopNormalDataToApplet();
ASSERT(storage != nullptr);
const auto applet_input_data = storage->GetData();
ASSERT(applet_input_data.size() >= sizeof(MiiEditAppletInputCommon));
std::memcpy(&applet_input_common, applet_input_data.data(), sizeof(MiiEditAppletInputCommon));
LOG_INFO(Service_AM,
"Initializing MiiEdit Applet with MiiEditAppletVersion={} and MiiEditAppletMode={}",
applet_input_common.version, applet_input_common.applet_mode);
switch (applet_input_common.version) {
case MiiEditAppletVersion::Version3:
ASSERT(applet_input_data.size() ==
sizeof(MiiEditAppletInputCommon) + sizeof(MiiEditAppletInputV3));
std::memcpy(&applet_input_v3, applet_input_data.data() + sizeof(MiiEditAppletInputCommon),
sizeof(MiiEditAppletInputV3));
break;
case MiiEditAppletVersion::Version4:
ASSERT(applet_input_data.size() ==
sizeof(MiiEditAppletInputCommon) + sizeof(MiiEditAppletInputV4));
std::memcpy(&applet_input_v4, applet_input_data.data() + sizeof(MiiEditAppletInputCommon),
sizeof(MiiEditAppletInputV4));
break;
default:
UNIMPLEMENTED_MSG("Unknown MiiEditAppletVersion={} with size={}",
applet_input_common.version, applet_input_data.size());
ASSERT(applet_input_data.size() >=
sizeof(MiiEditAppletInputCommon) + sizeof(MiiEditAppletInputV4));
std::memcpy(&applet_input_v4, applet_input_data.data() + sizeof(MiiEditAppletInputCommon),
sizeof(MiiEditAppletInputV4));
break;
}
}
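Initialize above checks the declared version against the payload size before each std::memcpy out of the storage buffer. The defensive-copy shape of that code, generalized (hypothetical helper):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>
#include <span>
#include <type_traits>

// Copy a trivially copyable struct out of a byte buffer, but only if enough bytes exist.
template <typename T>
std::optional<T> ReadStruct(std::span<const std::uint8_t> bytes, std::size_t offset = 0) {
    static_assert(std::is_trivially_copyable_v<T>);
    if (bytes.size() < offset + sizeof(T)) {
        return std::nullopt;
    }
    T value{};
    std::memcpy(&value, bytes.data() + offset, sizeof(T));
    return value;
}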
bool MiiEdit::TransactionComplete() const {
return is_complete;
}
ResultCode MiiEdit::GetStatus() const {
return ResultSuccess;
}
void MiiEdit::ExecuteInteractive() {
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
}
void MiiEdit::Execute() {
if (is_complete) {
return;
}
// This is a default stub for each of the MiiEdit applet modes.
switch (applet_input_common.applet_mode) {
case MiiEditAppletMode::ShowMiiEdit:
case MiiEditAppletMode::AppendMii:
case MiiEditAppletMode::AppendMiiImage:
case MiiEditAppletMode::UpdateMiiImage:
MiiEditOutput(MiiEditResult::Success, 0);
break;
case MiiEditAppletMode::CreateMii:
case MiiEditAppletMode::EditMii: {
Service::Mii::MiiManager mii_manager;
const MiiEditCharInfo char_info{
.mii_info{applet_input_common.applet_mode == MiiEditAppletMode::EditMii
? applet_input_v4.char_info.mii_info
: mii_manager.BuildDefault(0)},
};
MiiEditOutputForCharInfoEditing(MiiEditResult::Success, char_info);
break;
}
default:
UNIMPLEMENTED_MSG("Unknown MiiEditAppletMode={}", applet_input_common.applet_mode);
MiiEditOutput(MiiEditResult::Success, 0);
break;
}
}
void MiiEdit::MiiEditOutput(MiiEditResult result, s32 index) {
const MiiEditAppletOutput applet_output{
.result{result},
.index{index},
};
std::vector<u8> out_data(sizeof(MiiEditAppletOutput));
std::memcpy(out_data.data(), &applet_output, sizeof(MiiEditAppletOutput));
is_complete = true;
broker.PushNormalDataFromApplet(std::make_shared<IStorage>(system, std::move(out_data)));
broker.SignalStateChanged();
}
void MiiEdit::MiiEditOutputForCharInfoEditing(MiiEditResult result,
const MiiEditCharInfo& char_info) {
const MiiEditAppletOutputForCharInfoEditing applet_output{
.result{result},
.char_info{char_info},
};
std::vector<u8> out_data(sizeof(MiiEditAppletOutputForCharInfoEditing));
std::memcpy(out_data.data(), &applet_output, sizeof(MiiEditAppletOutputForCharInfoEditing));
is_complete = true;
broker.PushNormalDataFromApplet(std::make_shared<IStorage>(system, std::move(out_data)));
broker.SignalStateChanged();
}
} // namespace Service::AM::Applets

View File

@@ -1,45 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/hle/result.h"
#include "core/hle/service/am/applets/applet_mii_edit_types.h"
#include "core/hle/service/am/applets/applets.h"
namespace Core {
class System;
} // namespace Core
namespace Service::AM::Applets {
class MiiEdit final : public Applet {
public:
explicit MiiEdit(Core::System& system_, LibraryAppletMode applet_mode_,
const Core::Frontend::MiiEditApplet& frontend_);
~MiiEdit() override;
void Initialize() override;
bool TransactionComplete() const override;
ResultCode GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
void MiiEditOutput(MiiEditResult result, s32 index);
void MiiEditOutputForCharInfoEditing(MiiEditResult result, const MiiEditCharInfo& char_info);
private:
const Core::Frontend::MiiEditApplet& frontend;
Core::System& system;
MiiEditAppletInputCommon applet_input_common{};
MiiEditAppletInputV3 applet_input_v3{};
MiiEditAppletInputV4 applet_input_v4{};
bool is_complete{false};
};
} // namespace Service::AM::Applets

View File

@@ -1,83 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/service/mii/types.h"
namespace Service::AM::Applets {
enum class MiiEditAppletVersion : s32 {
Version3 = 0x3, // 1.0.0 - 10.1.1
Version4 = 0x4, // 10.2.0+
};
// This is nn::mii::AppletMode
enum class MiiEditAppletMode : u32 {
ShowMiiEdit = 0,
AppendMii = 1,
AppendMiiImage = 2,
UpdateMiiImage = 3,
CreateMii = 4,
EditMii = 5,
};
enum class MiiEditResult : u32 {
Success,
Cancel,
};
struct MiiEditCharInfo {
Service::Mii::MiiInfo mii_info{};
};
static_assert(sizeof(MiiEditCharInfo) == 0x58, "MiiEditCharInfo has incorrect size.");
struct MiiEditAppletInputCommon {
MiiEditAppletVersion version{};
MiiEditAppletMode applet_mode{};
};
static_assert(sizeof(MiiEditAppletInputCommon) == 0x8,
"MiiEditAppletInputCommon has incorrect size.");
struct MiiEditAppletInputV3 {
u32 special_mii_key_code{};
std::array<Common::UUID, 8> valid_uuids{};
Common::UUID used_uuid{};
INSERT_PADDING_BYTES(0x64);
};
static_assert(sizeof(MiiEditAppletInputV3) == 0x100 - sizeof(MiiEditAppletInputCommon),
"MiiEditAppletInputV3 has incorrect size.");
struct MiiEditAppletInputV4 {
u32 special_mii_key_code{};
MiiEditCharInfo char_info{};
INSERT_PADDING_BYTES(0x28);
Common::UUID used_uuid{};
INSERT_PADDING_BYTES(0x64);
};
static_assert(sizeof(MiiEditAppletInputV4) == 0x100 - sizeof(MiiEditAppletInputCommon),
"MiiEditAppletInputV4 has incorrect size.");
// This is nn::mii::AppletOutput
struct MiiEditAppletOutput {
MiiEditResult result{};
s32 index{};
INSERT_PADDING_BYTES(0x18);
};
static_assert(sizeof(MiiEditAppletOutput) == 0x20, "MiiEditAppletOutput has incorrect size.");
// This is nn::mii::AppletOutputForCharInfoEditing
struct MiiEditAppletOutputForCharInfoEditing {
MiiEditResult result{};
MiiEditCharInfo char_info{};
INSERT_PADDING_BYTES(0x24);
};
static_assert(sizeof(MiiEditAppletOutputForCharInfoEditing) == 0x80,
"MiiEditAppletOutputForCharInfoEditing has incorrect size.");
} // namespace Service::AM::Applets

View File

@@ -9,7 +9,6 @@
#include "core/frontend/applets/controller.h"
#include "core/frontend/applets/error.h"
#include "core/frontend/applets/general_frontend.h"
#include "core/frontend/applets/mii_edit.h"
#include "core/frontend/applets/profile_select.h"
#include "core/frontend/applets/software_keyboard.h"
#include "core/frontend/applets/web_browser.h"
@@ -20,7 +19,6 @@
#include "core/hle/service/am/applets/applet_controller.h"
#include "core/hle/service/am/applets/applet_error.h"
#include "core/hle/service/am/applets/applet_general_backend.h"
#include "core/hle/service/am/applets/applet_mii_edit.h"
#include "core/hle/service/am/applets/applet_profile_select.h"
#include "core/hle/service/am/applets/applet_software_keyboard.h"
#include "core/hle/service/am/applets/applet_web_browser.h"
@@ -173,12 +171,11 @@ void Applet::Initialize() {
AppletFrontendSet::AppletFrontendSet() = default;
AppletFrontendSet::AppletFrontendSet(ControllerApplet controller_applet, ErrorApplet error_applet,
MiiEdit mii_edit_,
ParentalControlsApplet parental_controls_applet,
PhotoViewer photo_viewer_, ProfileSelect profile_select_,
SoftwareKeyboard software_keyboard_, WebBrowser web_browser_)
: controller{std::move(controller_applet)}, error{std::move(error_applet)},
mii_edit{std::move(mii_edit_)}, parental_controls{std::move(parental_controls_applet)},
parental_controls{std::move(parental_controls_applet)},
photo_viewer{std::move(photo_viewer_)}, profile_select{std::move(profile_select_)},
software_keyboard{std::move(software_keyboard_)}, web_browser{std::move(web_browser_)} {}
@@ -205,10 +202,6 @@ void AppletManager::SetAppletFrontendSet(AppletFrontendSet set) {
frontend.error = std::move(set.error);
}
if (set.mii_edit != nullptr) {
frontend.mii_edit = std::move(set.mii_edit);
}
if (set.parental_controls != nullptr) {
frontend.parental_controls = std::move(set.parental_controls);
}
@@ -245,10 +238,6 @@ void AppletManager::SetDefaultAppletsIfMissing() {
frontend.error = std::make_unique<Core::Frontend::DefaultErrorApplet>();
}
if (frontend.mii_edit == nullptr) {
frontend.mii_edit = std::make_unique<Core::Frontend::DefaultMiiEditApplet>();
}
if (frontend.parental_controls == nullptr) {
frontend.parental_controls =
std::make_unique<Core::Frontend::DefaultParentalControlsApplet>();
@@ -288,8 +277,6 @@ std::shared_ptr<Applet> AppletManager::GetApplet(AppletId id, LibraryAppletMode
return std::make_shared<ProfileSelect>(system, mode, *frontend.profile_select);
case AppletId::SoftwareKeyboard:
return std::make_shared<SoftwareKeyboard>(system, mode, *frontend.software_keyboard);
case AppletId::MiiEdit:
return std::make_shared<MiiEdit>(system, mode, *frontend.mii_edit);
case AppletId::Web:
case AppletId::Shop:
case AppletId::OfflineWeb:

View File

@@ -20,7 +20,6 @@ namespace Core::Frontend {
class ControllerApplet;
class ECommerceApplet;
class ErrorApplet;
class MiiEditApplet;
class ParentalControlsApplet;
class PhotoViewerApplet;
class ProfileSelectApplet;
@@ -179,7 +178,6 @@ protected:
struct AppletFrontendSet {
using ControllerApplet = std::unique_ptr<Core::Frontend::ControllerApplet>;
using ErrorApplet = std::unique_ptr<Core::Frontend::ErrorApplet>;
using MiiEdit = std::unique_ptr<Core::Frontend::MiiEditApplet>;
using ParentalControlsApplet = std::unique_ptr<Core::Frontend::ParentalControlsApplet>;
using PhotoViewer = std::unique_ptr<Core::Frontend::PhotoViewerApplet>;
using ProfileSelect = std::unique_ptr<Core::Frontend::ProfileSelectApplet>;
@@ -188,9 +186,9 @@ struct AppletFrontendSet {
AppletFrontendSet();
AppletFrontendSet(ControllerApplet controller_applet, ErrorApplet error_applet,
MiiEdit mii_edit_, ParentalControlsApplet parental_controls_applet,
PhotoViewer photo_viewer_, ProfileSelect profile_select_,
SoftwareKeyboard software_keyboard_, WebBrowser web_browser_);
ParentalControlsApplet parental_controls_applet, PhotoViewer photo_viewer_,
ProfileSelect profile_select_, SoftwareKeyboard software_keyboard_,
WebBrowser web_browser_);
~AppletFrontendSet();
AppletFrontendSet(const AppletFrontendSet&) = delete;
@@ -201,7 +199,6 @@ struct AppletFrontendSet {
ControllerApplet controller;
ErrorApplet error;
MiiEdit mii_edit;
ParentalControlsApplet parental_controls;
PhotoViewer photo_viewer;
ProfileSelect profile_select;

View File

@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/settings.h"
#include "core/core_timing.h"
#include "core/hid/emulated_console.h"
#include "core/hid/hid_core.h"

View File

@@ -8,6 +8,7 @@
#include "common/common_types.h"
#include "common/quaternion.h"
#include "core/hid/hid_types.h"
#include "core/hle/service/hid/controllers/controller_base.h"
#include "core/hle/service/hid/ring_lifo.h"

View File

@@ -5,6 +5,7 @@
#pragma once
#include "common/common_types.h"
#include "common/swap.h"
namespace Core::Timing {
class CoreTiming;

View File

@@ -4,8 +4,11 @@
#pragma once
#include <array>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/hid/controllers/controller_base.h"
#include "core/hle/service/hid/ring_lifo.h"

View File

@@ -4,7 +4,11 @@
#pragma once
#include <array>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/hid/controllers/controller_base.h"
#include "core/hle/service/hid/ring_lifo.h"

View File

@@ -4,7 +4,10 @@
#pragma once
#include <array>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/hid/controllers/controller_base.h"
#include "core/hle/service/hid/ring_lifo.h"

View File

@@ -10,8 +10,7 @@
#include "common/bit_field.h"
#include "common/common_types.h"
#include "common/vector_math.h"
#include "common/quaternion.h"
#include "core/hid/hid_types.h"
#include "core/hle/service/hid/controllers/controller_base.h"
#include "core/hle/service/hid/ring_lifo.h"

View File

@@ -5,6 +5,7 @@
#include <algorithm>
#include <cstring>
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/core_timing.h"

View File

@@ -4,8 +4,11 @@
#pragma once
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/point.h"
#include "common/swap.h"
#include "core/hid/hid_types.h"
#include "core/hle/service/hid/controllers/controller_base.h"
#include "core/hle/service/hid/ring_lifo.h"

View File

@@ -5,7 +5,9 @@
#pragma once
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hid/hid_types.h"
#include "core/hle/service/hid/controllers/controller_base.h"
#include "core/hle/service/hid/ring_lifo.h"

View File

@@ -17,12 +17,21 @@ namespace Service::KernelHelpers {
ServiceContext::ServiceContext(Core::System& system_, std::string name_)
: kernel(system_.Kernel()) {
// Create a resource limit for the process.
const auto physical_memory_size =
kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::System);
auto* resource_limit = Kernel::CreateResourceLimitForProcess(system_, physical_memory_size);
// Create the process.
process = Kernel::KProcess::Create(kernel);
ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_),
Kernel::KProcess::ProcessType::KernelInternal,
kernel.GetSystemResourceLimit())
resource_limit)
.IsSuccess());
// Close reference to our resource limit, as the process opens one.
resource_limit->Close();
}
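The trailing resource_limit->Close() is the usual intrusive reference-count handoff: creation hands the caller one reference, the process opens its own, and the constructor drops the local one so the process ends up as the sole owner. A toy version of that convention (hypothetical RefCounted type):

#include <atomic>

struct RefCounted {
    std::atomic<int> refs{1}; // creation hands the caller one reference

    void Open() {
        refs.fetch_add(1);
    }
    void Close() {
        if (refs.fetch_sub(1) == 1) {
            delete this; // last reference released
        }
    }
};

void Example() {
    auto* limit = new RefCounted(); // ref = 1 (ours, from creation)
    limit->Open();                  // ref = 2 (the owning object opens its own)
    limit->Close();                 // ref = 1: we drop our local reference
    limit->Close();                 // owner's release frees the object
}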
ServiceContext::~ServiceContext() {

View File

@@ -12,6 +12,7 @@
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/service/ldr/ldr.h"
@@ -287,7 +288,7 @@ public:
}
bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const {
const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
constexpr std::size_t padding_size{4 * Kernel::PageSize};
const auto start_info{page_table.QueryInfo(start - 1)};
if (start_info.state != Kernel::KMemoryState::Free) {
@@ -307,69 +308,31 @@ public:
return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
}
ResultCode GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) {
size = Common::AlignUp(size, Kernel::PageSize);
size += page_table.GetNumGuardPages() * Kernel::PageSize * 4;
const auto is_region_available = [&](VAddr addr) {
const auto end_addr = addr + size;
while (addr < end_addr) {
if (system.Memory().IsValidVirtualAddress(addr)) {
return false;
}
if (!page_table.IsInsideAddressSpace(out_addr, size)) {
return false;
}
if (page_table.IsInsideHeapRegion(out_addr, size)) {
return false;
}
if (page_table.IsInsideAliasRegion(out_addr, size)) {
return false;
}
addr += Kernel::PageSize;
}
return true;
};
bool succeeded = false;
const auto map_region_end =
page_table.GetAliasCodeRegionStart() + page_table.GetAliasCodeRegionSize();
while (current_map_addr < map_region_end) {
if (is_region_available(current_map_addr)) {
succeeded = true;
break;
}
current_map_addr += 0x100000;
}
if (!succeeded) {
UNREACHABLE_MSG("Out of address space!");
return Kernel::ResultOutOfMemory;
}
out_addr = current_map_addr;
current_map_addr += size;
return ResultSuccess;
VAddr GetRandomMapRegion(const Kernel::KPageTable& page_table, std::size_t size) const {
VAddr addr{};
const std::size_t end_pages{(page_table.GetAliasCodeRegionSize() - size) >>
Kernel::PageBits};
do {
addr = page_table.GetAliasCodeRegionStart() +
(Kernel::KSystemControl::GenerateRandomRange(0, end_pages) << Kernel::PageBits);
} while (!page_table.IsInsideAddressSpace(addr, size) ||
page_table.IsInsideHeapRegion(addr, size) ||
page_table.IsInsideAliasRegion(addr, size));
return addr;
}
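GetRandomMapRegion keeps drawing page-aligned candidate addresses inside the alias-code region until one passes the address-space checks. Stripped of the kernel types, the sampling step looks like this (illustrative, using <random> instead of KSystemControl):

#include <cstdint>
#include <random>

constexpr std::uint64_t PageBits = 12; // 4 KiB pages
constexpr std::uint64_t PageSize = 1ULL << PageBits;

// Pick a page-aligned address in [region_start, region_start + region_size - size].
std::uint64_t RandomMapAddress(std::uint64_t region_start, std::uint64_t region_size,
                               std::uint64_t size, std::mt19937_64& rng) {
    const std::uint64_t end_pages = (region_size - size) >> PageBits;
    std::uniform_int_distribution<std::uint64_t> dist(0, end_pages);
    return region_start + (dist(rng) << PageBits);
}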
ResultVal<VAddr> MapProcessCodeMemory(Kernel::KProcess* process, VAddr base_addr, u64 size) {
auto& page_table{process->PageTable()};
VAddr addr{};
ResultVal<VAddr> MapProcessCodeMemory(Kernel::KProcess* process, VAddr baseAddress,
u64 size) const {
for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
R_TRY(GetAvailableMapRegion(page_table, size, addr));
auto& page_table{process->PageTable()};
const VAddr addr{GetRandomMapRegion(page_table, size)};
const ResultCode result{page_table.MapCodeMemory(addr, baseAddress, size)};
const ResultCode result{page_table.MapCodeMemory(addr, base_addr, size)};
if (result == Kernel::ResultInvalidCurrentMemory) {
continue;
}
R_TRY(result);
CASCADE_CODE(result);
if (ValidateRegionForMap(page_table, addr, size)) {
return addr;
@@ -380,7 +343,7 @@ public:
}
ResultVal<VAddr> MapNro(Kernel::KProcess* process, VAddr nro_addr, std::size_t nro_size,
VAddr bss_addr, std::size_t bss_size, std::size_t size) {
VAddr bss_addr, std::size_t bss_size, std::size_t size) const {
for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
auto& page_table{process->PageTable()};
VAddr addr{};
@@ -634,7 +597,6 @@ public:
LOG_WARNING(Service_LDR, "(STUBBED) called");
initialized = true;
current_map_addr = system.CurrentProcess()->PageTable().GetAliasCodeRegionStart();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
@@ -645,7 +607,6 @@ private:
std::map<VAddr, NROInfo> nro;
std::map<VAddr, std::vector<SHA256Hash>> nrr;
VAddr current_map_addr{};
bool IsValidNROHash(const SHA256Hash& hash) const {
return std::any_of(nrr.begin(), nrr.end(), [&hash](const auto& p) {

View File

@@ -1,4 +1,4 @@
// Copyright 2020 yuzu Emulator Project
// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
@@ -12,6 +12,7 @@
#include "core/hle/service/acc/profile_manager.h"
#include "core/hle/service/mii/mii_manager.h"
#include "core/hle/service/mii/raw_data.h"
#include "core/hle/service/mii/types.h"
namespace Service::Mii {

View File

@@ -1,16 +1,315 @@
// Copyright 2020 yuzu Emulator Project
// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <vector>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/uuid.h"
#include "core/hle/result.h"
#include "core/hle/service/mii/types.h"
namespace Service::Mii {
enum class Source : u32 {
Database = 0,
Default = 1,
Account = 2,
Friend = 3,
};
enum class SourceFlag : u32 {
None = 0,
Database = 1 << 0,
Default = 1 << 1,
};
DECLARE_ENUM_FLAG_OPERATORS(SourceFlag);
struct MiiInfo {
Common::UUID uuid;
std::array<char16_t, 11> name;
u8 font_region;
u8 favorite_color;
u8 gender;
u8 height;
u8 build;
u8 type;
u8 region_move;
u8 faceline_type;
u8 faceline_color;
u8 faceline_wrinkle;
u8 faceline_make;
u8 hair_type;
u8 hair_color;
u8 hair_flip;
u8 eye_type;
u8 eye_color;
u8 eye_scale;
u8 eye_aspect;
u8 eye_rotate;
u8 eye_x;
u8 eye_y;
u8 eyebrow_type;
u8 eyebrow_color;
u8 eyebrow_scale;
u8 eyebrow_aspect;
u8 eyebrow_rotate;
u8 eyebrow_x;
u8 eyebrow_y;
u8 nose_type;
u8 nose_scale;
u8 nose_y;
u8 mouth_type;
u8 mouth_color;
u8 mouth_scale;
u8 mouth_aspect;
u8 mouth_y;
u8 beard_color;
u8 beard_type;
u8 mustache_type;
u8 mustache_scale;
u8 mustache_y;
u8 glasses_type;
u8 glasses_color;
u8 glasses_scale;
u8 glasses_y;
u8 mole_type;
u8 mole_scale;
u8 mole_x;
u8 mole_y;
u8 padding;
std::u16string Name() const;
};
static_assert(sizeof(MiiInfo) == 0x58, "MiiInfo has incorrect size.");
static_assert(std::has_unique_object_representations_v<MiiInfo>,
"All bits of MiiInfo must contribute to its value.");
#pragma pack(push, 4)
struct MiiInfoElement {
MiiInfoElement(const MiiInfo& info_, Source source_) : info{info_}, source{source_} {}
MiiInfo info{};
Source source{};
};
static_assert(sizeof(MiiInfoElement) == 0x5c, "MiiInfoElement has incorrect size.");
struct MiiStoreBitFields {
union {
u32 word_0{};
BitField<0, 8, u32> hair_type;
BitField<8, 7, u32> height;
BitField<15, 1, u32> mole_type;
BitField<16, 7, u32> build;
BitField<23, 1, HairFlip> hair_flip;
BitField<24, 7, u32> hair_color;
BitField<31, 1, u32> type;
};
union {
u32 word_1{};
BitField<0, 7, u32> eye_color;
BitField<7, 1, Gender> gender;
BitField<8, 7, u32> eyebrow_color;
BitField<16, 7, u32> mouth_color;
BitField<24, 7, u32> beard_color;
};
union {
u32 word_2{};
BitField<0, 7, u32> glasses_color;
BitField<8, 6, u32> eye_type;
BitField<14, 2, u32> region_move;
BitField<16, 6, u32> mouth_type;
BitField<22, 2, FontRegion> font_region;
BitField<24, 5, u32> eye_y;
BitField<29, 3, u32> glasses_scale;
};
union {
u32 word_3{};
BitField<0, 5, u32> eyebrow_type;
BitField<5, 3, MustacheType> mustache_type;
BitField<8, 5, u32> nose_type;
BitField<13, 3, BeardType> beard_type;
BitField<16, 5, u32> nose_y;
BitField<21, 3, u32> mouth_aspect;
BitField<24, 5, u32> mouth_y;
BitField<29, 3, u32> eyebrow_aspect;
};
union {
u32 word_4{};
BitField<0, 5, u32> mustache_y;
BitField<5, 3, u32> eye_rotate;
BitField<8, 5, u32> glasses_y;
BitField<13, 3, u32> eye_aspect;
BitField<16, 5, u32> mole_x;
BitField<21, 3, u32> eye_scale;
BitField<24, 5, u32> mole_y;
};
union {
u32 word_5{};
BitField<0, 5, u32> glasses_type;
BitField<8, 4, u32> favorite_color;
BitField<12, 4, u32> faceline_type;
BitField<16, 4, u32> faceline_color;
BitField<20, 4, u32> faceline_wrinkle;
BitField<24, 4, u32> faceline_makeup;
BitField<28, 4, u32> eye_x;
};
union {
u32 word_6{};
BitField<0, 4, u32> eyebrow_scale;
BitField<4, 4, u32> eyebrow_rotate;
BitField<8, 4, u32> eyebrow_x;
BitField<12, 4, u32> eyebrow_y;
BitField<16, 4, u32> nose_scale;
BitField<20, 4, u32> mouth_scale;
BitField<24, 4, u32> mustache_scale;
BitField<28, 4, u32> mole_scale;
};
};
static_assert(sizeof(MiiStoreBitFields) == 0x1c, "MiiStoreBitFields has incorrect size.");
static_assert(std::is_trivially_copyable_v<MiiStoreBitFields>,
"MiiStoreBitFields is not trivially copyable.");
struct MiiStoreData {
using Name = std::array<char16_t, 10>;
MiiStoreData();
MiiStoreData(const Name& name, const MiiStoreBitFields& bit_fields,
const Common::UUID& user_id);
// This corresponds to the above structure MiiStoreBitFields. I did it like this because the
// BitField<> type makes this (and anything that contains it) not trivially copyable, which is
// not suitable for our uses.
struct {
std::array<u8, 0x1C> data{};
static_assert(sizeof(MiiStoreBitFields) == sizeof(data), "data field has incorrect size.");
Name name{};
Common::UUID uuid{};
} data;
u16 data_crc{};
u16 device_crc{};
};
static_assert(sizeof(MiiStoreData) == 0x44, "MiiStoreData has incorrect size.");
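Keeping the bit fields as a raw byte array is what preserves MiiStoreData's trivial copyability; code that needs the typed view round-trips through std::memcpy. Hypothetical helpers over the structs above:

#include <cstring>

// Hypothetical accessors; they assume the MiiStoreData/MiiStoreBitFields definitions above.
inline MiiStoreBitFields ReadBitFields(const MiiStoreData& store) {
    MiiStoreBitFields fields{};
    std::memcpy(&fields, store.data.data.data(), sizeof(fields));
    return fields;
}

inline void WriteBitFields(MiiStoreData& store, const MiiStoreBitFields& fields) {
    std::memcpy(store.data.data.data(), &fields, sizeof(fields));
}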
struct MiiStoreDataElement {
MiiStoreData data{};
Source source{};
};
static_assert(sizeof(MiiStoreDataElement) == 0x48, "MiiStoreDataElement has incorrect size.");
struct MiiDatabase {
u32 magic{}; // 'NFDB'
std::array<MiiStoreData, 0x64> miis{};
INSERT_PADDING_BYTES(1);
u8 count{};
u16 crc{};
};
static_assert(sizeof(MiiDatabase) == 0x1A98, "MiiDatabase has incorrect size.");
struct RandomMiiValues {
std::array<u8, 0xbc> values{};
};
static_assert(sizeof(RandomMiiValues) == 0xbc, "RandomMiiValues has incorrect size.");
struct RandomMiiData4 {
Gender gender{};
Age age{};
Race race{};
u32 values_count{};
std::array<u32, 47> values{};
};
static_assert(sizeof(RandomMiiData4) == 0xcc, "RandomMiiData4 has incorrect size.");
struct RandomMiiData3 {
u32 arg_1;
u32 arg_2;
u32 values_count;
std::array<u32, 47> values{};
};
static_assert(sizeof(RandomMiiData3) == 0xc8, "RandomMiiData3 has incorrect size.");
struct RandomMiiData2 {
u32 arg_1;
u32 values_count;
std::array<u32, 47> values{};
};
static_assert(sizeof(RandomMiiData2) == 0xc4, "RandomMiiData2 has incorrect size.");
struct DefaultMii {
u32 face_type{};
u32 face_color{};
u32 face_wrinkle{};
u32 face_makeup{};
u32 hair_type{};
u32 hair_color{};
u32 hair_flip{};
u32 eye_type{};
u32 eye_color{};
u32 eye_scale{};
u32 eye_aspect{};
u32 eye_rotate{};
u32 eye_x{};
u32 eye_y{};
u32 eyebrow_type{};
u32 eyebrow_color{};
u32 eyebrow_scale{};
u32 eyebrow_aspect{};
u32 eyebrow_rotate{};
u32 eyebrow_x{};
u32 eyebrow_y{};
u32 nose_type{};
u32 nose_scale{};
u32 nose_y{};
u32 mouth_type{};
u32 mouth_color{};
u32 mouth_scale{};
u32 mouth_aspect{};
u32 mouth_y{};
u32 mustache_type{};
u32 beard_type{};
u32 beard_color{};
u32 mustache_scale{};
u32 mustache_y{};
u32 glasses_type{};
u32 glasses_color{};
u32 glasses_scale{};
u32 glasses_y{};
u32 mole_type{};
u32 mole_scale{};
u32 mole_x{};
u32 mole_y{};
u32 height{};
u32 weight{};
Gender gender{};
u32 favorite_color{};
u32 region{};
FontRegion font_region{};
u32 type{};
INSERT_PADDING_WORDS(5);
};
static_assert(sizeof(DefaultMii) == 0xd8, "DefaultMii has incorrect size.");
#pragma pack(pop)
// The Mii manager is responsible for loading and storing the Miis to the database in NAND along
// with providing an easy interface for HLE emulation of the mii service.
class MiiManager {

Some files were not shown because too many files have changed in this diff Show More