Compare commits

...

40 Commits

Author SHA1 Message Date
Liam
50a59487eb qt: implement RequestExit for applets 2023-03-25 14:49:43 -04:00
Liam
950db851ea applets: implement RequestExit 2023-03-25 12:44:26 -04:00
bunnei
09da9da6fb Merge pull request #9985 from liamwhite/funny-meme
vulkan: fix scheduler chunk reserve
2023-03-24 23:40:17 -07:00
Morph
6892a0942f Merge pull request #9988 from rschlaikjer/rs-gpu-page-table-copy-elision
Pass GPU page table by reference inside TextureCache::ForEachImageInRegionGPU
2023-03-25 01:59:08 -04:00
Ross Schlaikjer
f38ae8e953 Pass GPU page table by reference 2023-03-25 00:25:02 -04:00
liamwhite
cfb9672093 Merge pull request #9983 from Morph1984/boost
CMakeLists: Update boost to 1.81.0
2023-03-24 10:53:30 -04:00
liamwhite
462c430c8b Merge pull request #9981 from german77/nfp_connect
nfc: Initialize device when controller is connected
2023-03-24 10:53:05 -04:00
Liam
5a2dff87bf vulkan: fix scheduler chunk reserve 2023-03-24 09:09:01 -04:00
Morph
7a8a7545f2 Merge pull request #9975 from liamwhite/more-waiting
vulkan: fix more excessive waiting in scheduler
2023-03-24 00:19:43 -04:00
Morph
abe2ad7aac zstd: Use ZSTD_getFrameContentSize instead of ZSTD_getDecompressedSize 2023-03-23 22:16:20 -04:00
Morph
877e8991c7 CMakeLists: Update boost to 1.81.0 2023-03-23 20:53:39 -04:00
Morph
032e5b983c vcpkg: Update vcpkg to 2023.02.24 2023-03-23 20:53:39 -04:00
liamwhite
ac3927074b Merge pull request #9971 from Morph1984/q
bounded_threadsafe_queue: Use simplified impl of bounded queue
2023-03-23 10:00:31 -04:00
liamwhite
c41a4baf06 Merge pull request #9964 from liamwhite/typed-address
kernel: use KTypedAddress for addresses
2023-03-23 10:00:19 -04:00
Morph
6adaa0d5e2 Merge pull request #9962 from Kelebek1/disable_srgb
[video_core] Disable SRGB border color conversion in samplers
2023-03-23 03:07:00 -04:00
Liam
fb49ec19c1 kernel: use KTypedAddress for addresses 2023-03-22 09:35:16 -04:00
Morph
197d756560 bounded_threadsafe_queue: Refactor Pop
Introduces PopModes to bring waiting logic into Pop, similar to Push.
2023-03-21 22:33:58 -04:00
Morph
8c56481249 bounded_threadsafe_queue: Add producer cv to avoid busy waiting 2023-03-21 22:33:57 -04:00
Narr the Reg
6ff4bf9b1c nfc: Initialize device when controller is connected 2023-03-21 20:09:36 -06:00
bunnei
dba86ee007 Merge pull request #9965 from german77/thankYouEpicBoy
config: Fix controller config from resetting
2023-03-21 17:52:25 -07:00
Morph
407dc917f1 bounded_threadsafe_queue: Deduplicate and add PushModes
Adds the PushModes Try and Wait to allow producers to specify how they want to push their data to the queue if the queue is full.
If the queue is full:
- Try fails and returns false; it only returns true when the push succeeds, so items may be dropped.
- Wait blocks until a slot becomes available, which can deadlock if no consumer is running (see the sketch below).
2023-03-21 19:20:21 -04:00
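For illustration, a minimal producer-side sketch of the two push modes described above, written against the MPSCQueue added later in this comparison (the element type, capacity, and function name are placeholders):

```cpp
#include "common/bounded_threadsafe_queue.h"

void ProducerExample(Common::MPSCQueue<int, 0x100>& queue) {
    // Try: returns false when the queue is full, so this item may be dropped.
    if (!queue.TryEmplace(42)) {
        // Handle the full queue here, e.g. drop the item or retry later.
    }

    // Wait: blocks until a slot frees up; this deadlocks if no consumer ever pops.
    queue.EmplaceWait(43);
}
```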
Morph
15d573194c bounded_threadsafe_queue: Add TryPush 2023-03-21 19:17:38 -04:00
Morph
f28ca5361f logging: Make use of bounded queue 2023-03-21 19:17:38 -04:00
Morph
306840a580 bounded_threadsafe_queue: Use simplified impl of bounded queue
Provides a simplified SPSC, MPSC, and MPMC bounded queue implementation using mutexes.
2023-03-21 19:17:32 -04:00
bunnei
3d4c113037 Merge pull request #9970 from bunnei/string-util-view
common: string_util: Use std::string_view for UTF16ToUTF8/UTF8ToUTF16W.
2023-03-19 11:10:16 -07:00
bunnei
230d118252 Merge pull request #9972 from liamwhite/ipc-trace
kernel: fix LOG_TRACE in ipc
2023-03-19 11:00:42 -07:00
Liam
b9b1318bea vulkan: fix more excessive waiting in scheduler 2023-03-19 13:40:33 -04:00
Liam
43d909949e kernel: fix LOG_TRACE in ipc 2023-03-19 10:02:20 -04:00
bunnei
00d401d639 common: string_util: Use std::string_view for UTF16ToUTF8/UTF8ToUTF16W. 2023-03-18 22:42:25 -07:00
liamwhite
0e7e98e24e Merge pull request #9966 from bunnei/bounded-polyfill
common: bounded_threadsafe_queue: Use polyfill_thread.
2023-03-18 12:39:52 -04:00
bunnei
0eb3fa05e5 common: bounded_threadsafe_queue: Use polyfill_thread. 2023-03-17 23:42:17 -07:00
bunnei
889454f9bf Merge pull request #9778 from behunin/my-box-chevy
gpu_thread: Use bounded queue
2023-03-17 22:14:29 -07:00
bunnei
8bcaa8c2e4 Merge pull request #9953 from german77/amiibo_crc
service: nfp: Actually write correct crc
2023-03-17 22:13:57 -07:00
Narr the Reg
c95baf92ce config: Fix controller config from resetting 2023-03-17 22:08:16 -06:00
Kelebek1
a7651168dd Disable SRGB border color conversion for now, to fix shadows in Xenoblade. 2023-03-17 04:46:38 +00:00
liamwhite
6d76a54d37 Merge pull request #9955 from liamwhite/color-blend-equation
vulkan: disable extendedDynamicState3ColorBlendEquation on radv
2023-03-15 20:19:45 -04:00
liamwhite
a04061e6ae Merge pull request #9931 from liamwhite/sched
vk_scheduler: split work queue waits and execution waits
2023-03-15 20:19:35 -04:00
Liam
da83afdeaf vulkan: disable extendedDynamicState3ColorBlendEquation on radv 2023-03-15 15:55:07 -04:00
Liam
3f261f22c9 vk_scheduler: split work queue waits and execution waits 2023-03-12 17:19:44 -04:00
Behunin
44518b225c gpu_thread: Use bounded queue 2023-03-03 18:20:56 -07:00
170 changed files with 2273 additions and 1384 deletions

View File

@@ -210,7 +210,7 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin)
# =======================================================================
# Enforce the search mode of non-required packages for better and shorter failure messages
find_package(Boost 1.73.0 REQUIRED context)
find_package(Boost 1.81.0 REQUIRED context)
find_package(enet 1.3 MODULE)
find_package(fmt 9 REQUIRED)
find_package(inih 52 MODULE COMPONENTS INIReader)

View File

@@ -132,6 +132,7 @@ add_library(common STATIC
time_zone.h
tiny_mt.h
tree.h
typed_address.h
uint128.h
unique_function.h
uuid.cpp

View File

@@ -1,158 +1,249 @@
// SPDX-FileCopyrightText: Copyright (c) 2020 Erik Rigtorp <erik@rigtorp.se>
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <atomic>
#include <bit>
#include <condition_variable>
#include <memory>
#include <cstddef>
#include <mutex>
#include <new>
#include <stop_token>
#include <type_traits>
#include <utility>
#include "common/polyfill_thread.h"
namespace Common {
#if defined(__cpp_lib_hardware_interference_size)
constexpr size_t hardware_interference_size = std::hardware_destructive_interference_size;
#else
constexpr size_t hardware_interference_size = 64;
#endif
namespace detail {
constexpr size_t DefaultCapacity = 0x1000;
} // namespace detail
template <typename T, size_t Capacity = detail::DefaultCapacity>
class SPSCQueue {
static_assert((Capacity & (Capacity - 1)) == 0, "Capacity must be a power of two.");
template <typename T, size_t capacity = 0x400>
class MPSCQueue {
public:
explicit MPSCQueue() : allocator{std::allocator<Slot<T>>()} {
// Allocate one extra slot to prevent false sharing on the last slot
slots = allocator.allocate(capacity + 1);
// Allocators are not required to honor alignment for over-aligned types
// (see http://eel.is/c++draft/allocator.requirements#10) so we verify
// alignment here
if (reinterpret_cast<uintptr_t>(slots) % alignof(Slot<T>) != 0) {
allocator.deallocate(slots, capacity + 1);
throw std::bad_alloc();
}
for (size_t i = 0; i < capacity; ++i) {
std::construct_at(&slots[i]);
}
static_assert(std::has_single_bit(capacity), "capacity must be an integer power of 2");
static_assert(alignof(Slot<T>) == hardware_interference_size,
"Slot must be aligned to cache line boundary to prevent false sharing");
static_assert(sizeof(Slot<T>) % hardware_interference_size == 0,
"Slot size must be a multiple of cache line size to prevent "
"false sharing between adjacent slots");
static_assert(sizeof(MPSCQueue) % hardware_interference_size == 0,
"Queue size must be a multiple of cache line size to "
"prevent false sharing between adjacent queues");
template <typename... Args>
bool TryEmplace(Args&&... args) {
return Emplace<PushMode::Try>(std::forward<Args>(args)...);
}
~MPSCQueue() noexcept {
for (size_t i = 0; i < capacity; ++i) {
std::destroy_at(&slots[i]);
}
allocator.deallocate(slots, capacity + 1);
template <typename... Args>
void EmplaceWait(Args&&... args) {
Emplace<PushMode::Wait>(std::forward<Args>(args)...);
}
// The queue must be both non-copyable and non-movable
MPSCQueue(const MPSCQueue&) = delete;
MPSCQueue& operator=(const MPSCQueue&) = delete;
MPSCQueue(MPSCQueue&&) = delete;
MPSCQueue& operator=(MPSCQueue&&) = delete;
void Push(const T& v) noexcept {
static_assert(std::is_nothrow_copy_constructible_v<T>,
"T must be nothrow copy constructible");
emplace(v);
bool TryPop(T& t) {
return Pop<PopMode::Try>(t);
}
template <typename P, typename = std::enable_if_t<std::is_nothrow_constructible_v<T, P&&>>>
void Push(P&& v) noexcept {
emplace(std::forward<P>(v));
void PopWait(T& t) {
Pop<PopMode::Wait>(t);
}
void Pop(T& v, std::stop_token stop) noexcept {
auto const tail = tail_.fetch_add(1);
auto& slot = slots[idx(tail)];
if (!slot.turn.test()) {
std::unique_lock lock{cv_mutex};
cv.wait(lock, stop, [&slot] { return slot.turn.test(); });
}
v = slot.move();
slot.destroy();
slot.turn.clear();
slot.turn.notify_one();
void PopWait(T& t, std::stop_token stop_token) {
Pop<PopMode::WaitWithStopToken>(t, stop_token);
}
T PopWait() {
T t;
Pop<PopMode::Wait>(t);
return t;
}
T PopWait(std::stop_token stop_token) {
T t;
Pop<PopMode::WaitWithStopToken>(t, stop_token);
return t;
}
private:
template <typename U = T>
struct Slot {
~Slot() noexcept {
if (turn.test()) {
destroy();
}
}
template <typename... Args>
void construct(Args&&... args) noexcept {
static_assert(std::is_nothrow_constructible_v<U, Args&&...>,
"T must be nothrow constructible with Args&&...");
std::construct_at(reinterpret_cast<U*>(&storage), std::forward<Args>(args)...);
}
void destroy() noexcept {
static_assert(std::is_nothrow_destructible_v<U>, "T must be nothrow destructible");
std::destroy_at(reinterpret_cast<U*>(&storage));
}
U&& move() noexcept {
return reinterpret_cast<U&&>(storage);
}
// Align to avoid false sharing between adjacent slots
alignas(hardware_interference_size) std::atomic_flag turn{};
struct aligned_store {
struct type {
alignas(U) unsigned char data[sizeof(U)];
};
};
typename aligned_store::type storage;
enum class PushMode {
Try,
Wait,
Count,
};
enum class PopMode {
Try,
Wait,
WaitWithStopToken,
Count,
};
template <PushMode Mode, typename... Args>
bool Emplace(Args&&... args) {
const size_t write_index = m_write_index.load(std::memory_order::relaxed);
if constexpr (Mode == PushMode::Try) {
// Check if we have free slots to write to.
if ((write_index - m_read_index.load(std::memory_order::acquire)) == Capacity) {
return false;
}
} else if constexpr (Mode == PushMode::Wait) {
// Wait until we have free slots to write to.
std::unique_lock lock{producer_cv_mutex};
producer_cv.wait(lock, [this, write_index] {
return (write_index - m_read_index.load(std::memory_order::acquire)) < Capacity;
});
} else {
static_assert(Mode < PushMode::Count, "Invalid PushMode.");
}
// Determine the position to write to.
const size_t pos = write_index % Capacity;
// Emplace into the queue.
std::construct_at(std::addressof(m_data[pos]), std::forward<Args>(args)...);
// Increment the write index.
++m_write_index;
// Notify the consumer that we have pushed into the queue.
std::scoped_lock lock{consumer_cv_mutex};
consumer_cv.notify_one();
return true;
}
template <PopMode Mode>
bool Pop(T& t, [[maybe_unused]] std::stop_token stop_token = {}) {
const size_t read_index = m_read_index.load(std::memory_order::relaxed);
if constexpr (Mode == PopMode::Try) {
// Check if the queue is empty.
if (read_index == m_write_index.load(std::memory_order::acquire)) {
return false;
}
} else if constexpr (Mode == PopMode::Wait) {
// Wait until the queue is not empty.
std::unique_lock lock{consumer_cv_mutex};
consumer_cv.wait(lock, [this, read_index] {
return read_index != m_write_index.load(std::memory_order::acquire);
});
} else if constexpr (Mode == PopMode::WaitWithStopToken) {
// Wait until the queue is not empty.
std::unique_lock lock{consumer_cv_mutex};
Common::CondvarWait(consumer_cv, lock, stop_token, [this, read_index] {
return read_index != m_write_index.load(std::memory_order::acquire);
});
if (stop_token.stop_requested()) {
return false;
}
} else {
static_assert(Mode < PopMode::Count, "Invalid PopMode.");
}
// Determine the position to read from.
const size_t pos = read_index % Capacity;
// Pop the data off the queue, moving it.
t = std::move(m_data[pos]);
// Increment the read index.
++m_read_index;
// Notify the producer that we have popped off the queue.
std::scoped_lock lock{producer_cv_mutex};
producer_cv.notify_one();
return true;
}
alignas(128) std::atomic_size_t m_read_index{0};
alignas(128) std::atomic_size_t m_write_index{0};
std::array<T, Capacity> m_data;
std::condition_variable_any producer_cv;
std::mutex producer_cv_mutex;
std::condition_variable_any consumer_cv;
std::mutex consumer_cv_mutex;
};
template <typename T, size_t Capacity = detail::DefaultCapacity>
class MPSCQueue {
public:
template <typename... Args>
void emplace(Args&&... args) noexcept {
static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
"T must be nothrow constructible with Args&&...");
auto const head = head_.fetch_add(1);
auto& slot = slots[idx(head)];
slot.turn.wait(true);
slot.construct(std::forward<Args>(args)...);
slot.turn.test_and_set();
cv.notify_one();
bool TryEmplace(Args&&... args) {
std::scoped_lock lock{write_mutex};
return spsc_queue.TryEmplace(std::forward<Args>(args)...);
}
constexpr size_t idx(size_t i) const noexcept {
return i & mask;
template <typename... Args>
void EmplaceWait(Args&&... args) {
std::scoped_lock lock{write_mutex};
spsc_queue.EmplaceWait(std::forward<Args>(args)...);
}
static constexpr size_t mask = capacity - 1;
bool TryPop(T& t) {
return spsc_queue.TryPop(t);
}
// Align to avoid false sharing between head_ and tail_
alignas(hardware_interference_size) std::atomic<size_t> head_{0};
alignas(hardware_interference_size) std::atomic<size_t> tail_{0};
void PopWait(T& t) {
spsc_queue.PopWait(t);
}
std::mutex cv_mutex;
std::condition_variable_any cv;
void PopWait(T& t, std::stop_token stop_token) {
spsc_queue.PopWait(t, stop_token);
}
Slot<T>* slots;
[[no_unique_address]] std::allocator<Slot<T>> allocator;
T PopWait() {
return spsc_queue.PopWait();
}
static_assert(std::is_nothrow_copy_assignable_v<T> || std::is_nothrow_move_assignable_v<T>,
"T must be nothrow copy or move assignable");
T PopWait(std::stop_token stop_token) {
return spsc_queue.PopWait(stop_token);
}
static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
private:
SPSCQueue<T, Capacity> spsc_queue;
std::mutex write_mutex;
};
template <typename T, size_t Capacity = detail::DefaultCapacity>
class MPMCQueue {
public:
template <typename... Args>
bool TryEmplace(Args&&... args) {
std::scoped_lock lock{write_mutex};
return spsc_queue.TryEmplace(std::forward<Args>(args)...);
}
template <typename... Args>
void EmplaceWait(Args&&... args) {
std::scoped_lock lock{write_mutex};
spsc_queue.EmplaceWait(std::forward<Args>(args)...);
}
bool TryPop(T& t) {
std::scoped_lock lock{read_mutex};
return spsc_queue.TryPop(t);
}
void PopWait(T& t) {
std::scoped_lock lock{read_mutex};
spsc_queue.PopWait(t);
}
void PopWait(T& t, std::stop_token stop_token) {
std::scoped_lock lock{read_mutex};
spsc_queue.PopWait(t, stop_token);
}
T PopWait() {
std::scoped_lock lock{read_mutex};
return spsc_queue.PopWait();
}
T PopWait(std::stop_token stop_token) {
std::scoped_lock lock{read_mutex};
return spsc_queue.PopWait(stop_token);
}
private:
SPSCQueue<T, Capacity> spsc_queue;
std::mutex write_mutex;
std::mutex read_mutex;
};
} // namespace Common
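A minimal consumer-side sketch against this header, mirroring how the logging backend below drains the queue with a stop token (the element type and thread setup are illustrative, not part of this diff):

```cpp
#include <thread>

#include "common/bounded_threadsafe_queue.h"

void QueueExample() {
    Common::MPSCQueue<int> queue;

    std::jthread consumer([&queue](std::stop_token stop) {
        int value{};
        while (!stop.stop_requested()) {
            // Blocks until an item arrives or stop is requested.
            queue.PopWait(value, stop);
        }
        // Drain whatever is left without blocking.
        while (queue.TryPop(value)) {
        }
    });

    queue.EmplaceWait(1);
} // jthread requests stop and joins on destruction
```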

View File

@@ -28,7 +28,7 @@
#ifdef _WIN32
#include "common/string_util.h"
#endif
#include "common/threadsafe_queue.h"
#include "common/bounded_threadsafe_queue.h"
namespace Common::Log {
@@ -204,11 +204,11 @@ public:
void PushEntry(Class log_class, Level log_level, const char* filename, unsigned int line_num,
const char* function, std::string&& message) {
if (!filter.CheckMessage(log_class, log_level))
if (!filter.CheckMessage(log_class, log_level)) {
return;
const Entry& entry =
CreateEntry(log_class, log_level, filename, line_num, function, std::move(message));
message_queue.Push(entry);
}
message_queue.EmplaceWait(
CreateEntry(log_class, log_level, filename, line_num, function, std::move(message)));
}
private:
@@ -225,7 +225,7 @@ private:
ForEachBackend([&entry](Backend& backend) { backend.Write(entry); });
};
while (!stop_token.stop_requested()) {
entry = message_queue.PopWait(stop_token);
message_queue.PopWait(entry, stop_token);
if (entry.filename != nullptr) {
write_logs();
}
@@ -233,7 +233,7 @@ private:
// Drain the logging queue. Only writes out up to MAX_LOGS_TO_WRITE to prevent a
// case where a system is repeatedly spamming logs even on close.
int max_logs_to_write = filter.IsDebug() ? INT_MAX : 100;
while (max_logs_to_write-- && message_queue.Pop(entry)) {
while (max_logs_to_write-- && message_queue.TryPop(entry)) {
write_logs();
}
});
@@ -273,7 +273,7 @@ private:
ColorConsoleBackend color_console_backend{};
FileBackend file_backend;
MPSCQueue<Entry, true> message_queue{};
MPSCQueue<Entry> message_queue{};
std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()};
std::jthread backend_thread;
};

View File

@@ -125,18 +125,18 @@ std::string ReplaceAll(std::string result, const std::string& src, const std::st
return result;
}
std::string UTF16ToUTF8(const std::u16string& input) {
std::string UTF16ToUTF8(std::u16string_view input) {
std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> convert;
return convert.to_bytes(input);
return convert.to_bytes(input.data(), input.data() + input.size());
}
std::u16string UTF8ToUTF16(const std::string& input) {
std::u16string UTF8ToUTF16(std::string_view input) {
std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> convert;
return convert.from_bytes(input);
return convert.from_bytes(input.data(), input.data() + input.size());
}
#ifdef _WIN32
static std::wstring CPToUTF16(u32 code_page, const std::string& input) {
static std::wstring CPToUTF16(u32 code_page, std::string_view input) {
const auto size =
MultiByteToWideChar(code_page, 0, input.data(), static_cast<int>(input.size()), nullptr, 0);
@@ -154,7 +154,7 @@ static std::wstring CPToUTF16(u32 code_page, const std::string& input) {
return output;
}
std::string UTF16ToUTF8(const std::wstring& input) {
std::string UTF16ToUTF8(std::wstring_view input) {
const auto size = WideCharToMultiByte(CP_UTF8, 0, input.data(), static_cast<int>(input.size()),
nullptr, 0, nullptr, nullptr);
if (size == 0) {
@@ -172,7 +172,7 @@ std::string UTF16ToUTF8(const std::wstring& input) {
return output;
}
std::wstring UTF8ToUTF16W(const std::string& input) {
std::wstring UTF8ToUTF16W(std::string_view input) {
return CPToUTF16(CP_UTF8, input);
}

View File

@@ -36,12 +36,12 @@ bool SplitPath(const std::string& full_path, std::string* _pPath, std::string* _
[[nodiscard]] std::string ReplaceAll(std::string result, const std::string& src,
const std::string& dest);
[[nodiscard]] std::string UTF16ToUTF8(const std::u16string& input);
[[nodiscard]] std::u16string UTF8ToUTF16(const std::string& input);
[[nodiscard]] std::string UTF16ToUTF8(std::u16string_view input);
[[nodiscard]] std::u16string UTF8ToUTF16(std::string_view input);
#ifdef _WIN32
[[nodiscard]] std::string UTF16ToUTF8(const std::wstring& input);
[[nodiscard]] std::wstring UTF8ToUTF16W(const std::string& str);
[[nodiscard]] std::string UTF16ToUTF8(std::wstring_view input);
[[nodiscard]] std::wstring UTF8ToUTF16W(std::string_view str);
#endif
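A small usage sketch of the new string_view overloads (the function and its arguments are illustrative); substrings and literals now convert without first materializing an owning string:

```cpp
#include <string>
#include <string_view>

#include "common/string_util.h"

std::string RoundTripExample(std::string_view path) {
    // substr() on a string_view is itself a view, so no temporary
    // std::string is built just to call the converter.
    const std::u16string as_utf16 = Common::UTF8ToUTF16(path.substr(0, 8));
    return Common::UTF16ToUTF8(as_utf16);
}
```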

src/common/typed_address.h (new file, 320 lines)
View File

@@ -0,0 +1,320 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <compare>
#include <type_traits>
#include <fmt/format.h>
#include "common/common_types.h"
namespace Common {
template <bool Virtual, typename T>
class TypedAddress {
public:
// Constructors.
constexpr inline TypedAddress() : m_address(0) {}
constexpr inline TypedAddress(uint64_t a) : m_address(a) {}
template <typename U>
constexpr inline explicit TypedAddress(const U* ptr)
: m_address(reinterpret_cast<uint64_t>(ptr)) {}
// Copy constructor.
constexpr inline TypedAddress(const TypedAddress& rhs) = default;
// Assignment operator.
constexpr inline TypedAddress& operator=(const TypedAddress& rhs) = default;
// Arithmetic operators.
template <typename I>
constexpr inline TypedAddress operator+(I rhs) const {
static_assert(std::is_integral_v<I>);
return m_address + rhs;
}
constexpr inline TypedAddress operator+(TypedAddress rhs) const {
return m_address + rhs.m_address;
}
constexpr inline TypedAddress operator++() {
return ++m_address;
}
constexpr inline TypedAddress operator++(int) {
return m_address++;
}
template <typename I>
constexpr inline TypedAddress operator-(I rhs) const {
static_assert(std::is_integral_v<I>);
return m_address - rhs;
}
constexpr inline ptrdiff_t operator-(TypedAddress rhs) const {
return m_address - rhs.m_address;
}
constexpr inline TypedAddress operator--() {
return --m_address;
}
constexpr inline TypedAddress operator--(int) {
return m_address--;
}
template <typename I>
constexpr inline TypedAddress operator+=(I rhs) {
static_assert(std::is_integral_v<I>);
m_address += rhs;
return *this;
}
template <typename I>
constexpr inline TypedAddress operator-=(I rhs) {
static_assert(std::is_integral_v<I>);
m_address -= rhs;
return *this;
}
// Logical operators.
constexpr inline uint64_t operator&(uint64_t mask) const {
return m_address & mask;
}
constexpr inline uint64_t operator|(uint64_t mask) const {
return m_address | mask;
}
template <typename I>
constexpr inline TypedAddress operator|=(I rhs) {
static_assert(std::is_integral_v<I>);
m_address |= rhs;
return *this;
}
constexpr inline uint64_t operator<<(int shift) const {
return m_address << shift;
}
constexpr inline uint64_t operator>>(int shift) const {
return m_address >> shift;
}
template <typename U>
constexpr inline size_t operator/(U size) const {
return m_address / size;
}
constexpr explicit operator bool() const {
return m_address != 0;
}
// constexpr inline uint64_t operator%(U align) const { return m_address % align; }
// Comparison operators.
constexpr bool operator==(const TypedAddress&) const = default;
constexpr bool operator!=(const TypedAddress&) const = default;
constexpr auto operator<=>(const TypedAddress&) const = default;
// For convenience, also define comparison operators versus uint64_t.
constexpr inline bool operator==(uint64_t rhs) const {
return m_address == rhs;
}
constexpr inline bool operator!=(uint64_t rhs) const {
return m_address != rhs;
}
// Allow getting the address explicitly, for use in accessors.
constexpr inline uint64_t GetValue() const {
return m_address;
}
private:
uint64_t m_address{};
};
struct PhysicalAddressTag {};
struct VirtualAddressTag {};
struct ProcessAddressTag {};
using PhysicalAddress = TypedAddress<false, PhysicalAddressTag>;
using VirtualAddress = TypedAddress<true, VirtualAddressTag>;
using ProcessAddress = TypedAddress<true, ProcessAddressTag>;
// Define accessors.
template <typename T>
concept IsTypedAddress = std::same_as<T, PhysicalAddress> || std::same_as<T, VirtualAddress> ||
std::same_as<T, ProcessAddress>;
template <typename T>
constexpr inline T Null = [] {
if constexpr (std::is_same<T, uint64_t>::value) {
return 0;
} else {
static_assert(std::is_same<T, PhysicalAddress>::value ||
std::is_same<T, VirtualAddress>::value ||
std::is_same<T, ProcessAddress>::value);
return T(0);
}
}();
// Basic type validations.
static_assert(sizeof(PhysicalAddress) == sizeof(uint64_t));
static_assert(sizeof(VirtualAddress) == sizeof(uint64_t));
static_assert(sizeof(ProcessAddress) == sizeof(uint64_t));
static_assert(std::is_trivially_copyable_v<PhysicalAddress>);
static_assert(std::is_trivially_copyable_v<VirtualAddress>);
static_assert(std::is_trivially_copyable_v<ProcessAddress>);
static_assert(std::is_trivially_copy_constructible_v<PhysicalAddress>);
static_assert(std::is_trivially_copy_constructible_v<VirtualAddress>);
static_assert(std::is_trivially_copy_constructible_v<ProcessAddress>);
static_assert(std::is_trivially_move_constructible_v<PhysicalAddress>);
static_assert(std::is_trivially_move_constructible_v<VirtualAddress>);
static_assert(std::is_trivially_move_constructible_v<ProcessAddress>);
static_assert(std::is_trivially_copy_assignable_v<PhysicalAddress>);
static_assert(std::is_trivially_copy_assignable_v<VirtualAddress>);
static_assert(std::is_trivially_copy_assignable_v<ProcessAddress>);
static_assert(std::is_trivially_move_assignable_v<PhysicalAddress>);
static_assert(std::is_trivially_move_assignable_v<VirtualAddress>);
static_assert(std::is_trivially_move_assignable_v<ProcessAddress>);
static_assert(std::is_trivially_destructible_v<PhysicalAddress>);
static_assert(std::is_trivially_destructible_v<VirtualAddress>);
static_assert(std::is_trivially_destructible_v<ProcessAddress>);
static_assert(Null<uint64_t> == 0);
static_assert(Null<PhysicalAddress> == Null<uint64_t>);
static_assert(Null<VirtualAddress> == Null<uint64_t>);
static_assert(Null<ProcessAddress> == Null<uint64_t>);
// Constructor/assignment validations.
static_assert([] {
const PhysicalAddress a(5);
PhysicalAddress b(a);
return b;
}() == PhysicalAddress(5));
static_assert([] {
const PhysicalAddress a(5);
PhysicalAddress b(10);
b = a;
return b;
}() == PhysicalAddress(5));
// Arithmetic validations.
static_assert(PhysicalAddress(10) + 5 == PhysicalAddress(15));
static_assert(PhysicalAddress(10) - 5 == PhysicalAddress(5));
static_assert([] {
PhysicalAddress v(10);
v += 5;
return v;
}() == PhysicalAddress(15));
static_assert([] {
PhysicalAddress v(10);
v -= 5;
return v;
}() == PhysicalAddress(5));
static_assert(PhysicalAddress(10)++ == PhysicalAddress(10));
static_assert(++PhysicalAddress(10) == PhysicalAddress(11));
static_assert(PhysicalAddress(10)-- == PhysicalAddress(10));
static_assert(--PhysicalAddress(10) == PhysicalAddress(9));
// Logical validations.
static_assert((PhysicalAddress(0b11111111) >> 1) == 0b01111111);
static_assert((PhysicalAddress(0b10101010) >> 1) == 0b01010101);
static_assert((PhysicalAddress(0b11111111) << 1) == 0b111111110);
static_assert((PhysicalAddress(0b01010101) << 1) == 0b10101010);
static_assert((PhysicalAddress(0b11111111) & 0b01010101) == 0b01010101);
static_assert((PhysicalAddress(0b11111111) & 0b10101010) == 0b10101010);
static_assert((PhysicalAddress(0b01010101) & 0b10101010) == 0b00000000);
static_assert((PhysicalAddress(0b00000000) | 0b01010101) == 0b01010101);
static_assert((PhysicalAddress(0b11111111) | 0b01010101) == 0b11111111);
static_assert((PhysicalAddress(0b10101010) | 0b01010101) == 0b11111111);
// Comparisons.
static_assert(PhysicalAddress(0) == PhysicalAddress(0));
static_assert(PhysicalAddress(0) != PhysicalAddress(1));
static_assert(PhysicalAddress(0) < PhysicalAddress(1));
static_assert(PhysicalAddress(0) <= PhysicalAddress(1));
static_assert(PhysicalAddress(1) > PhysicalAddress(0));
static_assert(PhysicalAddress(1) >= PhysicalAddress(0));
static_assert(!(PhysicalAddress(0) == PhysicalAddress(1)));
static_assert(!(PhysicalAddress(0) != PhysicalAddress(0)));
static_assert(!(PhysicalAddress(1) < PhysicalAddress(0)));
static_assert(!(PhysicalAddress(1) <= PhysicalAddress(0)));
static_assert(!(PhysicalAddress(0) > PhysicalAddress(1)));
static_assert(!(PhysicalAddress(0) >= PhysicalAddress(1)));
} // namespace Common
template <bool Virtual, typename T>
constexpr inline uint64_t GetInteger(Common::TypedAddress<Virtual, T> address) {
return address.GetValue();
}
template <>
struct fmt::formatter<Common::PhysicalAddress> {
constexpr auto parse(fmt::format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const Common::PhysicalAddress& addr, FormatContext& ctx) {
return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
}
};
template <>
struct fmt::formatter<Common::ProcessAddress> {
constexpr auto parse(fmt::format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const Common::ProcessAddress& addr, FormatContext& ctx) {
return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
}
};
template <>
struct fmt::formatter<Common::VirtualAddress> {
constexpr auto parse(fmt::format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const Common::VirtualAddress& addr, FormatContext& ctx) {
return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
}
};
namespace std {
template <>
struct hash<Common::PhysicalAddress> {
size_t operator()(const Common::PhysicalAddress& k) const noexcept {
return k.GetValue();
}
};
template <>
struct hash<Common::ProcessAddress> {
size_t operator()(const Common::ProcessAddress& k) const noexcept {
return k.GetValue();
}
};
template <>
struct hash<Common::VirtualAddress> {
size_t operator()(const Common::VirtualAddress& k) const noexcept {
return k.GetValue();
}
};
} // namespace std
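As a hedged usage sketch of the new header (the values and the function name are illustrative): typed addresses keep the different address spaces apart at compile time, and callers drop back to a raw u64 via GetInteger() when a plain integer is needed:

```cpp
#include <cstddef>
#include <cstdint>

#include <fmt/format.h>

#include "common/typed_address.h"

void TypedAddressExample() {
    // Distinct address spaces get distinct types, so mixing them by accident
    // fails to compile instead of silently reinterpreting an address.
    const Common::ProcessAddress code_start{0x8000000};
    const Common::ProcessAddress code_end = code_start + 0x1000;

    // Subtracting two addresses yields a plain ptrdiff_t.
    const ptrdiff_t region_size = code_end - code_start;

    // GetInteger() (or GetValue()) recovers the raw u64 for APIs that
    // still take untyped addresses.
    const uint64_t raw = GetInteger(code_end);

    // The fmt::formatter specializations above make these printable directly.
    fmt::print("code ends at {} (raw {:#x}, size {:#x})\n", code_end, raw, region_size);
}
```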

View File

@@ -33,7 +33,7 @@ std::vector<u8> CompressDataZSTDDefault(const u8* source, std::size_t source_siz
std::vector<u8> DecompressDataZSTD(std::span<const u8> compressed) {
const std::size_t decompressed_size =
ZSTD_getDecompressedSize(compressed.data(), compressed.size());
ZSTD_getFrameContentSize(compressed.data(), compressed.size());
std::vector<u8> decompressed(decompressed_size);
const std::size_t uncompressed_result_size = ZSTD_decompress(
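For reference, ZSTD_getFrameContentSize can also report ZSTD_CONTENTSIZE_UNKNOWN or ZSTD_CONTENTSIZE_ERROR, which callers generally need to handle before sizing a buffer; a standalone sketch of the checked pattern (not the yuzu implementation):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

#include <zstd.h>

// Returns an empty vector if the frame is malformed or its size is unknown.
std::vector<uint8_t> DecompressZstd(const std::vector<uint8_t>& compressed) {
    const unsigned long long content_size =
        ZSTD_getFrameContentSize(compressed.data(), compressed.size());
    // Unlike the older ZSTD_getDecompressedSize, the frame variant distinguishes
    // "unknown" from "error"; neither is usable as a buffer size.
    if (content_size == ZSTD_CONTENTSIZE_UNKNOWN || content_size == ZSTD_CONTENTSIZE_ERROR) {
        return {};
    }

    std::vector<uint8_t> decompressed(content_size);
    const size_t result = ZSTD_decompress(decompressed.data(), decompressed.size(),
                                          compressed.data(), compressed.size());
    if (ZSTD_isError(result)) {
        return {};
    }
    decompressed.resize(result);
    return decompressed;
}
```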

View File

@@ -278,6 +278,7 @@ add_library(core STATIC
hle/kernel/k_trace.h
hle/kernel/k_transfer_memory.cpp
hle/kernel/k_transfer_memory.h
hle/kernel/k_typed_address.h
hle/kernel/k_worker_task.h
hle/kernel/k_worker_task_manager.cpp
hle/kernel/k_worker_task_manager.h

View File

@@ -168,21 +168,21 @@ void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) {
}
const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint(
VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const {
u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const {
if (!watchpoints) {
return nullptr;
}
const VAddr start_address{addr};
const VAddr end_address{addr + size};
const u64 start_address{addr};
const u64 end_address{addr + size};
for (size_t i = 0; i < Core::Hardware::NUM_WATCHPOINTS; i++) {
const auto& watch{(*watchpoints)[i]};
if (end_address <= watch.start_address) {
if (end_address <= GetInteger(watch.start_address)) {
continue;
}
if (start_address >= watch.end_address) {
if (start_address >= GetInteger(watch.end_address)) {
continue;
}
if ((access_type & watch.type) == Kernel::DebugWatchpointType::None) {

View File

@@ -78,7 +78,7 @@ public:
* @param addr Start address of the cache range to clear
* @param size Size of the cache range to clear, starting at addr
*/
virtual void InvalidateCacheRange(VAddr addr, std::size_t size) = 0;
virtual void InvalidateCacheRange(u64 addr, std::size_t size) = 0;
/**
* Notifies CPU emulation that the current page table has changed.
@@ -149,9 +149,9 @@ public:
*/
virtual void SetPSTATE(u32 pstate) = 0;
virtual VAddr GetTlsAddress() const = 0;
virtual u64 GetTlsAddress() const = 0;
virtual void SetTlsAddress(VAddr address) = 0;
virtual void SetTlsAddress(u64 address) = 0;
/**
* Gets the value within the TPIDR_EL0 (read/write software thread ID) register.
@@ -214,7 +214,7 @@ protected:
static void SymbolicateBacktrace(Core::System& system, std::vector<BacktraceEntry>& out);
const Kernel::DebugWatchpoint* MatchingWatchpoint(
VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const;
u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const;
virtual Dynarmic::HaltReason RunJit() = 0;
virtual Dynarmic::HaltReason StepJit() = 0;

View File

@@ -155,7 +155,7 @@ public:
return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
}
bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
if (!check_memory_access) {
return true;
}
@@ -397,7 +397,7 @@ u64 ARM_Dynarmic_32::GetTlsAddress() const {
return cp15->uro;
}
void ARM_Dynarmic_32::SetTlsAddress(VAddr address) {
void ARM_Dynarmic_32::SetTlsAddress(u64 address) {
cp15->uro = static_cast<u32>(address);
}
@@ -439,7 +439,7 @@ void ARM_Dynarmic_32::ClearInstructionCache() {
jit.load()->ClearCache();
}
void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
void ARM_Dynarmic_32::InvalidateCacheRange(u64 addr, std::size_t size) {
jit.load()->InvalidateCacheRange(static_cast<u32>(addr), size);
}

View File

@@ -41,8 +41,8 @@ public:
void SetVectorReg(int index, u128 value) override;
u32 GetPSTATE() const override;
void SetPSTATE(u32 pstate) override;
VAddr GetTlsAddress() const override;
void SetTlsAddress(VAddr address) override;
u64 GetTlsAddress() const override;
void SetTlsAddress(u64 address) override;
void SetTPIDR_EL0(u64 value) override;
u64 GetTPIDR_EL0() const override;
@@ -60,7 +60,7 @@ public:
void ClearExclusiveState() override;
void ClearInstructionCache() override;
void InvalidateCacheRange(VAddr addr, std::size_t size) override;
void InvalidateCacheRange(u64 addr, std::size_t size) override;
void PageTableChanged(Common::PageTable& new_page_table,
std::size_t new_address_space_size_in_bits) override;

View File

@@ -117,7 +117,7 @@ public:
}
void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op,
VAddr value) override {
u64 value) override {
switch (op) {
case Dynarmic::A64::InstructionCacheOperation::InvalidateByVAToPoU: {
static constexpr u64 ICACHE_LINE_SIZE = 64;
@@ -199,7 +199,7 @@ public:
return parent.system.CoreTiming().GetClockTicks();
}
bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
if (!check_memory_access) {
return true;
}
@@ -452,7 +452,7 @@ u64 ARM_Dynarmic_64::GetTlsAddress() const {
return cb->tpidrro_el0;
}
void ARM_Dynarmic_64::SetTlsAddress(VAddr address) {
void ARM_Dynarmic_64::SetTlsAddress(u64 address) {
cb->tpidrro_el0 = address;
}
@@ -500,7 +500,7 @@ void ARM_Dynarmic_64::ClearInstructionCache() {
jit.load()->ClearCache();
}
void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
void ARM_Dynarmic_64::InvalidateCacheRange(u64 addr, std::size_t size) {
jit.load()->InvalidateCacheRange(addr, size);
}

View File

@@ -38,8 +38,8 @@ public:
void SetVectorReg(int index, u128 value) override;
u32 GetPSTATE() const override;
void SetPSTATE(u32 pstate) override;
VAddr GetTlsAddress() const override;
void SetTlsAddress(VAddr address) override;
u64 GetTlsAddress() const override;
void SetTlsAddress(u64 address) override;
void SetTPIDR_EL0(u64 value) override;
u64 GetTPIDR_EL0() const override;
@@ -53,7 +53,7 @@ public:
void ClearExclusiveState() override;
void ClearInstructionCache() override;
void InvalidateCacheRange(VAddr addr, std::size_t size) override;
void InvalidateCacheRange(u64 addr, std::size_t size) override;
void PageTableChanged(Common::PageTable& new_page_table,
std::size_t new_address_space_size_in_bits) override;

View File

@@ -564,7 +564,7 @@ void System::InvalidateCpuInstructionCaches() {
impl->kernel.InvalidateAllInstructionCaches();
}
void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
void System::InvalidateCpuInstructionCacheRange(u64 addr, std::size_t size) {
impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
}
@@ -794,7 +794,7 @@ FileSys::VirtualFilesystem System::GetFilesystem() const {
}
void System::RegisterCheatList(const std::vector<Memory::CheatEntry>& list,
const std::array<u8, 32>& build_id, VAddr main_region_begin,
const std::array<u8, 32>& build_id, u64 main_region_begin,
u64 main_region_size) {
impl->cheat_engine = std::make_unique<Memory::CheatEngine>(*this, list, build_id);
impl->cheat_engine->SetMainMemoryParameters(main_region_begin, main_region_size);

View File

@@ -172,7 +172,7 @@ public:
*/
void InvalidateCpuInstructionCaches();
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
void InvalidateCpuInstructionCacheRange(u64 addr, std::size_t size);
/// Shutdown the main emulated process.
void ShutdownMainProcess();
@@ -353,7 +353,7 @@ public:
[[nodiscard]] FileSys::VirtualFilesystem GetFilesystem() const;
void RegisterCheatList(const std::vector<Memory::CheatEntry>& list,
const std::array<u8, 0x20>& build_id, VAddr main_region_begin,
const std::array<u8, 0x20>& build_id, u64 main_region_begin,
u64 main_region_size);
void SetAppletFrontendSet(Service::AM::Applets::AppletFrontendSet&& set);

View File

@@ -118,14 +118,14 @@ void GDBStub::Watchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint&
switch (watch.type) {
case Kernel::DebugWatchpointType::Read:
SendReply(fmt::format("{}rwatch:{:x};", status, watch.start_address));
SendReply(fmt::format("{}rwatch:{:x};", status, GetInteger(watch.start_address)));
break;
case Kernel::DebugWatchpointType::Write:
SendReply(fmt::format("{}watch:{:x};", status, watch.start_address));
SendReply(fmt::format("{}watch:{:x};", status, GetInteger(watch.start_address)));
break;
case Kernel::DebugWatchpointType::ReadOrWrite:
default:
SendReply(fmt::format("{}awatch:{:x};", status, watch.start_address));
SendReply(fmt::format("{}awatch:{:x};", status, GetInteger(watch.start_address)));
break;
}
}
@@ -554,8 +554,9 @@ void GDBStub::HandleQuery(std::string_view command) {
if (main != modules.end()) {
SendReply(fmt::format("TextSeg={:x}", main->first));
} else {
SendReply(fmt::format("TextSeg={:x}",
system.ApplicationProcess()->PageTable().GetCodeRegionStart()));
SendReply(fmt::format(
"TextSeg={:x}",
GetInteger(system.ApplicationProcess()->PageTable().GetCodeRegionStart())));
}
} else if (command.starts_with("Xfer:libraries:read::")) {
Loader::AppLoader::Modules modules;
@@ -757,17 +758,20 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
reply = fmt::format("Process: {:#x} ({})\n"
"Program Id: {:#018x}\n",
process->GetProcessId(), process->GetName(), process->GetProgramId());
reply +=
fmt::format("Layout:\n"
" Alias: {:#012x} - {:#012x}\n"
" Heap: {:#012x} - {:#012x}\n"
" Aslr: {:#012x} - {:#012x}\n"
" Stack: {:#012x} - {:#012x}\n"
"Modules:\n",
page_table.GetAliasRegionStart(), page_table.GetAliasRegionEnd(),
page_table.GetHeapRegionStart(), page_table.GetHeapRegionEnd(),
page_table.GetAliasCodeRegionStart(), page_table.GetAliasCodeRegionEnd(),
page_table.GetStackRegionStart(), page_table.GetStackRegionEnd());
reply += fmt::format("Layout:\n"
" Alias: {:#012x} - {:#012x}\n"
" Heap: {:#012x} - {:#012x}\n"
" Aslr: {:#012x} - {:#012x}\n"
" Stack: {:#012x} - {:#012x}\n"
"Modules:\n",
GetInteger(page_table.GetAliasRegionStart()),
GetInteger(page_table.GetAliasRegionEnd()),
GetInteger(page_table.GetHeapRegionStart()),
GetInteger(page_table.GetHeapRegionEnd()),
GetInteger(page_table.GetAliasCodeRegionStart()),
GetInteger(page_table.GetAliasCodeRegionEnd()),
GetInteger(page_table.GetStackRegionStart()),
GetInteger(page_table.GetStackRegionEnd()));
for (const auto& [vaddr, name] : modules) {
reply += fmt::format(" {:#012x} - {:#012x} {}\n", vaddr,

View File

@@ -3,8 +3,8 @@
#pragma once
#include "common/common_types.h"
#include "common/host_memory.h"
#include "common/typed_address.h"
namespace Core {
@@ -25,20 +25,22 @@ public:
DeviceMemory(const DeviceMemory&) = delete;
template <typename T>
PAddr GetPhysicalAddr(const T* ptr) const {
Common::PhysicalAddress GetPhysicalAddr(const T* ptr) const {
return (reinterpret_cast<uintptr_t>(ptr) -
reinterpret_cast<uintptr_t>(buffer.BackingBasePointer())) +
DramMemoryMap::Base;
}
template <typename T>
T* GetPointer(PAddr addr) {
return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
T* GetPointer(Common::PhysicalAddress addr) {
return reinterpret_cast<T*>(buffer.BackingBasePointer() +
(GetInteger(addr) - DramMemoryMap::Base));
}
template <typename T>
const T* GetPointer(PAddr addr) const {
return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
const T* GetPointer(Common::PhysicalAddress addr) const {
return reinterpret_cast<T*>(buffer.BackingBasePointer() +
(GetInteger(addr) - DramMemoryMap::Base));
}
Common::HostMemory buffer;

View File

@@ -0,0 +1,14 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
namespace Core::Frontend {
class Applet {
public:
virtual ~Applet() = default;
virtual void Close() const = 0;
};
} // namespace Core::Frontend

View File

@@ -10,6 +10,8 @@ namespace Core::Frontend {
CabinetApplet::~CabinetApplet() = default;
void DefaultCabinetApplet::Close() const {}
void DefaultCabinetApplet::ShowCabinetApplet(
const CabinetCallback& callback, const CabinetParameters& parameters,
std::shared_ptr<Service::NFP::NfpDevice> nfp_device) const {

View File

@@ -4,6 +4,7 @@
#pragma once
#include <functional>
#include "core/frontend/applets/applet.h"
#include "core/hle/service/nfp/nfp_types.h"
namespace Service::NFP {
@@ -20,7 +21,7 @@ struct CabinetParameters {
using CabinetCallback = std::function<void(bool, const std::string&)>;
class CabinetApplet {
class CabinetApplet : public Applet {
public:
virtual ~CabinetApplet();
virtual void ShowCabinetApplet(const CabinetCallback& callback,
@@ -30,6 +31,7 @@ public:
class DefaultCabinetApplet final : public CabinetApplet {
public:
void Close() const override;
void ShowCabinetApplet(const CabinetCallback& callback, const CabinetParameters& parameters,
std::shared_ptr<Service::NFP::NfpDevice> nfp_device) const override;
};

View File

@@ -16,6 +16,8 @@ DefaultControllerApplet::DefaultControllerApplet(HID::HIDCore& hid_core_) : hid_
DefaultControllerApplet::~DefaultControllerApplet() = default;
void DefaultControllerApplet::Close() const {}
void DefaultControllerApplet::ReconfigureControllers(ReconfigureCallback callback,
const ControllerParameters& parameters) const {
LOG_INFO(Service_HID, "called, deducing the best configuration based on the given parameters!");

View File

@@ -7,6 +7,7 @@
#include <vector>
#include "common/common_types.h"
#include "core/frontend/applets/applet.h"
namespace Core::HID {
class HIDCore;
@@ -34,7 +35,7 @@ struct ControllerParameters {
bool allow_gamecube_controller{};
};
class ControllerApplet {
class ControllerApplet : public Applet {
public:
using ReconfigureCallback = std::function<void()>;
@@ -49,6 +50,7 @@ public:
explicit DefaultControllerApplet(HID::HIDCore& hid_core_);
~DefaultControllerApplet() override;
void Close() const override;
void ReconfigureControllers(ReconfigureCallback callback,
const ControllerParameters& parameters) const override;

View File

@@ -8,6 +8,8 @@ namespace Core::Frontend {
ErrorApplet::~ErrorApplet() = default;
void DefaultErrorApplet::Close() const {}
void DefaultErrorApplet::ShowError(Result error, FinishedCallback finished) const {
LOG_CRITICAL(Service_Fatal, "Application requested error display: {:04}-{:04} (raw={:08X})",
error.module.Value(), error.description.Value(), error.raw);

View File

@@ -6,11 +6,12 @@
#include <chrono>
#include <functional>
#include "core/frontend/applets/applet.h"
#include "core/hle/result.h"
namespace Core::Frontend {
class ErrorApplet {
class ErrorApplet : public Applet {
public:
using FinishedCallback = std::function<void()>;
@@ -28,6 +29,7 @@ public:
class DefaultErrorApplet final : public ErrorApplet {
public:
void Close() const override;
void ShowError(Result error, FinishedCallback finished) const override;
void ShowErrorWithTimestamp(Result error, std::chrono::seconds time,
FinishedCallback finished) const override;

View File

@@ -10,6 +10,8 @@ ParentalControlsApplet::~ParentalControlsApplet() = default;
DefaultParentalControlsApplet::~DefaultParentalControlsApplet() = default;
void DefaultParentalControlsApplet::Close() const {}
void DefaultParentalControlsApplet::VerifyPIN(std::function<void(bool)> finished,
bool suspend_future_verification_temporarily) {
LOG_INFO(Service_AM,
@@ -39,6 +41,8 @@ PhotoViewerApplet::~PhotoViewerApplet() = default;
DefaultPhotoViewerApplet::~DefaultPhotoViewerApplet() = default;
void DefaultPhotoViewerApplet::Close() const {}
void DefaultPhotoViewerApplet::ShowPhotosForApplication(u64 title_id,
std::function<void()> finished) const {
LOG_INFO(Service_AM,

View File

@@ -6,9 +6,11 @@
#include <functional>
#include "common/common_types.h"
#include "core/frontend/applets/applet.h"
namespace Core::Frontend {
class ParentalControlsApplet {
class ParentalControlsApplet : public Applet {
public:
virtual ~ParentalControlsApplet();
@@ -33,6 +35,7 @@ class DefaultParentalControlsApplet final : public ParentalControlsApplet {
public:
~DefaultParentalControlsApplet() override;
void Close() const override;
void VerifyPIN(std::function<void(bool)> finished,
bool suspend_future_verification_temporarily) override;
void VerifyPINForSettings(std::function<void(bool)> finished) override;
@@ -40,7 +43,7 @@ public:
void ChangePIN(std::function<void()> finished) override;
};
class PhotoViewerApplet {
class PhotoViewerApplet : public Applet {
public:
virtual ~PhotoViewerApplet();
@@ -52,6 +55,7 @@ class DefaultPhotoViewerApplet final : public PhotoViewerApplet {
public:
~DefaultPhotoViewerApplet() override;
void Close() const override;
void ShowPhotosForApplication(u64 title_id, std::function<void()> finished) const override;
void ShowAllPhotos(std::function<void()> finished) const override;
};

View File

@@ -8,6 +8,8 @@ namespace Core::Frontend {
MiiEditApplet::~MiiEditApplet() = default;
void DefaultMiiEditApplet::Close() const {}
void DefaultMiiEditApplet::ShowMiiEdit(const MiiEditCallback& callback) const {
LOG_WARNING(Service_AM, "(STUBBED) called");

View File

@@ -5,9 +5,11 @@
#include <functional>
#include "core/frontend/applets/applet.h"
namespace Core::Frontend {
class MiiEditApplet {
class MiiEditApplet : public Applet {
public:
using MiiEditCallback = std::function<void()>;
@@ -18,6 +20,7 @@ public:
class DefaultMiiEditApplet final : public MiiEditApplet {
public:
void Close() const override;
void ShowMiiEdit(const MiiEditCallback& callback) const override;
};

View File

@@ -9,6 +9,8 @@ namespace Core::Frontend {
ProfileSelectApplet::~ProfileSelectApplet() = default;
void DefaultProfileSelectApplet::Close() const {}
void DefaultProfileSelectApplet::SelectProfile(SelectProfileCallback callback) const {
Service::Account::ProfileManager manager;
callback(manager.GetUser(Settings::values.current_user.GetValue()).value_or(Common::UUID{}));

View File

@@ -7,9 +7,11 @@
#include <optional>
#include "common/uuid.h"
#include "core/frontend/applets/applet.h"
namespace Core::Frontend {
class ProfileSelectApplet {
class ProfileSelectApplet : public Applet {
public:
using SelectProfileCallback = std::function<void(std::optional<Common::UUID>)>;
@@ -20,6 +22,7 @@ public:
class DefaultProfileSelectApplet final : public ProfileSelectApplet {
public:
void Close() const override;
void SelectProfile(SelectProfileCallback callback) const override;
};

View File

@@ -13,6 +13,8 @@ SoftwareKeyboardApplet::~SoftwareKeyboardApplet() = default;
DefaultSoftwareKeyboardApplet::~DefaultSoftwareKeyboardApplet() = default;
void DefaultSoftwareKeyboardApplet::Close() const {}
void DefaultSoftwareKeyboardApplet::InitializeKeyboard(
bool is_inline, KeyboardInitializeParameters initialize_parameters,
SubmitNormalCallback submit_normal_callback_, SubmitInlineCallback submit_inline_callback_) {

View File

@@ -7,6 +7,7 @@
#include "common/common_types.h"
#include "core/frontend/applets/applet.h"
#include "core/hle/service/am/applets/applet_software_keyboard_types.h"
namespace Core::Frontend {
@@ -52,7 +53,7 @@ struct InlineTextParameters {
s32 cursor_position;
};
class SoftwareKeyboardApplet {
class SoftwareKeyboardApplet : public Applet {
public:
using SubmitInlineCallback =
std::function<void(Service::AM::Applets::SwkbdReplyType, std::u16string, s32)>;
@@ -84,6 +85,8 @@ class DefaultSoftwareKeyboardApplet final : public SoftwareKeyboardApplet {
public:
~DefaultSoftwareKeyboardApplet() override;
void Close() const override;
void InitializeKeyboard(bool is_inline, KeyboardInitializeParameters initialize_parameters,
SubmitNormalCallback submit_normal_callback_,
SubmitInlineCallback submit_inline_callback_) override;

View File

@@ -10,6 +10,8 @@ WebBrowserApplet::~WebBrowserApplet() = default;
DefaultWebBrowserApplet::~DefaultWebBrowserApplet() = default;
void DefaultWebBrowserApplet::Close() const {}
void DefaultWebBrowserApplet::OpenLocalWebPage(const std::string& local_url,
ExtractROMFSCallback extract_romfs_callback,
OpenWebPageCallback callback) const {

View File

@@ -5,11 +5,12 @@
#include <functional>
#include "core/frontend/applets/applet.h"
#include "core/hle/service/am/applets/applet_web_browser_types.h"
namespace Core::Frontend {
class WebBrowserApplet {
class WebBrowserApplet : public Applet {
public:
using ExtractROMFSCallback = std::function<void()>;
using OpenWebPageCallback =
@@ -29,6 +30,8 @@ class DefaultWebBrowserApplet final : public WebBrowserApplet {
public:
~DefaultWebBrowserApplet() override;
void Close() const override;
void OpenLocalWebPage(const std::string& local_url, ExtractROMFSCallback extract_romfs_callback,
OpenWebPageCallback callback) const override;

View File

@@ -76,22 +76,24 @@ void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) {
void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) {
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
const PAddr physical_memory_base_address =
const KPhysicalAddress physical_memory_base_address =
KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
// Insert blocks into the tree.
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram));
GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
GetInteger(physical_memory_base_address), ReservedEarlyDramSize,
KMemoryRegionType_DramReservedEarly));
// Insert the KTrace block at the end of Dram, if KTrace is enabled.
static_assert(!IsKTraceEnabled || KTraceBufferSize > 0);
if constexpr (IsKTraceEnabled) {
const PAddr ktrace_buffer_phys_addr =
const KPhysicalAddress ktrace_buffer_phys_addr =
physical_memory_base_address + intended_memory_size - KTraceBufferSize;
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
GetInteger(ktrace_buffer_phys_addr), KTraceBufferSize,
KMemoryRegionType_KernelTraceBuffer));
}
}

View File

@@ -3,10 +3,10 @@
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
constexpr inline PAddr MainMemoryAddress = 0x80000000;
constexpr inline KPhysicalAddress MainMemoryAddress = 0x80000000;
} // namespace Kernel

View File

@@ -61,7 +61,7 @@ size_t KSystemControl::Init::GetIntendedMemorySize() {
}
}
PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) {
KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
if (intended_dram_size * 2 < real_dram_size) {

View File

@@ -3,7 +3,7 @@
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_typed_address.h"
namespace Kernel::Board::Nintendo::Nx {
@@ -18,7 +18,7 @@ public:
// Initialization.
static std::size_t GetRealMemorySize();
static std::size_t GetIntendedMemorySize();
static PAddr GetKernelPhysicalBaseAddress(u64 base_address);
static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
static bool ShouldIncreaseThreadResourceLimit();
static std::size_t GetApplicationPoolSize();
static std::size_t GetAppletPoolSize();

View File

@@ -5,7 +5,7 @@
#include <cstddef>
#include "common/common_types.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/physical_memory.h"
namespace Kernel {
@@ -36,7 +36,7 @@ struct CodeSet final {
std::size_t offset = 0;
/// The address to map this segment to.
VAddr addr = 0;
KProcessAddress addr = 0;
/// The size of this segment in bytes.
u32 size = 0;
@@ -82,7 +82,7 @@ struct CodeSet final {
std::array<Segment, 3> segments;
/// The entry point address for this code set.
VAddr entrypoint = 0;
KProcessAddress entrypoint = 0;
};
} // namespace Kernel

View File

@@ -4,7 +4,6 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
@@ -30,6 +29,7 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/kernel/k_typed_address.h"
namespace Kernel::Init {
@@ -104,17 +104,18 @@ static_assert(KernelPageBufferAdditionalSize ==
/// Helper function to translate from the slab virtual address to the reserved location in physical
/// memory.
static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) {
slab_addr -= memory_layout.GetSlabRegionAddress();
return slab_addr + Core::DramMemoryMap::SlabHeapBase;
static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
KVirtualAddress slab_addr) {
slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
}
template <typename T>
VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
size_t num_objects) {
KVirtualAddress InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout,
KVirtualAddress address, size_t num_objects) {
const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
VAddr start = Common::AlignUp(address, alignof(T));
KVirtualAddress start = Common::AlignUp(GetInteger(address), alignof(T));
// This should use the virtual memory address passed in, but currently, we do not setup the
// kernel virtual memory layout. Instead, we simply map these at a region of physical memory
@@ -195,7 +196,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
// Get the start of the slab region, since that's where we'll be working.
VAddr address = memory_layout.GetSlabRegionAddress();
KVirtualAddress address = memory_layout.GetSlabRegionAddress();
// Initialize slab type array to be in sorted order.
std::array<KSlabType, KSlabType_Count> slab_types;
@@ -228,7 +229,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}
// Track the gaps, so that we can free them to the unused slab tree.
VAddr gap_start = address;
KVirtualAddress gap_start = address;
size_t gap_size = 0;
for (size_t i = 0; i < slab_gaps.size(); i++) {
@@ -280,7 +281,7 @@ void KPageBufferSlabHeap::Initialize(Core::System& system) {
// Allocate memory for the slab.
constexpr auto AllocateOption = KMemoryManager::EncodeOption(
KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
const PAddr slab_address =
const KPhysicalAddress slab_address =
kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
ASSERT(slab_address != 0);

View File

@@ -14,7 +14,7 @@ using namespace Common::Literals;
constexpr std::size_t InitialProcessBinarySizeMax = 12_MiB;
static inline PAddr GetInitialProcessBinaryPhysicalAddress() {
static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
return Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetKernelPhysicalBaseAddress(
MainMemoryAddress);
}

View File

@@ -8,6 +8,7 @@
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"
@@ -20,12 +21,12 @@ KAddressArbiter::~KAddressArbiter() = default;
namespace {
bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
*out = system.Memory().Read32(address);
bool ReadFromUser(Core::System& system, s32* out, KProcessAddress address) {
*out = system.Memory().Read32(GetInteger(address));
return true;
}
bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
bool DecrementIfLessThan(Core::System& system, s32* out, KProcessAddress address, s32 value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
@@ -35,7 +36,8 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
// TODO(bunnei): We should call CanAccessAtomic(..) here.
// Load the value from the address.
const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
const s32 current_value =
static_cast<s32>(monitor.ExclusiveRead32(current_core, GetInteger(address)));
// Compare it to the desired one.
if (current_value < value) {
@@ -43,7 +45,8 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
const s32 decrement_value = current_value - 1;
// Decrement and try to store.
if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
if (!monitor.ExclusiveWrite32(current_core, GetInteger(address),
static_cast<u32>(decrement_value))) {
// If we failed to store, try again.
DecrementIfLessThan(system, out, address, value);
}
@@ -57,7 +60,8 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
return true;
}
bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
bool UpdateIfEqual(Core::System& system, s32* out, KProcessAddress address, s32 value,
s32 new_value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
@@ -67,14 +71,16 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
// TODO(bunnei): We should call CanAccessAtomic(..) here.
// Load the value from the address.
const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
const s32 current_value =
static_cast<s32>(monitor.ExclusiveRead32(current_core, GetInteger(address)));
// Compare it to the desired one.
if (current_value == value) {
// If equal, we want to try to write the new value.
// Try to store.
if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
if (!monitor.ExclusiveWrite32(current_core, GetInteger(address),
static_cast<u32>(new_value))) {
// If we failed to store, try again.
UpdateIfEqual(system, out, address, value, new_value);
}
@@ -110,7 +116,7 @@ private:
} // namespace
Result KAddressArbiter::Signal(VAddr addr, s32 count) {
Result KAddressArbiter::Signal(uint64_t addr, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -133,7 +139,7 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
R_SUCCEED();
}
Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
Result KAddressArbiter::SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -162,7 +168,7 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 cou
R_SUCCEED();
}
Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -225,7 +231,7 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
R_SUCCEED();
}
Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
Result KAddressArbiter::WaitIfLessThan(uint64_t addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
@@ -280,7 +286,7 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6
return cur_thread->GetWaitResult();
}
Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
Result KAddressArbiter::WaitIfEqual(uint64_t addr, s32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
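
As context for the GetInteger() plumbing in the arbiter above, the following is a rough sketch of the decrement-if-less-than retry loop, expressed with std::atomic instead of the emulated exclusive monitor. The helper name and the use of std::atomic are assumptions for illustration only; the real code operates on guest memory through system.Monitor() and retries when the exclusive store fails.

#include <atomic>
#include <cstdint>

bool DecrementIfLessThanSketch(std::atomic<int32_t>& word, int32_t* out, int32_t value) {
    int32_t current = word.load(std::memory_order_acquire);
    for (;;) {
        // Only decrement while the observed value is below the threshold.
        if (current >= value) {
            break;
        }
        // A failed compare_exchange reloads 'current' and retries, playing the
        // role of the failed exclusive store in the code above.
        if (word.compare_exchange_weak(current, current - 1, std::memory_order_acq_rel)) {
            break;
        }
    }
    *out = current;
    return true;
}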

View File

@@ -25,7 +25,7 @@ public:
explicit KAddressArbiter(Core::System& system);
~KAddressArbiter();
Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
Result SignalToAddress(uint64_t addr, Svc::SignalType type, s32 value, s32 count) {
switch (type) {
case Svc::SignalType::Signal:
R_RETURN(this->Signal(addr, count));
@@ -38,7 +38,7 @@ public:
}
}
Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
Result WaitForAddress(uint64_t addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
switch (type) {
case Svc::ArbitrationType::WaitIfLessThan:
R_RETURN(WaitIfLessThan(addr, value, false, timeout));
@@ -52,11 +52,11 @@ public:
}
private:
Result Signal(VAddr addr, s32 count);
Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
Result Signal(uint64_t addr, s32 count);
Result SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32 count);
Result SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32 value, s32 count);
Result WaitIfLessThan(uint64_t addr, s32 value, bool decrement, s64 timeout);
Result WaitIfEqual(uint64_t addr, s32 value, s64 timeout);
private:
ThreadTree m_tree;

View File

@@ -29,7 +29,8 @@ Result KClientSession::SendSyncRequest() {
SCOPE_EXIT({ request->Close(); });
// Initialize the request.
request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTlsAddress(), MessageBufferSize);
request->Initialize(nullptr, GetInteger(GetCurrentThread(m_kernel).GetTlsAddress()),
MessageBufferSize);
// Send the request.
R_RETURN(m_parent->GetServerSession().OnRequest(request));

View File

@@ -19,7 +19,8 @@ namespace Kernel {
KCodeMemory::KCodeMemory(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}
Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, KProcessAddress addr,
size_t size) {
// Set members.
m_owner = GetCurrentProcessPointer(m_kernel);
@@ -63,7 +64,7 @@ void KCodeMemory::Finalize() {
m_owner->Close();
}
Result KCodeMemory::Map(VAddr address, size_t size) {
Result KCodeMemory::Map(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -83,7 +84,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
R_SUCCEED();
}
Result KCodeMemory::Unmap(VAddr address, size_t size) {
Result KCodeMemory::Unmap(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -100,7 +101,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
R_SUCCEED();
}
Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
Result KCodeMemory::MapToOwner(KProcessAddress address, size_t size, Svc::MemoryPermission perm) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -134,7 +135,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
R_SUCCEED();
}
Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
Result KCodeMemory::UnmapFromOwner(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

View File

@@ -5,12 +5,12 @@
#include <optional>
#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
@@ -31,13 +31,13 @@ class KCodeMemory final
public:
explicit KCodeMemory(KernelCore& kernel);
Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
Result Initialize(Core::DeviceMemory& device_memory, KProcessAddress address, size_t size);
void Finalize() override;
Result Map(VAddr address, size_t size);
Result Unmap(VAddr address, size_t size);
Result MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
Result UnmapFromOwner(VAddr address, size_t size);
Result Map(KProcessAddress address, size_t size);
Result Unmap(KProcessAddress address, size_t size);
Result MapToOwner(KProcessAddress address, size_t size, Svc::MemoryPermission perm);
Result UnmapFromOwner(KProcessAddress address, size_t size);
bool IsInitialized() const override {
return m_is_initialized;
@@ -47,7 +47,7 @@ public:
KProcess* GetOwner() const override {
return m_owner;
}
VAddr GetSourceAddress() const {
KProcessAddress GetSourceAddress() const {
return m_address;
}
size_t GetSize() const {
@@ -57,7 +57,7 @@ public:
private:
std::optional<KPageGroup> m_page_group{};
KProcess* m_owner{};
VAddr m_address{};
KProcessAddress m_address{};
KLightLock m_lock;
bool m_is_initialized{};
bool m_is_owner_mapped{};

View File

@@ -18,23 +18,23 @@ namespace Kernel {
namespace {
bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
*out = system.Memory().Read32(address);
bool ReadFromUser(Core::System& system, u32* out, KProcessAddress address) {
*out = system.Memory().Read32(GetInteger(address));
return true;
}
bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
system.Memory().Write32(address, *p);
bool WriteToUser(Core::System& system, KProcessAddress address, const u32* p) {
system.Memory().Write32(GetInteger(address), *p);
return true;
}
bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
bool UpdateLockAtomic(Core::System& system, u32* out, KProcessAddress address, u32 if_zero,
u32 new_orr_mask) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
// Load the value from the address.
const auto expected = monitor.ExclusiveRead32(current_core, address);
const auto expected = monitor.ExclusiveRead32(current_core, GetInteger(address));
// Orr in the new mask.
u32 value = expected | new_orr_mask;
@@ -45,7 +45,7 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
}
// Try to store.
if (!monitor.ExclusiveWrite32(current_core, address, value)) {
if (!monitor.ExclusiveWrite32(current_core, GetInteger(address), value)) {
// If we failed to store, try again.
return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
}
@@ -102,7 +102,7 @@ KConditionVariable::KConditionVariable(Core::System& system)
KConditionVariable::~KConditionVariable() = default;
Result KConditionVariable::SignalToAddress(VAddr addr) {
Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
KThread* owner_thread = GetCurrentThreadPointer(m_kernel);
// Signal the address.
@@ -143,7 +143,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
}
}
Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) {
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);
@@ -191,7 +191,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Update the tag.
VAddr address = thread->GetAddressKey();
KProcessAddress address = thread->GetAddressKey();
u32 own_tag = thread->GetAddressKeyValue();
u32 prev_tag{};
@@ -262,7 +262,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
}
}
Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
Result KConditionVariable::Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};

View File

@@ -4,10 +4,10 @@
#pragma once
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/result.h"
@@ -25,12 +25,12 @@ public:
~KConditionVariable();
// Arbitration
Result SignalToAddress(VAddr addr);
Result WaitForAddress(Handle handle, VAddr addr, u32 value);
Result SignalToAddress(KProcessAddress addr);
Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value);
// Condition variable
void Signal(u64 cv_key, s32 count);
Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);
private:
void SignalImpl(KThread* thread);

View File

@@ -54,8 +54,8 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
R_SUCCEED();
}
Result KDeviceAddressSpace::Map(KPageTable* page_table, VAddr process_address, size_t size,
u64 device_address, u32 option, bool is_aligned) {
Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
size_t size, u64 device_address, u32 option, bool is_aligned) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
device_address + size - 1 <= m_space_address + m_space_size - 1),
@@ -113,8 +113,8 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, VAddr process_address, s
R_SUCCEED();
}
Result KDeviceAddressSpace::Unmap(KPageTable* page_table, VAddr process_address, size_t size,
u64 device_address) {
Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
size_t size, u64 device_address) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
device_address + size - 1 <= m_space_address + m_space_size - 1),

View File

@@ -5,8 +5,8 @@
#include <string>
#include "common/common_types.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -31,23 +31,24 @@ public:
Result Attach(Svc::DeviceName device_name);
Result Detach(Svc::DeviceName device_name);
Result MapByForce(KPageTable* page_table, VAddr process_address, size_t size,
Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
}
Result MapAligned(KPageTable* page_table, VAddr process_address, size_t size,
Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
}
Result Unmap(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address);
Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address);
static void Initialize();
private:
Result Map(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address,
u32 option, bool is_aligned);
Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option, bool is_aligned);
private:
KLightLock m_lock;

View File

@@ -6,9 +6,9 @@
#include <vector>
#include "common/alignment.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_results.h"
@@ -26,23 +26,23 @@ public:
KDynamicPageManager() = default;
template <typename T>
T* GetPointer(VAddr addr) {
T* GetPointer(KVirtualAddress addr) {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}
template <typename T>
const T* GetPointer(VAddr addr) const {
const T* GetPointer(KVirtualAddress addr) const {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}
Result Initialize(VAddr memory, size_t size, size_t align) {
Result Initialize(KVirtualAddress memory, size_t size, size_t align) {
// We need to have positive size.
R_UNLESS(size > 0, ResultOutOfMemory);
m_backing_memory.resize(size);
// Set addresses.
m_address = memory;
m_aligned_address = Common::AlignDown(memory, align);
m_aligned_address = Common::AlignDown(GetInteger(memory), align);
// Calculate extents.
const size_t managed_size = m_address + size - m_aligned_address;
@@ -79,7 +79,7 @@ public:
R_SUCCEED();
}
VAddr GetAddress() const {
KVirtualAddress GetAddress() const {
return m_address;
}
size_t GetSize() const {
@@ -145,7 +145,8 @@ public:
KScopedSpinLock lk(m_lock);
// Set the bit for the free page.
size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
size_t offset =
(reinterpret_cast<uint64_t>(pb) - GetInteger(m_aligned_address)) / sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);
// Decrement our used count.
@@ -158,8 +159,8 @@ private:
size_t m_used{};
size_t m_peak{};
size_t m_count{};
VAddr m_address{};
VAddr m_aligned_address{};
KVirtualAddress m_address{};
KVirtualAddress m_aligned_address{};
size_t m_size{};
// TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
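
A small sketch of the offset computation used when freeing a page buffer above, under assumed names (PageBufferSketch, PageIndexOf): the bitmap index is simply the pointer's distance from the aligned base address, in units of the page-buffer size.

#include <cstddef>
#include <cstdint>

struct alignas(4096) PageBufferSketch {
    unsigned char data[4096];
};

size_t PageIndexOf(const PageBufferSketch* pb, uintptr_t aligned_base) {
    return (reinterpret_cast<uintptr_t>(pb) - aligned_base) / sizeof(PageBufferSketch);
}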

View File

@@ -19,7 +19,7 @@ class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
public:
constexpr KDynamicSlabHeap() = default;
constexpr VAddr GetAddress() const {
constexpr KVirtualAddress GetAddress() const {
return m_address;
}
constexpr size_t GetSize() const {
@@ -35,7 +35,7 @@ public:
return m_count.load();
}
constexpr bool IsInRange(VAddr addr) const {
constexpr bool IsInRange(KVirtualAddress addr) const {
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
}
@@ -115,7 +115,7 @@ private:
std::atomic<size_t> m_used{};
std::atomic<size_t> m_peak{};
std::atomic<size_t> m_count{};
VAddr m_address{};
KVirtualAddress m_address{};
size_t m_size{};
};

View File

@@ -5,8 +5,8 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_types.h"
@@ -282,7 +282,7 @@ class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>
private:
u16 m_device_disable_merge_left_count{};
u16 m_device_disable_merge_right_count{};
VAddr m_address{};
KProcessAddress m_address{};
size_t m_num_pages{};
KMemoryState m_memory_state{KMemoryState::None};
u16 m_ipc_lock_count{};
@@ -306,7 +306,7 @@ public:
}
public:
constexpr VAddr GetAddress() const {
constexpr KProcessAddress GetAddress() const {
return m_address;
}
@@ -318,11 +318,11 @@ public:
return this->GetNumPages() * PageSize;
}
constexpr VAddr GetEndAddress() const {
constexpr KProcessAddress GetEndAddress() const {
return this->GetAddress() + this->GetSize();
}
constexpr VAddr GetLastAddress() const {
constexpr KProcessAddress GetLastAddress() const {
return this->GetEndAddress() - 1;
}
@@ -348,7 +348,7 @@ public:
constexpr KMemoryInfo GetMemoryInfo() const {
return {
.m_address = this->GetAddress(),
.m_address = GetInteger(this->GetAddress()),
.m_size = this->GetSize(),
.m_state = m_memory_state,
.m_device_disable_merge_left_count = m_device_disable_merge_left_count,
@@ -366,12 +366,12 @@ public:
public:
explicit KMemoryBlock() = default;
constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
constexpr KMemoryBlock(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr)
: Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(), m_address(addr), m_num_pages(np),
m_memory_state(ms), m_permission(p), m_attribute(attr) {}
constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr) {
m_device_disable_merge_left_count = 0;
m_device_disable_merge_right_count = 0;
@@ -408,7 +408,7 @@ public:
KMemoryBlockDisableMergeAttribute::None;
}
constexpr bool Contains(VAddr addr) const {
constexpr bool Contains(KProcessAddress addr) const {
return this->GetAddress() <= addr && addr <= this->GetEndAddress();
}
@@ -443,10 +443,10 @@ public:
}
}
constexpr void Split(KMemoryBlock* block, VAddr addr) {
constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
ASSERT(this->GetAddress() < addr);
ASSERT(this->Contains(addr));
ASSERT(Common::IsAligned(addr, PageSize));
ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
block->m_address = m_address;
block->m_num_pages = (addr - this->GetAddress()) / PageSize;

View File

@@ -7,7 +7,8 @@ namespace Kernel {
KMemoryBlockManager::KMemoryBlockManager() = default;
Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
Result KMemoryBlockManager::Initialize(KProcessAddress st, KProcessAddress nd,
KMemoryBlockSlabManager* slab_manager) {
// Allocate a block to encapsulate the address space, insert it into the tree.
KMemoryBlock* start_block = slab_manager->Allocate();
R_UNLESS(start_block != nullptr, ResultOutOfResource);
@@ -15,8 +16,8 @@ Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManag
// Set our start and end.
m_start_address = st;
m_end_address = nd;
ASSERT(Common::IsAligned(m_start_address, PageSize));
ASSERT(Common::IsAligned(m_end_address, PageSize));
ASSERT(Common::IsAligned(GetInteger(m_start_address), PageSize));
ASSERT(Common::IsAligned(GetInteger(m_end_address), PageSize));
// Initialize and insert the block.
start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
@@ -40,12 +41,13 @@ void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
ASSERT(m_memory_block_tree.empty());
}
VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
size_t num_pages, size_t alignment, size_t offset,
size_t guard_pages) const {
KProcessAddress KMemoryBlockManager::FindFreeArea(KProcessAddress region_start,
size_t region_num_pages, size_t num_pages,
size_t alignment, size_t offset,
size_t guard_pages) const {
if (num_pages > 0) {
const VAddr region_end = region_start + region_num_pages * PageSize;
const VAddr region_last = region_end - 1;
const KProcessAddress region_end = region_start + region_num_pages * PageSize;
const KProcessAddress region_last = region_end - 1;
for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
it++) {
const KMemoryInfo info = it->GetMemoryInfo();
@@ -56,17 +58,19 @@ VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pa
continue;
}
VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
KProcessAddress area =
(info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress();
area += guard_pages * PageSize;
const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
const KProcessAddress offset_area =
Common::AlignDown(GetInteger(area), alignment) + offset;
area = (area <= offset_area) ? offset_area : offset_area + alignment;
const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
const VAddr area_last = area_end - 1;
const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize;
const KProcessAddress area_last = area_end - 1;
if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
area_last <= info.GetLastAddress()) {
if (info.GetAddress() <= GetInteger(area) && area < area_last &&
area_last <= region_last && area_last <= info.GetLastAddress()) {
return area;
}
}
@@ -76,7 +80,7 @@ VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pa
}
void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
VAddr address, size_t num_pages) {
KProcessAddress address, size_t num_pages) {
// Find the iterator now that we've updated.
iterator it = this->FindIterator(address);
if (address != m_start_address) {
@@ -104,18 +108,18 @@ void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator*
}
}
void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
size_t num_pages, KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr,
void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
KProcessAddress address, size_t num_pages, KMemoryState state,
KMemoryPermission perm, KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(address, PageSize));
ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
VAddr cur_address = address;
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
@@ -168,17 +172,17 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
}
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
VAddr address, size_t num_pages, KMemoryState test_state,
KMemoryPermission test_perm, KMemoryAttribute test_attr,
KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr) {
KProcessAddress address, size_t num_pages,
KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state,
KMemoryPermission perm, KMemoryAttribute attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(address, PageSize));
ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
VAddr cur_address = address;
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
@@ -230,18 +234,18 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
size_t num_pages, MemoryBlockLockFunction lock_func,
KMemoryPermission perm) {
void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator,
KProcessAddress address, size_t num_pages,
MemoryBlockLockFunction lock_func, KMemoryPermission perm) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(address, PageSize));
ASSERT(Common::IsAligned(GetInteger(address), PageSize));
VAddr cur_address = address;
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
const VAddr end_address = address + (num_pages * PageSize);
const KProcessAddress end_address = address + (num_pages * PageSize);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
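
To make the FindFreeArea arithmetic above easier to follow, here is the candidate-address computation pulled out into a standalone sketch. PickCandidate, AlignDownSketch, and the parameter names are assumptions for the example; the logic mirrors the loop body shown in the diff.

#include <cstddef>
#include <cstdint>
#include <optional>

constexpr uint64_t AlignDownSketch(uint64_t value, uint64_t alignment) {
    return value - (value % alignment);
}

// Given a free block [block_start, block_last] and a search region
// [region_start, region_last], returns an aligned candidate with guard pages
// on both sides, or nullopt if the block cannot hold it.
std::optional<uint64_t> PickCandidate(uint64_t block_start, uint64_t block_last,
                                      uint64_t region_start, uint64_t region_last,
                                      size_t num_pages, size_t page_size, size_t alignment,
                                      size_t offset, size_t guard_pages) {
    uint64_t area = (block_start <= region_start) ? region_start : block_start;
    area += guard_pages * page_size;

    // Snap to the requested alignment/offset, rounding up if we undershot.
    const uint64_t offset_area = AlignDownSketch(area, alignment) + offset;
    area = (area <= offset_area) ? offset_area : offset_area + alignment;

    const uint64_t area_end = area + num_pages * page_size + guard_pages * page_size;
    const uint64_t area_last = area_end - 1;
    if (block_start <= area && area < area_last && area_last <= region_last &&
        area_last <= block_last) {
        return area;
    }
    return std::nullopt;
}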

View File

@@ -7,9 +7,9 @@
#include <functional>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -85,9 +85,10 @@ public:
public:
KMemoryBlockManager();
using HostUnmapCallback = std::function<void(VAddr, u64)>;
using HostUnmapCallback = std::function<void(Common::ProcessAddress, u64)>;
Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
Result Initialize(KProcessAddress st, KProcessAddress nd,
KMemoryBlockSlabManager* slab_manager);
void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
iterator end() {
@@ -100,27 +101,28 @@ public:
return m_memory_block_tree.cend();
}
VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
size_t alignment, size_t offset, size_t guard_pages) const;
KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
size_t num_pages, size_t alignment, size_t offset,
size_t guard_pages) const;
void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
void Update(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr);
void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
MemoryBlockLockFunction lock_func, KMemoryPermission perm);
void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm);
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr);
iterator FindIterator(VAddr address) const {
iterator FindIterator(KProcessAddress address) const {
return m_memory_block_tree.find(KMemoryBlock(
address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
}
const KMemoryBlock* FindBlock(VAddr address) const {
const KMemoryBlock* FindBlock(KProcessAddress address) const {
if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
return std::addressof(*it);
}
@@ -132,12 +134,12 @@ public:
bool CheckState() const;
private:
void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages);
MemoryBlockTree m_memory_block_tree;
VAddr m_start_address{};
VAddr m_end_address{};
KProcessAddress m_start_address{};
KProcessAddress m_end_address{};
};
class KScopedMemoryBlockManagerAuditor {

View File

@@ -85,7 +85,8 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
return true;
}
VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
KVirtualAddress KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment,
u32 type_id) {
// We want to find the total extents of the type id.
const auto extents = this->GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));
@@ -130,11 +131,13 @@ KMemoryLayout::KMemoryLayout()
m_virtual_linear_tree{m_memory_region_allocator}, m_physical_linear_tree{
m_memory_region_allocator} {}
void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
VAddr linear_virtual_start) {
void KMemoryLayout::InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start,
KVirtualAddress linear_virtual_start) {
// Set static differences.
m_linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
m_linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
m_linear_phys_to_virt_diff =
GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start);
m_linear_virt_to_phys_diff =
GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start);
// Initialize linear trees.
for (auto& region : GetPhysicalMemoryRegionTree()) {
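
A brief sketch of how the two linear-mapping differences computed above are used: because the subtraction is done in unsigned arithmetic, one stored value per direction is enough to translate either way with a single wrapping addition. The struct and method names are assumptions for the example; the real accessors are GetLinearVirtualAddress and GetLinearPhysicalAddress, and they are only valid for addresses inside the linear regions.

#include <cstdint>

struct LinearMapSketch {
    uint64_t phys_to_virt_diff{};
    uint64_t virt_to_phys_diff{};

    void Initialize(uint64_t aligned_linear_phys_start, uint64_t linear_virtual_start) {
        phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
        virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
    }

    uint64_t ToVirtual(uint64_t phys) const { return phys + phys_to_virt_diff; }
    uint64_t ToPhysical(uint64_t virt) const { return virt + virt_to_phys_diff; }
};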

View File

@@ -10,6 +10,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_region.h"
#include "core/hle/kernel/k_memory_region_type.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
namespace Kernel {
@@ -69,10 +70,11 @@ constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelIniti
//! NB: Use KThread::GetAddressKeyIsKernel().
//! See explanation for deviation of GetAddressKey.
bool IsKernelAddressKey(VAddr key) = delete;
bool IsKernelAddressKey(KProcessAddress key) = delete;
constexpr bool IsKernelAddress(VAddr address) {
return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
constexpr bool IsKernelAddress(KProcessAddress address) {
return KernelVirtualAddressSpaceBase <= GetInteger(address) &&
address < KernelVirtualAddressSpaceEnd;
}
class KMemoryLayout final {
@@ -104,38 +106,38 @@ public:
return m_physical_linear_tree;
}
VAddr GetLinearVirtualAddress(PAddr address) const {
return address + m_linear_phys_to_virt_diff;
KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress address) const {
return GetInteger(address) + m_linear_phys_to_virt_diff;
}
PAddr GetLinearPhysicalAddress(VAddr address) const {
return address + m_linear_virt_to_phys_diff;
KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress address) const {
return GetInteger(address) + m_linear_virt_to_phys_diff;
}
const KMemoryRegion* FindVirtual(VAddr address) const {
const KMemoryRegion* FindVirtual(KVirtualAddress address) const {
return Find(address, GetVirtualMemoryRegionTree());
}
const KMemoryRegion* FindPhysical(PAddr address) const {
const KMemoryRegion* FindPhysical(KPhysicalAddress address) const {
return Find(address, GetPhysicalMemoryRegionTree());
}
const KMemoryRegion* FindVirtualLinear(VAddr address) const {
const KMemoryRegion* FindVirtualLinear(KVirtualAddress address) const {
return Find(address, GetVirtualLinearMemoryRegionTree());
}
const KMemoryRegion* FindPhysicalLinear(PAddr address) const {
const KMemoryRegion* FindPhysicalLinear(KPhysicalAddress address) const {
return Find(address, GetPhysicalLinearMemoryRegionTree());
}
VAddr GetMainStackTopAddress(s32 core_id) const {
KVirtualAddress GetMainStackTopAddress(s32 core_id) const {
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscMainStack);
}
VAddr GetIdleStackTopAddress(s32 core_id) const {
KVirtualAddress GetIdleStackTopAddress(s32 core_id) const {
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscIdleStack);
}
VAddr GetExceptionStackTopAddress(s32 core_id) const {
KVirtualAddress GetExceptionStackTopAddress(s32 core_id) const {
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
}
VAddr GetSlabRegionAddress() const {
KVirtualAddress GetSlabRegionAddress() const {
return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
.GetAddress();
}
@@ -143,10 +145,10 @@ public:
const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
}
PAddr GetDevicePhysicalAddress(KMemoryRegionType type) const {
KPhysicalAddress GetDevicePhysicalAddress(KMemoryRegionType type) const {
return GetDeviceRegion(type).GetAddress();
}
VAddr GetDeviceVirtualAddress(KMemoryRegionType type) const {
KVirtualAddress GetDeviceVirtualAddress(KMemoryRegionType type) const {
return GetDeviceRegion(type).GetPairAddress();
}
@@ -175,11 +177,11 @@ public:
KMemoryRegionType_VirtualDramKernelSecureAppletMemory));
}
const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
const KMemoryRegion& GetVirtualLinearRegion(KVirtualAddress address) const {
return Dereference(FindVirtualLinear(address));
}
const KMemoryRegion& GetPhysicalLinearRegion(PAddr address) const {
const KMemoryRegion& GetPhysicalLinearRegion(KPhysicalAddress address) const {
return Dereference(FindPhysicalLinear(address));
}
@@ -193,29 +195,32 @@ public:
return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DTB);
}
bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
bool IsHeapPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address) const {
return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
KMemoryRegionType_DramUserPool);
}
bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address) const {
bool IsHeapVirtualAddress(const KMemoryRegion*& region, KVirtualAddress address) const {
return IsTypedAddress(region, address, GetVirtualLinearMemoryRegionTree(),
KMemoryRegionType_VirtualDramUserPool);
}
bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address, size_t size) const {
bool IsHeapPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address,
size_t size) const {
return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
KMemoryRegionType_DramUserPool);
}
bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address, size_t size) const {
bool IsHeapVirtualAddress(const KMemoryRegion*& region, KVirtualAddress address,
size_t size) const {
return IsTypedAddress(region, address, size, GetVirtualLinearMemoryRegionTree(),
KMemoryRegionType_VirtualDramUserPool);
}
bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region,
KPhysicalAddress address) const {
return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
}
bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address,
bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address,
size_t size) const {
return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
@@ -234,8 +239,8 @@ public:
return std::make_pair(total_size, kernel_size);
}
void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
VAddr linear_virtual_start);
void InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start,
KVirtualAddress linear_virtual_start);
static size_t GetResourceRegionSizeForInit(bool use_extra_resource);
auto GetKernelRegionExtents() const {
@@ -261,8 +266,8 @@ public:
auto GetLinearRegionVirtualExtents() const {
const auto physical = GetLinearRegionPhysicalExtents();
return KMemoryRegion(GetLinearVirtualAddress(physical.GetAddress()),
GetLinearVirtualAddress(physical.GetLastAddress()), 0,
return KMemoryRegion(GetInteger(GetLinearVirtualAddress(physical.GetAddress())),
GetInteger(GetLinearVirtualAddress(physical.GetLastAddress())), 0,
KMemoryRegionType_None);
}
@@ -334,12 +339,12 @@ private:
static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address,
const KMemoryRegionTree& tree, KMemoryRegionType type) {
// Check if the cached region already contains the address.
if (region != nullptr && region->Contains(address)) {
if (region != nullptr && region->Contains(GetInteger(address))) {
return true;
}
// Find the containing region, and update the cache.
if (const KMemoryRegion* found = tree.Find(address);
if (const KMemoryRegion* found = tree.Find(GetInteger(address));
found != nullptr && found->IsDerivedFrom(type)) {
region = found;
return true;
@@ -352,11 +357,12 @@ private:
static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, size_t size,
const KMemoryRegionTree& tree, KMemoryRegionType type) {
// Get the end of the checked region.
const u64 last_address = address + size - 1;
const u64 last_address = GetInteger(address) + size - 1;
// Walk the tree to verify the region is correct.
const KMemoryRegion* cur =
(region != nullptr && region->Contains(address)) ? region : tree.Find(address);
const KMemoryRegion* cur = (region != nullptr && region->Contains(GetInteger(address)))
? region
: tree.Find(GetInteger(address));
while (cur != nullptr && cur->IsDerivedFrom(type)) {
if (last_address <= cur->GetLastAddress()) {
region = cur;
@@ -370,7 +376,7 @@ private:
template <typename AddressType>
static const KMemoryRegion* Find(AddressType address, const KMemoryRegionTree& tree) {
return tree.Find(address);
return tree.Find(GetInteger(address));
}
static KMemoryRegion& Dereference(KMemoryRegion* region) {
@@ -383,7 +389,7 @@ private:
return *region;
}
VAddr GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
KVirtualAddress GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
const auto& region = Dereference(
GetVirtualMemoryRegionTree().FindByTypeAndAttribute(type, static_cast<u32>(core_id)));
ASSERT(region.GetEndAddress() != 0);
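
The IsTypedAddress helpers above use a caller-owned cached region pointer to skip repeated tree lookups. Below is that pattern reduced to plain types; RegionSketch, the vector-based lookup, and the exact type comparison are assumptions for the example (the real code walks a KMemoryRegionTree and checks IsDerivedFrom).

#include <cstddef>
#include <cstdint>
#include <vector>

struct RegionSketch {
    uint64_t first;
    uint64_t last;
    uint32_t type;

    bool Contains(uint64_t address) const { return first <= address && address <= last; }
};

bool IsTypedAddressSketch(const RegionSketch*& cached, uint64_t address, uint32_t type,
                          const std::vector<RegionSketch>& regions) {
    // Fast path: the previously found region usually covers the next query.
    if (cached != nullptr && cached->Contains(address)) {
        return true;
    }
    // Slow path: stand-in for the tree lookup; refresh the cache on success.
    for (const RegionSketch& r : regions) {
        if (r.Contains(address) && r.type == type) {
            cached = &r;
            return true;
        }
    }
    return false;
}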

View File

@@ -5,7 +5,6 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/device_memory.h"
@@ -44,10 +43,10 @@ KMemoryManager::KMemoryManager(Core::System& system)
KLightLock{system.Kernel()},
} {}
void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) {
// Clear the management region to zero.
const VAddr management_region_end = management_region + management_region_size;
const KVirtualAddress management_region_end = management_region + management_region_size;
// std::memset(GetVoidPointer(management_region), 0, management_region_size);
// Reset our manager count.
@@ -56,7 +55,7 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
// Traverse the virtual memory layout tree, initializing each manager as appropriate.
while (m_num_managers != MaxManagerCount) {
// Locate the region that should initialize the current manager.
PAddr region_address = 0;
KPhysicalAddress region_address = 0;
size_t region_size = 0;
Pool region_pool = Pool::Count;
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
@@ -70,8 +69,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
continue;
}
const PAddr cur_start = it.GetAddress();
const PAddr cur_end = it.GetEndAddress();
const KPhysicalAddress cur_start = it.GetAddress();
const KPhysicalAddress cur_end = it.GetEndAddress();
// Validate the region.
ASSERT(cur_end != 0);
@@ -119,17 +118,17 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
// Free each region to its corresponding heap.
size_t reserved_sizes[MaxManagerCount] = {};
const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
const PAddr ini_last = ini_end - 1;
const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
const KPhysicalAddress ini_last = ini_end - 1;
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
// Get the manager for the region.
auto& manager = m_managers[it.GetAttributes()];
const PAddr cur_start = it.GetAddress();
const PAddr cur_last = it.GetLastAddress();
const PAddr cur_end = it.GetEndAddress();
const KPhysicalAddress cur_start = it.GetAddress();
const KPhysicalAddress cur_last = it.GetLastAddress();
const KPhysicalAddress cur_end = it.GetEndAddress();
if (cur_start <= ini_start && ini_last <= cur_last) {
// Free memory before the ini to the heap.
@@ -175,7 +174,8 @@ void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
UNREACHABLE();
}
PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages,
u32 option) {
// Early return if we're allocating no pages.
if (num_pages == 0) {
return 0;
@@ -190,7 +190,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
// Loop, trying to iterate from each block.
Impl* chosen_manager = nullptr;
PAddr allocated_block = 0;
KPhysicalAddress allocated_block = 0;
for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
chosen_manager = this->GetNextManager(chosen_manager, dir)) {
allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
@@ -239,7 +239,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
cur_manager = this->GetNextManager(cur_manager, dir)) {
while (num_pages >= pages_per_alloc) {
// Allocate a block.
PAddr allocated_block = cur_manager->AllocateBlock(index, random);
KPhysicalAddress allocated_block = cur_manager->AllocateBlock(index, random);
if (allocated_block == 0) {
break;
}
@@ -286,7 +286,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
// Open the first reference to the pages.
for (const auto& block : *out) {
PAddr cur_address = block.GetAddress();
KPhysicalAddress cur_address = block.GetAddress();
size_t remaining_pages = block.GetNumPages();
while (remaining_pages > 0) {
// Get the manager for the current address.
@@ -337,7 +337,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Iterate over the allocated blocks.
for (const auto& block : *out) {
// Get the block extents.
const PAddr block_address = block.GetAddress();
const KPhysicalAddress block_address = block.GetAddress();
const size_t block_pages = block.GetNumPages();
// If it has no pages, we don't need to do anything.
@@ -348,7 +348,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Fill all the pages that we need to fill.
bool any_new = false;
{
PAddr cur_address = block_address;
KPhysicalAddress cur_address = block_address;
size_t remaining_pages = block_pages;
while (remaining_pages > 0) {
// Get the manager for the current address.
@@ -369,7 +369,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// If there are new pages, update tracking for the allocation.
if (any_new) {
// Update tracking for the allocation.
PAddr cur_address = block_address;
KPhysicalAddress cur_address = block_address;
size_t remaining_pages = block_pages;
while (remaining_pages > 0) {
// Get the manager for the current address.
@@ -400,8 +400,9 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
R_SUCCEED();
}
size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
VAddr management_end, Pool p) {
size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
KVirtualAddress management, KVirtualAddress management_end,
Pool p) {
// Calculate management sizes.
const size_t ref_count_size = (size / PageSize) * sizeof(u16);
const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size);
@@ -417,7 +418,7 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
m_management_region = management;
m_page_reference_counts.resize(
Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
ASSERT(Common::IsAligned(m_management_region, PageSize));
ASSERT(Common::IsAligned(GetInteger(m_management_region), PageSize));
// Initialize the manager's KPageHeap.
m_heap.Initialize(address, size, management + manager_size, page_heap_size);
@@ -425,15 +426,15 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
return total_management_size;
}
void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) {
void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
UNREACHABLE();
}
void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) {
void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
UNREACHABLE();
}
bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages,
bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages,
u8 fill_pattern) {
UNREACHABLE();
}

View File

@@ -7,10 +7,10 @@
#include <tuple>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_page_heap.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/result.h"
namespace Core {
@@ -50,21 +50,21 @@ public:
explicit KMemoryManager(Core::System& system);
void Initialize(VAddr management_region, size_t management_region_size);
void Initialize(KVirtualAddress management_region, size_t management_region_size);
Result InitializeOptimizedMemory(u64 process_id, Pool pool);
void FinalizeOptimizedMemory(u64 process_id, Pool pool);
PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
u8 fill_pattern);
Pool GetPool(PAddr address) const {
Pool GetPool(KPhysicalAddress address) const {
return this->GetManager(address).GetPool();
}
void Open(PAddr address, size_t num_pages) {
void Open(KPhysicalAddress address, size_t num_pages) {
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
@@ -80,7 +80,7 @@ public:
}
}
void OpenFirst(PAddr address, size_t num_pages) {
void OpenFirst(KPhysicalAddress address, size_t num_pages) {
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
@@ -96,7 +96,7 @@ public:
}
}
void Close(PAddr address, size_t num_pages) {
void Close(KPhysicalAddress address, size_t num_pages) {
// Repeatedly close references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
@@ -199,16 +199,16 @@ private:
public:
Impl() = default;
size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
Pool p);
size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management,
KVirtualAddress management_end, Pool p);
PAddr AllocateBlock(s32 index, bool random) {
KPhysicalAddress AllocateBlock(s32 index, bool random) {
return m_heap.AllocateBlock(index, random);
}
PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
return m_heap.AllocateAligned(index, num_pages, align_pages);
}
void Free(PAddr addr, size_t num_pages) {
void Free(KPhysicalAddress addr, size_t num_pages) {
m_heap.Free(addr, num_pages);
}
@@ -220,10 +220,10 @@ private:
UNIMPLEMENTED();
}
void TrackUnoptimizedAllocation(PAddr block, size_t num_pages);
void TrackOptimizedAllocation(PAddr block, size_t num_pages);
void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);
bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern);
bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);
constexpr Pool GetPool() const {
return m_pool;
@@ -231,7 +231,7 @@ private:
constexpr size_t GetSize() const {
return m_heap.GetSize();
}
constexpr PAddr GetEndAddress() const {
constexpr KPhysicalAddress GetEndAddress() const {
return m_heap.GetEndAddress();
}
@@ -243,10 +243,10 @@ private:
UNIMPLEMENTED();
}
constexpr size_t GetPageOffset(PAddr address) const {
constexpr size_t GetPageOffset(KPhysicalAddress address) const {
return m_heap.GetPageOffset(address);
}
constexpr size_t GetPageOffsetToEnd(PAddr address) const {
constexpr size_t GetPageOffsetToEnd(KPhysicalAddress address) const {
return m_heap.GetPageOffsetToEnd(address);
}
@@ -263,7 +263,7 @@ private:
return m_prev;
}
void OpenFirst(PAddr address, size_t num_pages) {
void OpenFirst(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
@@ -274,7 +274,7 @@ private:
}
}
void Open(PAddr address, size_t num_pages) {
void Open(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
@@ -285,7 +285,7 @@ private:
}
}
void Close(PAddr address, size_t num_pages) {
void Close(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
@@ -323,18 +323,18 @@ private:
KPageHeap m_heap;
std::vector<RefCount> m_page_reference_counts;
VAddr m_management_region{};
KVirtualAddress m_management_region{};
Pool m_pool{};
Impl* m_next{};
Impl* m_prev{};
};
private:
Impl& GetManager(PAddr address) {
Impl& GetManager(KPhysicalAddress address) {
return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
}
const Impl& GetManager(PAddr address) const {
const Impl& GetManager(KPhysicalAddress address) const {
return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
}

View File

@@ -5,9 +5,9 @@
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/k_memory_region_type.h"
#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -243,10 +243,10 @@ public:
void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0);
bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
VAddr GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
VAddr GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id,
size_t guard_size) {
KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id,
size_t guard_size) {
return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
}

View File

@@ -10,8 +10,8 @@
namespace Kernel {
KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
ASSERT(Common::IsAligned(phys_addr, PageSize));
KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, KPhysicalAddress phys_addr) {
ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
}

View File

@@ -26,7 +26,7 @@ public:
explicit KPageBuffer(KernelCore&) {}
KPageBuffer() = default;
static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);
static KPageBuffer* FromPhysicalAddress(Core::System& system, KPhysicalAddress phys_addr);
private:
alignas(PageSize) std::array<u8, PageSize> m_buffer{};

View File

@@ -22,7 +22,7 @@ public:
constexpr explicit KBlockInfo() : m_next(nullptr) {}
constexpr void Initialize(KPhysicalAddress addr, size_t np) {
ASSERT(Common::IsAligned(addr, PageSize));
ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
ASSERT(static_cast<u32>(np) == np);
m_page_index = static_cast<u32>(addr / PageSize);

View File

@@ -6,14 +6,14 @@
namespace Kernel {
void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
size_t management_size, const size_t* block_shifts,
size_t num_block_shifts) {
void KPageHeap::Initialize(KPhysicalAddress address, size_t size,
KVirtualAddress management_address, size_t management_size,
const size_t* block_shifts, size_t num_block_shifts) {
// Check our assumptions.
ASSERT(Common::IsAligned(address, PageSize));
ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT(Common::IsAligned(size, PageSize));
ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
const VAddr management_end = management_address + management_size;
const KVirtualAddress management_end = management_address + management_size;
// Set our members.
m_heap_address = address;
@@ -31,7 +31,7 @@ void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
}
// Ensure we didn't overextend our bounds.
ASSERT(VAddr(cur_bitmap_storage) <= management_end);
ASSERT(KVirtualAddress(cur_bitmap_storage) <= management_end);
}
size_t KPageHeap::GetNumFreePages() const {
@@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {
return num_free;
}
PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
KPhysicalAddress KPageHeap::AllocateByLinearSearch(s32 index) {
const size_t needed_size = m_blocks[index].GetSize();
for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) {
if (const KPhysicalAddress addr = m_blocks[i].PopBlock(false); addr != 0) {
if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
@@ -59,7 +59,7 @@ PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
return 0;
}
PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
KPhysicalAddress KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
// Get the size and required alignment.
const size_t needed_size = num_pages * PageSize;
const size_t align_size = align_pages * PageSize;
@@ -110,7 +110,7 @@ PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_page
}
// Pop a block from the index we selected.
if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) {
if (KPhysicalAddress addr = m_blocks[index].PopBlock(true); addr != 0) {
// Determine how much size we have left over.
if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
leftover_size > 0) {
@@ -141,13 +141,13 @@ PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_page
return 0;
}
void KPageHeap::FreeBlock(PAddr block, s32 index) {
void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
do {
block = m_blocks[index++].PushBlock(block);
} while (block != 0);
}
void KPageHeap::Free(PAddr addr, size_t num_pages) {
void KPageHeap::Free(KPhysicalAddress addr, size_t num_pages) {
// Freeing no pages is a no-op.
if (num_pages == 0) {
return;
@@ -155,16 +155,16 @@ void KPageHeap::Free(PAddr addr, size_t num_pages) {
// Find the largest block size that we can free, and free as many as possible.
s32 big_index = static_cast<s32>(m_num_blocks) - 1;
const PAddr start = addr;
const PAddr end = addr + num_pages * PageSize;
PAddr before_start = start;
PAddr before_end = start;
PAddr after_start = end;
PAddr after_end = end;
const KPhysicalAddress start = addr;
const KPhysicalAddress end = addr + num_pages * PageSize;
KPhysicalAddress before_start = start;
KPhysicalAddress before_end = start;
KPhysicalAddress after_start = end;
KPhysicalAddress after_end = end;
while (big_index >= 0) {
const size_t block_size = m_blocks[big_index].GetSize();
const PAddr big_start = Common::AlignUp(start, block_size);
const PAddr big_end = Common::AlignDown(end, block_size);
const KPhysicalAddress big_start = Common::AlignUp(GetInteger(start), block_size);
const KPhysicalAddress big_end = Common::AlignDown(GetInteger(end), block_size);
if (big_start < big_end) {
// Free as many big blocks as we can.
for (auto block = big_start; block < big_end; block += block_size) {
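
A pattern repeated in the KPageHeap changes above: Common::AlignUp/AlignDown keep operating on plain integers, so the typed address is unwrapped with GetInteger() and the aligned integer converts back through the wrapper. A hedged, simplified illustration using the sketch types from earlier (AlignUp/AlignDown below are stand-ins, not the real common/alignment.h helpers):

// Simplified stand-ins; assume power-of-two alignments, as the page heap does.
constexpr uint64_t AlignDown(uint64_t value, uint64_t align) {
    return value & ~(align - 1);
}
constexpr uint64_t AlignUp(uint64_t value, uint64_t align) {
    return AlignDown(value + align - 1, align);
}

constexpr uint64_t BlockSize = 0x1000;

// Unwrap, align, and re-wrap (the sketch's converting constructor does the last step).
constexpr KPhysicalAddress start{0x1234};
constexpr KPhysicalAddress big_start = AlignUp(GetInteger(start), BlockSize);
static_assert(GetInteger(big_start) == 0x2000);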


@@ -8,8 +8,8 @@
#include "common/alignment.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
namespace Kernel {
@@ -18,24 +18,24 @@ class KPageHeap {
public:
KPageHeap() = default;
constexpr PAddr GetAddress() const {
constexpr KPhysicalAddress GetAddress() const {
return m_heap_address;
}
constexpr size_t GetSize() const {
return m_heap_size;
}
constexpr PAddr GetEndAddress() const {
constexpr KPhysicalAddress GetEndAddress() const {
return this->GetAddress() + this->GetSize();
}
constexpr size_t GetPageOffset(PAddr block) const {
constexpr size_t GetPageOffset(KPhysicalAddress block) const {
return (block - this->GetAddress()) / PageSize;
}
constexpr size_t GetPageOffsetToEnd(PAddr block) const {
constexpr size_t GetPageOffsetToEnd(KPhysicalAddress block) const {
return (this->GetEndAddress() - block) / PageSize;
}
void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
size_t management_size) {
void Initialize(KPhysicalAddress heap_address, size_t heap_size,
KVirtualAddress management_address, size_t management_size) {
return this->Initialize(heap_address, heap_size, management_address, management_size,
MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts);
}
@@ -53,7 +53,7 @@ public:
m_initial_used_size = m_heap_size - free_size - reserved_size;
}
PAddr AllocateBlock(s32 index, bool random) {
KPhysicalAddress AllocateBlock(s32 index, bool random) {
if (random) {
const size_t block_pages = m_blocks[index].GetNumPages();
return this->AllocateByRandom(index, block_pages, block_pages);
@@ -62,12 +62,12 @@ public:
}
}
PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
// TODO: linear search support?
return this->AllocateByRandom(index, num_pages, align_pages);
}
void Free(PAddr addr, size_t num_pages);
void Free(KPhysicalAddress addr, size_t num_pages);
static size_t CalculateManagementOverheadSize(size_t region_size) {
return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(),
@@ -125,24 +125,25 @@ private:
return this->GetNumFreeBlocks() * this->GetNumPages();
}
u64* Initialize(PAddr addr, size_t size, size_t bs, size_t nbs, u64* bit_storage) {
u64* Initialize(KPhysicalAddress addr, size_t size, size_t bs, size_t nbs,
u64* bit_storage) {
// Set shifts.
m_block_shift = bs;
m_next_block_shift = nbs;
// Align up the address.
PAddr end = addr + size;
KPhysicalAddress end = addr + size;
const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift)
: (u64(1) << m_block_shift);
addr = Common::AlignDown(addr, align);
end = Common::AlignUp(end, align);
addr = Common::AlignDown(GetInteger(addr), align);
end = Common::AlignUp(GetInteger(end), align);
m_heap_address = addr;
m_end_offset = (end - addr) / (u64(1) << m_block_shift);
return m_bitmap.Initialize(bit_storage, m_end_offset);
}
PAddr PushBlock(PAddr address) {
KPhysicalAddress PushBlock(KPhysicalAddress address) {
// Set the bit for the free block.
size_t offset = (address - m_heap_address) >> this->GetShift();
m_bitmap.SetBit(offset);
@@ -161,7 +162,7 @@ private:
return {};
}
PAddr PopBlock(bool random) {
KPhysicalAddress PopBlock(bool random) {
// Find a free block.
s64 soffset = m_bitmap.FindFreeBlock(random);
if (soffset < 0) {
@@ -187,18 +188,19 @@ private:
private:
KPageBitmap m_bitmap;
PAddr m_heap_address{};
KPhysicalAddress m_heap_address{};
uintptr_t m_end_offset{};
size_t m_block_shift{};
size_t m_next_block_shift{};
};
private:
void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
size_t management_size, const size_t* block_shifts, size_t num_block_shifts);
void Initialize(KPhysicalAddress heap_address, size_t heap_size,
KVirtualAddress management_address, size_t management_size,
const size_t* block_shifts, size_t num_block_shifts);
size_t GetNumFreePages() const;
void FreeBlock(PAddr block, s32 index);
void FreeBlock(KPhysicalAddress block, s32 index);
static constexpr size_t NumMemoryBlockPageShifts{7};
static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
@@ -206,14 +208,14 @@ private:
};
private:
PAddr AllocateByLinearSearch(s32 index);
PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
KPhysicalAddress AllocateByLinearSearch(s32 index);
KPhysicalAddress AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
size_t num_block_shifts);
private:
PAddr m_heap_address{};
KPhysicalAddress m_heap_address{};
size_t m_heap_size{};
size_t m_initial_used_size{};
size_t m_num_blocks{};

File diff suppressed because it is too large.


@@ -6,7 +6,6 @@
#include <memory>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
@@ -15,6 +14,7 @@
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -65,45 +65,47 @@ public:
Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
VAddr code_addr, size_t code_size, KSystemResource* system_resource,
KResourceLimit* resource_limit);
KProcessAddress code_addr, size_t code_size,
KSystemResource* system_resource, KResourceLimit* resource_limit);
void Finalize();
Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state,
KMemoryPermission perm);
Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy);
Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
VAddr src_addr);
Result MapPhysicalMemory(VAddr addr, size_t size);
Result UnmapPhysicalMemory(VAddr addr, size_t size);
Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(VAddr addr);
Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table,
KProcessAddress src_addr);
Result MapPhysicalMemory(KProcessAddress addr, size_t size);
Result UnmapPhysicalMemory(KProcessAddress addr, size_t size);
Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(KProcessAddress addr);
Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
Result SetMaxHeapSize(size_t size);
Result SetHeapSize(VAddr* out, size_t size);
Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
Result SetHeapSize(u64* out, size_t size);
Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
KMemoryPermission perm, bool is_aligned, bool check_heap);
Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size);
Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size);
Result UnlockForIpcUserBuffer(VAddr address, size_t size);
Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table,
KMemoryPermission test_perm, KMemoryState dst_state, bool send);
Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state);
Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state);
Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
KPageTable& src_page_table, KMemoryPermission test_perm,
KMemoryState dst_state, bool send);
Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size);
Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg);
Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr);
@@ -120,7 +122,7 @@ public:
return m_block_info_manager;
}
bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, KProcessAddress region_start,
@@ -173,8 +175,8 @@ protected:
m_root = n;
}
void Push(Core::Memory::Memory& memory, VAddr addr) {
this->Push(memory.GetPointer<Node>(addr));
void Push(Core::Memory::Memory& memory, KVirtualAddress addr) {
this->Push(memory.GetPointer<Node>(GetInteger(addr)));
}
Node* Peek() const {
@@ -212,27 +214,28 @@ private:
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
bool IsRegionContiguous(VAddr addr, u64 size) const;
void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
KMemoryInfo QueryInfoImpl(VAddr addr);
VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
size_t align);
Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
bool IsRegionContiguous(KProcessAddress addr, u64 size) const;
void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list);
KMemoryInfo QueryInfoImpl(KProcessAddress addr);
KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
u64 needed_num_pages, size_t align);
Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
OperationType operation);
Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
PAddr map_addr = 0);
Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
OperationType operation, KPhysicalAddress map_addr = 0);
void FinalizeUpdate(PageLinkedList* page_list);
VAddr GetRegionAddress(KMemoryState state) const;
KProcessAddress GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
size_t alignment, size_t offset, size_t guard_pages);
KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
size_t num_pages, size_t alignment, size_t offset,
size_t guard_pages);
Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
@@ -244,12 +247,12 @@ private:
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
@@ -258,39 +261,40 @@ private:
state_mask, state, perm_mask, perm, attr_mask, attr,
ignore_attr));
}
Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
attr_mask, attr, ignore_attr));
}
Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr);
Result UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryPermission new_perm,
KMemoryAttribute lock_attr);
Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr,
const KPageGroup* pg);
Result MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages);
bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
return m_general_lock.IsLockedByCurrentThread();
}
bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) {
ASSERT(this->IsLockedByCurrentThread());
return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
}
bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
ASSERT(this->IsLockedByCurrentThread());
*out = GetPhysicalAddr(virt_addr);
@@ -298,12 +302,13 @@ private:
return *out != 0;
}
Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address,
size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
KProcessAddress address, size_t size, KMemoryPermission test_perm,
KMemoryState dst_state);
Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
KMemoryPermission test_perm, KMemoryState dst_state,
KPageTable& src_page_table, bool send);
void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
size_t size, KMemoryPermission prot_perm);
Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
@@ -315,61 +320,61 @@ private:
mutable KLightLock m_map_physical_memory_lock;
public:
constexpr VAddr GetAddressSpaceStart() const {
constexpr KProcessAddress GetAddressSpaceStart() const {
return m_address_space_start;
}
constexpr VAddr GetAddressSpaceEnd() const {
constexpr KProcessAddress GetAddressSpaceEnd() const {
return m_address_space_end;
}
constexpr size_t GetAddressSpaceSize() const {
return m_address_space_end - m_address_space_start;
}
constexpr VAddr GetHeapRegionStart() const {
constexpr KProcessAddress GetHeapRegionStart() const {
return m_heap_region_start;
}
constexpr VAddr GetHeapRegionEnd() const {
constexpr KProcessAddress GetHeapRegionEnd() const {
return m_heap_region_end;
}
constexpr size_t GetHeapRegionSize() const {
return m_heap_region_end - m_heap_region_start;
}
constexpr VAddr GetAliasRegionStart() const {
constexpr KProcessAddress GetAliasRegionStart() const {
return m_alias_region_start;
}
constexpr VAddr GetAliasRegionEnd() const {
constexpr KProcessAddress GetAliasRegionEnd() const {
return m_alias_region_end;
}
constexpr size_t GetAliasRegionSize() const {
return m_alias_region_end - m_alias_region_start;
}
constexpr VAddr GetStackRegionStart() const {
constexpr KProcessAddress GetStackRegionStart() const {
return m_stack_region_start;
}
constexpr VAddr GetStackRegionEnd() const {
constexpr KProcessAddress GetStackRegionEnd() const {
return m_stack_region_end;
}
constexpr size_t GetStackRegionSize() const {
return m_stack_region_end - m_stack_region_start;
}
constexpr VAddr GetKernelMapRegionStart() const {
constexpr KProcessAddress GetKernelMapRegionStart() const {
return m_kernel_map_region_start;
}
constexpr VAddr GetKernelMapRegionEnd() const {
constexpr KProcessAddress GetKernelMapRegionEnd() const {
return m_kernel_map_region_end;
}
constexpr VAddr GetCodeRegionStart() const {
constexpr KProcessAddress GetCodeRegionStart() const {
return m_code_region_start;
}
constexpr VAddr GetCodeRegionEnd() const {
constexpr KProcessAddress GetCodeRegionEnd() const {
return m_code_region_end;
}
constexpr VAddr GetAliasCodeRegionStart() const {
constexpr KProcessAddress GetAliasCodeRegionStart() const {
return m_alias_code_region_start;
}
constexpr VAddr GetAliasCodeRegionEnd() const {
constexpr KProcessAddress GetAliasCodeRegionEnd() const {
return m_alias_code_region_end;
}
constexpr VAddr GetAliasCodeRegionSize() const {
constexpr size_t GetAliasCodeRegionSize() const {
return m_alias_code_region_end - m_alias_code_region_start;
}
size_t GetNormalMemorySize() {
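
The accessor change just above, where GetAliasCodeRegionSize() now returns size_t instead of VAddr, follows directly from the arithmetic rule the wrapper enforces: subtracting two addresses yields a plain size, not another address. Continuing the hedged sketch (the addresses here are made up):

#include <cstddef>

constexpr KProcessAddress region_start{0x0800'0000};
constexpr KProcessAddress region_end{0x0900'0000};

// address - address -> integer, so size_t is the natural return type.
constexpr size_t region_size = region_end - region_start;
static_assert(region_size == 0x0100'0000);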
@@ -382,25 +387,25 @@ public:
constexpr size_t GetHeapSize() const {
return m_current_heap_end - m_heap_region_start;
}
constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
constexpr bool IsInsideAddressSpace(KProcessAddress address, size_t size) const {
return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
}
constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
constexpr bool IsOutsideAliasRegion(KProcessAddress address, size_t size) const {
return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
}
constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
constexpr bool IsOutsideStackRegion(KProcessAddress address, size_t size) const {
return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
}
constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
constexpr bool IsInvalidRegion(KProcessAddress address, size_t size) const {
return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
}
constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
constexpr bool IsInsideHeapRegion(KProcessAddress address, size_t size) const {
return address + size > m_heap_region_start && m_heap_region_end > address;
}
constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
constexpr bool IsInsideAliasRegion(KProcessAddress address, size_t size) const {
return address + size > m_alias_region_start && m_alias_region_end > address;
}
constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
constexpr bool IsOutsideASLRRegion(KProcessAddress address, size_t size) const {
if (IsInvalidRegion(address, size)) {
return true;
}
@@ -412,47 +417,53 @@ public:
}
return {};
}
constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
constexpr bool IsInsideASLRRegion(KProcessAddress address, size_t size) const {
return !IsOutsideASLRRegion(address, size);
}
constexpr size_t GetNumGuardPages() const {
return IsKernel() ? 1 : 4;
}
PAddr GetPhysicalAddr(VAddr addr) const {
KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const {
const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
ASSERT(backing_addr);
return backing_addr + addr;
return backing_addr + GetInteger(addr);
}
constexpr bool Contains(VAddr addr) const {
constexpr bool Contains(KProcessAddress addr) const {
return m_address_space_start <= addr && addr <= m_address_space_end - 1;
}
constexpr bool Contains(VAddr addr, size_t size) const {
constexpr bool Contains(KProcessAddress addr, size_t size) const {
return m_address_space_start <= addr && addr < addr + size &&
addr + size - 1 <= m_address_space_end - 1;
}
public:
static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout,
KPhysicalAddress addr) {
return layout.GetLinearVirtualAddress(addr);
}
static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout,
KVirtualAddress addr) {
return layout.GetLinearPhysicalAddress(addr);
}
static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout,
KPhysicalAddress addr) {
return GetLinearMappedVirtualAddress(layout, addr);
}
static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout,
KVirtualAddress addr) {
return GetLinearMappedPhysicalAddress(layout, addr);
}
static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout,
KPhysicalAddress addr) {
return GetLinearMappedVirtualAddress(layout, addr);
}
static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout,
KVirtualAddress addr) {
return GetLinearMappedPhysicalAddress(layout, addr);
}
@@ -464,7 +475,7 @@ private:
return m_enable_aslr;
}
constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
return (m_address_space_start <= addr) &&
(num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
(addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
@@ -489,21 +500,21 @@ private:
};
private:
VAddr m_address_space_start{};
VAddr m_address_space_end{};
VAddr m_heap_region_start{};
VAddr m_heap_region_end{};
VAddr m_current_heap_end{};
VAddr m_alias_region_start{};
VAddr m_alias_region_end{};
VAddr m_stack_region_start{};
VAddr m_stack_region_end{};
VAddr m_kernel_map_region_start{};
VAddr m_kernel_map_region_end{};
VAddr m_code_region_start{};
VAddr m_code_region_end{};
VAddr m_alias_code_region_start{};
VAddr m_alias_code_region_end{};
KProcessAddress m_address_space_start{};
KProcessAddress m_address_space_end{};
KProcessAddress m_heap_region_start{};
KProcessAddress m_heap_region_end{};
KProcessAddress m_current_heap_end{};
KProcessAddress m_alias_region_start{};
KProcessAddress m_alias_region_end{};
KProcessAddress m_stack_region_start{};
KProcessAddress m_stack_region_end{};
KProcessAddress m_kernel_map_region_start{};
KProcessAddress m_kernel_map_region_end{};
KProcessAddress m_code_region_start{};
KProcessAddress m_code_region_end{};
KProcessAddress m_alias_code_region_start{};
KProcessAddress m_alias_code_region_end{};
size_t m_max_heap_size{};
size_t m_mapped_physical_memory_size{};


@@ -5,9 +5,9 @@
#include <atomic>
#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_page_table_slab_heap.h"
#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -26,23 +26,23 @@ public:
BaseHeap::Initialize(page_allocator, pt_heap);
}
VAddr Allocate() {
return VAddr(BaseHeap::Allocate());
KVirtualAddress Allocate() {
return KVirtualAddress(BaseHeap::Allocate());
}
RefCount GetRefCount(VAddr addr) const {
RefCount GetRefCount(KVirtualAddress addr) const {
return m_pt_heap->GetRefCount(addr);
}
void Open(VAddr addr, int count) {
void Open(KVirtualAddress addr, int count) {
return m_pt_heap->Open(addr, count);
}
bool Close(VAddr addr, int count) {
bool Close(KVirtualAddress addr, int count) {
return m_pt_heap->Close(addr, count);
}
bool IsInPageTableHeap(VAddr addr) const {
bool IsInPageTableHeap(KVirtualAddress addr) const {
return m_pt_heap->IsInRange(addr);
}


@@ -6,8 +6,8 @@
#include <array>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
@@ -45,12 +45,12 @@ public:
this->Initialize(rc);
}
RefCount GetRefCount(VAddr addr) {
RefCount GetRefCount(KVirtualAddress addr) {
ASSERT(this->IsInRange(addr));
return *this->GetRefCountPointer(addr);
}
void Open(VAddr addr, int count) {
void Open(KVirtualAddress addr, int count) {
ASSERT(this->IsInRange(addr));
*this->GetRefCountPointer(addr) += static_cast<RefCount>(count);
@@ -58,7 +58,7 @@ public:
ASSERT(this->GetRefCount(addr) > 0);
}
bool Close(VAddr addr, int count) {
bool Close(KVirtualAddress addr, int count) {
ASSERT(this->IsInRange(addr));
ASSERT(this->GetRefCount(addr) >= count);
@@ -66,7 +66,7 @@ public:
return this->GetRefCount(addr) == 0;
}
bool IsInPageTableHeap(VAddr addr) const {
bool IsInPageTableHeap(KVirtualAddress addr) const {
return this->IsInRange(addr);
}
@@ -81,7 +81,7 @@ private:
}
}
RefCount* GetRefCountPointer(VAddr addr) {
RefCount* GetRefCountPointer(KVirtualAddress addr) {
return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize);
}


@@ -36,8 +36,9 @@ namespace {
* @param owner_process The parent process for the main thread
* @param priority The priority to give the main thread
*/
void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) {
const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority,
KProcessAddress stack_top) {
const KProcessAddress entry_point = owner_process.PageTable().GetCodeRegionStart();
ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1));
KThread* thread = KThread::Create(system.Kernel());
@@ -219,7 +220,7 @@ void KProcess::UnpinThread(KThread* thread) {
KScheduler::SetSchedulerUpdateNeeded(m_kernel);
}
Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
[[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
KScopedLightLock lk(m_state_lock);
@@ -248,7 +249,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
R_SUCCEED();
}
void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
[[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
KScopedLightLock lk(m_state_lock);
@@ -399,8 +400,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
m_memory_usage_capacity =
m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart() +
m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart();
(m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart()) +
(m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart());
break;
default:
@@ -492,9 +493,9 @@ void KProcess::Finalize() {
KSynchronizationObject::Finalize();
}
Result KProcess::CreateThreadLocalRegion(VAddr* out) {
Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
KThreadLocalPage* tlp = nullptr;
VAddr tlr = 0;
KProcessAddress tlr = 0;
// See if we can get a region from a partially used TLP.
{
@@ -543,7 +544,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
R_SUCCEED();
}
Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
KThreadLocalPage* page_to_free = nullptr;
// Release the region.
@@ -551,10 +552,10 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
KScopedSchedulerLock sl{m_kernel};
// Try to find the page in the partially used list.
auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
if (it == m_partially_used_tlp_tree.end()) {
// If we don't find it, it has to be in the fully used list.
it = m_fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
it = m_fully_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
R_UNLESS(it != m_fully_used_tlp_tree.end(), ResultInvalidAddress);
// Release the region.
@@ -591,7 +592,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
R_SUCCEED();
}
bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
bool KProcess::InsertWatchpoint(Core::System& system, KProcessAddress addr, u64 size,
DebugWatchpointType type) {
const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
return wp.type == DebugWatchpointType::None;
@@ -605,7 +606,8 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
watch->end_address = addr + size;
watch->type = type;
for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
page += PageSize) {
m_debug_page_refcounts[page]++;
system.Memory().MarkRegionDebug(page, PageSize, true);
}
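
The watchpoint loops above show how a typed range is walked page by page: the start is aligned down through GetInteger(), and the typed address is then advanced by PageSize. A hedged reduction of that loop, reusing the sketch wrapper and the simplified AlignDown from the earlier examples:

#include <cstddef>

constexpr uint64_t PageSize = 0x1000;

// Counts the pages touched by [addr, addr + size); mirrors the loop shape above.
constexpr size_t CountTouchedPages(KProcessAddress addr, uint64_t size) {
    size_t pages = 0;
    for (KProcessAddress page = AlignDown(GetInteger(addr), PageSize); page < addr + size;
         page += PageSize) {
        ++pages;
    }
    return pages;
}

static_assert(CountTouchedPages(KProcessAddress{0x1FF0}, 0x20) == 2);  // range spans two pages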
@@ -613,7 +615,7 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
return true;
}
bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
bool KProcess::RemoveWatchpoint(Core::System& system, KProcessAddress addr, u64 size,
DebugWatchpointType type) {
const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
@@ -627,7 +629,8 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
watch->end_address = 0;
watch->type = DebugWatchpointType::None;
for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
page += PageSize) {
m_debug_page_refcounts[page]--;
if (!m_debug_page_refcounts[page]) {
system.Memory().MarkRegionDebug(page, PageSize, false);
@@ -637,7 +640,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
return true;
}
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Svc::MemoryPermission permission) {
m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);


@@ -8,7 +8,6 @@
#include <list>
#include <map>
#include <string>
#include "common/common_types.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h"
@@ -16,6 +15,7 @@
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/slab_helpers.h"
@@ -59,8 +59,8 @@ enum class DebugWatchpointType : u8 {
DECLARE_ENUM_FLAG_OPERATORS(DebugWatchpointType);
struct DebugWatchpoint {
VAddr start_address;
VAddr end_address;
KProcessAddress start_address;
KProcessAddress end_address;
DebugWatchpointType type;
};
@@ -135,11 +135,11 @@ public:
return m_handle_table;
}
Result SignalToAddress(VAddr address) {
Result SignalToAddress(KProcessAddress address) {
return m_condition_var.SignalToAddress(address);
}
Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
Result WaitForAddress(Handle handle, KProcessAddress address, u32 tag) {
return m_condition_var.WaitForAddress(handle, address, tag);
}
@@ -147,20 +147,21 @@ public:
return m_condition_var.Signal(cv_key, count);
}
Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
Result WaitConditionVariable(KProcessAddress address, u64 cv_key, u32 tag, s64 ns) {
R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
}
Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
Result SignalAddressArbiter(uint64_t address, Svc::SignalType signal_type, s32 value,
s32 count) {
R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
}
Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
Result WaitAddressArbiter(uint64_t address, Svc::ArbitrationType arb_type, s32 value,
s64 timeout) {
R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}
VAddr GetProcessLocalRegionAddress() const {
KProcessAddress GetProcessLocalRegionAddress() const {
return m_plr_address;
}
@@ -352,7 +353,7 @@ public:
*/
void PrepareForTermination();
void LoadModule(CodeSet code_set, VAddr base_addr);
void LoadModule(CodeSet code_set, KProcessAddress base_addr);
bool IsInitialized() const override {
return m_is_initialized;
@@ -380,26 +381,28 @@ public:
return m_state_lock;
}
Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
[[nodiscard]] Result CreateThreadLocalRegion(VAddr* out);
[[nodiscard]] Result CreateThreadLocalRegion(KProcessAddress* out);
// Frees a used TLS slot identified by the given address
Result DeleteThreadLocalRegion(VAddr addr);
Result DeleteThreadLocalRegion(KProcessAddress addr);
///////////////////////////////////////////////////////////////////////////////////////////////
// Debug watchpoint management
// Attempts to insert a watchpoint into a free slot. Returns false if none are available.
bool InsertWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
bool InsertWatchpoint(Core::System& system, KProcessAddress addr, u64 size,
DebugWatchpointType type);
// Attempts to remove the watchpoint specified by the given parameters.
bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
bool RemoveWatchpoint(Core::System& system, KProcessAddress addr, u64 size,
DebugWatchpointType type);
const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
return m_watchpoints;
@@ -457,7 +460,7 @@ private:
/// Resource limit descriptor for this process
KResourceLimit* m_resource_limit{};
VAddr m_system_resource_address{};
KVirtualAddress m_system_resource_address{};
/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 m_ideal_core = 0;
@@ -485,7 +488,7 @@ private:
KConditionVariable m_condition_var;
/// Address indicating the location of the process' dedicated TLS region.
VAddr m_plr_address = 0;
KProcessAddress m_plr_address = 0;
/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};
@@ -497,7 +500,7 @@ private:
std::list<KSharedMemoryInfo*> m_shared_memory_list;
/// Address of the top of the main thread's stack
VAddr m_main_thread_stack_top{};
KProcessAddress m_main_thread_stack_top{};
/// Size of the main thread's stack
std::size_t m_main_thread_stack_size{};
@@ -527,7 +530,7 @@ private:
std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
std::map<VAddr, u64> m_debug_page_refcounts;
std::map<KProcessAddress, u64> m_debug_page_refcounts;
KThread* m_exception_thread{};
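
The switch of m_debug_page_refcounts to std::map<KProcessAddress, u64> above relies on the address type being ordered. With the defaulted operator<=> in the sketch (the real type provides the equivalent), using it as a map key is straightforward; the names below are illustrative only:

#include <cstdint>
#include <map>

std::map<KProcessAddress, uint64_t> debug_page_refcounts;

void TouchPage(KProcessAddress page) {
    ++debug_page_refcounts[page];  // keys are ordered by the wrapped integer value
}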


@@ -511,7 +511,7 @@ void KScheduler::Reload(KThread* thread) {
auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
cpu_core.SetTlsAddress(thread->GetTlsAddress());
cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress()));
cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0());
cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
cpu_core.ClearExclusiveState();


@@ -6,8 +6,8 @@
namespace Kernel {
Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size,
KMemoryState state, size_t index) {
Result KSessionRequest::SessionMappings::PushMap(KProcessAddress client, KProcessAddress server,
size_t size, KMemoryState state, size_t index) {
// At most 15 buffers of each type (4-bit descriptor counts).
ASSERT(index < ((1ul << 4) - 1) * 3);
@@ -33,20 +33,21 @@ Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, siz
R_SUCCEED();
}
Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size,
KMemoryState state) {
Result KSessionRequest::SessionMappings::PushSend(KProcessAddress client, KProcessAddress server,
size_t size, KMemoryState state) {
ASSERT(m_num_recv == 0);
ASSERT(m_num_exch == 0);
R_RETURN(this->PushMap(client, server, size, state, m_num_send++));
}
Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size,
KMemoryState state) {
Result KSessionRequest::SessionMappings::PushReceive(KProcessAddress client, KProcessAddress server,
size_t size, KMemoryState state) {
ASSERT(m_num_exch == 0);
R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv++));
}
Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size,
Result KSessionRequest::SessionMappings::PushExchange(KProcessAddress client,
KProcessAddress server, size_t size,
KMemoryState state) {
R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++));
}


@@ -26,17 +26,17 @@ public:
class Mapping {
public:
constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) {
constexpr void Set(KProcessAddress c, KProcessAddress s, size_t sz, KMemoryState st) {
m_client_address = c;
m_server_address = s;
m_size = sz;
m_state = st;
}
constexpr VAddr GetClientAddress() const {
constexpr KProcessAddress GetClientAddress() const {
return m_client_address;
}
constexpr VAddr GetServerAddress() const {
constexpr KProcessAddress GetServerAddress() const {
return m_server_address;
}
constexpr size_t GetSize() const {
@@ -47,8 +47,8 @@ public:
}
private:
VAddr m_client_address{};
VAddr m_server_address{};
KProcessAddress m_client_address{};
KProcessAddress m_server_address{};
size_t m_size{};
KMemoryState m_state{};
};
@@ -69,14 +69,17 @@ public:
return m_num_exch;
}
Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state);
Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state);
Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state);
Result PushSend(KProcessAddress client, KProcessAddress server, size_t size,
KMemoryState state);
Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size,
KMemoryState state);
Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size,
KMemoryState state);
VAddr GetSendClientAddress(size_t i) const {
KProcessAddress GetSendClientAddress(size_t i) const {
return GetSendMapping(i).GetClientAddress();
}
VAddr GetSendServerAddress(size_t i) const {
KProcessAddress GetSendServerAddress(size_t i) const {
return GetSendMapping(i).GetServerAddress();
}
size_t GetSendSize(size_t i) const {
@@ -86,10 +89,10 @@ public:
return GetSendMapping(i).GetMemoryState();
}
VAddr GetReceiveClientAddress(size_t i) const {
KProcessAddress GetReceiveClientAddress(size_t i) const {
return GetReceiveMapping(i).GetClientAddress();
}
VAddr GetReceiveServerAddress(size_t i) const {
KProcessAddress GetReceiveServerAddress(size_t i) const {
return GetReceiveMapping(i).GetServerAddress();
}
size_t GetReceiveSize(size_t i) const {
@@ -99,10 +102,10 @@ public:
return GetReceiveMapping(i).GetMemoryState();
}
VAddr GetExchangeClientAddress(size_t i) const {
KProcessAddress GetExchangeClientAddress(size_t i) const {
return GetExchangeMapping(i).GetClientAddress();
}
VAddr GetExchangeServerAddress(size_t i) const {
KProcessAddress GetExchangeServerAddress(size_t i) const {
return GetExchangeMapping(i).GetServerAddress();
}
size_t GetExchangeSize(size_t i) const {
@@ -113,7 +116,8 @@ public:
}
private:
Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index);
Result PushMap(KProcessAddress client, KProcessAddress server, size_t size,
KMemoryState state, size_t index);
const Mapping& GetSendMapping(size_t i) const {
ASSERT(i < m_num_send);
@@ -227,22 +231,25 @@ public:
return m_mappings.GetExchangeCount();
}
Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) {
Result PushSend(KProcessAddress client, KProcessAddress server, size_t size,
KMemoryState state) {
return m_mappings.PushSend(client, server, size, state);
}
Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) {
Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size,
KMemoryState state) {
return m_mappings.PushReceive(client, server, size, state);
}
Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) {
Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size,
KMemoryState state) {
return m_mappings.PushExchange(client, server, size, state);
}
VAddr GetSendClientAddress(size_t i) const {
KProcessAddress GetSendClientAddress(size_t i) const {
return m_mappings.GetSendClientAddress(i);
}
VAddr GetSendServerAddress(size_t i) const {
KProcessAddress GetSendServerAddress(size_t i) const {
return m_mappings.GetSendServerAddress(i);
}
size_t GetSendSize(size_t i) const {
@@ -252,10 +259,10 @@ public:
return m_mappings.GetSendMemoryState(i);
}
VAddr GetReceiveClientAddress(size_t i) const {
KProcessAddress GetReceiveClientAddress(size_t i) const {
return m_mappings.GetReceiveClientAddress(i);
}
VAddr GetReceiveServerAddress(size_t i) const {
KProcessAddress GetReceiveServerAddress(size_t i) const {
return m_mappings.GetReceiveServerAddress(i);
}
size_t GetReceiveSize(size_t i) const {
@@ -265,10 +272,10 @@ public:
return m_mappings.GetReceiveMemoryState(i);
}
VAddr GetExchangeClientAddress(size_t i) const {
KProcessAddress GetExchangeClientAddress(size_t i) const {
return m_mappings.GetExchangeClientAddress(i);
}
VAddr GetExchangeServerAddress(size_t i) const {
KProcessAddress GetExchangeServerAddress(size_t i) const {
return m_mappings.GetExchangeServerAddress(i);
}
size_t GetExchangeSize(size_t i) const {


@@ -76,7 +76,7 @@ void KSharedMemory::Finalize() {
m_resource_limit->Close();
}
Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
Result KSharedMemory::Map(KProcess& target_process, KProcessAddress address, std::size_t map_size,
Svc::MemoryPermission map_perm) {
// Validate the size.
R_UNLESS(m_size == map_size, ResultInvalidSize);
@@ -94,7 +94,8 @@ Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t m
ConvertToKMemoryPermission(map_perm)));
}
Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
Result KSharedMemory::Unmap(KProcess& target_process, KProcessAddress address,
std::size_t unmap_size) {
// Validate the size.
R_UNLESS(m_size == unmap_size, ResultInvalidSize);


@@ -6,11 +6,11 @@
#include <optional>
#include <string>
#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -37,7 +37,7 @@ public:
* @param map_size Size of the shared memory block to map
* @param permissions Memory block map permissions (specified by SVC field)
*/
Result Map(KProcess& target_process, VAddr address, std::size_t map_size,
Result Map(KProcess& target_process, KProcessAddress address, std::size_t map_size,
Svc::MemoryPermission permissions);
/**
@@ -46,7 +46,7 @@ public:
* @param address Address in system memory to unmap shared memory block
* @param unmap_size Size of the shared memory block to unmap
*/
Result Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
Result Unmap(KProcess& target_process, KProcessAddress address, std::size_t unmap_size);
/**
* Gets a pointer to the shared memory block
@@ -79,7 +79,7 @@ private:
std::optional<KPageGroup> m_page_group{};
Svc::MemoryPermission m_owner_permission{};
Svc::MemoryPermission m_user_permission{};
PAddr m_physical_address{};
KPhysicalAddress m_physical_address{};
std::size_t m_size{};
KResourceLimit* m_resource_limit{};
bool m_is_initialized{};


@@ -130,7 +130,7 @@ private:
KBlockInfoSlabHeap m_block_info_heap;
KPageTableSlabHeap m_page_table_heap;
KResourceLimit* m_resource_limit{};
VAddr m_resource_address{};
KVirtualAddress m_resource_address{};
size_t m_resource_size{};
};


@@ -48,8 +48,8 @@ static void ResetThreadContext32(Kernel::KThread::ThreadContext32& context, u32
context.fpscr = 0;
}
static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, VAddr stack_top,
VAddr entry_point, u64 arg) {
static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, u64 stack_top,
u64 entry_point, u64 arg) {
context = {};
context.cpu_registers[0] = arg;
context.cpu_registers[18] = Kernel::KSystemControl::GenerateRandomU64() | 1;
@@ -100,8 +100,8 @@ KThread::KThread(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {}
KThread::~KThread() = default;
Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
s32 virt_core, KProcess* owner, ThreadType type) {
Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top,
s32 prio, s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid.
ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@@ -221,9 +221,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
}
// Initialize thread context.
ResetThreadContext64(m_thread_context_64, user_stack_top, func, arg);
ResetThreadContext32(m_thread_context_32, static_cast<u32>(user_stack_top),
static_cast<u32>(func), static_cast<u32>(arg));
ResetThreadContext64(m_thread_context_64, GetInteger(user_stack_top), GetInteger(func), arg);
ResetThreadContext32(m_thread_context_32, static_cast<u32>(GetInteger(user_stack_top)),
static_cast<u32>(GetInteger(func)), static_cast<u32>(arg));
// Setup the stack parameters.
StackParameters& sp = this->GetStackParameters();
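
As with cpu_core.SetTlsAddress(GetInteger(...)) in the scheduler hunk earlier, ResetThreadContext64 now takes raw u64 values and the typed addresses are unwrapped at the call site: the guest CPU context only ever sees plain integers. A hedged reduction of that boundary (the context struct and field names below are assumptions, not the emulator's actual types):

#include <cstdint>

// Stand-in for a guest CPU context; not the emulator's actual structure.
struct GuestContext64 {
    uint64_t sp{};
    uint64_t pc{};
};

// Typed addresses stop at the emulated-CPU boundary; only integers cross it.
void ResetContext(GuestContext64& ctx, KProcessAddress stack_top, KProcessAddress entry_point) {
    ctx.sp = GetInteger(stack_top);
    ctx.pc = GetInteger(entry_point);
}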
@@ -249,8 +249,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
}
Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
ThreadType type, std::function<void()>&& init_func) {
KProcessAddress user_stack_top, s32 prio, s32 core,
KProcess* owner, ThreadType type,
std::function<void()>&& init_func) {
// Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
@@ -288,8 +289,8 @@ Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thre
}
Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
KProcess* owner) {
uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
s32 virt_core, KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
@@ -951,7 +952,7 @@ void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
m_held_lock_info_list.push_front(*lock_info);
}
KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key,
KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(KProcessAddress address_key,
bool is_kernel_address_key) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
@@ -1087,7 +1088,8 @@ void KThread::RemoveWaiter(KThread* thread) {
}
}
KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) {
KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
bool is_kernel_address_key_) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Get the relevant lock info.


@@ -14,7 +14,6 @@
#include <boost/intrusive/list.hpp>
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
@@ -23,6 +22,7 @@
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_timer_task.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_common.h"
@@ -46,7 +46,7 @@ class KProcess;
class KScheduler;
class KThreadQueue;
using KThreadFunction = VAddr;
using KThreadFunction = KProcessAddress;
enum class ThreadType : u32 {
Main = 0,
@@ -230,9 +230,9 @@ public:
/*
* Returns the Thread Local Storage address of the current thread
* @returns VAddr of the thread's TLS
* @returns Address of the thread's TLS
*/
VAddr GetTlsAddress() const {
KProcessAddress GetTlsAddress() const {
return m_tls_address;
}
@@ -419,8 +419,8 @@ public:
KThreadFunction func, uintptr_t arg, s32 virt_core);
static Result InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
KProcess* owner);
uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
s32 virt_core, KProcess* owner);
static Result InitializeServiceThread(Core::System& system, KThread* thread,
std::function<void()>&& thread_func, s32 prio,
@@ -565,15 +565,15 @@ public:
Result GetThreadContext3(std::vector<u8>& out);
KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) {
KThread* RemoveUserWaiterByKey(bool* out_has_waiters, KProcessAddress key) {
return this->RemoveWaiterByKey(out_has_waiters, key, false);
}
KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) {
KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, KProcessAddress key) {
return this->RemoveWaiterByKey(out_has_waiters, key, true);
}
VAddr GetAddressKey() const {
KProcessAddress GetAddressKey() const {
return m_address_key;
}
@@ -591,14 +591,14 @@ public:
// to cope with arbitrary host pointers making their way
// into things.
void SetUserAddressKey(VAddr key, u32 val) {
void SetUserAddressKey(KProcessAddress key, u32 val) {
ASSERT(m_waiting_lock_info == nullptr);
m_address_key = key;
m_address_key_value = val;
m_is_kernel_address_key = false;
}
void SetKernelAddressKey(VAddr key) {
void SetKernelAddressKey(KProcessAddress key) {
ASSERT(m_waiting_lock_info == nullptr);
m_address_key = key;
m_is_kernel_address_key = true;
@@ -637,12 +637,13 @@ public:
return m_argument;
}
VAddr GetUserStackTop() const {
KProcessAddress GetUserStackTop() const {
return m_stack_top;
}
private:
KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key);
KThread* RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
bool is_kernel_address_key);
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
@@ -695,12 +696,13 @@ private:
void IncreaseBasePriority(s32 priority);
Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
Result Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
s32 virt_core, KProcess* owner, ThreadType type);
static Result InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
ThreadType type, std::function<void()>&& init_func);
KProcessAddress user_stack_top, s32 prio, s32 core,
KProcess* owner, ThreadType type,
std::function<void()>&& init_func);
// For core KThread implementation
ThreadContext32 m_thread_context_32{};
@@ -749,7 +751,8 @@ public:
public:
explicit LockWithPriorityInheritanceInfo(KernelCore&) {}
static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel, VAddr address_key,
static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel,
KProcessAddress address_key,
bool is_kernel_address_key) {
// Create a new lock info.
auto* new_lock = LockWithPriorityInheritanceInfo::Allocate(kernel);
@@ -797,7 +800,7 @@ public:
return m_tree;
}
VAddr GetAddressKey() const {
KProcessAddress GetAddressKey() const {
return m_address_key;
}
bool GetIsKernelAddressKey() const {
@@ -812,7 +815,7 @@ public:
private:
LockWithPriorityInheritanceThreadTree m_tree{};
VAddr m_address_key{};
KProcessAddress m_address_key{};
KThread* m_owner{};
u32 m_waiter_count{};
bool m_is_kernel_address_key{};
@@ -827,7 +830,8 @@ public:
}
void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info);
LockWithPriorityInheritanceInfo* FindHeldLock(VAddr address_key, bool is_kernel_address_key);
LockWithPriorityInheritanceInfo* FindHeldLock(KProcessAddress address_key,
bool is_kernel_address_key);
private:
using LockWithPriorityInheritanceInfoList =
@@ -839,11 +843,11 @@ private:
KAffinityMask m_physical_affinity_mask{};
u64 m_thread_id{};
std::atomic<s64> m_cpu_time{};
VAddr m_address_key{};
KProcessAddress m_address_key{};
KProcess* m_parent{};
VAddr m_kernel_stack_top{};
KVirtualAddress m_kernel_stack_top{};
u32* m_light_ipc_data{};
VAddr m_tls_address{};
KProcessAddress m_tls_address{};
KLightLock m_activity_pause_lock;
s64 m_schedule_count{};
s64 m_last_scheduled_tick{};
@@ -887,16 +891,16 @@ private:
// For debugging
std::vector<KSynchronizationObject*> m_wait_objects_for_debugging{};
VAddr m_mutex_wait_address_for_debugging{};
KProcessAddress m_mutex_wait_address_for_debugging{};
ThreadWaitReasonForDebugging m_wait_reason_for_debugging{};
uintptr_t m_argument{};
VAddr m_stack_top{};
KProcessAddress m_stack_top{};
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
u32 value) {
void SetConditionVariable(ConditionVariableThreadTree* tree, KProcessAddress address,
u64 cv_key, u32 value) {
ASSERT(m_waiting_lock_info == nullptr);
m_condvar_tree = tree;
m_condvar_key = cv_key;

View File

@@ -37,7 +37,7 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
Result KThreadLocalPage::Finalize() {
// Get the physical address of the page.
const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
const KPhysicalAddress phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
ASSERT(phys_addr);
// Unmap the page.
@@ -49,7 +49,7 @@ Result KThreadLocalPage::Finalize() {
return ResultSuccess;
}
VAddr KThreadLocalPage::Reserve() {
KProcessAddress KThreadLocalPage::Reserve() {
for (size_t i = 0; i < m_is_region_free.size(); i++) {
if (m_is_region_free[i]) {
m_is_region_free[i] = false;
@@ -60,7 +60,7 @@ VAddr KThreadLocalPage::Reserve() {
return 0;
}
void KThreadLocalPage::Release(VAddr addr) {
void KThreadLocalPage::Release(KProcessAddress addr) {
m_is_region_free[this->GetRegionIndex(addr)] = true;
}

View File

@@ -27,19 +27,20 @@ public:
static_assert(RegionsPerPage > 0);
public:
constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) {
constexpr explicit KThreadLocalPage(KernelCore&, KProcessAddress addr = {})
: m_virt_addr(addr) {
m_is_region_free.fill(true);
}
constexpr VAddr GetAddress() const {
constexpr KProcessAddress GetAddress() const {
return m_virt_addr;
}
Result Initialize(KernelCore& kernel, KProcess* process);
Result Finalize();
VAddr Reserve();
void Release(VAddr addr);
KProcessAddress Reserve();
void Release(KProcessAddress addr);
bool IsAllUsed() const {
return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(),
@@ -60,7 +61,7 @@ public:
}
public:
using RedBlackKeyType = VAddr;
using RedBlackKeyType = KProcessAddress;
static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) {
return v;
@@ -72,8 +73,8 @@ public:
template <typename T>
requires(std::same_as<T, KThreadLocalPage> || std::same_as<T, RedBlackKeyType>)
static constexpr int Compare(const T& lhs, const KThreadLocalPage& rhs) {
const VAddr lval = GetRedBlackKey(lhs);
const VAddr rval = GetRedBlackKey(rhs);
const KProcessAddress lval = GetRedBlackKey(lhs);
const KProcessAddress rval = GetRedBlackKey(rhs);
if (lval < rval) {
return -1;
@@ -85,22 +86,22 @@ public:
}
private:
constexpr VAddr GetRegionAddress(size_t i) const {
constexpr KProcessAddress GetRegionAddress(size_t i) const {
return this->GetAddress() + i * Svc::ThreadLocalRegionSize;
}
constexpr bool Contains(VAddr addr) const {
constexpr bool Contains(KProcessAddress addr) const {
return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
}
constexpr size_t GetRegionIndex(VAddr addr) const {
ASSERT(Common::IsAligned(addr, Svc::ThreadLocalRegionSize));
constexpr size_t GetRegionIndex(KProcessAddress addr) const {
ASSERT(Common::IsAligned(GetInteger(addr), Svc::ThreadLocalRegionSize));
ASSERT(this->Contains(addr));
return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize;
}
private:
VAddr m_virt_addr{};
KProcessAddress m_virt_addr{};
KProcess* m_owner{};
KernelCore* m_kernel{};
std::array<bool, RegionsPerPage> m_is_region_free{};

View File

@@ -13,7 +13,7 @@ KTransferMemory::KTransferMemory(KernelCore& kernel)
KTransferMemory::~KTransferMemory() = default;
Result KTransferMemory::Initialize(VAddr address, std::size_t size,
Result KTransferMemory::Initialize(KProcessAddress address, std::size_t size,
Svc::MemoryPermission owner_perm) {
// Set members.
m_owner = GetCurrentProcessPointer(m_kernel);

View File

@@ -26,7 +26,7 @@ public:
explicit KTransferMemory(KernelCore& kernel);
~KTransferMemory() override;
Result Initialize(VAddr address, std::size_t size, Svc::MemoryPermission owner_perm);
Result Initialize(KProcessAddress address, std::size_t size, Svc::MemoryPermission owner_perm);
void Finalize() override;
@@ -44,7 +44,7 @@ public:
return m_owner;
}
VAddr GetSourceAddress() const {
KProcessAddress GetSourceAddress() const {
return m_address;
}
@@ -54,7 +54,7 @@ public:
private:
KProcess* m_owner{};
VAddr m_address{};
KProcessAddress m_address{};
Svc::MemoryPermission m_owner_perm{};
size_t m_size{};
bool m_is_initialized{};

View File

@@ -0,0 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/typed_address.h"
namespace Kernel {
using KPhysicalAddress = Common::PhysicalAddress;
using KVirtualAddress = Common::VirtualAddress;
using KProcessAddress = Common::ProcessAddress;
} // namespace Kernel
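
The new k_typed_address.h above only maps the kernel's address names onto the strong address types declared in common/typed_address.h, which is not part of this diff. As a rough, illustrative sketch (the names and details below are assumptions, not the actual common/typed_address.h implementation), such a type wraps a u64, keeps pointer-style arithmetic, but refuses to convert back to a raw integer implicitly:

// Minimal sketch of a strong address type; illustrative only, the real
// implementation lives in common/typed_address.h and is not shown in this diff.
#include <compare>
#include <cstdint>

namespace Common {

template <typename Tag>
class TypedAddress {
public:
    constexpr TypedAddress() = default;
    constexpr TypedAddress(std::uint64_t address) : m_address{address} {}

    // Pointer-style arithmetic keeps the strong type (e.g. addr + size above).
    constexpr TypedAddress operator+(std::uint64_t offset) const {
        return TypedAddress{m_address + offset};
    }
    // Subtracting two addresses yields a plain offset.
    constexpr std::uint64_t operator-(TypedAddress rhs) const {
        return m_address - rhs.m_address;
    }
    constexpr auto operator<=>(const TypedAddress&) const = default;

    // No implicit conversion back to an integer; callers must be explicit.
    constexpr std::uint64_t GetValue() const {
        return m_address;
    }

private:
    std::uint64_t m_address{};
};

// Free helper matching the GetInteger() calls added throughout this diff.
template <typename Tag>
constexpr std::uint64_t GetInteger(TypedAddress<Tag> address) {
    return address.GetValue();
}

struct PhysicalAddressTag {};
struct VirtualAddressTag {};
struct ProcessAddressTag {};

using PhysicalAddress = TypedAddress<PhysicalAddressTag>;
using VirtualAddress = TypedAddress<VirtualAddressTag>;
using ProcessAddress = TypedAddress<ProcessAddressTag>;

} // namespace Common

With a shape like that, helpers that still take raw integers (Common::IsAligned, the memory-region trees, the ARM interface) need the value extracted explicitly, which is why call sites such as ASSERT(Common::IsAligned(GetInteger(addr), Svc::ThreadLocalRegionSize)) appear in the hunks above, while construction from an integer and address arithmetic keep working unchanged.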

View File

@@ -271,9 +271,9 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) {
void InitializeResourceManagers(KernelCore& kernel, KVirtualAddress address, size_t size) {
// Ensure that the buffer is suitable for our use.
ASSERT(Common::IsAligned(address, PageSize));
ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT(Common::IsAligned(size, PageSize));
// Ensure that we have space for our reference counts.
@@ -462,29 +462,30 @@ struct KernelCore::Impl {
KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
// Save start and end for ease of use.
const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase;
const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd;
constexpr KVirtualAddress code_start_virt_addr = KernelVirtualAddressCodeBase;
constexpr KVirtualAddress code_end_virt_addr = KernelVirtualAddressCodeEnd;
// Setup the containing kernel region.
constexpr size_t KernelRegionSize = 1_GiB;
constexpr size_t KernelRegionAlign = 1_GiB;
constexpr VAddr kernel_region_start =
Common::AlignDown(code_start_virt_addr, KernelRegionAlign);
constexpr KVirtualAddress kernel_region_start =
Common::AlignDown(GetInteger(code_start_virt_addr), KernelRegionAlign);
size_t kernel_region_size = KernelRegionSize;
if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
kernel_region_size = KernelVirtualAddressSpaceEnd - GetInteger(kernel_region_start);
}
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));
GetInteger(kernel_region_start), kernel_region_size, KMemoryRegionType_Kernel));
// Setup the code region.
constexpr size_t CodeRegionAlign = PageSize;
constexpr VAddr code_region_start =
Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
constexpr KVirtualAddress code_region_start =
Common::AlignDown(GetInteger(code_start_virt_addr), CodeRegionAlign);
constexpr KVirtualAddress code_region_end =
Common::AlignUp(GetInteger(code_end_virt_addr), CodeRegionAlign);
constexpr size_t code_region_size = code_region_end - code_region_start;
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
code_region_start, code_region_size, KMemoryRegionType_KernelCode));
GetInteger(code_region_start), code_region_size, KMemoryRegionType_KernelCode));
// Setup board-specific device physical regions.
Init::SetupDevicePhysicalMemoryRegions(*memory_layout);
@@ -520,11 +521,11 @@ struct KernelCore::Impl {
ASSERT(misc_region_size > 0);
// Setup the misc region.
const VAddr misc_region_start =
const KVirtualAddress misc_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
GetInteger(misc_region_start), misc_region_size, KMemoryRegionType_KernelMisc));
// Determine if we'll use extra thread resources.
const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
@@ -532,11 +533,11 @@ struct KernelCore::Impl {
// Setup the stack region.
constexpr size_t StackRegionSize = 14_MiB;
constexpr size_t StackRegionAlign = KernelAslrAlignment;
const VAddr stack_region_start =
const KVirtualAddress stack_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack));
// Determine the size of the resource region.
const size_t resource_region_size =
@@ -548,29 +549,29 @@ struct KernelCore::Impl {
ASSERT(slab_region_size <= resource_region_size);
// Setup the slab region.
const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase;
const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size;
const PAddr slab_start_phys_addr = code_end_phys_addr;
const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
const KPhysicalAddress code_start_phys_addr = KernelPhysicalAddressCodeBase;
const KPhysicalAddress code_end_phys_addr = code_start_phys_addr + code_region_size;
const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr;
const KPhysicalAddress slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
constexpr size_t SlabRegionAlign = KernelAslrAlignment;
const size_t slab_region_needed_size =
Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
const VAddr slab_region_start =
Common::AlignUp(GetInteger(code_end_phys_addr) + slab_region_size, SlabRegionAlign) -
Common::AlignDown(GetInteger(code_end_phys_addr), SlabRegionAlign);
const KVirtualAddress slab_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
(code_end_phys_addr % SlabRegionAlign);
(GetInteger(code_end_phys_addr) % SlabRegionAlign);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));
GetInteger(slab_region_start), slab_region_size, KMemoryRegionType_KernelSlab));
// Setup the temp region.
constexpr size_t TempRegionSize = 128_MiB;
constexpr size_t TempRegionAlign = KernelAslrAlignment;
const VAddr temp_region_start =
const KVirtualAddress temp_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
KMemoryRegionType_KernelTemp));
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp));
// Automatically map in devices that have auto-map attributes.
for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
@@ -596,35 +597,37 @@ struct KernelCore::Impl {
region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
// Create a virtual pair region and insert it into the tree.
const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
const KPhysicalAddress map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
const size_t map_size =
Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
const VAddr map_virt_addr =
Common::AlignUp(region.GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
const KVirtualAddress map_virt_addr =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
region.SetPairAddress(GetInteger(map_virt_addr) + region.GetAddress() -
GetInteger(map_phys_addr));
}
Init::SetupDramPhysicalMemoryRegions(*memory_layout);
// Insert a physical region for the kernel code region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));
GetInteger(code_start_phys_addr), code_region_size, KMemoryRegionType_DramKernelCode));
// Insert a physical region for the kernel slab region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));
GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
// Determine size available for kernel page table heaps, requiring > 8 MB.
const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
ASSERT(page_table_heap_size / 4_MiB > 2);
// Insert a physical region for the kernel page table heap region
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
GetInteger(slab_end_phys_addr), page_table_heap_size,
KMemoryRegionType_DramKernelPtHeap));
// All DRAM regions that we haven't tagged by this point will be mapped under the linear
// mapping. Tag them.
@@ -646,20 +649,21 @@ struct KernelCore::Impl {
// Setup the linear mapping region.
constexpr size_t LinearRegionAlign = 1_GiB;
const PAddr aligned_linear_phys_start =
const KPhysicalAddress aligned_linear_phys_start =
Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign);
const size_t linear_region_size =
Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
aligned_linear_phys_start;
const VAddr linear_region_start =
GetInteger(aligned_linear_phys_start);
const KVirtualAddress linear_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
const u64 linear_region_phys_to_virt_diff =
GetInteger(linear_region_start) - GetInteger(aligned_linear_phys_start);
// Map and create regions for all the linearly-mapped data.
{
PAddr cur_phys_addr = 0;
KPhysicalAddress cur_phys_addr = 0;
u64 cur_size = 0;
for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
@@ -678,15 +682,16 @@ struct KernelCore::Impl {
cur_size = region.GetSize();
}
const VAddr region_virt_addr =
const KVirtualAddress region_virt_addr =
region.GetAddress() + linear_region_phys_to_virt_diff;
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
region_virt_addr, region.GetSize(),
GetInteger(region_virt_addr), region.GetSize(),
GetTypeForVirtualLinearMapping(region.GetType())));
region.SetPairAddress(region_virt_addr);
region.SetPairAddress(GetInteger(region_virt_addr));
KMemoryRegion* virt_region =
memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
memory_layout->GetVirtualMemoryRegionTree().FindModifiable(
GetInteger(region_virt_addr));
ASSERT(virt_region != nullptr);
virt_region->SetPairAddress(region.GetAddress());
}
@@ -694,10 +699,11 @@ struct KernelCore::Impl {
// Insert regions for the initial page table region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
GetInteger(resource_end_phys_addr), KernelPageTableHeapSize,
KMemoryRegionType_DramKernelInitPt));
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
KMemoryRegionType_VirtualDramKernelInitPt));
GetInteger(resource_end_phys_addr) + linear_region_phys_to_virt_diff,
KernelPageTableHeapSize, KMemoryRegionType_VirtualDramKernelInitPt));
// All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
// some pool partition. Tag them.
@@ -969,12 +975,12 @@ void KernelCore::InvalidateAllInstructionCaches() {
}
}
void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
void KernelCore::InvalidateCpuInstructionCacheRange(KProcessAddress addr, std::size_t size) {
for (auto& physical_core : impl->cores) {
if (!physical_core->IsInitialized()) {
continue;
}
physical_core->ArmInterface().InvalidateCacheRange(addr, size);
physical_core->ArmInterface().InvalidateCacheRange(GetInteger(addr), size);
}
}
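
One pattern in the kernel.cpp changes above is worth spelling out: the linear-mapping setup derives a single physical-to-virtual offset and then reuses it to compute every linearly mapped region's pair address. A minimal sketch of that arithmetic, with made-up bases (the real values come from the memory layout trees and are not shown in this diff):

// Hypothetical bases chosen only to make the pair-address arithmetic concrete.
#include <cassert>
#include <cstdint>

int main() {
    constexpr std::uint64_t aligned_linear_phys_start = 0x80000000;   // assumed DRAM base
    constexpr std::uint64_t linear_region_start = 0xFFFFF00000000000; // assumed virtual base

    // Mirrors linear_region_phys_to_virt_diff in the diff above.
    constexpr std::uint64_t phys_to_virt_diff =
        linear_region_start - aligned_linear_phys_start;

    // Each linearly mapped region's virtual pair is its physical address shifted
    // by the shared offset, as done via SetPairAddress above.
    constexpr std::uint64_t region_phys = 0x81234000;
    constexpr std::uint64_t region_virt = region_phys + phys_to_virt_diff;

    // The offset from the respective region starts is preserved.
    assert(region_virt - linear_region_start == region_phys - aligned_linear_phys_start);
    return 0;
}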

View File

@@ -14,6 +14,7 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/svc_common.h"
namespace Core {
@@ -185,7 +186,7 @@ public:
void InvalidateAllInstructionCaches();
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
void InvalidateCpuInstructionCacheRange(KProcessAddress addr, std::size_t size);
/// Registers all kernel objects with the global emulation state, this is purely for tracking
/// leaks after emulation has been shutdown.

View File

@@ -6,6 +6,7 @@
#include <array>
#include "common/common_types.h"
#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -14,7 +15,4 @@ constexpr std::size_t PageSize{1 << PageBits};
using Page = std::array<u8, PageSize>;
using KPhysicalAddress = PAddr;
using KProcessAddress = VAddr;
} // namespace Kernel

View File

@@ -37,7 +37,7 @@ constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
} // namespace
// Wait for an address (via Address Arbiter)
Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value,
Result WaitForAddress(Core::System& system, u64 address, ArbitrationType arb_type, s32 value,
s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
address, arb_type, value, timeout_ns);
@@ -68,7 +68,7 @@ Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_t
}
// Signals to an address (via Address Arbiter)
Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_type, s32 value,
Result SignalToAddress(Core::System& system, u64 address, SignalType signal_type, s32 value,
s32 count) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
address, signal_type, value, count);
@@ -82,12 +82,12 @@ Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_ty
.SignalAddressArbiter(address, signal_type, value, count));
}
Result WaitForAddress64(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value,
Result WaitForAddress64(Core::System& system, u64 address, ArbitrationType arb_type, s32 value,
s64 timeout_ns) {
R_RETURN(WaitForAddress(system, address, arb_type, value, timeout_ns));
}
Result SignalToAddress64(Core::System& system, VAddr address, SignalType signal_type, s32 value,
Result SignalToAddress64(Core::System& system, u64 address, SignalType signal_type, s32 value,
s32 count) {
R_RETURN(SignalToAddress(system, address, signal_type, value, count));
}
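
Note that the SVC wrappers here and in the following files move from VAddr to plain u64 rather than to KProcessAddress; presumably these values arrive as raw guest register contents, and the typed address is only adopted once they reach kernel objects. A hedged sketch of that hand-off (all names below are stand-ins for illustration, not the actual yuzu call chain):

#include <cstdint>

using u64 = std::uint64_t;

// Stand-in strong address type; see the sketch after k_typed_address.h above.
struct KProcessAddress {
    constexpr KProcessAddress(u64 value) : raw{value} {}
    u64 raw;
};

// Stand-in kernel object that now speaks KProcessAddress.
struct KAddressArbiter {
    int SignalToAddress(KProcessAddress addr, int value, int count) {
        // A real arbiter would wake up to `count` waiters registered on `addr`;
        // this stand-in just consumes its arguments.
        (void)addr;
        (void)value;
        (void)count;
        return 0;
    }
};

// The SVC wrapper keeps the raw u64 handed over from guest registers; the
// conversion to KProcessAddress happens implicitly at the call below, mirroring
// SignalToAddress in the hunk above.
int SignalToAddressSvc(KAddressArbiter& arbiter, u64 address, int value, int count) {
    return arbiter.SignalToAddress(address, value, count);
}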

View File

@@ -29,7 +29,7 @@ constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(MemoryPermission perm)
} // namespace
Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, uint64_t size) {
Result CreateCodeMemory(Core::System& system, Handle* out, u64 address, uint64_t size) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
// Get kernel instance.
@@ -64,7 +64,7 @@ Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, uint64
}
Result ControlCodeMemory(Core::System& system, Handle code_memory_handle,
CodeMemoryOperation operation, VAddr address, uint64_t size,
CodeMemoryOperation operation, u64 address, uint64_t size,
MemoryPermission perm) {
LOG_TRACE(Kernel_SVC,

View File

@@ -11,7 +11,7 @@
namespace Kernel::Svc {
/// Wait process wide key atomic
Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
Result WaitProcessWideKeyAtomic(Core::System& system, u64 address, u64 cv_key, u32 tag,
s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
cv_key, tag, timeout_ns);
@@ -43,7 +43,7 @@ Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_ke
}
/// Signal process wide key
void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
void SignalProcessWideKey(Core::System& system, u64 cv_key, s32 count) {
LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
// Signal the condition variable.

View File

@@ -8,7 +8,7 @@
namespace Kernel::Svc {
/// Used to output a message on a debug hardware unit - does nothing on a retail unit
Result OutputDebugString(Core::System& system, VAddr address, u64 len) {
Result OutputDebugString(Core::System& system, u64 address, u64 len) {
R_SUCCEED_IF(len == 0);
std::string str(len, '\0');

View File

@@ -20,7 +20,7 @@ void Break(Core::System& system, BreakReason reason, u64 info1, u64 info2) {
bool has_dumped_buffer{};
std::vector<u8> debug_buffer;
const auto handle_debug_buffer = [&](VAddr addr, u64 sz) {
const auto handle_debug_buffer = [&](u64 addr, u64 sz) {
if (sz == 0 || addr == 0 || has_dumped_buffer) {
return;
}

View File

@@ -54,7 +54,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::AliasRegionAddress:
*result = process->PageTable().GetAliasRegionStart();
*result = GetInteger(process->PageTable().GetAliasRegionStart());
R_SUCCEED();
case InfoType::AliasRegionSize:
@@ -62,7 +62,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::HeapRegionAddress:
*result = process->PageTable().GetHeapRegionStart();
*result = GetInteger(process->PageTable().GetHeapRegionStart());
R_SUCCEED();
case InfoType::HeapRegionSize:
@@ -70,7 +70,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::AslrRegionAddress:
*result = process->PageTable().GetAliasCodeRegionStart();
*result = GetInteger(process->PageTable().GetAliasCodeRegionStart());
R_SUCCEED();
case InfoType::AslrRegionSize:
@@ -78,7 +78,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::StackRegionAddress:
*result = process->PageTable().GetStackRegionStart();
*result = GetInteger(process->PageTable().GetStackRegionStart());
R_SUCCEED();
case InfoType::StackRegionSize:
@@ -107,7 +107,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::UserExceptionContextAddress:
*result = process->GetProcessLocalRegionAddress();
*result = GetInteger(process->GetProcessLocalRegionAddress());
R_SUCCEED();
case InfoType::TotalNonSystemMemorySize:

View File

@@ -17,7 +17,7 @@ Result SendSyncRequest(Core::System& system, Handle handle) {
GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KClientSession>(handle);
R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}", handle);
R_RETURN(session->SendSyncRequest());
}

View File

@@ -9,7 +9,7 @@
namespace Kernel::Svc {
/// Attempts to lock a mutex
Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
Result ArbitrateLock(Core::System& system, Handle thread_handle, u64 address, u32 tag) {
LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
thread_handle, address, tag);
@@ -21,7 +21,7 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
}
/// Unlock a mutex
Result ArbitrateUnlock(Core::System& system, VAddr address) {
Result ArbitrateUnlock(Core::System& system, u64 address) {
LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
// Validate the input address.

View File

@@ -22,15 +22,14 @@ constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
// Checks if address + size is greater than the given address
// This can return false if the size causes an overflow of a 64-bit type
// or if the given size is zero.
constexpr bool IsValidAddressRange(VAddr address, u64 size) {
constexpr bool IsValidAddressRange(u64 address, u64 size) {
return address + size > address;
}
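
The IsValidAddressRange helper above rejects ranges whose end would wrap around the 64-bit address space, and zero-sized ranges along with them. A small self-checking illustration of that function (the sample values are arbitrary):

#include <cstdint>

constexpr bool IsValidAddressRange(std::uint64_t address, std::uint64_t size) {
    return address + size > address;
}

// A well-formed range passes; a range that wraps past 2^64 or has zero size does not.
static_assert(IsValidAddressRange(0x1000, 0x2000));
static_assert(!IsValidAddressRange(0xFFFFFFFFFFFFF000, 0x2000));
static_assert(!IsValidAddressRange(0x1000, 0));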
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
u64 size) {
Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
R_THROW(ResultInvalidAddress);
@@ -99,7 +98,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAd
} // namespace
Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, MemoryPermission perm) {
Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) {
LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X}", address, size,
perm);
@@ -120,7 +119,7 @@ Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, Memory
R_RETURN(page_table.SetMemoryPermission(address, size, perm));
}
Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attr) {
Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask, u32 attr) {
LOG_DEBUG(Kernel_SVC,
"called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
size, mask, attr);
@@ -145,7 +144,7 @@ Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mas
}
/// Maps a memory range into a different range.
Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
Result MapMemory(Core::System& system, u64 dst_addr, u64 src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
@@ -160,7 +159,7 @@ Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size)
}
/// Unmaps a region that was previously mapped with svcMapMemory
Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
Result UnmapMemory(Core::System& system, u64 dst_addr, u64 src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);

View File

@@ -8,7 +8,7 @@
namespace Kernel::Svc {
/// Set the process heap to a given size. It can both extend and shrink the heap.
Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
Result SetHeapSize(Core::System& system, u64* out_address, u64 size) {
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
// Validate size.
@@ -20,7 +20,7 @@ Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
}
/// Maps memory at a desired address
Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
Result MapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -69,7 +69,7 @@ Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
}
/// Unmaps memory previously mapped via MapPhysicalMemory
Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
Result UnmapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {

Some files were not shown because too many files have changed in this diff.