Compare commits

...

4 Commits

Author        SHA1        Message                                                           Date
David Marcec  aa31decea8  Fixed crash due to unmapped memory                                2018-12-03 01:58:39 +11:00
David Marcec  26705ba6ad  Fixed slight error with MapPhysicalMemory, added error messages   2018-12-01 17:46:45 +11:00
David Marcec  6714dd3381  Documented system_resource_size                                   2018-12-01 16:05:24 +11:00
David Marcec  ba079516cf  Implemented MapPhysicalMemory & UnmapPhysicalMemory               2018-12-01 16:03:18 +11:00
8 changed files with 431 additions and 66 deletions

View File

@@ -63,6 +63,10 @@ u32 ProgramMetadata::GetMainThreadStackSize() const {
return npdm_header.main_stack_size;
}
u32 ProgramMetadata::GetSystemResourceSize() const {
return npdm_header.system_resource_size;
}
u64 ProgramMetadata::GetTitleID() const {
return aci_header.title_id;
}

View File

@@ -50,6 +50,7 @@ public:
u32 GetMainThreadStackSize() const;
u64 GetTitleID() const;
u64 GetFilesystemPermissions() const;
u32 GetSystemResourceSize() const;
void Print() const;
@@ -68,7 +69,8 @@ private:
u8 reserved_3;
u8 main_thread_priority;
u8 main_thread_cpu;
std::array<u8, 8> reserved_4;
std::array<u8, 4> reserved_4;
u32 system_resource_size;
u32_le process_category;
u32_le main_stack_size;
std::array<u8, 0x10> application_name;
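
The layout change above carves the new u32 system_resource_size out of what was an 8-byte reserved field, so the surrounding header should keep its original size. A minimal sketch verifying that invariant (OldLayout and NewLayout are illustrative names, not types from the PR):

#include <array>
#include <cstdint>

// Before: 8 reserved bytes. After: 4 reserved bytes plus a 4-byte field.
struct OldLayout {
    std::array<std::uint8_t, 8> reserved_4;
};

struct NewLayout {
    std::array<std::uint8_t, 4> reserved_4;
    std::uint32_t system_resource_size; // lands at offset 4, no padding added
};

static_assert(sizeof(OldLayout) == sizeof(NewLayout),
              "Splitting the reserved field must not change the header size");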

View File

@@ -45,6 +45,7 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) {
}
void Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
system_resource_size = metadata.GetSystemResourceSize();
program_id = metadata.GetTitleID();
is_64bit_process = metadata.Is64BitProgram();
vm_manager.Reset(metadata.GetAddressSpaceType());

View File

@@ -170,6 +170,12 @@ public:
return program_id;
}
/// Gets the amount of extra system resources this process can allocate, which is
/// typically passed to MapPhysicalMemory
u32 GetSystemResourceSize() const {
return system_resource_size;
}
/// Gets the resource limit descriptor for this process
ResourceLimit& GetResourceLimit() {
return *resource_limit;
@@ -279,6 +285,9 @@ private:
/// Title ID corresponding to the process
u64 program_id;
/// The amount of extra system resources this process can allocate, typically passed to MapPhysicalMemory
u32 system_resource_size = 0;
/// Resource limit descriptor for this process
SharedPtr<ResourceLimit> resource_limit;

View File

@@ -38,31 +38,6 @@
namespace Kernel {
namespace {
// Checks if address + size is greater than the given address
// This can return false if the size causes an overflow of a 64-bit type
// or if the given size is zero.
constexpr bool IsValidAddressRange(VAddr address, u64 size) {
return address + size > address;
}
// Checks if a given address range lies within a larger address range.
constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin,
VAddr address_range_end) {
const VAddr end_address = address + size - 1;
return address_range_begin <= address && end_address <= address_range_end - 1;
}
bool IsInsideAddressSpace(const VMManager& vm, VAddr address, u64 size) {
return IsInsideAddressRange(address, size, vm.GetAddressSpaceBaseAddress(),
vm.GetAddressSpaceEndAddress());
}
bool IsInsideNewMapRegion(const VMManager& vm, VAddr address, u64 size) {
return IsInsideAddressRange(address, size, vm.GetNewMapRegionBaseAddress(),
vm.GetNewMapRegionEndAddress());
}
// 8 GiB
constexpr u64 MAIN_MEMORY_SIZE = 0x200000000;
@@ -104,16 +79,16 @@ ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_add
return ERR_INVALID_ADDRESS_STATE;
}
if (!IsInsideAddressSpace(vm_manager, src_addr, size)) {
if (!vm_manager.IsInsideAddressSpace(src_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
"Source is not inside the address space, addr=0x{:016X}, size=0x{:016X}",
src_addr, size);
return ERR_INVALID_ADDRESS_STATE;
}
if (!IsInsideNewMapRegion(vm_manager, dst_addr, size)) {
if (!vm_manager.IsInsideNewMapRegion(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination is not within the new map region, addr=0x{:016X}, size=0x{:016X}",
"Destination is not inside the new map region, addr=0x{:016X}, size=0x{:016X}",
dst_addr, size);
return ERR_INVALID_MEMORY_RANGE;
}
@@ -139,6 +114,7 @@ ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_add
return RESULT_SUCCESS;
}
} // namespace
enum class ResourceLimitValueType {
CurrentValue,
@@ -171,7 +147,6 @@ ResultVal<s64> RetrieveResourceLimitValue(Handle resource_limit, u32 resource_ty
return MakeResult(resource_limit_object->GetMaxResourceValue(type));
}
} // Anonymous namespace
/// Sets the process heap to a given size. It can both extend and shrink the heap.
static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
@@ -231,7 +206,7 @@ static ResultCode SetMemoryPermission(VAddr addr, u64 size, u32 prot) {
auto* const current_process = Core::CurrentProcess();
auto& vm_manager = current_process->VMManager();
if (!IsInsideAddressSpace(vm_manager, addr, size)) {
if (!vm_manager.IsInsideAddressSpace(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
size);
@@ -245,8 +220,8 @@ static ResultCode SetMemoryPermission(VAddr addr, u64 size, u32 prot) {
}
LOG_WARNING(Kernel_SVC, "Uniformity check on protected memory is not implemented.");
// TODO: Performs a uniformity check to make sure only protected memory is changed (it doesn't
// make sense to allow changing permissions on kernel memory itself, etc).
// TODO: Performs a uniformity check to make sure only protected memory is changed (it
// doesn't make sense to allow changing permissions on kernel memory itself, etc).
const auto converted_permissions = SharedMemory::ConvertPermissions(permission);
@@ -620,10 +595,10 @@ static void Break(u32 reason, u64 info1, u64 info2) {
}
if (!break_reason.signal_debugger) {
LOG_CRITICAL(
Debug_Emulated,
"Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
reason, info1, info2);
LOG_CRITICAL(Debug_Emulated,
"Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, "
"info2=0x{:016X}",
reason, info1, info2);
handle_debug_buffer(info1, info2);
ASSERT(false);
@@ -672,7 +647,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
NewMapRegionBaseAddr = 14,
NewMapRegionSize = 15,
// 3.0.0+
IsVirtualAddressMemoryEnabled = 16,
SystemResourceSize = 16,
PersonalMmHeapUsage = 17,
TitleId = 18,
// 4.0.0+
@@ -741,8 +716,11 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
case GetInfoType::NewMapRegionSize:
*result = vm_manager.GetNewMapRegionSize();
break;
case GetInfoType::IsVirtualAddressMemoryEnabled:
*result = current_process->IsVirtualMemoryEnabled();
case GetInfoType::SystemResourceSize:
*result = current_process->GetSystemResourceSize();
break;
case GetInfoType::PersonalMmHeapUsage:
*result = vm_manager.GetPersonalMmHeapUsage();
break;
case GetInfoType::TitleId:
*result = current_process->GetTitleID();
@@ -866,10 +844,10 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {
LOG_TRACE(Kernel_SVC, "called");
if (priority > THREADPRIO_LOWEST) {
LOG_ERROR(
Kernel_SVC,
"An invalid priority was specified, expected {} but got {} for thread_handle={:08X}",
THREADPRIO_LOWEST, priority, handle);
LOG_ERROR(Kernel_SVC,
"An invalid priority was specified, expected {} but got {} for "
"thread_handle={:08X}",
THREADPRIO_LOWEST, priority, handle);
return ERR_INVALID_THREAD_PRIORITY;
}
@@ -1140,10 +1118,10 @@ static void SleepThread(s64 nanoseconds) {
/// Waits on a process-wide key atomically
static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_variable_addr,
Handle thread_handle, s64 nano_seconds) {
LOG_TRACE(
Kernel_SVC,
"called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
LOG_TRACE(Kernel_SVC,
"called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, "
"timeout={}",
mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
@@ -1451,7 +1429,8 @@ static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
return ERR_INVALID_COMBINATION;
}
/// This value is used to only change the affinity mask without changing the current ideal core.
/// This value is used to only change the affinity mask without changing the current ideal
/// core.
static constexpr u32 OnlyChangeMask = static_cast<u32>(-3);
if (core == OnlyChangeMask) {
@@ -1630,17 +1609,103 @@ static ResultCode SetResourceLimitLimitValue(Handle resource_limit, u32 resource
const auto set_result = resource_limit_object->SetLimitValue(type, static_cast<s64>(value));
if (set_result.IsError()) {
LOG_ERROR(
Kernel_SVC,
"Attempted to lower resource limit ({}) for category '{}' below its current value ({})",
resource_limit_object->GetMaxResourceValue(type), resource_type,
resource_limit_object->GetCurrentResourceValue(type));
LOG_ERROR(Kernel_SVC,
"Attempted to lower resource limit ({}) for category '{}' below its current "
"value ({})",
resource_limit_object->GetMaxResourceValue(type), resource_type,
resource_limit_object->GetCurrentResourceValue(type));
return set_result;
}
return RESULT_SUCCESS;
}
static ResultCode MapPhysicalMemory(VAddr addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:08X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
return ERR_INVALID_ADDRESS;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is 0");
return ERR_INVALID_SIZE;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
return ERR_INVALID_SIZE;
}
if (!IsValidAddressRange(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Address is not a valid address range, addr=0x{:016X}, size=0x{:016X}", addr,
size);
return ERR_INVALID_ADDRESS_STATE;
}
auto* const current_process = Core::CurrentProcess();
auto& vm_manager = current_process->VMManager();
if (current_process->GetSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "The system resource size is 0");
return ERR_INVALID_STATE;
}
if (!vm_manager.IsInsideMapRegion(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination does not fit within the map region, addr=0x{:016X}, "
"size=0x{:016X}",
addr, size);
return ERR_INVALID_MEMORY_RANGE;
}
return vm_manager.MapPhysicalMemory(addr, size);
}
static ResultCode UnmapPhysicalMemory(VAddr addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:08X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
return ERR_INVALID_ADDRESS;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is 0");
return ERR_INVALID_SIZE;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
return ERR_INVALID_SIZE;
}
if (!IsValidAddressRange(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Address is not a valid address range, addr=0x{:016X}, size=0x{:016X}", addr,
size);
return ERR_INVALID_ADDRESS_STATE;
}
auto* const current_process = Core::CurrentProcess();
auto& vm_manager = current_process->VMManager();
if (current_process->GetSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "The system resource size is 0");
return ERR_INVALID_STATE;
}
if (!vm_manager.IsInsideMapRegion(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination does not fit within the map region, addr=0x{:016X}, "
"size=0x{:016X}",
addr, size);
return ERR_INVALID_MEMORY_RANGE;
}
return vm_manager.UnmapPhysicalMemory(addr, size);
}
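
MapPhysicalMemory and UnmapPhysicalMemory perform the same five validation steps before delegating to the VMManager. A hedged sketch of how those shared checks could be hoisted into one helper (PhysicalMemorySanityChecks is a hypothetical name, not a function in the PR):

// Hypothetical helper (not in the PR): the alignment, size, range, and
// system-resource checks are identical in both handlers above.
static ResultCode PhysicalMemorySanityChecks(const Process& process,
                                             const VMManager& vm_manager,
                                             VAddr addr, u64 size) {
    // Address and size must both be 4 KiB-aligned, and the size non-zero.
    if (!Common::Is4KBAligned(addr)) {
        return ERR_INVALID_ADDRESS;
    }
    if (size == 0 || !Common::Is4KBAligned(size)) {
        return ERR_INVALID_SIZE;
    }
    // The range must not wrap around the 64-bit address space.
    if (!IsValidAddressRange(addr, size)) {
        return ERR_INVALID_ADDRESS_STATE;
    }
    // Only processes with a non-zero system resource size may call these SVCs.
    if (process.GetSystemResourceSize() == 0) {
        return ERR_INVALID_STATE;
    }
    // The range must lie entirely within the map region.
    if (!vm_manager.IsInsideMapRegion(addr, size)) {
        return ERR_INVALID_MEMORY_RANGE;
    }
    return RESULT_SUCCESS;
}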
namespace {
struct FunctionDef {
using Func = void();
@@ -1696,8 +1761,8 @@ static const FunctionDef SVC_Table[] = {
{0x29, SvcWrap<GetInfo>, "GetInfo"},
{0x2A, nullptr, "FlushEntireDataCache"},
{0x2B, nullptr, "FlushDataCache"},
{0x2C, nullptr, "MapPhysicalMemory"},
{0x2D, nullptr, "UnmapPhysicalMemory"},
{0x2C, SvcWrap<MapPhysicalMemory>, "MapPhysicalMemory"},
{0x2D, SvcWrap<UnmapPhysicalMemory>, "UnmapPhysicalMemory"},
{0x2E, nullptr, "GetFutureThreadInfo"},
{0x2F, nullptr, "GetLastThreadInfo"},
{0x30, SvcWrap<GetResourceLimitLimitValue>, "GetResourceLimitLimitValue"},

View File

@@ -145,6 +145,11 @@ void SvcWrap() {
FuncReturn(func(static_cast<u32>(Param(0)), Param(1), Param(2)).raw);
}
template <ResultCode func(u64, u64)>
void SvcWrap() {
FuncReturn(func(Param(0), Param(1)).raw);
}
template <ResultCode func(u32*, u64, u64, s64)>
void SvcWrap() {
u32 param_1 = 0;

View File

@@ -7,6 +7,7 @@
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
@@ -163,6 +164,219 @@ ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
return MakeResult<VAddr>(target);
}
constexpr bool AreOverlapping(VAddr addr_start, VAddr addr_end, VAddr region_start,
VAddr region_end) {
return std::max(addr_end, region_end) - std::min(addr_start, region_start) <
(addr_end - addr_start) + (region_end - region_start);
}
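
AreOverlapping treats its arguments as half-open ranges: two ranges overlap exactly when the span covering both is shorter than the sum of their individual lengths. A quick sanity-check sketch (the asserts are illustrative, not part of the PR):

static_assert(AreOverlapping(0x1000, 0x3000, 0x2000, 0x4000),
              "partially overlapping ranges are detected");
static_assert(!AreOverlapping(0x1000, 0x2000, 0x2000, 0x3000),
              "adjacent half-open ranges do not overlap");
static_assert(AreOverlapping(0x1000, 0x4000, 0x2000, 0x3000),
              "a range fully containing another overlaps it");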
ResultCode VMManager::MapPhysicalMemory(VAddr addr, u64 size) {
const auto base = GetMapRegionBaseAddress();
const auto end = GetMapRegionEndAddress();
if (!IsInsideMapRegion(addr, size)) {
LOG_ERROR(
Kernel,
"Address and size does not fall inside the map region, addr=0x{:016X}, size=0x{:016X}",
addr, size);
return ERR_INVALID_ADDRESS;
}
// Nothing is mapped yet, so we can map the whole request directly
if (personal_heap_usage == 0) {
const auto result = MapMemoryBlock(addr, std::make_shared<std::vector<u8>>(size, 0), 0,
size, MemoryState::Mapped);
personal_heap_usage += size;
return result.Code();
}
auto vma = FindVMA(base);
u64 remaining_to_map = size;
auto last_result = RESULT_SUCCESS;
// Track what we map so that, if any region fails to map, we can unmap everything.
std::vector<std::pair<u64, u64>> mapped_regions;
while (vma != vma_map.end() && vma->second.base <= end && remaining_to_map > 0) {
const auto vma_start = vma->second.base;
const auto vma_end = vma_start + vma->second.size;
const auto is_mapped = vma->second.meminfo_state == MemoryState::Mapped;
// Something failed, so bail out
if (last_result.IsError()) {
break;
}
last_result = RESULT_SUCCESS;
// Allows us to use continue without worrying about incrementing the vma
SCOPE_EXIT({ vma++; });
// We're past the end of the requested range, so we're done
if (vma_start > addr + size - 1) {
break;
}
// We haven't reached the requested range yet, so keep skipping
if (!AreOverlapping(addr, addr + size, vma_start, vma_end)) {
continue;
}
// If we fall within the vma, compute the address at which we begin inside it
const auto offset_in_vma = vma_start + ((addr + size - remaining_to_map) - vma_start);
const auto remaining_vma_size = (vma_end - offset_in_vma);
// Our vma is already mapped
if (is_mapped) {
if (remaining_vma_size >= remaining_to_map) {
// The region we need is already mapped
break;
} else {
// We are partially mapped; make note of it and move on
remaining_to_map -= remaining_vma_size;
continue;
}
} else {
// We're not mapped, so let's map some space
if (remaining_vma_size >= remaining_to_map) {
// We can fit everything in this region, so let's finish off the mapping
last_result = MapMemoryBlock(offset_in_vma,
std::make_shared<std::vector<u8>>(remaining_to_map, 0),
0, remaining_to_map, MemoryState::Mapped)
.Code();
if (last_result.IsSuccess()) {
personal_heap_usage += remaining_to_map;
mapped_regions.push_back(std::make_pair(offset_in_vma, remaining_to_map));
} else {
LOG_ERROR(Kernel,
"Failed to map entire VMA with error 0x{:X}, addr=0x{:016X}, "
"size=0x{:016X}, vma_start={:016X}, vma_end={:016X}, "
"offset_in_vma={:016X}, remaining_to_map={:016X}",
last_result.raw, addr, size, vma_start, vma_end, offset_in_vma,
remaining_to_map);
}
break;
} else {
// We can do a partial mapping here
last_result =
MapMemoryBlock(offset_in_vma,
std::make_shared<std::vector<u8>>(remaining_vma_size, 0), 0,
remaining_vma_size, MemoryState::Mapped)
.Code();
// Update our usage and continue to the next vma
if (last_result.IsSuccess()) {
personal_heap_usage += remaining_vma_size;
remaining_to_map -= remaining_vma_size;
mapped_regions.push_back(std::make_pair(offset_in_vma, remaining_vma_size));
} else {
LOG_ERROR(Kernel,
"Failed to map partial VMA with error 0x{:X}, addr=0x{:016X}, "
"size=0x{:016X}, vma_start={:016X}, vma_end={:016X}, "
"offset_in_vma={:016X}, remaining_to_map={:016X}",
last_result.raw, addr, size, vma_start, vma_end, offset_in_vma,
remaining_to_map);
}
continue;
}
}
}
// We failed to map something, so unmap everything we mapped so far
if (last_result.IsError() && !mapped_regions.empty()) {
for (const auto [mapped_addr, mapped_size] : mapped_regions) {
if (UnmapRange(mapped_addr, mapped_size).IsSuccess()) {
personal_heap_usage -= mapped_size;
}
}
}
return last_result;
}
ResultCode VMManager::UnmapPhysicalMemory(VAddr addr, u64 size) {
const auto base = GetMapRegionBaseAddress();
const auto end = GetMapRegionEndAddress();
if (!IsInsideMapRegion(addr, size)) {
return ERR_INVALID_ADDRESS;
}
// Nothing is mapped, so there's nothing to unmap
if (personal_heap_usage == 0) {
LOG_WARNING(Kernel, "Unmap physical memory called when our personal usage is empty");
return RESULT_SUCCESS;
}
auto vma = FindVMA(base);
u64 remaining_to_unmap = size;
// Track what we unmap so that, if any region fails to unmap, we can remap everything.
std::vector<std::pair<u64, u64>> unmapped_regions;
while (vma != vma_map.end() && vma->second.base <= end && remaining_to_unmap > 0) {
const auto vma_start = vma->second.base;
const auto vma_end = vma_start + vma->second.size;
const auto is_unmapped = vma->second.meminfo_state != MemoryState::Mapped;
// Allows us to use continue without worrying about incrementing the vma
SCOPE_EXIT({ vma++; });
// We're past the end of the requested range, so we're done
if (vma_start > addr + size - 1) {
break;
}
// We haven't reached the requested range yet, so keep skipping
if (!AreOverlapping(addr, addr + size, vma_start, vma_end)) {
continue;
}
const auto offset_in_vma = vma_start + ((addr + size - remaining_to_unmap) - vma_start);
const auto remaining_vma_size = (vma_end - offset_in_vma);
// Our vma is already unmapped
if (is_unmapped) {
if (remaining_vma_size >= remaining_to_unmap) {
// The region we need is already unmapped
break;
} else {
// We are partially unmapped; make note of it and move on
remaining_to_unmap -= remaining_vma_size;
continue;
}
} else {
// We're mapped, so let's unmap
if (remaining_vma_size >= remaining_to_unmap) {
// The rest of what we need to unmap fits in this region
unmapped_regions.push_back(std::make_pair(offset_in_vma, remaining_to_unmap));
break;
} else {
// We only partially fit here, so unmap what we can,
// update our usage, and continue to the next vma
remaining_to_unmap -= remaining_vma_size;
unmapped_regions.push_back(std::make_pair(offset_in_vma, remaining_vma_size));
continue;
}
}
}
auto last_result = RESULT_SUCCESS;
if (!unmapped_regions.empty()) {
for (auto it = unmapped_regions.begin(); it != unmapped_regions.end(); ++it) {
last_result = UnmapRange((*it).first, (*it).second);
if (last_result.IsSuccess()) {
personal_heap_usage -= (*it).second;
} else {
LOG_ERROR(Kernel,
"Failed to unmap region addr=0x{:016X}, size=0x{:016X} with error=0x{:X}",
(*it).first, (*it).second, last_result.raw);
while (it != unmapped_regions.begin()) {
if (MapMemoryBlock((*it).first,
std::make_shared<std::vector<u8>>((*it).second, 0), 0,
(*it).second, MemoryState::Mapped)
.Succeeded()) {
personal_heap_usage += (*it).second;
}
--it;
}
}
}
}
return last_result;
}
ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
MemoryState state,
Memory::MemoryHookPointer mmio_handler) {
@@ -202,8 +416,8 @@ ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
const VAddr target_end = target + size;
const VMAIter end = vma_map.end();
// The comparison against the end of the range must be done using addresses since VMAs can be
// merged during this process, causing invalidation of the iterators.
// The comparison against the end of the range must be done using addresses since VMAs can
// be merged during this process, causing invalidation of the iterators.
while (vma != end && vma->second.base < target_end) {
vma = std::next(Unmap(vma));
}
@@ -234,8 +448,8 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
const VAddr target_end = target + size;
const VMAIter end = vma_map.end();
// The comparison against the end of the range must be done using addresses since VMAs can be
// merged during this process, causing invalidation of the iterators.
// The comparison against the end of the range must be done using addresses since VMAs can
// be merged during this process, causing invalidation of the iterators.
while (vma != end && vma->second.base < target_end) {
vma = std::next(StripIterConstness(Reprotect(vma, new_perms)));
}
@@ -323,8 +537,8 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem
}
void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
// If this ever proves to have a noticeable performance impact, allow users of the function to
// specify a specific range of addresses to limit the scan to.
// If this ever proves to have a noticeable performance impact, allow users of the function
// to specify a specific range of addresses to limit the scan to.
for (const auto& p : vma_map) {
const VirtualMemoryArea& vma = p.second;
if (block == vma.backing_block.get()) {
@@ -419,8 +633,8 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
VirtualMemoryArea& old_vma = vma_handle->second;
VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
// For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
// a bug. This restriction might be removed later.
// For now, don't allow no-op VMA splits (trying to split at a boundary) because it's
// probably a bug. This restriction might be removed later.
ASSERT(offset_in_vma < old_vma.size);
ASSERT(offset_in_vma > 0);
@@ -685,4 +899,22 @@ u64 VMManager::GetTLSIORegionSize() const {
return tls_io_region_end - tls_io_region_base;
}
u64 VMManager::GetPersonalMmHeapUsage() const {
return personal_heap_usage;
}
bool VMManager::IsInsideAddressSpace(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetAddressSpaceBaseAddress(),
GetAddressSpaceEndAddress());
}
bool VMManager::IsInsideNewMapRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetNewMapRegionBaseAddress(),
GetNewMapRegionEndAddress());
}
bool VMManager::IsInsideMapRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress());
}
} // namespace Kernel

View File

@@ -18,6 +18,20 @@ enum class ProgramAddressSpaceType : u8;
namespace Kernel {
// Checks if address + size is greater than the given address
// This can return false if the size causes an overflow of a 64-bit type
// or if the given size is zero.
constexpr bool IsValidAddressRange(VAddr address, u64 size) {
return address + size > address;
}
// Checks if a given address range lies within a larger address range.
constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin,
VAddr address_range_end) {
const VAddr end_address = address + size - 1;
return address_range_begin <= address && end_address <= address_range_end - 1;
}
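
The behaviour of these checks at the edge cases follows directly from the unsigned arithmetic: a zero size and an end address that overflows 64 bits both make the range invalid. A small sketch (the asserts are illustrative, not part of the PR):

static_assert(IsValidAddressRange(0x1000, 0x1000), "ordinary range is valid");
static_assert(!IsValidAddressRange(0x1000, 0), "zero-sized range is invalid");
static_assert(!IsValidAddressRange(0xFFFFFFFFFFFFF000ULL, 0x2000),
              "range whose end wraps around 64 bits is invalid");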
enum class VMAType : u8 {
/// VMA represents an unmapped region of the address space.
Free,
@@ -165,6 +179,30 @@ public:
*/
ResultVal<VAddr> FindFreeRegion(u64 size) const;
/**
* Maps memory into the PersonalMmHeap region at a given address. MapPhysicalMemory never
* remaps existing regions; its goal is to "fill" a region's empty space given an offset and a
* size. Any memory that is already mapped within the requested range is left alone, and only
* the remaining gaps are mapped. This is typically used with the "PersonalMmHeap", which
* allows processes to have extra resources mapped, and is mostly seen with 5.0.0+ games and
* sysmodules specifically. The PersonalMmHeapSize is pulled from the NPDM and handed to the
* loader when the process is created, which in turn passes it to the kernel when
* svcCreateProcess is called.
*
* @param target The address at which to map the memory
* @param size The size of the memory to map
*/
ResultCode MapPhysicalMemory(VAddr target, u64 size);
/**
* Unmaps memory from the PersonalMmHeap region at a given address.
*
* @param target The address at which to unmap the memory
* @param size The size of the memory to unmap
*/
ResultCode UnmapPhysicalMemory(VAddr target, u64 size);
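
Taken together, a caller fills a window inside the map region and later releases it. A hedged usage sketch (the helper name and the 2 MiB size are illustrative, not from the PR):

// Illustrative only: map a 4 KiB-aligned window at the start of the map
// region, then release it again.
ResultCode MapAndUnmapExample(VMManager& vm_manager) {
    const VAddr target = vm_manager.GetMapRegionBaseAddress();
    constexpr u64 size = 0x200000; // 2 MiB, 4 KiB-aligned

    const ResultCode map_result = vm_manager.MapPhysicalMemory(target, size);
    if (map_result.IsError()) {
        return map_result;
    }
    // GetPersonalMmHeapUsage() now reflects the newly mapped memory.
    return vm_manager.UnmapPhysicalMemory(target, size);
}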
/**
* Maps a memory-mapped IO region at a given address.
*
@@ -276,6 +314,13 @@ public:
/// Gets the total size of the TLS IO region in bytes.
u64 GetTLSIORegionSize() const;
/// Gets the current usage of the PersonalMmHeap region in bytes.
u64 GetPersonalMmHeapUsage() const;
bool IsInsideAddressSpace(VAddr address, u64 size) const;
bool IsInsideNewMapRegion(VAddr address, u64 size) const;
bool IsInsideMapRegion(VAddr address, u64 size) const;
/// Each VMManager has its own page table, which is set as the main one when the owning process
/// is scheduled.
Memory::PageTable page_table;
@@ -359,5 +404,7 @@ private:
VAddr heap_start = 0;
VAddr heap_end = 0;
u64 heap_used = 0;
u64 personal_heap_usage = 0;
};
} // namespace Kernel