Compare commits

..

2 Commits

Author SHA1 Message Date
voidanix
fac4b7783c cmake: remove headers requirement from boost
While the boost headers are obviously still necessary, this avoids the following warning that made it fall back to the external
boost:

CMake Warning at /usr/share/cmake/Modules/FindBoost.cmake:2216 (message):
  No header defined for headers; skipping header check (note: header-only
  libraries have no designated component)
Call Stack (most recent call first):
  CMakeLists.txt:212 (find_package)
2022-02-16 12:52:43 +01:00
voidanix
0e9d5b9b5c cmake: remove QUIET from ffmpeg/boost detection
This allows for specific error messages from cmake itself about version requirements or
missing components/headers.

Update the boost detection message as well and turn it into a warning.
2022-02-16 12:48:06 +01:00
16 changed files with 168 additions and 659 deletions

View File

@@ -209,7 +209,7 @@ macro(yuzu_find_packages)
endmacro()
if (NOT YUZU_USE_BUNDLED_BOOST)
find_package(Boost 1.73.0 CONFIG COMPONENTS context headers QUIET)
find_package(Boost 1.73.0 COMPONENTS context)
endif()
if (Boost_FOUND)
set(Boost_LIBRARIES Boost::boost)
@@ -222,7 +222,7 @@ if (Boost_FOUND)
list(APPEND Boost_LIBRARIES Boost::context)
endif()
elseif (${CMAKE_SYSTEM_NAME} STREQUAL "Linux" OR YUZU_USE_BUNDLED_BOOST)
message(STATUS "Boost 1.73.0 or newer not found, falling back to externals")
message(WARNING "Boost not found or too old, falling back to externals")
set(YUZU_USE_BUNDLED_BOOST ON CACHE BOOL "Download bundled Boost" FORCE)
# Use yuzu Boost binaries
@@ -522,7 +522,7 @@ if (UNIX AND NOT APPLE)
endif()
if (NOT YUZU_USE_BUNDLED_FFMPEG)
# Use system installed FFmpeg
find_package(FFmpeg 4.3 QUIET COMPONENTS ${FFmpeg_COMPONENTS})
find_package(FFmpeg 4.3 COMPONENTS ${FFmpeg_COMPONENTS})
if (FFmpeg_FOUND)
# Overwrite aggregate defines from FFmpeg module to avoid over-linking libraries.

View File

@@ -108,7 +108,6 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
SUB(Service, Migration) \
SUB(Service, Mii) \
SUB(Service, MM) \
SUB(Service, MNPP) \
SUB(Service, NCM) \
SUB(Service, NFC) \
SUB(Service, NFP) \

View File

@@ -76,7 +76,6 @@ enum class Class : u8 {
Service_Migration, ///< The migration service
Service_Mii, ///< The Mii service
Service_MM, ///< The MM (Multimedia) service
Service_MNPP, ///< The MNPP service
Service_NCM, ///< The NCM service
Service_NFC, ///< The NFC (Near-field communication) service
Service_NFP, ///< The NFP service

View File

@@ -10,65 +10,11 @@ PageTable::PageTable() = default;
PageTable::~PageTable() noexcept = default;
bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
                               u64 address) const {
    // Start from invalid defaults so a failed lookup leaves a consistent state.
    out_entry.phys_addr = 0;
    out_entry.block_size = page_size;
    out_context.next_page = 0;

    // Reject addresses past the end of the table.
    const auto page_index = address / page_size;
    if (page_index >= backing_addr.size()) {
        return false;
    }

    // A zero backing value marks an unmapped page.
    const auto backing = backing_addr[page_index];
    if (backing == 0) {
        return false;
    }

    // Report the translated address and prime the context so that
    // ContinueTraversal picks up at the following page.
    out_entry.phys_addr = backing + address;
    out_context.next_page = page_index + 1;
    out_context.next_offset = address + page_size;
    return true;
}
bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const {
    // Start from invalid defaults so a failed step leaves a consistent entry.
    out_entry.phys_addr = 0;
    out_entry.block_size = page_size;

    // Stop once the context has walked off the end of the table.
    const auto page_index = context.next_page;
    if (page_index >= backing_addr.size()) {
        return false;
    }

    // A zero backing value marks an unmapped page.
    const auto backing = backing_addr[page_index];
    if (backing == 0) {
        return false;
    }

    // Report the translation and advance the context by one page.
    out_entry.phys_addr = backing + context.next_offset;
    context.next_page = page_index + 1;
    context.next_offset += page_size;
    return true;
}
void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits) {
const std::size_t num_page_table_entries{1ULL
<< (address_space_width_in_bits - page_size_in_bits)};
void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
pointers.resize(num_page_table_entries);
backing_addr.resize(num_page_table_entries);
current_address_space_width_in_bits = address_space_width_in_bits;
page_size = 1ULL << page_size_in_bits;
}
} // namespace Common

View File

@@ -27,16 +27,6 @@ enum class PageType : u8 {
* mimics the way a real CPU page table works.
*/
struct PageTable {
struct TraversalEntry {
u64 phys_addr{};
std::size_t block_size{};
};
struct TraversalContext {
u64 next_page{};
u64 next_offset{};
};
/// Number of bits reserved for attribute tagging.
/// This can be at most the guaranteed alignment of the pointers in the page table.
static constexpr int ATTRIBUTE_BITS = 2;
@@ -99,10 +89,6 @@ struct PageTable {
PageTable(PageTable&&) noexcept = default;
PageTable& operator=(PageTable&&) noexcept = default;
bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
u64 address) const;
bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const;
/**
* Resizes the page table to be able to accommodate enough pages within
* a given address space.
@@ -110,9 +96,9 @@ struct PageTable {
* @param address_space_width_in_bits The address size width in bits.
* @param page_size_in_bits The page size in bits.
*/
void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits);
void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);
std::size_t GetAddressSpaceBits() const {
size_t GetAddressSpaceBits() const {
return current_address_space_width_in_bits;
}
@@ -124,11 +110,9 @@ struct PageTable {
VirtualBuffer<u64> backing_addr;
std::size_t current_address_space_width_in_bits{};
size_t current_address_space_width_in_bits;
u8* fastmem_arena{};
std::size_t page_size{};
u8* fastmem_arena;
};
} // namespace Common

View File

@@ -171,9 +171,6 @@ struct VisitorInterface {
struct NullVisitor final : public VisitorInterface {
YUZU_NON_COPYABLE(NullVisitor);
NullVisitor() = default;
~NullVisitor() override = default;
void Visit(const Field<bool>& /*field*/) override {}
void Visit(const Field<double>& /*field*/) override {}
void Visit(const Field<float>& /*field*/) override {}

View File

@@ -467,8 +467,6 @@ add_library(core STATIC
hle/service/mii/types.h
hle/service/mm/mm_u.cpp
hle/service/mm/mm_u.h
hle/service/mnpp/mnpp_app.cpp
hle/service/mnpp/mnpp_app.h
hle/service/ncm/ncm.cpp
hle/service/ncm/ncm.h
hle/service/nfc/nfc.cpp

View File

@@ -128,6 +128,15 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {
if (exefs == nullptr)
return exefs;
if (Settings::values.dump_exefs) {
LOG_INFO(Loader, "Dumping ExeFS for title_id={:016X}", title_id);
const auto dump_dir = fs_controller.GetModificationDumpRoot(title_id);
if (dump_dir != nullptr) {
const auto exefs_dir = GetOrCreateDirectoryRelative(dump_dir, "/exefs");
VfsRawCopyD(exefs, exefs_dir);
}
}
const auto& disabled = Settings::values.disabled_addons[title_id];
const auto update_disabled =
std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend();
@@ -170,15 +179,6 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {
}
}
if (Settings::values.dump_exefs) {
LOG_INFO(Loader, "Dumping ExeFS for title_id={:016X}", title_id);
const auto dump_dir = fs_controller.GetModificationDumpRoot(title_id);
if (dump_dir != nullptr) {
const auto exefs_dir = GetOrCreateDirectoryRelative(dump_dir, "/exefs");
VfsRawCopyD(exefs, exefs_dir);
}
}
return exefs;
}

View File

@@ -41,6 +41,24 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
}
}
// Returns the block's base address, clamped so it is never below the start of
// the queried range.
constexpr u64 GetAddressInRange(const KMemoryInfo& info, VAddr addr) {
    return (info.GetAddress() < addr) ? addr : info.GetAddress();
}
// Returns how many bytes of the block fall inside the [start, end) query
// window: the block's full size minus any portion hanging off either edge.
constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr end) {
    std::size_t clipped{info.GetSize()};
    const auto block_begin = info.GetAddress();
    const auto block_end = info.GetEndAddress();
    if (block_begin < start) {
        clipped -= start - block_begin;
    }
    if (block_end > end) {
        clipped -= block_end - end;
    }
    return clipped;
}
} // namespace
KPageTable::KPageTable(Core::System& system_)
@@ -382,471 +400,148 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
return ResultSuccess;
}
ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
// Calculate the last address for convenience.
const VAddr last_address = address + size - 1;
// Define iteration variables.
VAddr cur_address;
std::size_t mapped_size;
// The entire mapping process can be retried.
while (true) {
// Check if the memory is already mapped.
{
// Lock the table.
KScopedLightLock lk(general_lock);
// Iterate over the memory.
cur_address = address;
mapped_size = 0;
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
if (info.GetState() != KMemoryState::Free) {
mapped_size += (last_address + 1 - cur_address);
}
break;
}
// Track the memory if it's mapped.
if (info.GetState() != KMemoryState::Free) {
mapped_size += VAddr(info.GetEndAddress()) - cur_address;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
}
// If the size mapped is the size requested, we've nothing to do.
R_SUCCEED_IF(size == mapped_size);
}
// Allocate and map the memory.
{
// Reserve the memory from the process resource limit.
KScopedResourceReservation memory_reservation(
system.Kernel().CurrentProcess()->GetResourceLimit(),
LimitableResource::PhysicalMemory, size - mapped_size);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the new memory.
KPageLinkedList page_linked_list;
R_TRY(system.Kernel().MemoryManager().Allocate(
page_linked_list, (size - mapped_size) / PageSize, memory_pool, allocation_option));
// Map the memory.
{
// Lock the table.
KScopedLightLock lk(general_lock);
size_t num_allocator_blocks = 0;
// Verify that nobody has mapped memory since we first checked.
{
// Iterate over the memory.
size_t checked_mapped_size = 0;
cur_address = address;
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
const bool is_free = info.GetState() == KMemoryState::Free;
if (is_free) {
if (info.GetAddress() < address) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
++num_allocator_blocks;
}
}
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
if (!is_free) {
checked_mapped_size += (last_address + 1 - cur_address);
}
break;
}
// Track the memory if it's mapped.
if (!is_free) {
checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
}
// If the size now isn't what it was before, somebody mapped or unmapped
// concurrently. If this happened, retry.
if (mapped_size != checked_mapped_size) {
continue;
}
}
// Reset the current tracking address, and make sure we clean up on failure.
cur_address = address;
auto unmap_guard = detail::ScopeExit([&] {
if (cur_address > address) {
const VAddr last_unmap_address = cur_address - 1;
// Iterate, unmapping the pages.
cur_address = address;
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
// If the memory state is free, we mapped it and need to unmap it.
if (info.GetState() == KMemoryState::Free) {
// Determine the range to unmap.
const size_t cur_pages =
std::min(VAddr(info.GetEndAddress()) - cur_address,
last_unmap_address + 1 - cur_address) /
PageSize;
// Unmap.
ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
OperationType::Unmap)
.IsSuccess());
}
// Check if we're done.
if (last_unmap_address <= info.GetLastAddress()) {
break;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
}
}
});
// Iterate over the memory.
auto pg_it = page_linked_list.Nodes().begin();
PAddr pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
// If it's unmapped, we need to map it.
if (info.GetState() == KMemoryState::Free) {
// Determine the range to map.
size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
last_address + 1 - cur_address) /
PageSize;
// While we have pages to map, map them.
while (map_pages > 0) {
// Check if we're at the end of the physical block.
if (pg_pages == 0) {
// Ensure there are more pages to map.
ASSERT(pg_it != page_linked_list.Nodes().end());
// Advance our physical block.
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}
// Map whatever we can.
const size_t cur_pages = std::min(pg_pages, map_pages);
R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
OperationType::Map, pg_phys_addr));
// Advance.
cur_address += cur_pages * PageSize;
map_pages -= cur_pages;
pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
}
}
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
break;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
}
// We succeeded, so commit the memory reservation.
memory_reservation.Commit();
// Increase our tracked mapped size.
mapped_physical_memory_size += (size - mapped_size);
// Update the relevant memory blocks.
block_manager->Update(address, size / PageSize, KMemoryState::Free,
KMemoryPermission::None, KMemoryAttribute::None,
KMemoryState::Normal, KMemoryPermission::UserReadWrite,
KMemoryAttribute::None);
// Cancel our guard.
unmap_guard.Cancel();
return ResultSuccess;
}
}
}
}
ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
// Lock the table.
KScopedLightLock lk(general_lock);
// Calculate the last address for convenience.
const VAddr last_address = address + size - 1;
std::size_t mapped_size{};
const VAddr end_addr{addr + size};
// Define iteration variables.
VAddr cur_address = 0;
std::size_t mapped_size = 0;
std::size_t num_allocator_blocks = 0;
// Check if the memory is mapped.
{
// Iterate over the memory.
cur_address = address;
mapped_size = 0;
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
// Verify the memory's state.
const bool is_normal = info.GetState() == KMemoryState::Normal &&
info.GetAttribute() == KMemoryAttribute::None;
const bool is_free = info.GetState() == KMemoryState::Free;
R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
if (is_normal) {
R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
if (info.GetAddress() < address) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
++num_allocator_blocks;
}
}
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
if (is_normal) {
mapped_size += (last_address + 1 - cur_address);
}
break;
}
// Track the memory if it's mapped.
if (is_normal) {
mapped_size += VAddr(info.GetEndAddress()) - cur_address;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
}
// If there's nothing mapped, we've nothing to do.
R_SUCCEED_IF(mapped_size == 0);
}
// Make a page group for the unmap region.
KPageLinkedList pg;
{
auto& impl = this->PageTableImpl();
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
bool cur_valid = false;
Common::PageTable::TraversalEntry next_entry;
bool next_valid = false;
size_t tot_size = 0;
cur_address = address;
next_valid = impl.BeginTraversal(next_entry, context, cur_address);
next_entry.block_size =
(next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));
// Iterate, building the group.
while (true) {
if ((!next_valid && !cur_valid) ||
(next_valid && cur_valid &&
next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
cur_entry.block_size += next_entry.block_size;
} else {
if (cur_valid) {
// ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
}
// Update tracking variables.
tot_size += cur_entry.block_size;
cur_entry = next_entry;
cur_valid = next_valid;
}
if (cur_entry.block_size + tot_size >= size) {
break;
}
next_valid = impl.ContinueTraversal(next_entry, context);
}
// Add the last block.
if (cur_valid) {
// ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
}
}
ASSERT(pg.GetNumPages() == mapped_size / PageSize);
// Reset the current tracking address, and make sure we clean up on failure.
cur_address = address;
auto remap_guard = detail::ScopeExit([&] {
if (cur_address > address) {
const VAddr last_map_address = cur_address - 1;
cur_address = address;
// Iterate over the memory we unmapped.
auto it = block_manager->FindIterator(cur_address);
auto pg_it = pg.Nodes().begin();
PAddr pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
while (true) {
// Get the memory info for the pages we unmapped, convert to property.
const KMemoryInfo info = it->GetMemoryInfo();
// If the memory is normal, we unmapped it and need to re-map it.
if (info.GetState() == KMemoryState::Normal) {
// Determine the range to map.
size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
last_map_address + 1 - cur_address) /
PageSize;
// While we have pages to map, map them.
while (map_pages > 0) {
// Check if we're at the end of the physical block.
if (pg_pages == 0) {
// Ensure there are more pages to map.
ASSERT(pg_it != pg.Nodes().end());
// Advance our physical block.
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}
// Map whatever we can.
const size_t cur_pages = std::min(pg_pages, map_pages);
ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
OperationType::Map, pg_phys_addr) == ResultSuccess);
// Advance.
cur_address += cur_pages * PageSize;
map_pages -= cur_pages;
pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
}
}
// Check if we're done.
if (last_map_address <= info.GetLastAddress()) {
break;
}
// Advance.
++it;
}
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state != KMemoryState::Free) {
mapped_size += GetSizeInRange(info, addr, end_addr);
}
});
// Iterate over the memory, unmapping as we go.
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
// If the memory state is normal, we need to unmap it.
if (info.GetState() == KMemoryState::Normal) {
// Determine the range to unmap.
const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
last_address + 1 - cur_address) /
PageSize;
// Unmap.
R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
}
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
break;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
if (mapped_size == size) {
return ResultSuccess;
}
// Release the memory resource.
mapped_physical_memory_size -= mapped_size;
const std::size_t remaining_size{size - mapped_size};
const std::size_t remaining_pages{remaining_size / PageSize};
// Reserve the memory from the process resource limit.
KScopedResourceReservation memory_reservation(
system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
remaining_size);
if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
return ResultLimitReached;
}
KPageLinkedList page_linked_list;
CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
memory_pool, allocation_option));
// We succeeded, so commit the memory reservation.
memory_reservation.Commit();
// Map the memory.
auto node{page_linked_list.Nodes().begin()};
PAddr map_addr{node->GetAddress()};
std::size_t src_num_pages{node->GetNumPages()};
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state != KMemoryState::Free) {
return;
}
std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize};
VAddr dst_addr{GetAddressInRange(info, addr)};
while (dst_num_pages) {
if (!src_num_pages) {
node = std::next(node);
map_addr = node->GetAddress();
src_num_pages = node->GetNumPages();
}
const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
map_addr);
dst_addr += num_pages * PageSize;
map_addr += num_pages * PageSize;
src_num_pages -= num_pages;
dst_num_pages -= num_pages;
}
});
mapped_physical_memory_size += remaining_size;
const std::size_t num_pages{size / PageSize};
block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryState::Normal,
KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
return ResultSuccess;
}
ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
// Lock the table.
KScopedLightLock lk(general_lock);
const VAddr end_addr{addr + size};
ResultCode result{ResultSuccess};
std::size_t mapped_size{};
// Verify that the region can be unmapped
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state == KMemoryState::Normal) {
if (info.attribute != KMemoryAttribute::None) {
result = ResultInvalidCurrentMemory;
return;
}
mapped_size += GetSizeInRange(info, addr, end_addr);
} else if (info.state != KMemoryState::Free) {
result = ResultInvalidCurrentMemory;
}
});
if (result.IsError()) {
return result;
}
if (!mapped_size) {
return ResultSuccess;
}
// Unmap each region within the range
KPageLinkedList page_linked_list;
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state == KMemoryState::Normal) {
const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
const std::size_t block_num_pages{block_size / PageSize};
const VAddr block_addr{GetAddressInRange(info, addr)};
AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);
if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None,
OperationType::Unmap);
result.IsError()) {
return;
}
}
});
if (result.IsError()) {
return result;
}
const std::size_t num_pages{size / PageSize};
system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool,
allocation_option);
block_manager->Update(addr, num_pages, KMemoryState::Free);
auto process{system.Kernel().CurrentProcess()};
process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
// Update memory blocks.
system.Kernel().MemoryManager().Free(pg, size / PageSize, memory_pool, allocation_option);
block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None);
// We succeeded.
remap_guard.Cancel();
mapped_physical_memory_size -= mapped_size;
return ResultSuccess;
}

View File

@@ -645,10 +645,6 @@ static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
LOG_DEBUG(Debug_Emulated, "{}", str);
}
/// 32-bit shim: widens the u32 address/length arguments and forwards to the
/// 64-bit OutputDebugString implementation above.
static void OutputDebugString32(Core::System& system, u32 address, u32 len) {
    OutputDebugString(system, address, len);
}
/// Gets system/memory information for the current process
static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
u64 info_sub_id) {
@@ -1408,7 +1404,7 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
}
static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
LOG_TRACE(Kernel_SVC, "called, handle_out={}, address=0x{:X}, size=0x{:X}",
LOG_TRACE(Kernel_SVC, "called, handle_out=0x{:X}, address=0x{:X}, size=0x{:X}",
static_cast<void*>(out), address, size);
// Get kernel instance.
auto& kernel = system.Kernel();
@@ -1442,10 +1438,6 @@ static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr addr
return ResultSuccess;
}
/// 32-bit shim: widens the u32 address/size arguments and forwards to the
/// 64-bit CreateCodeMemory implementation; the created handle is written
/// through |out| unchanged.
static ResultCode CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
    return CreateCodeMemory(system, out, address, size);
}
static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
VAddr address, size_t size, Svc::MemoryPermission perm) {
@@ -1525,12 +1517,6 @@ static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_han
return ResultSuccess;
}
/// 32-bit shim forwarding to the 64-bit ControlCodeMemory. Note that the
/// address and size parameters are already u64 here — presumably the SvcWrap32
/// marshaller rejoins the split 32-bit register pairs before calling this.
static ResultCode ControlCodeMemory32(Core::System& system, Handle code_memory_handle,
                                      u32 operation, u64 address, u64 size,
                                      Svc::MemoryPermission perm) {
    return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
}
static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
VAddr page_info_address, Handle process_handle,
VAddr address) {
@@ -2612,7 +2598,7 @@ static const FunctionDef SVC_Table_32[] = {
{0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"},
{0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"},
{0x26, SvcWrap32<Break32>, "Break32"},
{0x27, SvcWrap32<OutputDebugString32>, "OutputDebugString32"},
{0x27, nullptr, "OutputDebugString32"},
{0x28, nullptr, "ReturnFromException32"},
{0x29, SvcWrap32<GetInfo32>, "GetInfo32"},
{0x2a, nullptr, "FlushEntireDataCache32"},
@@ -2648,8 +2634,8 @@ static const FunctionDef SVC_Table_32[] = {
{0x48, nullptr, "MapPhysicalMemoryUnsafe32"},
{0x49, nullptr, "UnmapPhysicalMemoryUnsafe32"},
{0x4a, nullptr, "SetUnsafeLimit32"},
{0x4b, SvcWrap32<CreateCodeMemory32>, "CreateCodeMemory32"},
{0x4c, SvcWrap32<ControlCodeMemory32>, "ControlCodeMemory32"},
{0x4b, nullptr, "CreateCodeMemory32"},
{0x4c, nullptr, "ControlCodeMemory32"},
{0x4d, nullptr, "SleepSystem32"},
{0x4e, nullptr, "ReadWriteRegister32"},
{0x4f, nullptr, "SetProcessActivity32"},

View File

@@ -669,26 +669,4 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
// Used by CreateCodeMemory32: marshals (Handle* out, u32, u32) from guest
// registers. Input arguments are read from guest param slots 1 and 2; the
// handle produced by the wrapped SVC is written back to guest register 1.
template <ResultCode func(Core::System&, Handle*, u32, u32)>
void SvcWrap32(Core::System& system) {
    // Out-parameter filled in by the wrapped SVC.
    Handle handle = 0;
    const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2)).raw;
    system.CurrentArmInterface().SetReg(1, handle);
    FuncReturn(system, retval);
}
// Used by ControlCodeMemory32: marshals (Handle, u32, u64, u64, MemoryPermission)
// from guest registers. Slots 2 and 4 are read as full 64-bit values (Param,
// not Param32) — presumably each occupies a 32-bit register pair, which is why
// slot indices 3 and 5 are skipped; confirm against the 32-bit SVC ABI.
template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, Param32(system, 0), Param32(system, 1), Param(system, 2), Param(system, 4),
             static_cast<Svc::MemoryPermission>(Param32(system, 6)))
            .raw;
    FuncReturn(system, retval);
}
} // namespace Kernel

View File

@@ -1,45 +0,0 @@
// Copyright 2022 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/service/mnpp/mnpp_app.h"
#include "core/hle/service/sm/sm.h"
namespace Service::MNPP {
// Stub implementation of the mnpp:app service: every known command simply
// logs and reports success.
class MNPP_APP final : public ServiceFramework<MNPP_APP> {
public:
    explicit MNPP_APP(Core::System& system_) : ServiceFramework{system_, "mnpp:app"} {
        // clang-format off
        static const FunctionInfo functions[] = {
            {0, &MNPP_APP::Unknown0, "unknown0"},
            {1, &MNPP_APP::Unknown1, "unknown1"},
        };
        // clang-format on
        RegisterHandlers(functions);
    }

private:
    // Command 0 stub: log and reply success with no payload.
    void Unknown0(Kernel::HLERequestContext& ctx) {
        LOG_WARNING(Service_MNPP, "(STUBBED) called");
        ReplySuccess(ctx);
    }

    // Command 1 stub: identical success-only reply.
    void Unknown1(Kernel::HLERequestContext& ctx) {
        LOG_WARNING(Service_MNPP, "(STUBBED) called");
        ReplySuccess(ctx);
    }

    // Shared tail for the stubbed handlers: a two-word response carrying only
    // ResultSuccess.
    void ReplySuccess(Kernel::HLERequestContext& ctx) {
        IPC::ResponseBuilder rb{ctx, 2};
        rb.Push(ResultSuccess);
    }
};
void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) {
    // Register the single mnpp:app service with the service manager.
    auto mnpp_app = std::make_shared<MNPP_APP>(system);
    mnpp_app->InstallAsService(service_manager);
}
} // namespace Service::MNPP

View File

@@ -1,20 +0,0 @@
// Copyright 2022 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
namespace Core {
class System;
}
namespace Service::SM {
class ServiceManager;
}
namespace Service::MNPP {
/// Registers all MNPP services with the specified service manager.
void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system);
} // namespace Service::MNPP

View File

@@ -39,7 +39,6 @@
#include "core/hle/service/mig/mig.h"
#include "core/hle/service/mii/mii.h"
#include "core/hle/service/mm/mm_u.h"
#include "core/hle/service/mnpp/mnpp_app.h"
#include "core/hle/service/ncm/ncm.h"
#include "core/hle/service/nfc/nfc.h"
#include "core/hle/service/nfp/nfp.h"
@@ -266,7 +265,6 @@ Services::Services(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system
Migration::InstallInterfaces(*sm, system);
Mii::InstallInterfaces(*sm, system);
MM::InstallInterfaces(*sm, system);
MNPP::InstallInterfaces(*sm, system);
NCM::InstallInterfaces(*sm, system);
NFC::InstallInterfaces(*sm, system);
NFP::InstallInterfaces(*sm, system);

View File

@@ -1332,9 +1332,6 @@ void ConfigureInputPlayer::HandleClick(
QPushButton* button, std::size_t button_id,
std::function<void(const Common::ParamPackage&)> new_input_setter,
InputCommon::Polling::InputType type) {
if (timeout_timer->isActive()) {
return;
}
if (button == ui->buttonMotionLeft || button == ui->buttonMotionRight) {
button->setText(tr("Shake!"));
} else {

View File

@@ -227,9 +227,6 @@ void ConfigureTouchFromButton::RenameMapping() {
}
void ConfigureTouchFromButton::GetButtonInput(const int row_index, const bool is_new) {
if (timeout_timer->isActive()) {
return;
}
binding_list_model->item(row_index, 0)->setText(tr("[press key]"));
input_setter = [this, row_index, is_new](const Common::ParamPackage& params,