Compare commits

...

29 Commits

Author SHA1 Message Date
Ameer J
5f8d6bc504 vulkan_device: Blacklist RADV on RDNA2 from VK_EXT_vertex_input_dynamic_state
RDNA2 devices running under the RADV driver were crashing when VK_EXT_vertex_input_dynamic_state was enabled.

Blacklisting these devices until a proper fix is established.
2022-02-25 23:09:03 -05:00
Mai M
2f45e999d8 Merge pull request #7933 from german77/am_update
service: am: Update enum names to match documentation
2022-02-21 20:42:01 -05:00
Narr the Reg
d44464829b service: am: Update enum names to match documentation 2022-02-21 18:00:50 -06:00
bunnei
efe50d88ec Merge pull request #7913 from voidanix/anv-fix
vulkan_device: fix missing format in ANV
2022-02-21 14:34:27 -07:00
bunnei
21f5912ec9 Merge pull request #7919 from bunnei/phys-mem-updates
core: hle: kernel: KPageTable: Improve Un/MapPhysicalMemory.
2022-02-21 13:39:05 -07:00
bunnei
8d46c3cc66 Merge pull request #7920 from bunnei/fix-unmap-pages
core: hle: kernel: KPageTable: Fix UnmapPages.
2022-02-21 13:38:52 -07:00
voidanix
7712e46d64 vulkan_device: fix missing format in ANV
Mesa's ANV driver does not currently support
VK_FORMAT_B5G6R5_UNORM_PACK16, so implement an alternative for it.
2022-02-21 09:21:41 +01:00
bunnei
92b2e92620 fixup! core: hle: kernel: KPageTable: Improve Un/MapPhysicalMemory. 2022-02-19 00:14:27 -08:00
bunnei
2984695265 Merge pull request #7867 from german77/amiibo
nfp: Improve amiibo support
2022-02-19 00:57:47 -07:00
bunnei
c9260a75f6 core: hle: kernel: KPageTable: Fix UnmapPages.
- Fixes a logic bug in KPageTable::UnmapPages.
2022-02-18 23:48:16 -08:00
bunnei
1a16d055df core: hle: kernel: KPageTable: Improve Un/MapPhysicalMemory.
- Improves the implementations of MapPhysicalMemory and UnmapPhysicalMemory to more closely reflect the latest HOS.
2022-02-18 23:42:27 -08:00
bunnei
83a84f1c2d Merge pull request #7900 from german77/enter
yuzu: config: Fix mapping issues with the enter key
2022-02-18 15:47:34 -07:00
Mai M
90a4591563 Merge pull request #7909 from Wunkolo/null-visit-ctor
common: Add NullVisitor default constructor
2022-02-18 17:44:02 -05:00
Wunkolo
768fdb269e common: Add NullVisitor default constructor
Addresses https://github.com/yuzu-emu/yuzu/issues/7881 to fix Linux
builds.

`YUZU_NON_COPYABLE` deletes the `T(const T&)` constructor, which
prevents the implicitly defined default ctor/dtor from being generated.
2022-02-17 06:28:19 -08:00
Mai M
c48b9668f0 Merge pull request #7866 from xerpi/svc-OutputDebugString32-CreateCodeMemory32-ControlCodeMemory32
kernel: svc: Add OutputDebugString32, CreateCodeMemory32, ControlCodeMemory32
2022-02-16 22:49:56 -05:00
bunnei
027ff7847c Merge pull request #7878 from german77/mnpp
service/mnpp: Stub mnpp_app
2022-02-16 18:42:49 -07:00
Morph
4514325b9c Merge pull request #7899 from Kelebek1/test
file_sys: Dump patched exefs rather than base
2022-02-16 16:37:09 -05:00
Narr the Reg
1e21f5f872 yuzu: config: Fix mapping issues with the enter key 2022-02-15 11:08:11 -06:00
Kelebek1
e1201abc1e Dump patched exefs rather than base 2022-02-15 04:52:28 +00:00
Sergi Granell
c3242abe95 kernel: svc: Add OutputDebugString32, CreateCodeMemory32, ControlCodeMemory32
Very straightforward; they are just wrappers around the 64-bit versions
of these SVCs.
2022-02-15 00:45:19 +01:00
german77
b57d61010f nfp: Allow files without password data 2022-02-13 13:52:34 -06:00
Narr the Reg
6705439cf3 service/mnpp: Stub mnpp_app
Used in Super Nintendo Entertainment System™ - Nintendo Switch Online
2022-02-10 21:55:28 -06:00
Narr the Reg
6a1ad03153 nfp: Separate nfc tag from amiibo data 2022-02-10 10:58:37 -06:00
german77
e35c2fd5d0 nfp: Address compiler issues 2022-02-08 18:52:44 -06:00
Narr the Reg
29f9a454eb nfp: Validate amiibo files 2022-02-08 14:09:30 -06:00
german77
41b65d38fa yuzu: Allow opening and removing the amiibo 2022-02-08 10:08:04 -06:00
german77
fc9abd3c62 nfp: Improve implementation 2022-02-08 10:08:04 -06:00
german77
c001a2af25 nfp: Move IUser class to header and add missing enum and structs 2022-02-07 09:18:22 -06:00
german77
3d24eb54ec nfp: Sort functions by command number 2022-02-07 09:18:22 -06:00
29 changed files with 1753 additions and 497 deletions

View File

@@ -108,6 +108,7 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
SUB(Service, Migration) \
SUB(Service, Mii) \
SUB(Service, MM) \
SUB(Service, MNPP) \
SUB(Service, NCM) \
SUB(Service, NFC) \
SUB(Service, NFP) \

View File

@@ -76,6 +76,7 @@ enum class Class : u8 {
Service_Migration, ///< The migration service
Service_Mii, ///< The Mii service
Service_MM, ///< The MM (Multimedia) service
Service_MNPP, ///< The MNPP service
Service_NCM, ///< The NCM service
Service_NFC, ///< The NFC (Near-field communication) service
Service_NFP, ///< The NFP service

View File

@@ -10,11 +10,65 @@ PageTable::PageTable() = default;
PageTable::~PageTable() noexcept = default;
void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
u64 address) const {
// Setup invalid defaults.
out_entry.phys_addr = 0;
out_entry.block_size = page_size;
out_context.next_page = 0;
// Validate that we can read the actual entry.
const auto page = address / page_size;
if (page >= backing_addr.size()) {
return false;
}
// Validate that the entry is mapped.
const auto phys_addr = backing_addr[page];
if (phys_addr == 0) {
return false;
}
// Populate the results.
out_entry.phys_addr = phys_addr + address;
out_context.next_page = page + 1;
out_context.next_offset = address + page_size;
return true;
}
bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const {
// Setup invalid defaults.
out_entry.phys_addr = 0;
out_entry.block_size = page_size;
// Validate that we can read the actual entry.
const auto page = context.next_page;
if (page >= backing_addr.size()) {
return false;
}
// Validate that the entry is mapped.
const auto phys_addr = backing_addr[page];
if (phys_addr == 0) {
return false;
}
// Populate the results.
out_entry.phys_addr = phys_addr + context.next_offset;
context.next_page = page + 1;
context.next_offset += page_size;
return true;
}
void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits) {
const std::size_t num_page_table_entries{1ULL
<< (address_space_width_in_bits - page_size_in_bits)};
pointers.resize(num_page_table_entries);
backing_addr.resize(num_page_table_entries);
current_address_space_width_in_bits = address_space_width_in_bits;
page_size = 1ULL << page_size_in_bits;
}
} // namespace Common
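The two functions above give KPageTable a way to walk the physical blocks backing a virtual range one page at a time. A minimal usage sketch (a hypothetical caller inside the yuzu codebase, assuming the usual includes; page_table, base_address, size and HandleBlock are assumed names, not part of this change):

Common::PageTable::TraversalEntry entry{};
Common::PageTable::TraversalContext context{};

u64 address = base_address;
u64 remaining = size;
bool valid = page_table.BeginTraversal(entry, context, address);
while (valid && remaining > 0) {
    // entry.phys_addr is the physical address backing the current virtual
    // address; entry.block_size is the page size for this step.
    const u64 step = std::min<u64>(entry.block_size, remaining);
    HandleBlock(entry.phys_addr, step);
    remaining -= step;
    valid = page_table.ContinueTraversal(entry, context);
}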

View File

@@ -27,6 +27,16 @@ enum class PageType : u8 {
* mimics the way a real CPU page table works.
*/
struct PageTable {
struct TraversalEntry {
u64 phys_addr{};
std::size_t block_size{};
};
struct TraversalContext {
u64 next_page{};
u64 next_offset{};
};
/// Number of bits reserved for attribute tagging.
/// This can be at most the guaranteed alignment of the pointers in the page table.
static constexpr int ATTRIBUTE_BITS = 2;
@@ -89,6 +99,10 @@ struct PageTable {
PageTable(PageTable&&) noexcept = default;
PageTable& operator=(PageTable&&) noexcept = default;
bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
u64 address) const;
bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const;
/**
* Resizes the page table to be able to accommodate enough pages within
* a given address space.
@@ -96,9 +110,9 @@ struct PageTable {
* @param address_space_width_in_bits The address size width in bits.
* @param page_size_in_bits The page size in bits.
*/
void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);
void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits);
size_t GetAddressSpaceBits() const {
std::size_t GetAddressSpaceBits() const {
return current_address_space_width_in_bits;
}
@@ -110,9 +124,11 @@ struct PageTable {
VirtualBuffer<u64> backing_addr;
size_t current_address_space_width_in_bits;
std::size_t current_address_space_width_in_bits{};
u8* fastmem_arena;
u8* fastmem_arena{};
std::size_t page_size{};
};
} // namespace Common

View File

@@ -171,6 +171,9 @@ struct VisitorInterface {
struct NullVisitor final : public VisitorInterface {
YUZU_NON_COPYABLE(NullVisitor);
NullVisitor() = default;
~NullVisitor() override = default;
void Visit(const Field<bool>& /*field*/) override {}
void Visit(const Field<double>& /*field*/) override {}
void Visit(const Field<float>& /*field*/) override {}
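The explicit `NullVisitor() = default;` is needed because of a general C++ rule: declaring any constructor, including the copy constructor that YUZU_NON_COPYABLE deletes, stops the compiler from implicitly providing a default constructor. A standalone sketch of that rule (not yuzu code):

struct NonCopyable {
    NonCopyable(const NonCopyable&) = delete;
    NonCopyable& operator=(const NonCopyable&) = delete;
    // Without the next line, "NonCopyable x;" would fail to compile, because
    // the deleted copy constructor above counts as a user-declared constructor.
    NonCopyable() = default;
};

int main() {
    NonCopyable x;
    (void)x;
    return 0;
}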

View File

@@ -467,6 +467,8 @@ add_library(core STATIC
hle/service/mii/types.h
hle/service/mm/mm_u.cpp
hle/service/mm/mm_u.h
hle/service/mnpp/mnpp_app.cpp
hle/service/mnpp/mnpp_app.h
hle/service/ncm/ncm.cpp
hle/service/ncm/ncm.h
hle/service/nfc/nfc.cpp

View File

@@ -128,15 +128,6 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {
if (exefs == nullptr)
return exefs;
if (Settings::values.dump_exefs) {
LOG_INFO(Loader, "Dumping ExeFS for title_id={:016X}", title_id);
const auto dump_dir = fs_controller.GetModificationDumpRoot(title_id);
if (dump_dir != nullptr) {
const auto exefs_dir = GetOrCreateDirectoryRelative(dump_dir, "/exefs");
VfsRawCopyD(exefs, exefs_dir);
}
}
const auto& disabled = Settings::values.disabled_addons[title_id];
const auto update_disabled =
std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend();
@@ -179,6 +170,15 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {
}
}
if (Settings::values.dump_exefs) {
LOG_INFO(Loader, "Dumping ExeFS for title_id={:016X}", title_id);
const auto dump_dir = fs_controller.GetModificationDumpRoot(title_id);
if (dump_dir != nullptr) {
const auto exefs_dir = GetOrCreateDirectoryRelative(dump_dir, "/exefs");
VfsRawCopyD(exefs, exefs_dir);
}
}
return exefs;
}

View File

@@ -885,6 +885,12 @@ bool EmulatedController::TestVibration(std::size_t device_index) {
return SetVibration(device_index, DEFAULT_VIBRATION_VALUE);
}
bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) {
LOG_INFO(Service_HID, "Set polling mode {}", polling_mode);
auto& output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
return output_device->SetPollingMode(polling_mode) == Common::Input::PollingError::None;
}
void EmulatedController::SetLedPattern() {
for (auto& device : output_devices) {
if (!device) {

View File

@@ -299,16 +299,23 @@ public:
/**
* Sends a specific vibration to the output device
* @return returns true if vibration had no errors
* @return true if vibration had no errors
*/
bool SetVibration(std::size_t device_index, VibrationValue vibration);
/**
* Sends a small vibration to the output device
* @return returns true if SetVibration was successfull
* @return true if SetVibration was successful
*/
bool TestVibration(std::size_t device_index);
/**
* Sets the desired data to be polled from a controller
* @param polling_mode type of input desired: buttons, gyro, nfc, ir, etc.
* @return true if SetPollingMode was successful
*/
bool SetPollingMode(Common::Input::PollingMode polling_mode);
/// Returns the led pattern corresponding to this emulated controller
LedPattern GetLedPattern() const;
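A hedged sketch of how a caller such as the NFP service might use this, assuming a PollingMode::NFC enumerator and an EmulatedController reference named controller (both are assumptions, not part of this diff):

// Hypothetical caller: ask the controller for NFC data before starting tag
// detection, and log if the output device rejects the request.
if (!controller.SetPollingMode(Common::Input::PollingMode::NFC)) {
    LOG_WARNING(Service_NFP, "Output device rejected NFC polling mode");
}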

View File

@@ -41,24 +41,6 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
}
}
constexpr u64 GetAddressInRange(const KMemoryInfo& info, VAddr addr) {
if (info.GetAddress() < addr) {
return addr;
}
return info.GetAddress();
}
constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr end) {
std::size_t size{info.GetSize()};
if (info.GetAddress() < start) {
size -= start - info.GetAddress();
}
if (info.GetEndAddress() > end) {
size -= info.GetEndAddress() - end;
}
return size;
}
} // namespace
KPageTable::KPageTable(Core::System& system_)
@@ -400,148 +382,471 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
return ResultSuccess;
}
ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
// Lock the table.
KScopedLightLock lk(general_lock);
// Calculate the last address for convenience.
const VAddr last_address = address + size - 1;
std::size_t mapped_size{};
const VAddr end_addr{addr + size};
// Define iteration variables.
VAddr cur_address;
std::size_t mapped_size;
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state != KMemoryState::Free) {
mapped_size += GetSizeInRange(info, addr, end_addr);
}
});
// The entire mapping process can be retried.
while (true) {
// Check if the memory is already mapped.
{
// Lock the table.
KScopedLightLock lk(general_lock);
if (mapped_size == size) {
return ResultSuccess;
}
// Iterate over the memory.
cur_address = address;
mapped_size = 0;
const std::size_t remaining_size{size - mapped_size};
const std::size_t remaining_pages{remaining_size / PageSize};
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Reserve the memory from the process resource limit.
KScopedResourceReservation memory_reservation(
system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
remaining_size);
if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
return ResultLimitReached;
}
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
KPageLinkedList page_linked_list;
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
if (info.GetState() != KMemoryState::Free) {
mapped_size += (last_address + 1 - cur_address);
}
break;
}
CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
memory_pool, allocation_option));
// Track the memory if it's mapped.
if (info.GetState() != KMemoryState::Free) {
mapped_size += VAddr(info.GetEndAddress()) - cur_address;
}
// We succeeded, so commit the memory reservation.
memory_reservation.Commit();
// Map the memory.
auto node{page_linked_list.Nodes().begin()};
PAddr map_addr{node->GetAddress()};
std::size_t src_num_pages{node->GetNumPages()};
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state != KMemoryState::Free) {
return;
}
std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize};
VAddr dst_addr{GetAddressInRange(info, addr)};
while (dst_num_pages) {
if (!src_num_pages) {
node = std::next(node);
map_addr = node->GetAddress();
src_num_pages = node->GetNumPages();
// Advance.
cur_address = info.GetEndAddress();
++it;
}
const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
map_addr);
dst_addr += num_pages * PageSize;
map_addr += num_pages * PageSize;
src_num_pages -= num_pages;
dst_num_pages -= num_pages;
// If the size mapped is the size requested, we've nothing to do.
R_SUCCEED_IF(size == mapped_size);
}
});
mapped_physical_memory_size += remaining_size;
// Allocate and map the memory.
{
// Reserve the memory from the process resource limit.
KScopedResourceReservation memory_reservation(
system.Kernel().CurrentProcess()->GetResourceLimit(),
LimitableResource::PhysicalMemory, size - mapped_size);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
const std::size_t num_pages{size / PageSize};
block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryState::Normal,
KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
// Allocate pages for the new memory.
KPageLinkedList page_linked_list;
R_TRY(system.Kernel().MemoryManager().Allocate(
page_linked_list, (size - mapped_size) / PageSize, memory_pool, allocation_option));
return ResultSuccess;
// Map the memory.
{
// Lock the table.
KScopedLightLock lk(general_lock);
size_t num_allocator_blocks = 0;
// Verify that nobody has mapped memory since we first checked.
{
// Iterate over the memory.
size_t checked_mapped_size = 0;
cur_address = address;
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
const bool is_free = info.GetState() == KMemoryState::Free;
if (is_free) {
if (info.GetAddress() < address) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
++num_allocator_blocks;
}
}
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
if (!is_free) {
checked_mapped_size += (last_address + 1 - cur_address);
}
break;
}
// Track the memory if it's mapped.
if (!is_free) {
checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
}
// If the size now isn't what it was before, somebody mapped or unmapped
// concurrently. If this happened, retry.
if (mapped_size != checked_mapped_size) {
continue;
}
}
// Reset the current tracking address, and make sure we clean up on failure.
cur_address = address;
auto unmap_guard = detail::ScopeExit([&] {
if (cur_address > address) {
const VAddr last_unmap_address = cur_address - 1;
// Iterate, unmapping the pages.
cur_address = address;
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
// If the memory state is free, we mapped it and need to unmap it.
if (info.GetState() == KMemoryState::Free) {
// Determine the range to unmap.
const size_t cur_pages =
std::min(VAddr(info.GetEndAddress()) - cur_address,
last_unmap_address + 1 - cur_address) /
PageSize;
// Unmap.
ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
OperationType::Unmap)
.IsSuccess());
}
// Check if we're done.
if (last_unmap_address <= info.GetLastAddress()) {
break;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
}
}
});
// Iterate over the memory.
auto pg_it = page_linked_list.Nodes().begin();
PAddr pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
// If it's unmapped, we need to map it.
if (info.GetState() == KMemoryState::Free) {
// Determine the range to map.
size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
last_address + 1 - cur_address) /
PageSize;
// While we have pages to map, map them.
while (map_pages > 0) {
// Check if we're at the end of the physical block.
if (pg_pages == 0) {
// Ensure there are more pages to map.
ASSERT(pg_it != page_linked_list.Nodes().end());
// Advance our physical block.
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}
// Map whatever we can.
const size_t cur_pages = std::min(pg_pages, map_pages);
R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
OperationType::Map, pg_phys_addr));
// Advance.
cur_address += cur_pages * PageSize;
map_pages -= cur_pages;
pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
}
}
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
break;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
}
// We succeeded, so commit the memory reservation.
memory_reservation.Commit();
// Increase our tracked mapped size.
mapped_physical_memory_size += (size - mapped_size);
// Update the relevant memory blocks.
block_manager->Update(address, size / PageSize, KMemoryState::Free,
KMemoryPermission::None, KMemoryAttribute::None,
KMemoryState::Normal, KMemoryPermission::UserReadWrite,
KMemoryAttribute::None);
// Cancel our guard.
unmap_guard.Cancel();
return ResultSuccess;
}
}
}
}
ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
// Lock the table.
KScopedLightLock lk(general_lock);
const VAddr end_addr{addr + size};
ResultCode result{ResultSuccess};
std::size_t mapped_size{};
// Calculate the last address for convenience.
const VAddr last_address = address + size - 1;
// Verify that the region can be unmapped
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state == KMemoryState::Normal) {
if (info.attribute != KMemoryAttribute::None) {
result = ResultInvalidCurrentMemory;
return;
// Define iteration variables.
VAddr cur_address = 0;
std::size_t mapped_size = 0;
std::size_t num_allocator_blocks = 0;
// Check if the memory is mapped.
{
// Iterate over the memory.
cur_address = address;
mapped_size = 0;
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
// Verify the memory's state.
const bool is_normal = info.GetState() == KMemoryState::Normal &&
info.GetAttribute() == KMemoryAttribute::None;
const bool is_free = info.GetState() == KMemoryState::Free;
R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
if (is_normal) {
R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
if (info.GetAddress() < address) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
++num_allocator_blocks;
}
}
mapped_size += GetSizeInRange(info, addr, end_addr);
} else if (info.state != KMemoryState::Free) {
result = ResultInvalidCurrentMemory;
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
if (is_normal) {
mapped_size += (last_address + 1 - cur_address);
}
break;
}
// Track the memory if it's mapped.
if (is_normal) {
mapped_size += VAddr(info.GetEndAddress()) - cur_address;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
}
});
if (result.IsError()) {
return result;
// If there's nothing mapped, we've nothing to do.
R_SUCCEED_IF(mapped_size == 0);
}
if (!mapped_size) {
return ResultSuccess;
// Make a page group for the unmap region.
KPageLinkedList pg;
{
auto& impl = this->PageTableImpl();
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
bool cur_valid = false;
Common::PageTable::TraversalEntry next_entry;
bool next_valid = false;
size_t tot_size = 0;
cur_address = address;
next_valid = impl.BeginTraversal(next_entry, context, cur_address);
next_entry.block_size =
(next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));
// Iterate, building the group.
while (true) {
if ((!next_valid && !cur_valid) ||
(next_valid && cur_valid &&
next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
cur_entry.block_size += next_entry.block_size;
} else {
if (cur_valid) {
// ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
}
// Update tracking variables.
tot_size += cur_entry.block_size;
cur_entry = next_entry;
cur_valid = next_valid;
}
if (cur_entry.block_size + tot_size >= size) {
break;
}
next_valid = impl.ContinueTraversal(next_entry, context);
}
// Add the last block.
if (cur_valid) {
// ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
}
}
ASSERT(pg.GetNumPages() == mapped_size / PageSize);
// Unmap each region within the range
KPageLinkedList page_linked_list;
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state == KMemoryState::Normal) {
const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
const std::size_t block_num_pages{block_size / PageSize};
const VAddr block_addr{GetAddressInRange(info, addr)};
// Reset the current tracking address, and make sure we clean up on failure.
cur_address = address;
auto remap_guard = detail::ScopeExit([&] {
if (cur_address > address) {
const VAddr last_map_address = cur_address - 1;
cur_address = address;
AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);
// Iterate over the memory we unmapped.
auto it = block_manager->FindIterator(cur_address);
auto pg_it = pg.Nodes().begin();
PAddr pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None,
OperationType::Unmap);
result.IsError()) {
return;
while (true) {
// Get the memory info for the pages we unmapped, convert to property.
const KMemoryInfo info = it->GetMemoryInfo();
// If the memory is normal, we unmapped it and need to re-map it.
if (info.GetState() == KMemoryState::Normal) {
// Determine the range to map.
size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
last_map_address + 1 - cur_address) /
PageSize;
// While we have pages to map, map them.
while (map_pages > 0) {
// Check if we're at the end of the physical block.
if (pg_pages == 0) {
// Ensure there are more pages to map.
ASSERT(pg_it != pg.Nodes().end());
// Advance our physical block.
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}
// Map whatever we can.
const size_t cur_pages = std::min(pg_pages, map_pages);
ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
OperationType::Map, pg_phys_addr) == ResultSuccess);
// Advance.
cur_address += cur_pages * PageSize;
map_pages -= cur_pages;
pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
}
}
// Check if we're done.
if (last_map_address <= info.GetLastAddress()) {
break;
}
// Advance.
++it;
}
}
});
if (result.IsError()) {
return result;
// Iterate over the memory, unmapping as we go.
auto it = block_manager->FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != block_manager->end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
// If the memory state is normal, we need to unmap it.
if (info.GetState() == KMemoryState::Normal) {
// Determine the range to unmap.
const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
last_address + 1 - cur_address) /
PageSize;
// Unmap.
R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
}
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
break;
}
// Advance.
cur_address = info.GetEndAddress();
++it;
}
const std::size_t num_pages{size / PageSize};
system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool,
allocation_option);
block_manager->Update(addr, num_pages, KMemoryState::Free);
// Release the memory resource.
mapped_physical_memory_size -= mapped_size;
auto process{system.Kernel().CurrentProcess()};
process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
mapped_physical_memory_size -= mapped_size;
// Update memory blocks.
system.Kernel().MemoryManager().Free(pg, size / PageSize, memory_pool, allocation_option);
block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None);
// We succeeded.
remap_guard.Cancel();
return ResultSuccess;
}
@@ -681,9 +986,8 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
VAddr cur_addr{addr};
for (const auto& node : page_linked_list.Nodes()) {
const std::size_t num_pages{(addr - cur_addr) / PageSize};
if (const auto result{
Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)};
if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
OperationType::Unmap)};
result.IsError()) {
return result;
}
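The rewritten MapPhysicalMemory above follows an optimistic retry pattern: measure what is already mapped while holding the table lock, allocate the missing pages with the lock released, then re-take the lock, verify nothing changed in between, and retry the whole loop if it did; a scope-exit guard unmaps any partially mapped pages if the final mapping step fails. A self-contained, simplified illustration of that check/allocate/re-check loop (a toy model, not yuzu code, and it omits the scope-exit cleanup):

#include <cstddef>
#include <mutex>
#include <vector>

struct Table {
    std::mutex lock;
    std::vector<bool> mapped; // one flag per page

    std::size_t CountMapped(std::size_t first, std::size_t count) {
        std::size_t n = 0;
        for (std::size_t i = first; i < first + count; ++i) {
            n += mapped[i] ? 1 : 0;
        }
        return n;
    }

    bool MapRange(std::size_t first, std::size_t count) {
        while (true) {
            std::size_t already = 0;
            {
                std::scoped_lock lk{lock};
                already = CountMapped(first, count);
                if (already == count) {
                    return true; // everything requested is already mapped
                }
            }

            // Allocate the missing pages here, outside the lock (stands in for
            // MemoryManager().Allocate() in the real code).

            {
                std::scoped_lock lk{lock};
                if (CountMapped(first, count) != already) {
                    continue; // another thread mapped/unmapped in between: retry
                }
                for (std::size_t i = first; i < first + count; ++i) {
                    mapped[i] = true; // commit the new mappings
                }
                return true;
            }
        }
    }
};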

View File

@@ -645,6 +645,10 @@ static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
LOG_DEBUG(Debug_Emulated, "{}", str);
}
static void OutputDebugString32(Core::System& system, u32 address, u32 len) {
OutputDebugString(system, address, len);
}
/// Gets system/memory information for the current process
static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
u64 info_sub_id) {
@@ -1404,7 +1408,7 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
}
static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
LOG_TRACE(Kernel_SVC, "called, handle_out=0x{:X}, address=0x{:X}, size=0x{:X}",
LOG_TRACE(Kernel_SVC, "called, handle_out={}, address=0x{:X}, size=0x{:X}",
static_cast<void*>(out), address, size);
// Get kernel instance.
auto& kernel = system.Kernel();
@@ -1438,6 +1442,10 @@ static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr addr
return ResultSuccess;
}
static ResultCode CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
return CreateCodeMemory(system, out, address, size);
}
static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
VAddr address, size_t size, Svc::MemoryPermission perm) {
@@ -1517,6 +1525,12 @@ static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_han
return ResultSuccess;
}
static ResultCode ControlCodeMemory32(Core::System& system, Handle code_memory_handle,
u32 operation, u64 address, u64 size,
Svc::MemoryPermission perm) {
return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
}
static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
VAddr page_info_address, Handle process_handle,
VAddr address) {
@@ -2598,7 +2612,7 @@ static const FunctionDef SVC_Table_32[] = {
{0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"},
{0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"},
{0x26, SvcWrap32<Break32>, "Break32"},
{0x27, nullptr, "OutputDebugString32"},
{0x27, SvcWrap32<OutputDebugString32>, "OutputDebugString32"},
{0x28, nullptr, "ReturnFromException32"},
{0x29, SvcWrap32<GetInfo32>, "GetInfo32"},
{0x2a, nullptr, "FlushEntireDataCache32"},
@@ -2634,8 +2648,8 @@ static const FunctionDef SVC_Table_32[] = {
{0x48, nullptr, "MapPhysicalMemoryUnsafe32"},
{0x49, nullptr, "UnmapPhysicalMemoryUnsafe32"},
{0x4a, nullptr, "SetUnsafeLimit32"},
{0x4b, nullptr, "CreateCodeMemory32"},
{0x4c, nullptr, "ControlCodeMemory32"},
{0x4b, SvcWrap32<CreateCodeMemory32>, "CreateCodeMemory32"},
{0x4c, SvcWrap32<ControlCodeMemory32>, "ControlCodeMemory32"},
{0x4d, nullptr, "SleepSystem32"},
{0x4e, nullptr, "ReadWriteRegister32"},
{0x4f, nullptr, "SetProcessActivity32"},

View File

@@ -669,4 +669,26 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
// Used by CreateCodeMemory32
template <ResultCode func(Core::System&, Handle*, u32, u32)>
void SvcWrap32(Core::System& system) {
Handle handle = 0;
const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2)).raw;
system.CurrentArmInterface().SetReg(1, handle);
FuncReturn(system, retval);
}
// Used by ControlCodeMemory32
template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, Param32(system, 0), Param32(system, 1), Param(system, 2), Param(system, 4),
static_cast<Svc::MemoryPermission>(Param32(system, 6)))
.raw;
FuncReturn(system, retval);
}
} // namespace Kernel

View File

@@ -618,7 +618,7 @@ void AppletMessageQueue::PushMessage(AppletMessage msg) {
AppletMessageQueue::AppletMessage AppletMessageQueue::PopMessage() {
if (messages.empty()) {
on_new_message->GetWritableEvent().Clear();
return AppletMessage::NoMessage;
return AppletMessage::None;
}
auto msg = messages.front();
messages.pop();
@@ -633,7 +633,7 @@ std::size_t AppletMessageQueue::GetMessageCount() const {
}
void AppletMessageQueue::RequestExit() {
PushMessage(AppletMessage::ExitRequested);
PushMessage(AppletMessage::Exit);
}
void AppletMessageQueue::FocusStateChanged() {
@@ -732,7 +732,7 @@ void ICommonStateGetter::ReceiveMessage(Kernel::HLERequestContext& ctx) {
const auto message = msg_queue->PopMessage();
IPC::ResponseBuilder rb{ctx, 3};
if (message == AppletMessageQueue::AppletMessage::NoMessage) {
if (message == AppletMessageQueue::AppletMessage::None) {
LOG_ERROR(Service_AM, "Message queue is empty");
rb.Push(ERR_NO_MESSAGES);
rb.PushEnum<AppletMessageQueue::AppletMessage>(message);

View File

@@ -22,6 +22,7 @@ class NVFlinger;
namespace Service::AM {
// This is nn::settings::Language
enum SystemLanguage {
Japanese = 0,
English = 1, // en-US
@@ -41,16 +42,44 @@ enum SystemLanguage {
// 4.0.0+
SimplifiedChinese = 15,
TraditionalChinese = 16,
// 10.1.0+
BrazilianPortuguese = 17,
};
class AppletMessageQueue {
public:
// This is nn::am::AppletMessage
enum class AppletMessage : u32 {
NoMessage = 0,
ExitRequested = 4,
None = 0,
ChangeIntoForeground = 1,
ChangeIntoBackground = 2,
Exit = 4,
ApplicationExited = 6,
FocusStateChanged = 15,
Resume = 16,
DetectShortPressingHomeButton = 20,
DetectLongPressingHomeButton = 21,
DetectShortPressingPowerButton = 22,
DetectMiddlePressingPowerButton = 23,
DetectLongPressingPowerButton = 24,
RequestToPrepareSleep = 25,
FinishedSleepSequence = 26,
SleepRequiredByHighTemperature = 27,
SleepRequiredByLowBattery = 28,
AutoPowerDown = 29,
OperationModeChanged = 30,
PerformanceModeChanged = 31,
DetectReceivingCecSystemStandby = 32,
SdCardRemoved = 33,
LaunchApplicationRequested = 50,
RequestToDisplay = 51,
ShowApplicationLogo = 55,
HideApplicationLogo = 56,
ForceHideApplicationLogo = 57,
FloatingApplicationDetected = 60,
DetectShortPressingCaptureButton = 90,
AlbumScreenShotTaken = 92,
AlbumRecordingSaved = 93,
};
explicit AppletMessageQueue(Core::System& system);
@@ -179,11 +208,14 @@ public:
~ICommonStateGetter() override;
private:
// This is nn::oe::FocusState
enum class FocusState : u8 {
InFocus = 1,
NotInFocus = 2,
Background = 3,
};
// This is nn::oe::OperationMode
enum class OperationMode : u8 {
Handheld = 0,
Docked = 1,

View File

@@ -17,8 +17,8 @@ constexpr auto DEFAULT_PERFORMANCE_CONFIGURATION = PerformanceConfiguration::Con
Controller::Controller(Core::Timing::CoreTiming& core_timing_)
: core_timing{core_timing_}, configs{
{PerformanceMode::Handheld, DEFAULT_PERFORMANCE_CONFIGURATION},
{PerformanceMode::Docked, DEFAULT_PERFORMANCE_CONFIGURATION},
{PerformanceMode::Normal, DEFAULT_PERFORMANCE_CONFIGURATION},
{PerformanceMode::Boost, DEFAULT_PERFORMANCE_CONFIGURATION},
} {}
Controller::~Controller() = default;
@@ -63,13 +63,13 @@ void Controller::SetFromCpuBoostMode(CpuBoostMode mode) {
PerformanceConfiguration::Config15,
}};
SetPerformanceConfiguration(PerformanceMode::Docked,
SetPerformanceConfiguration(PerformanceMode::Boost,
BOOST_MODE_TO_CONFIG_MAP.at(static_cast<u32>(mode)));
}
PerformanceMode Controller::GetCurrentPerformanceMode() const {
return Settings::values.use_docked_mode.GetValue() ? PerformanceMode::Docked
: PerformanceMode::Handheld;
return Settings::values.use_docked_mode.GetValue() ? PerformanceMode::Boost
: PerformanceMode::Normal;
}
PerformanceConfiguration Controller::GetCurrentPerformanceConfiguration(PerformanceMode mode) {

View File

@@ -32,15 +32,18 @@ enum class PerformanceConfiguration : u32 {
Config16 = 0x9222000C,
};
// This is nn::oe::CpuBoostMode
enum class CpuBoostMode : u32 {
Disabled = 0,
Full = 1, // CPU + GPU -> Config 13, 14, 15, or 16
Partial = 2, // GPU Only -> Config 15 or 16
Normal = 0, // Boost mode disabled
FastLoad = 1, // CPU + GPU -> Config 13, 14, 15, or 16
Partial = 2, // GPU Only -> Config 15 or 16
};
enum class PerformanceMode : u8 {
Handheld = 0,
Docked = 1,
// This is nn::oe::PerformanceMode
enum class PerformanceMode : s32 {
Invalid = -1,
Normal = 0,
Boost = 1,
};
// Class to manage the state and change of the emulated system performance.

View File

@@ -0,0 +1,45 @@
// Copyright 2022 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/service/mnpp/mnpp_app.h"
#include "core/hle/service/sm/sm.h"
namespace Service::MNPP {
class MNPP_APP final : public ServiceFramework<MNPP_APP> {
public:
explicit MNPP_APP(Core::System& system_) : ServiceFramework{system_, "mnpp:app"} {
// clang-format off
static const FunctionInfo functions[] = {
{0, &MNPP_APP::Unknown0, "unknown0"},
{1, &MNPP_APP::Unknown1, "unknown1"},
};
// clang-format on
RegisterHandlers(functions);
}
private:
void Unknown0(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_MNPP, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void Unknown1(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_MNPP, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
};
void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) {
std::make_shared<MNPP_APP>(system)->InstallAsService(service_manager);
}
} // namespace Service::MNPP

View File

@@ -0,0 +1,20 @@
// Copyright 2022 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
namespace Core {
class System;
}
namespace Service::SM {
class ServiceManager;
}
namespace Service::MNPP {
/// Registers all MNPP services with the specified service manager.
void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system);
} // namespace Service::MNPP

File diff suppressed because it is too large

View File

@@ -7,15 +7,132 @@
#include <array>
#include <vector>
#include "common/common_funcs.h"
#include "core/hle/service/kernel_helpers.h"
#include "core/hle/service/mii/mii_manager.h"
#include "core/hle/service/service.h"
namespace Kernel {
class KEvent;
}
class KReadableEvent;
} // namespace Kernel
namespace Core::HID {
enum class NpadIdType : u32;
} // namespace Core::HID
namespace Service::NFP {
enum class ServiceType : u32 {
User,
Debug,
System,
};
enum class State : u32 {
NonInitialized,
Initialized,
};
enum class DeviceState : u32 {
Initialized,
SearchingForTag,
TagFound,
TagRemoved,
TagMounted,
Unaviable,
Finalized,
};
enum class ModelType : u32 {
Amiibo,
};
enum class MountTarget : u32 {
Rom,
Ram,
All,
};
enum class AmiiboType : u8 {
Figure,
Card,
Yarn,
};
enum class AmiiboSeries : u8 {
SuperSmashBros,
SuperMario,
ChibiRobo,
YoshiWoollyWorld,
Splatoon,
AnimalCrossing,
EightBitMario,
Skylanders,
Unknown8,
TheLegendOfZelda,
ShovelKnight,
Unknown11,
Kiby,
Pokemon,
MarioSportsSuperstars,
MonsterHunter,
BoxBoy,
Pikmin,
FireEmblem,
Metroid,
Others,
MegaMan,
Diablo
};
using TagUuid = std::array<u8, 10>;
struct TagInfo {
TagUuid uuid;
u8 uuid_length;
INSERT_PADDING_BYTES(0x15);
s32 protocol;
u32 tag_type;
INSERT_PADDING_BYTES(0x30);
};
static_assert(sizeof(TagInfo) == 0x58, "TagInfo is an invalid size");
struct CommonInfo {
u16 last_write_year;
u8 last_write_month;
u8 last_write_day;
u16 write_counter;
u16 version;
u32 application_area_size;
INSERT_PADDING_BYTES(0x34);
};
static_assert(sizeof(CommonInfo) == 0x40, "CommonInfo is an invalid size");
struct ModelInfo {
u16 character_id;
u8 character_variant;
AmiiboType amiibo_type;
u16 model_number;
AmiiboSeries series;
u8 fixed; // Must be 02
INSERT_PADDING_BYTES(0x4); // Unknown
INSERT_PADDING_BYTES(0x20); // Probably a SHA256-(HMAC?) hash
INSERT_PADDING_BYTES(0x14); // SHA256-HMAC
};
static_assert(sizeof(ModelInfo) == 0x40, "ModelInfo is an invalid size");
struct RegisterInfo {
Service::Mii::MiiInfo mii_char_info;
u16 first_write_year;
u8 first_write_month;
u8 first_write_day;
std::array<u8, 11> amiibo_name;
u8 unknown;
INSERT_PADDING_BYTES(0x98);
};
static_assert(sizeof(RegisterInfo) == 0x100, "RegisterInfo is an invalid size");
class Module final {
public:
class Interface : public ServiceFramework<Interface> {
@@ -24,34 +141,131 @@ public:
const char* name);
~Interface() override;
struct ModelInfo {
std::array<u8, 0x8> amiibo_identification_block;
INSERT_PADDING_BYTES(0x38);
struct EncryptedAmiiboFile {
u16 crypto_init; // Must be A5 XX
u16 write_count; // Number of times the amiibo has been written?
INSERT_PADDING_BYTES(0x20); // System crypts
INSERT_PADDING_BYTES(0x20); // SHA256-(HMAC?) hash
ModelInfo model_info; // This struct is bigger than documentation
INSERT_PADDING_BYTES(0xC); // SHA256-HMAC
INSERT_PADDING_BYTES(0x114); // section 1 encrypted buffer
INSERT_PADDING_BYTES(0x54); // section 2 encrypted buffer
};
static_assert(sizeof(ModelInfo) == 0x40, "ModelInfo is an invalid size");
static_assert(sizeof(EncryptedAmiiboFile) == 0x1F8, "AmiiboFile is an invalid size");
struct AmiiboFile {
std::array<u8, 10> uuid;
INSERT_PADDING_BYTES(0x4a);
ModelInfo model_info;
struct NTAG215Password {
u32 PWD; // Password to allow write access
u16 PACK; // Password acknowledge reply
u16 RFUI; // Reserved for future use
};
static_assert(sizeof(AmiiboFile) == 0x94, "AmiiboFile is an invalid size");
static_assert(sizeof(NTAG215Password) == 0x8, "NTAG215Password is an invalid size");
struct NTAG215File {
TagUuid uuid; // Unique serial number
u16 lock_bytes; // Set defined pages as read only
u32 compability_container; // Defines available memory
EncryptedAmiiboFile user_memory; // Writable data
u32 dynamic_lock; // Dynamic lock
u32 CFG0; // Defines memory protected by password
u32 CFG1; // Defines number of verification attempts
NTAG215Password password; // Password data
};
static_assert(sizeof(NTAG215File) == 0x21C, "NTAG215File is an invalid size");
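// Size check, worked out (a sketch; assumes the natural alignment of these
// members introduces no padding): 10 (uuid) + 2 (lock_bytes) +
// 4 (compability_container) + 0x1F8 (user_memory) + 4 (dynamic_lock) +
// 4 (CFG0) + 4 (CFG1) + 8 (password) = 0x21C bytes, matching the
// static_assert above.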
void CreateUserInterface(Kernel::HLERequestContext& ctx);
bool LoadAmiibo(const std::vector<u8>& buffer);
Kernel::KReadableEvent& GetNFCEvent();
const AmiiboFile& GetAmiiboBuffer() const;
void CloseAmiibo();
void Initialize();
void Finalize();
ResultCode StartDetection(s32 protocol_);
ResultCode StopDetection();
ResultCode Mount();
ResultCode Unmount();
ResultCode GetTagInfo(TagInfo& tag_info) const;
ResultCode GetCommonInfo(CommonInfo& common_info) const;
ResultCode GetModelInfo(ModelInfo& model_info) const;
ResultCode GetRegisterInfo(RegisterInfo& register_info) const;
ResultCode OpenApplicationArea(u32 access_id);
ResultCode GetApplicationArea(std::vector<u8>& data) const;
ResultCode SetApplicationArea(const std::vector<u8>& data);
ResultCode CreateApplicationArea(u32 access_id, const std::vector<u8>& data);
u64 GetHandle() const;
DeviceState GetCurrentState() const;
Core::HID::NpadIdType GetNpadId() const;
Kernel::KReadableEvent& GetActivateEvent() const;
Kernel::KReadableEvent& GetDeactivateEvent() const;
protected:
std::shared_ptr<Module> module;
private:
/// Validates that the amiibo file is not corrupted
bool IsAmiiboValid() const;
bool AmiiboApplicationDataExist(u32 access_id) const;
std::vector<u8> LoadAmiiboApplicationData(u32 access_id) const;
void SaveAmiiboApplicationData(u32 access_id, const std::vector<u8>& data) const;
/// return password needed to allow write access to protected memory
u32 GetTagPassword(const TagUuid& uuid) const;
const Core::HID::NpadIdType npad_id;
DeviceState device_state{DeviceState::Unaviable};
KernelHelpers::ServiceContext service_context;
Kernel::KEvent* nfc_tag_load;
AmiiboFile amiibo{};
Kernel::KEvent* activate_event;
Kernel::KEvent* deactivate_event;
NTAG215File tag_data{};
s32 protocol;
bool is_application_area_initialized{};
u32 application_area_id;
std::vector<u8> application_area_data;
};
};
class IUser final : public ServiceFramework<IUser> {
public:
explicit IUser(Module::Interface& nfp_interface_, Core::System& system_);
private:
void Initialize(Kernel::HLERequestContext& ctx);
void Finalize(Kernel::HLERequestContext& ctx);
void ListDevices(Kernel::HLERequestContext& ctx);
void StartDetection(Kernel::HLERequestContext& ctx);
void StopDetection(Kernel::HLERequestContext& ctx);
void Mount(Kernel::HLERequestContext& ctx);
void Unmount(Kernel::HLERequestContext& ctx);
void OpenApplicationArea(Kernel::HLERequestContext& ctx);
void GetApplicationArea(Kernel::HLERequestContext& ctx);
void SetApplicationArea(Kernel::HLERequestContext& ctx);
void CreateApplicationArea(Kernel::HLERequestContext& ctx);
void GetTagInfo(Kernel::HLERequestContext& ctx);
void GetRegisterInfo(Kernel::HLERequestContext& ctx);
void GetCommonInfo(Kernel::HLERequestContext& ctx);
void GetModelInfo(Kernel::HLERequestContext& ctx);
void AttachActivateEvent(Kernel::HLERequestContext& ctx);
void AttachDeactivateEvent(Kernel::HLERequestContext& ctx);
void GetState(Kernel::HLERequestContext& ctx);
void GetDeviceState(Kernel::HLERequestContext& ctx);
void GetNpadId(Kernel::HLERequestContext& ctx);
void GetApplicationAreaSize(Kernel::HLERequestContext& ctx);
void AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx);
KernelHelpers::ServiceContext service_context;
// TODO(german77): We should have a vector of interfaces
Module::Interface& nfp_interface;
State state{State::NonInitialized};
Kernel::KEvent* availability_change_event;
};
void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system);
} // namespace Service::NFP

View File

@@ -39,6 +39,7 @@
#include "core/hle/service/mig/mig.h"
#include "core/hle/service/mii/mii.h"
#include "core/hle/service/mm/mm_u.h"
#include "core/hle/service/mnpp/mnpp_app.h"
#include "core/hle/service/ncm/ncm.h"
#include "core/hle/service/nfc/nfc.h"
#include "core/hle/service/nfp/nfp.h"
@@ -265,6 +266,7 @@ Services::Services(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system
Migration::InstallInterfaces(*sm, system);
Mii::InstallInterfaces(*sm, system);
MM::InstallInterfaces(*sm, system);
MNPP::InstallInterfaces(*sm, system);
NCM::InstallInterfaces(*sm, system);
NFC::InstallInterfaces(*sm, system);
NFP::InstallInterfaces(*sm, system);

View File

@@ -554,10 +554,12 @@ void CopyBufferToImage(vk::CommandBuffer cmdbuf, VkBuffer src_buffer, VkImage im
};
}
[[nodiscard]] bool IsFormatFlipped(PixelFormat format) {
[[nodiscard]] bool IsFormatFlipped(PixelFormat format, bool emulate_bgr565) {
switch (format) {
case PixelFormat::A1B5G5R5_UNORM:
return true;
case PixelFormat::B5G6R5_UNORM:
return emulate_bgr565;
default:
return false;
}
@@ -1488,7 +1490,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
};
if (!info.IsRenderTarget()) {
swizzle = info.Swizzle();
if (IsFormatFlipped(format)) {
if (IsFormatFlipped(format, device->MustEmulateBGR565())) {
std::ranges::transform(swizzle, swizzle.begin(), SwapBlueRed);
}
if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0) {
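With B5G6R5 emulated via R5G6B5, the image view compensates by swapping the red and blue sources in its swizzle. A simplified stand-in for the SwapBlueRed helper used above (a sketch under assumed enumerator names, not the actual yuzu definition):

enum class Source { R, G, B, A };

constexpr Source SwapBlueRed(Source s) {
    switch (s) {
    case Source::R:
        return Source::B; // sample blue where red was requested
    case Source::B:
        return Source::R; // and red where blue was requested
    default:
        return s; // green and alpha are unchanged
    }
}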

View File

@@ -39,6 +39,11 @@ constexpr std::array DEPTH16_UNORM_STENCIL8_UINT{
VK_FORMAT_D32_SFLOAT_S8_UINT,
VK_FORMAT_UNDEFINED,
};
constexpr std::array B5G6R5_UNORM_PACK16{
VK_FORMAT_R5G6B5_UNORM_PACK16,
VK_FORMAT_UNDEFINED,
};
} // namespace Alternatives
enum class NvidiaArchitecture {
@@ -87,6 +92,8 @@ constexpr const VkFormat* GetFormatAlternatives(VkFormat format) {
return Alternatives::DEPTH24_UNORM_STENCIL8_UINT.data();
case VK_FORMAT_D16_UNORM_S8_UINT:
return Alternatives::DEPTH16_UNORM_STENCIL8_UINT.data();
case VK_FORMAT_B5G6R5_UNORM_PACK16:
return Alternatives::B5G6R5_UNORM_PACK16.data();
default:
return nullptr;
}
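GetFormatAlternatives returns a VK_FORMAT_UNDEFINED-terminated list, so a caller can fall back format by format until it finds one the device actually supports. A sketch of such a fallback walk (simplified and hypothetical; PickSupportedFormat and is_supported are assumed names, and the usual Vulkan and standard headers are assumed):

VkFormat PickSupportedFormat(VkFormat wanted, const auto& is_supported) {
    if (is_supported(wanted)) {
        return wanted;
    }
    const VkFormat* alternatives = GetFormatAlternatives(wanted);
    if (alternatives == nullptr) {
        return wanted; // no known fallback; use the requested format anyway
    }
    for (std::size_t i = 0; alternatives[i] != VK_FORMAT_UNDEFINED; ++i) {
        if (is_supported(alternatives[i])) {
            return alternatives[i]; // e.g. R5G6B5 standing in for B5G6R5
        }
    }
    return wanted;
}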
@@ -224,9 +231,14 @@ std::vector<std::string> GetSupportedExtensions(vk::PhysicalDevice physical) {
return supported_extensions;
}
bool IsExtensionSupported(std::span<const std::string> supported_extensions,
std::string_view extension) {
return std::ranges::find(supported_extensions, extension) != supported_extensions.end();
}
NvidiaArchitecture GetNvidiaArchitecture(vk::PhysicalDevice physical,
std::span<const std::string> exts) {
if (std::ranges::find(exts, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME) != exts.end()) {
if (IsExtensionSupported(exts, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME)) {
VkPhysicalDeviceFragmentShadingRatePropertiesKHR shading_rate_props{};
shading_rate_props.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR;
@@ -239,7 +251,7 @@ NvidiaArchitecture GetNvidiaArchitecture(vk::PhysicalDevice physical,
return NvidiaArchitecture::AmpereOrNewer;
}
}
if (std::ranges::find(exts, VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME) != exts.end()) {
if (IsExtensionSupported(exts, VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME)) {
return NvidiaArchitecture::Turing;
}
return NvidiaArchitecture::VoltaOrOlder;
@@ -604,7 +616,8 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
break;
}
}
if (ext_extended_dynamic_state && driver_id == VK_DRIVER_ID_MESA_RADV) {
const bool is_radv = driver_id == VK_DRIVER_ID_MESA_RADV;
if (ext_extended_dynamic_state && is_radv) {
// Mask driver version variant
const u32 version = (properties.driverVersion << 3) >> 3;
if (version < VK_MAKE_API_VERSION(0, 21, 2, 0)) {
@@ -613,6 +626,17 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
ext_extended_dynamic_state = false;
}
}
if (ext_vertex_input_dynamic_state && is_radv) {
// TODO(ameerj): Blacklist only offending driver versions
// TODO(ameerj): Confirm if RDNA1 is affected
const bool is_rdna2 =
IsExtensionSupported(supported_extensions, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
if (is_rdna2) {
LOG_WARNING(Render_Vulkan,
"RADV has broken VK_EXT_vertex_input_dynamic_state on RDNA2 hardware");
ext_vertex_input_dynamic_state = false;
}
}
sets_per_pool = 64;
const bool is_amd =
@@ -628,7 +652,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
has_broken_cube_compatibility = true;
}
}
const bool is_amd_or_radv = is_amd || driver_id == VK_DRIVER_ID_MESA_RADV;
const bool is_amd_or_radv = is_amd || is_radv;
if (ext_sampler_filter_minmax && is_amd_or_radv) {
// Disable ext_sampler_filter_minmax on AMD GCN4 and lower as it is broken.
if (!is_float16_supported) {
@@ -639,6 +663,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
}
const bool is_intel_windows = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS;
const bool is_intel_anv = driver_id == VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA;
if (ext_vertex_input_dynamic_state && is_intel_windows) {
LOG_WARNING(Render_Vulkan, "Blacklisting Intel for VK_EXT_vertex_input_dynamic_state");
ext_vertex_input_dynamic_state = false;
@@ -652,6 +677,10 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
LOG_WARNING(Render_Vulkan, "Intel proprietary drivers do not support MSAA image blits");
cant_blit_msaa = true;
}
if (is_intel_anv) {
LOG_WARNING(Render_Vulkan, "ANV driver does not support native BGR format");
must_emulate_bgr565 = true;
}
supports_d24_depth =
IsFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT,

View File

@@ -354,6 +354,10 @@ public:
return cant_blit_msaa;
}
bool MustEmulateBGR565() const {
return must_emulate_bgr565;
}
private:
/// Checks if the physical device is suitable.
void CheckSuitability(bool requires_swapchain) const;
@@ -448,6 +452,7 @@ private:
bool has_nsight_graphics{}; ///< Has Nsight Graphics attached
bool supports_d24_depth{}; ///< Supports D24 depth buffers.
bool cant_blit_msaa{}; ///< Does not support MSAA<->MSAA blitting.
bool must_emulate_bgr565{}; ///< Emulates BGR565 by swizzling RGB565 format.
// Telemetry parameters
std::string vendor_name; ///< Device's driver name.

View File

@@ -77,13 +77,13 @@ const std::array<UISettings::Shortcut, 22> Config::default_hotkeys{{
{QStringLiteral("Exit Fullscreen"), QStringLiteral("Main Window"), {QStringLiteral("Esc"), QStringLiteral(""), Qt::WindowShortcut}},
{QStringLiteral("Exit yuzu"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+Q"), QStringLiteral("Home+Minus"), Qt::WindowShortcut}},
{QStringLiteral("Fullscreen"), QStringLiteral("Main Window"), {QStringLiteral("F11"), QStringLiteral("Home+B"), Qt::WindowShortcut}},
{QStringLiteral("Load Amiibo"), QStringLiteral("Main Window"), {QStringLiteral("F2"), QStringLiteral("Home+A"), Qt::WidgetWithChildrenShortcut}},
{QStringLiteral("Load File"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+O"), QStringLiteral(""), Qt::WidgetWithChildrenShortcut}},
{QStringLiteral("Load/Remove Amiibo"), QStringLiteral("Main Window"), {QStringLiteral("F2"), QStringLiteral("Home+A"), Qt::WidgetWithChildrenShortcut}},
{QStringLiteral("Restart Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F6"), QStringLiteral(""), Qt::WindowShortcut}},
{QStringLiteral("Stop Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F5"), QStringLiteral(""), Qt::WindowShortcut}},
{QStringLiteral("TAS Start/Stop"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F5"), QStringLiteral(""), Qt::ApplicationShortcut}},
{QStringLiteral("TAS Reset"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F6"), QStringLiteral(""), Qt::ApplicationShortcut}},
{QStringLiteral("TAS Record"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F7"), QStringLiteral(""), Qt::ApplicationShortcut}},
{QStringLiteral("TAS Reset"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F6"), QStringLiteral(""), Qt::ApplicationShortcut}},
{QStringLiteral("TAS Start/Stop"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F5"), QStringLiteral(""), Qt::ApplicationShortcut}},
{QStringLiteral("Toggle Filter Bar"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F"), QStringLiteral(""), Qt::WindowShortcut}},
{QStringLiteral("Toggle Framerate Limit"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+U"), QStringLiteral("Home+Y"), Qt::ApplicationShortcut}},
{QStringLiteral("Toggle Mouse Panning"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F9"), QStringLiteral(""), Qt::ApplicationShortcut}},

View File

@@ -1332,6 +1332,9 @@ void ConfigureInputPlayer::HandleClick(
QPushButton* button, std::size_t button_id,
std::function<void(const Common::ParamPackage&)> new_input_setter,
InputCommon::Polling::InputType type) {
if (timeout_timer->isActive()) {
return;
}
if (button == ui->buttonMotionLeft || button == ui->buttonMotionRight) {
button->setText(tr("Shake!"));
} else {

View File

@@ -227,6 +227,9 @@ void ConfigureTouchFromButton::RenameMapping() {
}
void ConfigureTouchFromButton::GetButtonInput(const int row_index, const bool is_new) {
if (timeout_timer->isActive()) {
return;
}
binding_list_model->item(row_index, 0)->setText(tr("[press key]"));
input_setter = [this, row_index, is_new](const Common::ParamPackage& params,

View File

@@ -934,7 +934,7 @@ void GMainWindow::InitializeHotkeys() {
hotkey_registry.LoadHotkeys();
LinkActionShortcut(ui->action_Load_File, QStringLiteral("Load File"));
LinkActionShortcut(ui->action_Load_Amiibo, QStringLiteral("Load Amiibo"));
LinkActionShortcut(ui->action_Load_Amiibo, QStringLiteral("Load/Remove Amiibo"));
LinkActionShortcut(ui->action_Exit, QStringLiteral("Exit yuzu"));
LinkActionShortcut(ui->action_Restart, QStringLiteral("Restart Emulation"));
LinkActionShortcut(ui->action_Pause, QStringLiteral("Continue/Pause Emulation"));
@@ -2927,6 +2927,24 @@ void GMainWindow::OnLoadAmiibo() {
return;
}
Service::SM::ServiceManager& sm = system->ServiceManager();
auto nfc = sm.GetService<Service::NFP::Module::Interface>("nfp:user");
if (nfc == nullptr) {
QMessageBox::warning(this, tr("Error"), tr("The current game is not looking for amiibos"));
return;
}
const auto nfc_state = nfc->GetCurrentState();
if (nfc_state == Service::NFP::DeviceState::TagFound ||
nfc_state == Service::NFP::DeviceState::TagMounted) {
nfc->CloseAmiibo();
return;
}
if (nfc_state != Service::NFP::DeviceState::SearchingForTag) {
QMessageBox::warning(this, tr("Error"), tr("The current game is not looking for amiibos"));
return;
}
is_amiibo_file_select_active = true;
const QString extensions{QStringLiteral("*.bin")};
const QString file_filter = tr("Amiibo File (%1);; All Files (*.*)").arg(extensions);

View File

@@ -266,7 +266,7 @@
<bool>false</bool>
</property>
<property name="text">
<string>Load &amp;Amiibo...</string>
<string>Load/Remove &amp;Amiibo...</string>
</property>
</action>
<action name="action_Report_Compatibility">