Compare commits

21 Commits:

- 1913cf4783
- 40dee76c57
- 23d68a07dc
- 60746e4e52
- 6d00780045
- 59dae03dbe
- 22420612db
- b7551e457b
- 2ba4e2263c
- ea3151f475
- 6c9ca8cbca
- c30cd898fc
- f1a4a004fb
- 0a023cfb4f
- 9022d926eb
- fbb3cd110c
- bc0f1896fc
- 4415e00181
- 10c6d89119
- 239ac8abe2
- 9e11a76e92
@@ -181,10 +181,10 @@ add_library(core STATIC
     hle/service/nvflinger/buffer_queue.h
     hle/service/nvflinger/nvflinger.cpp
     hle/service/nvflinger/nvflinger.h
+    hle/service/pctl/module.cpp
+    hle/service/pctl/module.h
     hle/service/pctl/pctl.cpp
     hle/service/pctl/pctl.h
-    hle/service/pctl/pctl_a.cpp
-    hle/service/pctl/pctl_a.h
     hle/service/service.cpp
     hle/service/service.h
     hle/service/set/set.cpp
@@ -26,7 +26,7 @@ ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) {

     u16 slot = next_free_slot;
     if (slot >= generations.size()) {
-        LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
+        NGLOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
         return ERR_OUT_OF_HANDLES;
     }
     next_free_slot = generations[slot];
@@ -48,7 +48,7 @@ ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) {
 ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
     SharedPtr<Object> object = GetGeneric(handle);
     if (object == nullptr) {
-        LOG_ERROR(Kernel, "Tried to duplicate invalid handle: %08X", handle);
+        NGLOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
         return ERR_INVALID_HANDLE;
     }
     return Create(std::move(object));
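Note: every LOG_* → NGLOG_* change in this compare swaps a printf-style conversion specifier for an fmt format spec (`%08X` → `{:08X}`, `0x%08X` → `{:#010X}`, `%s`/`.c_str()` → `{}`). A minimal standalone sketch of that correspondence, using the fmt library directly (the NGLOG macros are yuzu's wrappers and are not reproduced here):

```cpp
#include <cstdint>
#include <string>
#include <fmt/format.h> // fmt is assumed available; yuzu vendors it for NGLOG

int main() {
    const std::uint32_t handle = 0xDEADBEEF;
    // printf-style (old LOG_* macros): "%08X" zero-pads to 8 hex digits.
    // fmt-style (new NGLOG_* macros): "{:08X}" is the equivalent spec, and
    // "{:#010X}" additionally emits the "0x" prefix inside a 10-char field.
    const std::string plain = fmt::format("{:08X}", handle);      // "DEADBEEF"
    const std::string prefixed = fmt::format("{:#010X}", handle); // "0xDEADBEEF"
    fmt::print("{} vs {}\n", plain, prefixed);
    return 0;
}
```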
@@ -118,7 +118,7 @@ void HLERequestContext::ParseCommandBuffer(u32_le* src_cmdbuf, bool incoming) {
                 std::make_shared<IPC::DomainMessageHeader>(rp.PopRaw<IPC::DomainMessageHeader>());
         } else {
             if (Session()->IsDomain())
-                LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
+                NGLOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
         }
     }
@@ -270,7 +270,8 @@ size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size) const {
     const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[0].Size()};
     const size_t buffer_size{GetWriteBufferSize()};
     if (size > buffer_size) {
-        LOG_CRITICAL(Core, "size (%016zx) is greater than buffer_size (%016zx)", size, buffer_size);
+        NGLOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
+                       buffer_size);
         size = buffer_size; // TODO(bunnei): This needs to be HW tested
     }
@@ -54,7 +54,7 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
             continue;
         } else if ((type & 0xF00) == 0xE00) { // 0x0FFF
             // Allowed interrupts list
-            LOG_WARNING(Loader, "ExHeader allowed interrupts list ignored");
+            NGLOG_WARNING(Loader, "ExHeader allowed interrupts list ignored");
         } else if ((type & 0xF80) == 0xF00) { // 0x07FF
             // Allowed syscalls mask
             unsigned int index = ((descriptor >> 24) & 7) * 24;
@@ -74,7 +74,7 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
         } else if ((type & 0xFFE) == 0xFF8) { // 0x001F
             // Mapped memory range
             if (i + 1 >= len || ((kernel_caps[i + 1] >> 20) & 0xFFE) != 0xFF8) {
-                LOG_WARNING(Loader, "Incomplete exheader memory range descriptor ignored.");
+                NGLOG_WARNING(Loader, "Incomplete exheader memory range descriptor ignored.");
                 continue;
             }
             u32 end_desc = kernel_caps[i + 1];
@@ -109,9 +109,9 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {

             int minor = kernel_version & 0xFF;
             int major = (kernel_version >> 8) & 0xFF;
-            LOG_INFO(Loader, "ExHeader kernel version: %d.%d", major, minor);
+            NGLOG_INFO(Loader, "ExHeader kernel version: {}.{}", major, minor);
         } else {
-            LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x%08X", descriptor);
+            NGLOG_ERROR(Loader, "Unhandled kernel caps descriptor: {:#010X}", descriptor);
         }
     }
 }
@@ -29,7 +29,7 @@ SharedPtr<ResourceLimit> ResourceLimit::GetForCategory(ResourceLimitCategory cat
     case ResourceLimitCategory::OTHER:
         return resource_limits[static_cast<u8>(category)];
     default:
-        LOG_CRITICAL(Kernel, "Unknown resource limit category");
+        NGLOG_CRITICAL(Kernel, "Unknown resource limit category");
         UNREACHABLE();
     }
 }
@@ -55,7 +55,7 @@ s32 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
     case ResourceType::CPUTime:
         return current_cpu_time;
     default:
-        LOG_ERROR(Kernel, "Unknown resource type=%08X", static_cast<u32>(resource));
+        NGLOG_ERROR(Kernel, "Unknown resource type={:08X}", static_cast<u32>(resource));
         UNIMPLEMENTED();
         return 0;
     }
@@ -84,7 +84,7 @@ u32 ResourceLimit::GetMaxResourceValue(ResourceType resource) const {
     case ResourceType::CPUTime:
         return max_cpu_time;
     default:
-        LOG_ERROR(Kernel, "Unknown resource type=%08X", static_cast<u32>(resource));
+        NGLOG_ERROR(Kernel, "Unknown resource type={:08X}", static_cast<u32>(resource));
         UNIMPLEMENTED();
         return 0;
     }
@@ -94,11 +94,11 @@ void Scheduler::Reschedule() {
     Thread* next = PopNextReadyThread();

     if (cur && next) {
-        LOG_TRACE(Kernel, "context switch %u -> %u", cur->GetObjectId(), next->GetObjectId());
+        NGLOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
     } else if (cur) {
-        LOG_TRACE(Kernel, "context switch %u -> idle", cur->GetObjectId());
+        NGLOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
     } else if (next) {
-        LOG_TRACE(Kernel, "context switch idle -> %u", next->GetObjectId());
+        NGLOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
     }

     SwitchContext(next);
@@ -68,7 +68,7 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
         return domain_request_handlers[object_id - 1]->HandleSyncRequest(context);

     case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
-        LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x%08X", object_id);
+        NGLOG_DEBUG(IPC, "CloseVirtualHandle, object_id={:#010X}", object_id);

         domain_request_handlers[object_id - 1] = nullptr;
@@ -78,8 +78,8 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
         }
     }

-    LOG_CRITICAL(IPC, "Unknown domain command=%d",
-                 static_cast<int>(domain_message_header->command.Value()));
+    NGLOG_CRITICAL(IPC, "Unknown domain command={}",
+                   static_cast<int>(domain_message_header->command.Value()));
     ASSERT(false);
 }
@@ -107,16 +107,16 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi

     // Error out if the requested permissions don't match what the creator process allows.
     if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) {
-        LOG_ERROR(Kernel, "cannot map id=%u, address=0x%lx name=%s, permissions don't match",
-                  GetObjectId(), address, name.c_str());
+        NGLOG_ERROR(Kernel, "cannot map id={}, address={:#X} name={}, permissions don't match",
+                    GetObjectId(), address, name);
         return ERR_INVALID_COMBINATION;
     }

     // Error out if the provided permissions are not compatible with what the creator process needs.
     if (other_permissions != MemoryPermission::DontCare &&
         static_cast<u32>(this->permissions) & ~static_cast<u32>(other_permissions)) {
-        LOG_ERROR(Kernel, "cannot map id=%u, address=0x%lx name=%s, permissions don't match",
-                  GetObjectId(), address, name.c_str());
+        NGLOG_ERROR(Kernel, "cannot map id={}, address={:#X} name={}, permissions don't match",
+                    GetObjectId(), address, name);
         return ERR_WRONG_PERMISSION;
     }
@@ -131,9 +131,10 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
     auto result = target_process->vm_manager.MapMemoryBlock(
         target_address, backing_block, backing_block_offset, size, MemoryState::Shared);
     if (result.Failed()) {
-        LOG_ERROR(Kernel,
-                  "cannot map id=%u, target_address=0x%lx name=%s, error mapping to virtual memory",
-                  GetObjectId(), target_address, name.c_str());
+        NGLOG_ERROR(
+            Kernel,
+            "cannot map id={}, target_address={:#X} name={}, error mapping to virtual memory",
+            GetObjectId(), target_address, name);
         return result.Code();
     }
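Both rejection paths in the Map hunks above rely on the same bitmask test: the request is refused if it asks for any permission bit the other side did not grant. A small self-contained sketch of that check (the enum values here are illustrative, not yuzu's actual definitions):

```cpp
#include <cstdint>

enum class MemoryPermission : std::uint32_t { None = 0, Read = 1, Write = 2, ReadWrite = 3 };

// Mirrors the shape of the check in SharedMemory::Map: any bit set in
// `requested` that is not also set in `allowed` invalidates the combination.
static bool PermissionsAllowed(MemoryPermission requested, MemoryPermission allowed) {
    return (static_cast<std::uint32_t>(requested) &
            ~static_cast<std::uint32_t>(allowed)) == 0;
}

int main() {
    // ReadWrite requested but only Read allowed -> rejected (returns 0 here).
    return PermissionsAllowed(MemoryPermission::ReadWrite, MemoryPermission::Read) ? 1 : 0;
}
```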
@@ -151,7 +152,7 @@ VMAPermission SharedMemory::ConvertPermissions(MemoryPermission permission) {
     u32 masked_permissions =
         static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute);
     return static_cast<VMAPermission>(masked_permissions);
-};
+}

 u8* SharedMemory::GetPointer(u32 offset) {
     return backing_block->data() + backing_block_offset + offset;
@@ -31,7 +31,7 @@ namespace Kernel {

 /// Set the process heap to a given Size. It can both extend and shrink the heap.
 static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
-    LOG_TRACE(Kernel_SVC, "called, heap_size=0x%llx", heap_size);
+    NGLOG_TRACE(Kernel_SVC, "called, heap_size={:#X}", heap_size);
     auto& process = *Core::CurrentProcess();
     CASCADE_RESULT(*heap_addr,
                    process.HeapAllocate(Memory::HEAP_VADDR, heap_size, VMAPermission::ReadWrite));
@@ -39,21 +39,21 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
 }

 static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state1) {
-    LOG_WARNING(Kernel_SVC, "(STUBBED) called, addr=0x%lx", addr);
+    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, addr={:#X}", addr);
     return RESULT_SUCCESS;
 }

 /// Maps a memory range into a different range.
 static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
-    LOG_TRACE(Kernel_SVC, "called, dst_addr=0x%llx, src_addr=0x%llx, size=0x%llx", dst_addr,
-              src_addr, size);
+    NGLOG_TRACE(Kernel_SVC, "called, dst_addr={:#X}, src_addr={:#X}, size={:#X}", dst_addr,
+                src_addr, size);
     return Core::CurrentProcess()->MirrorMemory(dst_addr, src_addr, size);
 }

 /// Unmaps a region that was previously mapped with svcMapMemory
 static ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
-    LOG_TRACE(Kernel_SVC, "called, dst_addr=0x%llx, src_addr=0x%llx, size=0x%llx", dst_addr,
-              src_addr, size);
+    NGLOG_TRACE(Kernel_SVC, "called, dst_addr={:#X}, src_addr={:#X}, size={:#X}", dst_addr,
+                src_addr, size);
     return Core::CurrentProcess()->UnmapMemory(dst_addr, src_addr, size);
 }
@@ -68,11 +68,11 @@ static ResultCode ConnectToNamedPort(Handle* out_handle, VAddr port_name_address
     if (port_name.size() > PortNameMaxLength)
         return ERR_PORT_NAME_TOO_LONG;

-    LOG_TRACE(Kernel_SVC, "called port_name=%s", port_name.c_str());
+    NGLOG_TRACE(Kernel_SVC, "called port_name={}", port_name);

     auto it = Service::g_kernel_named_ports.find(port_name);
     if (it == Service::g_kernel_named_ports.end()) {
-        LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: %s", port_name.c_str());
+        NGLOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
         return ERR_NOT_FOUND;
     }
@@ -90,11 +90,11 @@ static ResultCode ConnectToNamedPort(Handle* out_handle, VAddr port_name_address
 static ResultCode SendSyncRequest(Handle handle) {
     SharedPtr<ClientSession> session = g_handle_table.Get<ClientSession>(handle);
     if (!session) {
-        LOG_ERROR(Kernel_SVC, "called with invalid handle=0x%08X", handle);
+        NGLOG_ERROR(Kernel_SVC, "called with invalid handle={:#010X}", handle);
         return ERR_INVALID_HANDLE;
     }

-    LOG_TRACE(Kernel_SVC, "called handle=0x%08X(%s)", handle, session->GetName().c_str());
+    NGLOG_TRACE(Kernel_SVC, "called handle={:#010X}({})", handle, session->GetName());

     Core::System::GetInstance().PrepareReschedule();
@@ -105,7 +105,7 @@ static ResultCode SendSyncRequest(Handle handle) {

 /// Get the ID for the specified thread.
 static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) {
-    LOG_TRACE(Kernel_SVC, "called thread=0x%08X", thread_handle);
+    NGLOG_TRACE(Kernel_SVC, "called thread={:#010X}", thread_handle);

     const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
     if (!thread) {
@@ -118,7 +118,7 @@ static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) {

 /// Get the ID of the specified process
 static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
-    LOG_TRACE(Kernel_SVC, "called process=0x%08X", process_handle);
+    NGLOG_TRACE(Kernel_SVC, "called process={:#010X}", process_handle);

     const SharedPtr<Process> process = g_handle_table.Get<Process>(process_handle);
     if (!process) {
@@ -178,8 +178,8 @@ static ResultCode WaitSynchronization1(
 /// Wait for the given handles to synchronize, timeout after the specified nanoseconds
 static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64 handle_count,
                                       s64 nano_seconds) {
-    LOG_TRACE(Kernel_SVC, "called handles_address=0x%llx, handle_count=%d, nano_seconds=%d",
-              handles_address, handle_count, nano_seconds);
+    NGLOG_TRACE(Kernel_SVC, "called handles_address={:#X}, handle_count={}, nano_seconds={}",
+                handles_address, handle_count, nano_seconds);

     if (!Memory::IsValidVirtualAddress(handles_address))
         return ERR_INVALID_POINTER;
@@ -239,7 +239,7 @@ static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64

 /// Resumes a thread waiting on WaitSynchronization
 static ResultCode CancelSynchronization(Handle thread_handle) {
-    LOG_TRACE(Kernel_SVC, "called thread=0x%08X", thread_handle);
+    NGLOG_TRACE(Kernel_SVC, "called thread={:#X}", thread_handle);

     const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
     if (!thread) {
@@ -256,38 +256,38 @@ static ResultCode CancelSynchronization(Handle thread_handle) {
 /// Attempts to locks a mutex, creating it if it does not already exist
 static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
                                 Handle requesting_thread_handle) {
-    LOG_TRACE(Kernel_SVC,
-              "called holding_thread_handle=0x%08X, mutex_addr=0x%llx, "
-              "requesting_current_thread_handle=0x%08X",
-              holding_thread_handle, mutex_addr, requesting_thread_handle);
+    NGLOG_TRACE(Kernel_SVC,
+                "called holding_thread_handle={:#010X}, mutex_addr={:#X}, "
+                "requesting_current_thread_handle={:#010X}",
+                holding_thread_handle, mutex_addr, requesting_thread_handle);

     return Mutex::TryAcquire(mutex_addr, holding_thread_handle, requesting_thread_handle);
 }

 /// Unlock a mutex
 static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
-    LOG_TRACE(Kernel_SVC, "called mutex_addr=0x%llx", mutex_addr);
+    NGLOG_TRACE(Kernel_SVC, "called mutex_addr={:#X}", mutex_addr);

     return Mutex::Release(mutex_addr);
 }

 /// Break program execution
 static void Break(u64 unk_0, u64 unk_1, u64 unk_2) {
-    LOG_CRITICAL(Debug_Emulated, "Emulated program broke execution!");
+    NGLOG_CRITICAL(Debug_Emulated, "Emulated program broke execution!");
     ASSERT(false);
 }

 /// Used to output a message on a debug hardware unit - does nothing on a retail unit
 static void OutputDebugString(VAddr address, s32 len) {
-    std::vector<char> string(len);
-    Memory::ReadBlock(address, string.data(), len);
-    LOG_DEBUG(Debug_Emulated, "%.*s", len, string.data());
+    std::string str(len, '\0');
+    Memory::ReadBlock(address, str.data(), str.size());
+    NGLOG_DEBUG(Debug_Emulated, "{}", str);
 }

 /// Gets system/memory information for the current process
 static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) {
-    LOG_TRACE(Kernel_SVC, "called info_id=0x%X, info_sub_id=0x%X, handle=0x%08X", info_id,
-              info_sub_id, handle);
+    NGLOG_TRACE(Kernel_SVC, "called info_id={:#X}, info_sub_id={:#X}, handle={:#010X}", info_id,
+                info_sub_id, handle);

     auto& vm_manager = Core::CurrentProcess()->vm_manager;
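The OutputDebugString change above replaces a std::vector<char> printed through a length-capped "%.*s" with a pre-sized std::string, which carries its own length and can be handed to "{}" directly. A minimal sketch of the idea, with a hypothetical stand-in for yuzu's Memory::ReadBlock:

```cpp
#include <cstddef>
#include <cstring>
#include <iostream>
#include <string>

// Hypothetical stand-in for Memory::ReadBlock: copies `len` bytes of guest
// memory into `dest`.
static void ReadBlock(const char* guest_src, void* dest, std::size_t len) {
    std::memcpy(dest, guest_src, len);
}

int main() {
    const char guest_memory[] = {'h', 'e', 'l', 'l', 'o'}; // not NUL-terminated
    const std::size_t len = sizeof(guest_memory);

    // Pre-sizing the string (C++17 non-const data()) means the byte count is
    // tracked by the string itself; no "%.*s" trick is needed when printing.
    std::string str(len, '\0');
    ReadBlock(guest_memory, str.data(), str.size());
    std::cout << str << '\n'; // prints "hello"
    return 0;
}
```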
@@ -338,12 +338,12 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
         *result = Core::CurrentProcess()->is_virtual_address_memory_enabled;
         break;
     case GetInfoType::TitleId:
-        LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query titleid, returned 0");
+        NGLOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query titleid, returned 0");
         *result = 0;
         break;
     case GetInfoType::PrivilegedProcessId:
-        LOG_WARNING(Kernel_SVC,
-                    "(STUBBED) Attempted to query priviledged process id bounds, returned 0");
+        NGLOG_WARNING(Kernel_SVC,
+                      "(STUBBED) Attempted to query privileged process id bounds, returned 0");
         *result = 0;
         break;
     default:
@@ -355,13 +355,14 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)

 /// Sets the thread activity
 static ResultCode SetThreadActivity(Handle handle, u32 unknown) {
-    LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x%08X, unknown=0x%08X", handle, unknown);
+    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, handle={:#010X}, unknown={:#010X}", handle,
+                  unknown);
     return RESULT_SUCCESS;
 }

 /// Gets the thread context
 static ResultCode GetThreadContext(Handle handle, VAddr addr) {
-    LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x%08X, addr=0x%" PRIx64, handle, addr);
+    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, handle={:#010X}, addr={:#X}", handle, addr);
     return RESULT_SUCCESS;
 }
@@ -400,15 +401,15 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {

 /// Get which CPU core is executing the current thread
 static u32 GetCurrentProcessorNumber() {
-    LOG_WARNING(Kernel_SVC, "(STUBBED) called, defaulting to processor 0");
+    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, defaulting to processor 0");
     return 0;
 }

 static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size,
                                   u32 permissions) {
-    LOG_TRACE(Kernel_SVC,
-              "called, shared_memory_handle=0x%08X, addr=0x%llx, size=0x%llx, permissions=0x%08X",
-              shared_memory_handle, addr, size, permissions);
+    NGLOG_TRACE(Kernel_SVC,
+                "called, shared_memory_handle={:#X}, addr={:#X}, size={:#X}, permissions={:#010X}",
+                shared_memory_handle, addr, size, permissions);

     SharedPtr<SharedMemory> shared_memory = g_handle_table.Get<SharedMemory>(shared_memory_handle);
     if (!shared_memory) {
@@ -428,16 +429,15 @@ static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 s
         return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
                                   MemoryPermission::DontCare);
     default:
-        LOG_ERROR(Kernel_SVC, "unknown permissions=0x%08X", permissions);
+        NGLOG_ERROR(Kernel_SVC, "unknown permissions={:#010X}", permissions);
     }

     return RESULT_SUCCESS;
 }

 static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size) {
-    LOG_WARNING(Kernel_SVC,
-                "called, shared_memory_handle=0x%08X, addr=0x%" PRIx64 ", size=0x%" PRIx64 "",
-                shared_memory_handle, addr, size);
+    NGLOG_WARNING(Kernel_SVC, "called, shared_memory_handle={:#010X}, addr={:#X}, size={:#X}",
+                  shared_memory_handle, addr, size);

     SharedPtr<SharedMemory> shared_memory = g_handle_table.Get<SharedMemory>(shared_memory_handle);
@@ -465,19 +465,19 @@ static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_i
         memory_info->type = static_cast<u32>(vma->second.meminfo_state);
     }

-    LOG_TRACE(Kernel_SVC, "called process=0x%08X addr=%llx", process_handle, addr);
+    NGLOG_TRACE(Kernel_SVC, "called process={:#010X} addr={:X}", process_handle, addr);
     return RESULT_SUCCESS;
 }

 /// Query memory
 static ResultCode QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, VAddr addr) {
-    LOG_TRACE(Kernel_SVC, "called, addr=%llx", addr);
+    NGLOG_TRACE(Kernel_SVC, "called, addr={:X}", addr);
     return QueryProcessMemory(memory_info, page_info, CurrentProcess, addr);
 }

 /// Exits the current process
 static void ExitProcess() {
-    LOG_INFO(Kernel_SVC, "Process %u exiting", Core::CurrentProcess()->process_id);
+    NGLOG_INFO(Kernel_SVC, "Process {} exiting", Core::CurrentProcess()->process_id);

     ASSERT_MSG(Core::CurrentProcess()->status == ProcessStatus::Running,
                "Process has already exited");
@@ -534,9 +534,9 @@ static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, V
     case THREADPROCESSORID_2:
     case THREADPROCESSORID_3:
         // TODO(bunnei): Implement support for other processor IDs
-        LOG_ERROR(Kernel_SVC,
-                  "Newly created thread must run in another thread (%u), unimplemented.",
-                  processor_id);
+        NGLOG_ERROR(Kernel_SVC,
+                    "Newly created thread must run in another thread ({}), unimplemented.",
+                    processor_id);
         break;
     default:
         ASSERT_MSG(false, "Unsupported thread processor ID: %d", processor_id);
@@ -551,17 +551,17 @@ static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, V

     Core::System::GetInstance().PrepareReschedule();

-    LOG_TRACE(Kernel_SVC,
-              "called entrypoint=0x%08X (%s), arg=0x%08X, stacktop=0x%08X, "
-              "threadpriority=0x%08X, processorid=0x%08X : created handle=0x%08X",
-              entry_point, name.c_str(), arg, stack_top, priority, processor_id, *out_handle);
+    NGLOG_TRACE(Kernel_SVC,
+                "called entrypoint={:#010X} ({}), arg={:#010X}, stacktop={:#010X}, "
+                "threadpriority={:#010X}, processorid={:#010X} : created handle={:#010X}",
+                entry_point, name, arg, stack_top, priority, processor_id, *out_handle);

     return RESULT_SUCCESS;
 }

 /// Starts the thread for the provided handle
 static ResultCode StartThread(Handle thread_handle) {
-    LOG_TRACE(Kernel_SVC, "called thread=0x%08X", thread_handle);
+    NGLOG_TRACE(Kernel_SVC, "called thread={:#010X}", thread_handle);

     const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
     if (!thread) {
@@ -575,7 +575,7 @@ static ResultCode StartThread(Handle thread_handle) {

 /// Called when a thread exits
 static void ExitThread() {
-    LOG_TRACE(Kernel_SVC, "called, pc=0x%08X", Core::CPU().GetPC());
+    NGLOG_TRACE(Kernel_SVC, "called, pc={:#010X}", Core::CPU().GetPC());

     ExitCurrentThread();
     Core::System::GetInstance().PrepareReschedule();
@@ -583,7 +583,7 @@ static void ExitThread() {

 /// Sleep the current thread
 static void SleepThread(s64 nanoseconds) {
-    LOG_TRACE(Kernel_SVC, "called nanoseconds=%lld", nanoseconds);
+    NGLOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);

     // Don't attempt to yield execution if there are no available threads to run,
     // this way we avoid a useless reschedule to the idle thread.
@@ -602,9 +602,9 @@ static void SleepThread(s64 nanoseconds) {
 /// Signal process wide key atomic
 static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_variable_addr,
                                            Handle thread_handle, s64 nano_seconds) {
-    LOG_TRACE(
+    NGLOG_TRACE(
         Kernel_SVC,
-        "called mutex_addr=%llx, condition_variable_addr=%llx, thread_handle=0x%08X, timeout=%d",
+        "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle={:#010X}, timeout={}",
         mutex_addr, condition_variable_addr, thread_handle, nano_seconds);

     SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
@@ -629,8 +629,8 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var

 /// Signal process wide key
 static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target) {
-    LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x%llx, target=0x%08x",
-              condition_variable_addr, target);
+    NGLOG_TRACE(Kernel_SVC, "called, condition_variable_addr={:#X}, target={:#010X}",
+                condition_variable_addr, target);

     u32 processed = 0;
     auto& thread_list = Core::System::GetInstance().Scheduler().GetThreadList();
@@ -696,13 +696,13 @@ static u64 GetSystemTick() {

 /// Close a handle
 static ResultCode CloseHandle(Handle handle) {
-    LOG_TRACE(Kernel_SVC, "Closing handle 0x%08X", handle);
+    NGLOG_TRACE(Kernel_SVC, "Closing handle {:#010X}", handle);
     return g_handle_table.Close(handle);
 }

 /// Reset an event
 static ResultCode ResetSignal(Handle handle) {
-    LOG_WARNING(Kernel_SVC, "(STUBBED) called handle 0x%08X", handle);
+    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called handle {:#010X}", handle);
     auto event = g_handle_table.Get<Event>(handle);
     ASSERT(event != nullptr);
     event->Clear();
@@ -711,29 +711,29 @@ static ResultCode ResetSignal(Handle handle) {

 /// Creates a TransferMemory object
 static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32 permissions) {
-    LOG_WARNING(Kernel_SVC, "(STUBBED) called addr=0x%lx, size=0x%lx, perms=%08X", addr, size,
-                permissions);
+    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called addr={:#X}, size={:#X}, perms={:010X}", addr, size,
+                  permissions);
     *handle = 0;
     return RESULT_SUCCESS;
 }

 static ResultCode GetThreadCoreMask(Handle handle, u32* mask, u64* unknown) {
-    LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x%08X", handle);
+    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, handle={:010X}", handle);
     *mask = 0x0;
     *unknown = 0xf;
     return RESULT_SUCCESS;
 }

 static ResultCode SetThreadCoreMask(Handle handle, u32 mask, u64 unknown) {
-    LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x%08X, mask=0x%08X, unknown=0x%lx", handle,
-                mask, unknown);
+    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, handle={:#010X}, mask={:#010X}, unknown={:#X}",
+                  handle, mask, unknown);
     return RESULT_SUCCESS;
 }

 static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permissions,
                                      u32 remote_permissions) {
-    LOG_TRACE(Kernel_SVC, "called, size=0x%llx, localPerms=0x%08x, remotePerms=0x%08x", size,
-              local_permissions, remote_permissions);
+    NGLOG_TRACE(Kernel_SVC, "called, size={:#X}, localPerms={:#010X}, remotePerms={:#010X}", size,
+                local_permissions, remote_permissions);
     auto sharedMemHandle =
         SharedMemory::Create(g_handle_table.Get<Process>(KernelHandle::CurrentProcess), size,
                              static_cast<MemoryPermission>(local_permissions),
@@ -744,7 +744,7 @@ static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permiss
 }

 static ResultCode ClearEvent(Handle handle) {
-    LOG_TRACE(Kernel_SVC, "called, event=0xX", handle);
+    NGLOG_TRACE(Kernel_SVC, "called, event={:010X}", handle);

     SharedPtr<Event> evt = g_handle_table.Get<Event>(handle);
     if (evt == nullptr)
@@ -896,7 +896,7 @@ static const FunctionDef SVC_Table[] = {

 static const FunctionDef* GetSVCInfo(u32 func_num) {
     if (func_num >= std::size(SVC_Table)) {
-        LOG_ERROR(Kernel_SVC, "unknown svc=0x%02X", func_num);
+        NGLOG_ERROR(Kernel_SVC, "Unknown svc={:#04X}", func_num);
         return nullptr;
     }
     return &SVC_Table[func_num];
@@ -915,10 +915,10 @@ void CallSVC(u32 immediate) {
         if (info->func) {
             info->func();
         } else {
-            LOG_CRITICAL(Kernel_SVC, "unimplemented SVC function %s(..)", info->name);
+            NGLOG_CRITICAL(Kernel_SVC, "Unimplemented SVC function {}(..)", info->name);
         }
     } else {
-        LOG_CRITICAL(Kernel_SVC, "unknown SVC function 0x%x", immediate);
+        NGLOG_CRITICAL(Kernel_SVC, "Unknown SVC function {:#X}", immediate);
     }
 }
@@ -101,9 +101,10 @@ void ExitCurrentThread() {
  * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
  */
 static void ThreadWakeupCallback(u64 thread_handle, int cycles_late) {
-    SharedPtr<Thread> thread = wakeup_callback_handle_table.Get<Thread>((Handle)thread_handle);
+    const auto proper_handle = static_cast<Handle>(thread_handle);
+    SharedPtr<Thread> thread = wakeup_callback_handle_table.Get<Thread>(proper_handle);
     if (thread == nullptr) {
-        LOG_CRITICAL(Kernel, "Callback fired for invalid thread %08X", (Handle)thread_handle);
+        NGLOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
         return;
     }
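The ThreadWakeupCallback hunk above also swaps two C-style (Handle)thread_handle casts for one named static_cast. A tiny sketch of the narrowing it makes explicit (the Handle alias matches yuzu's u32 handles; the value is illustrative):

```cpp
#include <cstdint>

using Handle = std::uint32_t;

int main() {
    // The event scheduler stores the handle widened to u64; static_cast makes
    // the narrowing back to Handle explicit and greppable, unlike "(Handle)x".
    const std::uint64_t thread_handle = 0x00000000DEADBEEFULL;
    const auto proper_handle = static_cast<Handle>(thread_handle);
    return proper_handle == 0xDEADBEEFu ? 0 : 1;
}
```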
@@ -238,19 +239,19 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
                                             SharedPtr<Process> owner_process) {
     // Check if priority is in ranged. Lowest priority -> highest priority id.
     if (priority > THREADPRIO_LOWEST) {
-        LOG_ERROR(Kernel_SVC, "Invalid thread priority: %u", priority);
+        NGLOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
         return ERR_OUT_OF_RANGE;
     }

     if (processor_id > THREADPROCESSORID_MAX) {
-        LOG_ERROR(Kernel_SVC, "Invalid processor id: %d", processor_id);
+        NGLOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id);
         return ERR_OUT_OF_RANGE_KERNEL;
     }

     // TODO(yuriks): Other checks, returning 0xD9001BEA

     if (!Memory::IsValidVirtualAddress(*owner_process, entry_point)) {
-        LOG_ERROR(Kernel_SVC, "(name=%s): invalid entry %016" PRIx64, name.c_str(), entry_point);
+        NGLOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
         // TODO (bunnei): Find the correct error code to use here
         return ResultCode(-1);
     }
@@ -289,8 +290,8 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
     auto& linheap_memory = memory_region->linear_heap_memory;

     if (linheap_memory->size() + Memory::PAGE_SIZE > memory_region->size) {
-        LOG_ERROR(Kernel_SVC,
-                  "Not enough space in region to allocate a new TLS page for thread");
+        NGLOG_ERROR(Kernel_SVC,
+                    "Not enough space in region to allocate a new TLS page for thread");
         return ERR_OUT_OF_MEMORY;
     }
@@ -77,7 +77,7 @@ void Timer::WakeupAllWaitingThreads() {
 }

 void Timer::Signal(int cycles_late) {
-    LOG_TRACE(Kernel, "Timer %u fired", GetObjectId());
+    NGLOG_TRACE(Kernel, "Timer {} fired", GetObjectId());

     signaled = true;
@@ -97,7 +97,7 @@ static void TimerCallback(u64 timer_handle, int cycles_late) {
         timer_callback_handle_table.Get<Timer>(static_cast<Handle>(timer_handle));

     if (timer == nullptr) {
-        LOG_CRITICAL(Kernel, "Callback fired for invalid timer %08" PRIx64, timer_handle);
+        NGLOG_CRITICAL(Kernel, "Callback fired for invalid timer {:016X}", timer_handle);
         return;
     }
@@ -379,22 +379,22 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
 }

 u64 VMManager::GetTotalMemoryUsage() {
-    LOG_WARNING(Kernel, "(STUBBED) called");
+    NGLOG_WARNING(Kernel, "(STUBBED) called");
     return 0xF8000000;
 }

 u64 VMManager::GetTotalHeapUsage() {
-    LOG_WARNING(Kernel, "(STUBBED) called");
+    NGLOG_WARNING(Kernel, "(STUBBED) called");
     return 0x0;
 }

 VAddr VMManager::GetAddressSpaceBaseAddr() {
-    LOG_WARNING(Kernel, "(STUBBED) called");
+    NGLOG_WARNING(Kernel, "(STUBBED) called");
     return 0x8000000;
 }

 u64 VMManager::GetAddressSpaceSize() {
-    LOG_WARNING(Kernel, "(STUBBED) called");
+    NGLOG_WARNING(Kernel, "(STUBBED) called");
     return MAX_ADDRESS;
 }
@@ -4,7 +4,8 @@

 #include "common/logging/log.h"
 #include "core/hle/ipc_helpers.h"
-#include "core/hle/service/pctl/pctl_a.h"
+#include "core/hle/service/pctl/module.h"
+#include "core/hle/service/pctl/pctl.h"

 namespace Service::PCTL {
@@ -12,7 +13,7 @@ class IParentalControlService final : public ServiceFramework<IParentalControlSe
 public:
     IParentalControlService() : ServiceFramework("IParentalControlService") {
         static const FunctionInfo functions[] = {
-            {1, nullptr, "Initialize"},
+            {1, &IParentalControlService::Initialize, "Initialize"},
             {1001, nullptr, "CheckFreeCommunicationPermission"},
             {1002, nullptr, "ConfirmLaunchApplicationPermission"},
             {1003, nullptr, "ConfirmResumeApplicationPermission"},
@@ -108,20 +109,38 @@ public:
         };
         RegisterHandlers(functions);
     }
+
+private:
+    void Initialize(Kernel::HLERequestContext& ctx) {
+        NGLOG_WARNING(Service_PCTL, "(STUBBED) called");
+        IPC::ResponseBuilder rb{ctx, 2, 0, 0};
+        rb.Push(RESULT_SUCCESS);
+    }
 };

-void PCTL_A::CreateService(Kernel::HLERequestContext& ctx) {
+void Module::Interface::CreateService(Kernel::HLERequestContext& ctx) {
     IPC::ResponseBuilder rb{ctx, 2, 0, 1};
     rb.Push(RESULT_SUCCESS);
     rb.PushIpcInterface<IParentalControlService>();
+    NGLOG_DEBUG(Service_PCTL, "called");
 }

-PCTL_A::PCTL_A() : ServiceFramework("pctl:a") {
-    static const FunctionInfo functions[] = {
-        {0, &PCTL_A::CreateService, "CreateService"},
-        {1, nullptr, "CreateServiceWithoutInitialize"},
-    };
-    RegisterHandlers(functions);
+void Module::Interface::CreateServiceWithoutInitialize(Kernel::HLERequestContext& ctx) {
+    IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+    rb.Push(RESULT_SUCCESS);
+    rb.PushIpcInterface<IParentalControlService>();
+    NGLOG_DEBUG(Service_PCTL, "called");
 }

+Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
+    : ServiceFramework(name), module(std::move(module)) {}
+
 void InstallInterfaces(SM::ServiceManager& service_manager) {
+    auto module = std::make_shared<Module>();
+    std::make_shared<PCTL>(module, "pctl")->InstallAsService(service_manager);
+    std::make_shared<PCTL>(module, "pctl:a")->InstallAsService(service_manager);
+    std::make_shared<PCTL>(module, "pctl:r")->InstallAsService(service_manager);
+    std::make_shared<PCTL>(module, "pctl:s")->InstallAsService(service_manager);
 }

 } // namespace Service::PCTL
src/core/hle/service/pctl/module.h (new file, 28 lines)
@@ -0,0 +1,28 @@
+// Copyright 2018 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/service/service.h"
+
+namespace Service::PCTL {
+
+class Module final {
+public:
+    class Interface : public ServiceFramework<Interface> {
+    public:
+        Interface(std::shared_ptr<Module> module, const char* name);
+
+        void CreateService(Kernel::HLERequestContext& ctx);
+        void CreateServiceWithoutInitialize(Kernel::HLERequestContext& ctx);
+
+    protected:
+        std::shared_ptr<Module> module;
+    };
+};
+
+/// Registers all PCTL services with the specified service manager.
+void InstallInterfaces(SM::ServiceManager& service_manager);
+
+} // namespace Service::PCTL
@@ -3,12 +3,15 @@
 // Refer to the license.txt file included.

 #include "core/hle/service/pctl/pctl.h"
-#include "core/hle/service/pctl/pctl_a.h"

 namespace Service::PCTL {

-void InstallInterfaces(SM::ServiceManager& service_manager) {
-    std::make_shared<PCTL_A>()->InstallAsService(service_manager);
+PCTL::PCTL(std::shared_ptr<Module> module, const char* name)
+    : Module::Interface(std::move(module), name) {
+    static const FunctionInfo functions[] = {
+        {0, &PCTL::CreateService, "CreateService"},
+        {1, &PCTL::CreateServiceWithoutInitialize, "CreateServiceWithoutInitialize"},
+    };
+    RegisterHandlers(functions);
 }

 } // namespace Service::PCTL
@@ -4,11 +4,13 @@

 #pragma once

-#include "core/hle/service/service.h"
+#include "core/hle/service/pctl/module.h"

 namespace Service::PCTL {

-/// Registers all PCTL services with the specified service manager.
-void InstallInterfaces(SM::ServiceManager& service_manager);
+class PCTL final : public Module::Interface {
+public:
+    explicit PCTL(std::shared_ptr<Module> module, const char* name);
+};

 } // namespace Service::PCTL
@@ -1,20 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "core/hle/service/service.h"
-
-namespace Service::PCTL {
-
-class PCTL_A final : public ServiceFramework<PCTL_A> {
-public:
-    PCTL_A();
-    ~PCTL_A() = default;
-
-private:
-    void CreateService(Kernel::HLERequestContext& ctx);
-};
-
-} // namespace Service::PCTL
@@ -29,7 +29,7 @@
 #include "core/hle/service/nifm/nifm.h"
 #include "core/hle/service/ns/ns.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
-#include "core/hle/service/pctl/pctl.h"
+#include "core/hle/service/pctl/module.h"
 #include "core/hle/service/service.h"
 #include "core/hle/service/set/settings.h"
 #include "core/hle/service/sm/controller.h"
@@ -39,8 +39,8 @@ PageTable* GetCurrentPageTable() {
 }

 static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, PageType type) {
-    LOG_DEBUG(HW_Memory, "Mapping %p onto %016" PRIX64 "-%016" PRIX64, memory, base * PAGE_SIZE,
-              (base + size) * PAGE_SIZE);
+    NGLOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
+                (base + size) * PAGE_SIZE);

     RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE,
                                  FlushMode::FlushAndInvalidate);
@@ -169,10 +169,10 @@ T Read(const VAddr vaddr) {
     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "unmapped Read%lu @ 0x%08X", sizeof(T) * 8, vaddr);
+        NGLOG_ERROR(HW_Memory, "Unmapped Read{} @ {:#010X}", sizeof(T) * 8, vaddr);
         return 0;
     case PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr);
+        ASSERT_MSG(false, "Mapped memory page without a pointer @ %016" PRIX64, vaddr);
         break;
     case PageType::RasterizerCachedMemory: {
         RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Flush);
@@ -201,11 +201,11 @@ void Write(const VAddr vaddr, const T data) {
     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data,
-                  vaddr);
+        NGLOG_ERROR(HW_Memory, "Unmapped Write{} {:#010X} @ {:#018X}", sizeof(data) * 8, (u32)data,
+                    vaddr);
         return;
     case PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr);
+        ASSERT_MSG(false, "Mapped memory page without a pointer @ %016" PRIX64, vaddr);
         break;
     case PageType::RasterizerCachedMemory: {
         RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Invalidate);
@@ -251,7 +251,7 @@ u8* GetPointer(const VAddr vaddr) {
         return GetPointerFromVMA(vaddr);
     }

-    LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr);
+    NGLOG_ERROR(HW_Memory, "Unknown GetPointer @ {:#018X}", vaddr);
     return nullptr;
 }
@@ -288,13 +288,12 @@ u8* GetPhysicalPointer(PAddr address) {
     });

     if (area == std::end(memory_areas)) {
-        LOG_ERROR(HW_Memory, "unknown GetPhysicalPointer @ 0x%016" PRIX64, address);
+        NGLOG_ERROR(HW_Memory, "Unknown GetPhysicalPointer @ {:#018X}", address);
         return nullptr;
     }

     if (area->paddr_base == IO_AREA_PADDR) {
-        LOG_ERROR(HW_Memory, "MMIO mappings are not supported yet. phys_addr=0x%016" PRIX64,
-                  address);
+        NGLOG_ERROR(HW_Memory, "MMIO mappings are not supported yet. phys_addr={:018X}", address);
         return nullptr;
     }
@@ -325,15 +324,29 @@ u8* GetPhysicalPointer(PAddr address) {
     return target_pointer;
 }

-void RasterizerMarkRegionCached(VAddr start, u64 size, bool cached) {
-    if (start == 0) {
+void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached) {
+    if (gpu_addr == 0) {
         return;
     }

-    u64 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
-    VAddr vaddr = start;
+    // Iterate over a contiguous CPU address space, which corresponds to the specified GPU address
+    // space, marking the region as un/cached. The region is marked un/cached at a granularity of
+    // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This
+    // assumes the specified GPU address region is contiguous as well.
+
+    u64 num_pages = ((gpu_addr + size - 1) >> PAGE_BITS) - (gpu_addr >> PAGE_BITS) + 1;
+    for (unsigned i = 0; i < num_pages; ++i, gpu_addr += PAGE_SIZE) {
+        boost::optional<VAddr> maybe_vaddr =
+            Core::System::GetInstance().GPU().memory_manager->GpuToCpuAddress(gpu_addr);
+        // The GPU <-> CPU virtual memory mapping is not 1:1
+        if (!maybe_vaddr) {
+            NGLOG_ERROR(HW_Memory,
+                        "Trying to flush a cached region to an invalid physical address {:016X}",
+                        gpu_addr);
+            continue;
+        }
+        VAddr vaddr = *maybe_vaddr;

-    for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
         PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];

         if (cached) {
|
||||
page_type = PageType::RasterizerCachedMemory;
|
||||
current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
|
||||
break;
|
||||
case PageType::RasterizerCachedMemory:
|
||||
// There can be more than one GPU region mapped per CPU region, so it's common that
|
||||
// this area is already marked as cached.
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
@@ -357,6 +374,10 @@ void RasterizerMarkRegionCached(VAddr start, u64 size, bool cached) {
                 // It is not necessary for a process to have this region mapped into its address
                 // space, for example, a system module need not have a VRAM mapping.
                 break;
+            case PageType::Memory:
+                // There can be more than one GPU region mapped per CPU region, so it's common that
+                // this area is already unmarked as cached.
+                break;
             case PageType::RasterizerCachedMemory: {
                 u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
                 if (pointer == nullptr) {
@@ -394,19 +415,29 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {

         VAddr overlap_start = std::max(start, region_start);
         VAddr overlap_end = std::min(end, region_end);

+        std::vector<Tegra::GPUVAddr> gpu_addresses =
+            Core::System::GetInstance().GPU().memory_manager->CpuToGpuAddress(overlap_start);
+
+        if (gpu_addresses.empty()) {
+            return;
+        }
+
         u64 overlap_size = overlap_end - overlap_start;

-        auto* rasterizer = VideoCore::g_renderer->Rasterizer();
-        switch (mode) {
-        case FlushMode::Flush:
-            rasterizer->FlushRegion(overlap_start, overlap_size);
-            break;
-        case FlushMode::Invalidate:
-            rasterizer->InvalidateRegion(overlap_start, overlap_size);
-            break;
-        case FlushMode::FlushAndInvalidate:
-            rasterizer->FlushAndInvalidateRegion(overlap_start, overlap_size);
-            break;
+        for (const auto& gpu_address : gpu_addresses) {
+            auto* rasterizer = VideoCore::g_renderer->Rasterizer();
+            switch (mode) {
+            case FlushMode::Flush:
+                rasterizer->FlushRegion(gpu_address, overlap_size);
+                break;
+            case FlushMode::Invalidate:
+                rasterizer->InvalidateRegion(gpu_address, overlap_size);
+                break;
+            case FlushMode::FlushAndInvalidate:
+                rasterizer->FlushAndInvalidateRegion(gpu_address, overlap_size);
+                break;
+            }
         }
     };
@@ -445,8 +476,9 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_

         switch (page_table.attributes[page_index]) {
         case PageType::Unmapped: {
-            LOG_ERROR(HW_Memory, "unmapped ReadBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
-                      current_vaddr, src_addr, size);
+            NGLOG_ERROR(HW_Memory,
+                        "Unmapped ReadBlock @ {:#018X} (start address = {:#018X}, size = {})",
+                        current_vaddr, src_addr, size);
             std::memset(dest_buffer, 0, copy_amount);
             break;
         }
@@ -508,9 +540,9 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi

         switch (page_table.attributes[page_index]) {
         case PageType::Unmapped: {
-            LOG_ERROR(HW_Memory,
-                      "unmapped WriteBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
-                      current_vaddr, dest_addr, size);
+            NGLOG_ERROR(HW_Memory,
+                        "Unmapped WriteBlock @ {:#018X} (start address = {:#018X}, size = {})",
+                        current_vaddr, dest_addr, size);
             break;
         }
         case PageType::Memory: {
@@ -556,8 +588,9 @@ void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const size

         switch (page_table.attributes[page_index]) {
         case PageType::Unmapped: {
-            LOG_ERROR(HW_Memory, "unmapped ZeroBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
-                      current_vaddr, dest_addr, size);
+            NGLOG_ERROR(HW_Memory,
+                        "Unmapped ZeroBlock @ {:#018X} (start address = {:#018X}, size = {})",
+                        current_vaddr, dest_addr, size);
             break;
         }
         case PageType::Memory: {
@@ -596,8 +629,9 @@ void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,

         switch (page_table.attributes[page_index]) {
         case PageType::Unmapped: {
-            LOG_ERROR(HW_Memory, "unmapped CopyBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
-                      current_vaddr, src_addr, size);
+            NGLOG_ERROR(HW_Memory,
+                        "Unmapped CopyBlock @ {:#018X} (start address = {:#018X}, size = {})",
+                        current_vaddr, src_addr, size);
             ZeroBlock(process, dest_addr, copy_amount);
             break;
         }
@@ -646,7 +680,7 @@ boost::optional<PAddr> TryVirtualToPhysicalAddress(const VAddr addr) {
 PAddr VirtualToPhysicalAddress(const VAddr addr) {
     auto paddr = TryVirtualToPhysicalAddress(addr);
     if (!paddr) {
-        LOG_ERROR(HW_Memory, "Unknown virtual address @ 0x%016" PRIX64, addr);
+        NGLOG_ERROR(HW_Memory, "Unknown virtual address @ {:#018X}", addr);
         // To help with debugging, set bit on address so that it's obviously invalid.
         return addr | 0x80000000;
     }
@@ -14,6 +14,7 @@
 #include <boost/optional.hpp>
 #include "common/common_types.h"
 #include "core/memory_hook.h"
+#include "video_core/memory_manager.h"

 namespace Kernel {
 class Process;
@@ -258,7 +259,7 @@ enum class FlushMode {
 /**
  * Mark each page touching the region as cached.
  */
-void RasterizerMarkRegionCached(VAddr start, u64 size, bool cached);
+void RasterizerMarkRegionCached(Tegra::GPUVAddr start, u64 size, bool cached);

 /**
  * Flushes and invalidates any externally cached rasterizer resources touching the given virtual
@@ -31,12 +31,14 @@ enum class BufferMethods {
 };

 void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params) {
-    LOG_WARNING(HW_GPU, "Processing method %08X on subchannel %u value %08X remaining params %u",
-                method, subchannel, value, remaining_params);
+    NGLOG_WARNING(HW_GPU,
+                  "Processing method {:08X} on subchannel {} value "
+                  "{:08X} remaining params {}",
+                  method, subchannel, value, remaining_params);

     if (method == static_cast<u32>(BufferMethods::SetGraphMacroEntry)) {
         // Prepare to upload a new macro, reset the upload counter.
-        LOG_DEBUG(HW_GPU, "Uploading GPU macro %08X", value);
+        NGLOG_DEBUG(HW_GPU, "Uploading GPU macro {:08X}", value);
         current_macro_entry = value;
         current_macro_code.clear();
         return;
@@ -58,7 +60,7 @@ void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params)

     if (method == static_cast<u32>(BufferMethods::BindObject)) {
         // Bind the current subchannel to the desired engine id.
-        LOG_DEBUG(HW_GPU, "Binding subchannel %u to engine %u", subchannel, value);
+        NGLOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", subchannel, value);
         ASSERT(bound_engines.find(subchannel) == bound_engines.end());
         bound_engines[subchannel] = static_cast<EngineID>(value);
         return;
@@ -66,7 +68,7 @@ void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params)

     if (method < static_cast<u32>(BufferMethods::CountBufferMethods)) {
         // TODO(Subv): Research and implement these methods.
-        LOG_ERROR(HW_GPU, "Special buffer methods other than Bind are not implemented");
+        NGLOG_ERROR(HW_GPU, "Special buffer methods other than Bind are not implemented");
         return;
     }
@@ -90,11 +92,9 @@ void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params)
 }

 void GPU::ProcessCommandList(GPUVAddr address, u32 size) {
-    // TODO(Subv): PhysicalToVirtualAddress is a misnomer, it converts a GPU VAddr into an
-    // application VAddr.
-    const VAddr head_address = memory_manager->PhysicalToVirtualAddress(address);
-    VAddr current_addr = head_address;
-    while (current_addr < head_address + size * sizeof(CommandHeader)) {
+    const boost::optional<VAddr> head_address = memory_manager->GpuToCpuAddress(address);
+    VAddr current_addr = *head_address;
+    while (current_addr < *head_address + size * sizeof(CommandHeader)) {
         const CommandHeader header = {Memory::Read32(current_addr)};
         current_addr += sizeof(u32);
@@ -145,7 +145,7 @@ void Maxwell3D::ProcessQueryGet() {
     GPUVAddr sequence_address = regs.query.QueryAddress();
     // Since the sequence address is given as a GPU VAddr, we have to convert it to an application
     // VAddr before writing.
-    VAddr address = memory_manager.PhysicalToVirtualAddress(sequence_address);
+    boost::optional<VAddr> address = memory_manager.GpuToCpuAddress(sequence_address);

     // TODO(Subv): Support the other query units.
     ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop,
@@ -153,7 +153,7 @@ void Maxwell3D::ProcessQueryGet() {
     ASSERT_MSG(regs.query.query_get.short_query,
                "Writing the entire query result structure is unimplemented");

-    u32 value = Memory::Read32(address);
+    u32 value = Memory::Read32(*address);
     u32 result = 0;

     // TODO(Subv): Support the other query variables
@@ -173,7 +173,7 @@ void Maxwell3D::ProcessQueryGet() {
     case Regs::QueryMode::Write2: {
         // Write the current query sequence to the sequence address.
         u32 sequence = regs.query.query_sequence;
-        Memory::Write32(address, sequence);
+        Memory::Write32(*address, sequence);

         // TODO(Subv): Write the proper query response structure to the address when not using short
         // mode.
@@ -186,8 +186,8 @@ void Maxwell3D::ProcessQueryGet() {
 }

 void Maxwell3D::DrawArrays() {
-    LOG_DEBUG(HW_GPU, "called, topology=%d, count=%d", regs.draw.topology.Value(),
-              regs.vertex_buffer.count);
+    NGLOG_DEBUG(HW_GPU, "called, topology={}, count={}",
+                static_cast<u32>(regs.draw.topology.Value()), regs.vertex_buffer.count);
     ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");

     auto debug_context = Core::System::GetInstance().GetGPUDebugContext();
@@ -225,10 +225,10 @@ void Maxwell3D::ProcessCBData(u32 value) {
     // Don't allow writing past the end of the buffer.
     ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size);

-    VAddr address =
-        memory_manager.PhysicalToVirtualAddress(buffer_address + regs.const_buffer.cb_pos);
+    boost::optional<VAddr> address =
+        memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos);

-    Memory::Write32(address, value);
+    Memory::Write32(*address, value);

     // Increment the current buffer position.
     regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4;
@@ -238,10 +238,10 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
     GPUVAddr tic_base_address = regs.tic.TICAddress();

     GPUVAddr tic_address_gpu = tic_base_address + tic_index * sizeof(Texture::TICEntry);
-    VAddr tic_address_cpu = memory_manager.PhysicalToVirtualAddress(tic_address_gpu);
+    boost::optional<VAddr> tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu);

     Texture::TICEntry tic_entry;
-    Memory::ReadBlock(tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry));
+    Memory::ReadBlock(*tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry));

     ASSERT_MSG(tic_entry.header_version == Texture::TICHeaderVersion::BlockLinear ||
                    tic_entry.header_version == Texture::TICHeaderVersion::Pitch,
@@ -268,10 +268,10 @@ Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const {
     GPUVAddr tsc_base_address = regs.tsc.TSCAddress();

     GPUVAddr tsc_address_gpu = tsc_base_address + tsc_index * sizeof(Texture::TSCEntry);
-    VAddr tsc_address_cpu = memory_manager.PhysicalToVirtualAddress(tsc_address_gpu);
+    boost::optional<VAddr> tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu);

     Texture::TSCEntry tsc_entry;
-    Memory::ReadBlock(tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry));
+    Memory::ReadBlock(*tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry));
     return tsc_entry;
 }
@@ -293,7 +293,7 @@ std::vector<Texture::FullTextureInfo> Maxwell3D::GetStageTextures(Regs::ShaderSt
|
||||
current_texture < tex_info_buffer_end; current_texture += sizeof(Texture::TextureHandle)) {
|
||||
|
||||
Texture::TextureHandle tex_handle{
|
||||
Memory::Read32(memory_manager.PhysicalToVirtualAddress(current_texture))};
|
||||
Memory::Read32(*memory_manager.GpuToCpuAddress(current_texture))};
|
||||
|
||||
Texture::FullTextureInfo tex_info{};
|
||||
// TODO(Subv): Use the shader to determine which textures are actually accessed.
|
||||
|
||||
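Review note: every call site above pushes the result of GpuToCpuAddress() straight through operator*, which is undefined behavior in boost::optional when the translation fails. A minimal defensive sketch of the same read, not part of this change (the assert message is illustrative):

    boost::optional<VAddr> cpu_addr = memory_manager.GpuToCpuAddress(sequence_address);
    ASSERT_MSG(cpu_addr, "query sequence address is not GPU-mapped");
    u32 value = Memory::Read32(*cpu_addr);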
@@ -8,90 +8,112 @@

namespace Tegra {

PAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
    boost::optional<PAddr> paddr = FindFreeBlock(size, align);
    ASSERT(paddr);
GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
    boost::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, align);
    ASSERT(gpu_addr);

    for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
        ASSERT(PageSlot(*paddr + offset) == static_cast<u64>(PageStatus::Unmapped));
        PageSlot(*paddr + offset) = static_cast<u64>(PageStatus::Allocated);
        ASSERT(PageSlot(*gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
        PageSlot(*gpu_addr + offset) = static_cast<u64>(PageStatus::Allocated);
    }

    return *paddr;
    return *gpu_addr;
}

PAddr MemoryManager::AllocateSpace(PAddr paddr, u64 size, u64 align) {
GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) {
    for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
        ASSERT(PageSlot(paddr + offset) == static_cast<u64>(PageStatus::Unmapped));
        PageSlot(paddr + offset) = static_cast<u64>(PageStatus::Allocated);
        ASSERT(PageSlot(gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
        PageSlot(gpu_addr + offset) = static_cast<u64>(PageStatus::Allocated);
    }

    return paddr;
    return gpu_addr;
}

PAddr MemoryManager::MapBufferEx(VAddr vaddr, u64 size) {
    boost::optional<PAddr> paddr = FindFreeBlock(size, PAGE_SIZE);
    ASSERT(paddr);
GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
    boost::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, PAGE_SIZE);
    ASSERT(gpu_addr);

    for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
        ASSERT(PageSlot(*paddr + offset) == static_cast<u64>(PageStatus::Unmapped));
        PageSlot(*paddr + offset) = vaddr + offset;
        ASSERT(PageSlot(*gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
        PageSlot(*gpu_addr + offset) = cpu_addr + offset;
    }

    return *paddr;
    MappedRegion region{cpu_addr, *gpu_addr, size};
    mapped_regions.push_back(region);

    return *gpu_addr;
}

PAddr MemoryManager::MapBufferEx(VAddr vaddr, PAddr paddr, u64 size) {
    ASSERT((paddr & PAGE_MASK) == 0);
GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) {
    ASSERT((gpu_addr & PAGE_MASK) == 0);

    for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
        ASSERT(PageSlot(paddr + offset) == static_cast<u64>(PageStatus::Allocated));
        PageSlot(paddr + offset) = vaddr + offset;
        ASSERT(PageSlot(gpu_addr + offset) == static_cast<u64>(PageStatus::Allocated));
        PageSlot(gpu_addr + offset) = cpu_addr + offset;
    }

    return paddr;
    MappedRegion region{cpu_addr, gpu_addr, size};
    mapped_regions.push_back(region);

    return gpu_addr;
}

boost::optional<PAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
    PAddr paddr = 0;
boost::optional<GPUVAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
    GPUVAddr gpu_addr = 0;
    u64 free_space = 0;
    align = (align + PAGE_MASK) & ~PAGE_MASK;

    while (paddr + free_space < MAX_ADDRESS) {
        if (!IsPageMapped(paddr + free_space)) {
    while (gpu_addr + free_space < MAX_ADDRESS) {
        if (!IsPageMapped(gpu_addr + free_space)) {
            free_space += PAGE_SIZE;
            if (free_space >= size) {
                return paddr;
                return gpu_addr;
            }
        } else {
            paddr += free_space + PAGE_SIZE;
            gpu_addr += free_space + PAGE_SIZE;
            free_space = 0;
            paddr = Common::AlignUp(paddr, align);
            gpu_addr = Common::AlignUp(gpu_addr, align);
        }
    }

    return {};
}

VAddr MemoryManager::PhysicalToVirtualAddress(PAddr paddr) {
    VAddr base_addr = PageSlot(paddr);
boost::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) {
    VAddr base_addr = PageSlot(gpu_addr);
    ASSERT(base_addr != static_cast<u64>(PageStatus::Unmapped));
    return base_addr + (paddr & PAGE_MASK);

    if (base_addr == static_cast<u64>(PageStatus::Allocated)) {
        return {};
    }

    return base_addr + (gpu_addr & PAGE_MASK);
}

bool MemoryManager::IsPageMapped(PAddr paddr) {
    return PageSlot(paddr) != static_cast<u64>(PageStatus::Unmapped);
std::vector<GPUVAddr> MemoryManager::CpuToGpuAddress(VAddr cpu_addr) const {
    std::vector<GPUVAddr> results;
    for (const auto& region : mapped_regions) {
        if (cpu_addr >= region.cpu_addr && cpu_addr < (region.cpu_addr + region.size)) {
            u64 offset = cpu_addr - region.cpu_addr;
            results.push_back(region.gpu_addr + offset);
        }
    }
    return results;
}

VAddr& MemoryManager::PageSlot(PAddr paddr) {
    auto& block = page_table[(paddr >> (PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
bool MemoryManager::IsPageMapped(GPUVAddr gpu_addr) {
    return PageSlot(gpu_addr) != static_cast<u64>(PageStatus::Unmapped);
}

VAddr& MemoryManager::PageSlot(GPUVAddr gpu_addr) {
    auto& block = page_table[(gpu_addr >> (PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
    if (!block) {
        block = std::make_unique<PageBlock>();
        for (unsigned index = 0; index < PAGE_BLOCK_SIZE; index++) {
            (*block)[index] = static_cast<u64>(PageStatus::Unmapped);
        }
    }
    return (*block)[(paddr >> PAGE_BITS) & PAGE_BLOCK_MASK];
    return (*block)[(gpu_addr >> PAGE_BITS) & PAGE_BLOCK_MASK];
}

} // namespace Tegra
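Review note: the reworked manager is easiest to read as one explicit mapping step bracketed by two translations. A usage sketch under the interface introduced here, with invented addresses (PAGE_SIZE is 64 KiB per the header below):

    Tegra::MemoryManager memory_manager;

    // Map two 64 KiB pages of guest memory into the GPU address space.
    const VAddr cpu_base = 0x10000000; // hypothetical guest address
    const Tegra::GPUVAddr gpu_base = memory_manager.MapBufferEx(cpu_base, 0x20000);

    // Forward translation; empty when the page is unmapped, or allocated but unbacked.
    const boost::optional<VAddr> cpu = memory_manager.GpuToCpuAddress(gpu_base + 0x1000);
    ASSERT(cpu && *cpu == cpu_base + 0x1000);

    // Reverse translation can legitimately return several GPU aliases, hence the vector.
    const std::vector<Tegra::GPUVAddr> aliases = memory_manager.CpuToGpuAddress(cpu_base + 0x1000);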
@@ -6,8 +6,11 @@

#include <array>
#include <memory>
#include <vector>

#include <boost/optional.hpp>

#include "common/common_types.h"
#include "core/memory.h"

namespace Tegra {

@@ -18,20 +21,21 @@ class MemoryManager final {
public:
    MemoryManager() = default;

    PAddr AllocateSpace(u64 size, u64 align);
    PAddr AllocateSpace(PAddr paddr, u64 size, u64 align);
    PAddr MapBufferEx(VAddr vaddr, u64 size);
    PAddr MapBufferEx(VAddr vaddr, PAddr paddr, u64 size);
    VAddr PhysicalToVirtualAddress(PAddr paddr);
    GPUVAddr AllocateSpace(u64 size, u64 align);
    GPUVAddr AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align);
    GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size);
    GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size);
    boost::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr);
    std::vector<GPUVAddr> CpuToGpuAddress(VAddr cpu_addr) const;

    static constexpr u64 PAGE_BITS = 16;
    static constexpr u64 PAGE_SIZE = 1 << PAGE_BITS;
    static constexpr u64 PAGE_MASK = PAGE_SIZE - 1;

private:
    boost::optional<PAddr> FindFreeBlock(u64 size, u64 align = 1);
    bool IsPageMapped(PAddr paddr);
    VAddr& PageSlot(PAddr paddr);
    boost::optional<GPUVAddr> FindFreeBlock(u64 size, u64 align = 1);
    bool IsPageMapped(GPUVAddr gpu_addr);
    VAddr& PageSlot(GPUVAddr gpu_addr);

    enum class PageStatus : u64 {
        Unmapped = 0xFFFFFFFFFFFFFFFFULL,
@@ -48,6 +52,14 @@ private:

    using PageBlock = std::array<VAddr, PAGE_BLOCK_SIZE>;
    std::array<std::unique_ptr<PageBlock>, PAGE_TABLE_SIZE> page_table{};

    struct MappedRegion {
        VAddr cpu_addr;
        GPUVAddr gpu_addr;
        u64 size;
    };

    std::vector<MappedRegion> mapped_regions;
};

} // namespace Tegra
@@ -6,6 +6,7 @@

#include "common/common_types.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"

struct ScreenInfo;

@@ -25,14 +26,14 @@ public:
    virtual void FlushAll() = 0;

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    virtual void FlushRegion(VAddr addr, u64 size) = 0;
    virtual void FlushRegion(Tegra::GPUVAddr addr, u64 size) = 0;

    /// Notify rasterizer that any caches of the specified region should be invalidated
    virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
    virtual void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    /// and invalidated
    virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
    virtual void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;

    /// Attempt to use a faster method to perform a display transfer with is_texture_copy = 0
    virtual bool AccelerateDisplayTransfer(const void* config) {
@@ -116,7 +116,7 @@ RasterizerOpenGL::RasterizerOpenGL() {

    glEnable(GL_BLEND);

    LOG_CRITICAL(Render_OpenGL, "Sync fixed function OpenGL state here!");
    NGLOG_CRITICAL(Render_OpenGL, "Sync fixed function OpenGL state here!");
}

RasterizerOpenGL::~RasterizerOpenGL() {
@@ -150,9 +150,8 @@ std::pair<u8*, GLintptr> RasterizerOpenGL::SetupVertexArrays(u8* array_ptr,
    u64 size = end - start + 1;

    // Copy vertex array data
    const VAddr data_addr{memory_manager->PhysicalToVirtualAddress(start)};
    res_cache.FlushRegion(data_addr, size, nullptr);
    Memory::ReadBlock(data_addr, array_ptr, size);
    res_cache.FlushRegion(start, size, nullptr);
    Memory::ReadBlock(*memory_manager->GpuToCpuAddress(start), array_ptr, size);

    // Bind the vertex array to the buffer at the current offset.
    glBindVertexBuffer(index, stream_buffer->GetHandle(), buffer_offset, vertex_array.stride);
@@ -233,8 +232,8 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset) {
    // Fetch program code from memory
    GLShader::ProgramCode program_code;
    const u64 gpu_address{gpu.regs.code_address.CodeAddress() + shader_config.offset};
    const VAddr cpu_address{gpu.memory_manager.PhysicalToVirtualAddress(gpu_address)};
    Memory::ReadBlock(cpu_address, program_code.data(), program_code.size() * sizeof(u64));
    const boost::optional<VAddr> cpu_address{gpu.memory_manager.GpuToCpuAddress(gpu_address)};
    Memory::ReadBlock(*cpu_address, program_code.data(), program_code.size() * sizeof(u64));
    GLShader::ShaderSetup setup{std::move(program_code)};

    GLShader::ShaderEntries shader_resources;
@@ -253,8 +252,8 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset) {
        break;
    }
    default:
        LOG_CRITICAL(HW_GPU, "Unimplemented shader index=%d, enable=%d, offset=0x%08X", index,
                     shader_config.enable.Value(), shader_config.offset);
        NGLOG_CRITICAL(HW_GPU, "Unimplemented shader index={}, enable={}, offset={:#010X}",
                       index, shader_config.enable.Value(), shader_config.offset);
        UNREACHABLE();
    }

@@ -394,9 +393,9 @@ void RasterizerOpenGL::DrawArrays() {
    GLintptr index_buffer_offset = 0;
    if (is_indexed) {
        const auto& memory_manager = Core::System().GetInstance().GPU().memory_manager;
        const VAddr index_data_addr{
            memory_manager->PhysicalToVirtualAddress(regs.index_array.StartAddress())};
        Memory::ReadBlock(index_data_addr, offseted_buffer, index_buffer_size);
        const boost::optional<VAddr> index_data_addr{
            memory_manager->GpuToCpuAddress(regs.index_array.StartAddress())};
        Memory::ReadBlock(*index_data_addr, offseted_buffer, index_buffer_size);

        index_buffer_offset = buffer_offset;
        offseted_buffer += index_buffer_size;
@@ -519,17 +518,17 @@ void RasterizerOpenGL::FlushAll() {
    res_cache.FlushAll();
}

void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
void RasterizerOpenGL::FlushRegion(Tegra::GPUVAddr addr, u64 size) {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);
    res_cache.FlushRegion(addr, size);
}

void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
void RasterizerOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);
    res_cache.InvalidateRegion(addr, size, nullptr);
}

void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
void RasterizerOpenGL::FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);
    res_cache.FlushRegion(addr, size);
    res_cache.InvalidateRegion(addr, size, nullptr);
@@ -560,7 +559,8 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& framebu
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);

    SurfaceParams src_params;
    src_params.addr = framebuffer_addr;
    src_params.cpu_addr = framebuffer_addr;
    src_params.addr = res_cache.TryFindFramebufferGpuAddress(framebuffer_addr).get_value_or(0);
    src_params.width = std::min(framebuffer.width, pixel_stride);
    src_params.height = framebuffer.height;
    src_params.stride = pixel_stride;
@@ -659,9 +659,9 @@ u32 RasterizerOpenGL::SetupConstBuffers(Maxwell::ShaderStage stage, GLuint progr
    buffer_draw_state.enabled = true;
    buffer_draw_state.bindpoint = current_bindpoint + bindpoint;

    VAddr addr = gpu.memory_manager->PhysicalToVirtualAddress(buffer.address);
    boost::optional<VAddr> addr = gpu.memory_manager->GpuToCpuAddress(buffer.address);
    std::vector<u8> data(used_buffer.GetSize() * sizeof(float));
    Memory::ReadBlock(addr, data.data(), data.size());
    Memory::ReadBlock(*addr, data.data(), data.size());

    glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer_draw_state.ssbo);
    glBufferData(GL_SHADER_STORAGE_BUFFER, data.size(), data.data(), GL_DYNAMIC_DRAW);
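Review note: NGLOG_* takes fmt-style placeholders rather than printf conversions, and types without a formatter (scoped enums, BitField wrappers) need an explicit integral cast, as the topology and shader cases above show. The two conventions side by side, sketch only (the log text is illustrative):

    // printf style (old): zero-padded hex needs the width spelled out.
    LOG_CRITICAL(HW_GPU, "offset=0x%08X", shader_config.offset);
    // fmt style (new): {} deduces the type; {:#010X} prints 0x-prefixed,
    // zero-padded hex; enum-like values are cast to an integral type first.
    NGLOG_CRITICAL(HW_GPU, "offset={:#010X}, topology={}", shader_config.offset,
                   static_cast<u32>(regs.draw.topology.Value()));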
@@ -11,6 +11,7 @@
#include <glad/glad.h>
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
@@ -29,9 +30,9 @@ public:
    void DrawArrays() override;
    void NotifyMaxwellRegisterChanged(u32 method) override;
    void FlushAll() override;
    void FlushRegion(VAddr addr, u64 size) override;
    void InvalidateRegion(VAddr addr, u64 size) override;
    void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
    void FlushRegion(Tegra::GPUVAddr addr, u64 size) override;
    void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
    void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
    bool AccelerateDisplayTransfer(const void* config) override;
    bool AccelerateTextureCopy(const void* config) override;
    bool AccelerateFill(const void* config) override;
@@ -41,18 +41,15 @@ struct FormatTuple {
    GLenum format;
    GLenum type;
    bool compressed;
    // How many pixels in the original texture are equivalent to one pixel in the compressed
    // texture.
    u32 compression_factor;
};

static constexpr std::array<FormatTuple, SurfaceParams::MaxPixelFormat> tex_format_tuples = {{
    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false, 1},                     // ABGR8
    {GL_RGB, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, false, 1},                        // B5G6R5
    {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, false, 1},               // A2B10G10R10
    {GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true, 16},   // DXT1
    {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true, 16}, // DXT23
    {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true, 16}, // DXT45
    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false},                     // ABGR8
    {GL_RGB, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, false},                        // B5G6R5
    {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, false},               // A2B10G10R10
    {GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true},    // DXT1
    {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true},  // DXT23
    {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true},  // DXT45
}};
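Review note: compression_factor can be dropped from FormatTuple because the factor is now derivable from the pixel format alone (GetCompresssionFactor in the header further down). Worked numbers for DXT1, assuming the standard S3TC 4x4-block encoding:

    // DXT1 packs a 4x4 texel block into 64 bits, so one "compressed pixel"
    // stands for a 4x4 block:
    //   per-axis compression factor     = 4
    //   GetFormatBpp(PixelFormat::DXT1) = 64 bits per compressed pixel
    // A 256x256 DXT1 texture is therefore tracked as 64x64 compressed pixels:
    //   64 * 64 * (64 / CHAR_BIT) = 32768 bytes, matching the real S3TC size.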
static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType component_type) {
@@ -83,26 +80,30 @@ static u16 GetResolutionScaleFactor() {
}

template <bool morton_to_gl, PixelFormat format>
void MortonCopy(u32 stride, u32 block_height, u32 height, u8* gl_buffer, VAddr base, VAddr start,
                VAddr end) {
    constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / 8;
void MortonCopy(u32 stride, u32 block_height, u32 height, u8* gl_buffer, Tegra::GPUVAddr base,
                Tegra::GPUVAddr start, Tegra::GPUVAddr end) {
    constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / CHAR_BIT;
    constexpr u32 gl_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format);
    const auto& gpu = Core::System::GetInstance().GPU();

    if (morton_to_gl) {
        auto data = Tegra::Texture::UnswizzleTexture(
            base, SurfaceParams::TextureFormatFromPixelFormat(format), stride, height,
            block_height);
            *gpu.memory_manager->GpuToCpuAddress(base),
            SurfaceParams::TextureFormatFromPixelFormat(format), stride, height, block_height);
        std::memcpy(gl_buffer, data.data(), data.size());
    } else {
        // TODO(bunnei): Assumes the default rendering GOB size of 16 (128 lines). We should check
        // the configuration for this and perform more generic un/swizzle
        LOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!");
        VideoCore::MortonCopyPixels128(stride, height, bytes_per_pixel, gl_bytes_per_pixel,
                                       Memory::GetPointer(base), gl_buffer, morton_to_gl);
        NGLOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!");
        VideoCore::MortonCopyPixels128(
            stride, height, bytes_per_pixel, gl_bytes_per_pixel,
            Memory::GetPointer(*gpu.memory_manager->GpuToCpuAddress(base)), gl_buffer,
            morton_to_gl);
    }
}

static constexpr std::array<void (*)(u32, u32, u32, u8*, VAddr, VAddr, VAddr),
static constexpr std::array<void (*)(u32, u32, u32, u8*, Tegra::GPUVAddr, Tegra::GPUVAddr,
                                     Tegra::GPUVAddr),
                            SurfaceParams::MaxPixelFormat>
    morton_to_gl_fns = {
        MortonCopy<true, PixelFormat::ABGR8>, MortonCopy<true, PixelFormat::B5G6R5>,
@@ -110,7 +111,8 @@ static constexpr std::array<void (*)(u32, u32, u32, u8*, VAddr, VAddr, VAddr),
        MortonCopy<true, PixelFormat::DXT23>, MortonCopy<true, PixelFormat::DXT45>,
};

static constexpr std::array<void (*)(u32, u32, u32, u8*, VAddr, VAddr, VAddr),
static constexpr std::array<void (*)(u32, u32, u32, u8*, Tegra::GPUVAddr, Tegra::GPUVAddr,
                                     Tegra::GPUVAddr),
                            SurfaceParams::MaxPixelFormat>
    gl_to_morton_fns = {
        MortonCopy<false, PixelFormat::ABGR8>,
@@ -219,9 +221,9 @@ SurfaceParams SurfaceParams::FromInterval(SurfaceInterval interval) const {
    SurfaceParams params = *this;
    const u32 tiled_size = is_tiled ? 8 : 1;
    const u64 stride_tiled_bytes = BytesInPixels(stride * tiled_size);
    VAddr aligned_start =
    Tegra::GPUVAddr aligned_start =
        addr + Common::AlignDown(boost::icl::first(interval) - addr, stride_tiled_bytes);
    VAddr aligned_end =
    Tegra::GPUVAddr aligned_end =
        addr + Common::AlignUp(boost::icl::last_next(interval) - addr, stride_tiled_bytes);

    if (aligned_end - aligned_start > stride_tiled_bytes) {
@@ -342,6 +344,13 @@ bool SurfaceParams::CanTexCopy(const SurfaceParams& texcopy_params) const {
    return FromInterval(texcopy_params.GetInterval()).GetInterval() == texcopy_params.GetInterval();
}

VAddr SurfaceParams::GetCpuAddr() const {
    // When this function is used, only cpu_addr or (GPU) addr should be set, not both
    ASSERT(!(cpu_addr && addr));
    const auto& gpu = Core::System::GetInstance().GPU();
    return cpu_addr.get_value_or(*gpu.memory_manager->GpuToCpuAddress(addr));
}

bool CachedSurface::CanFill(const SurfaceParams& dest_surface,
                            SurfaceInterval fill_interval) const {
    if (type == SurfaceType::Fill && IsRegionValid(fill_interval) &&
@@ -349,9 +358,9 @@ bool CachedSurface::CanFill(const SurfaceParams& dest_surface,
        boost::icl::last_next(fill_interval) <= end && // dest_surface is within our fill range
        dest_surface.FromInterval(fill_interval).GetInterval() ==
            fill_interval) { // make sure interval is a rectangle in dest surface
        if (fill_size * 8 != dest_surface.GetFormatBpp()) {
        if (fill_size * CHAR_BIT != dest_surface.GetFormatBpp()) {
            // Check if bits repeat for our fill_size
            const u32 dest_bytes_per_pixel = std::max(dest_surface.GetFormatBpp() / 8, 1u);
            const u32 dest_bytes_per_pixel = std::max(dest_surface.GetFormatBpp() / CHAR_BIT, 1u);
            std::vector<u8> fill_test(fill_size * dest_bytes_per_pixel);

            for (u32 i = 0; i < dest_bytes_per_pixel; ++i)
@@ -456,15 +465,15 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac
}

MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 64, 192));
void CachedSurface::LoadGLBuffer(VAddr load_start, VAddr load_end) {
void CachedSurface::LoadGLBuffer(Tegra::GPUVAddr load_start, Tegra::GPUVAddr load_end) {
    ASSERT(type != SurfaceType::Fill);

    u8* const texture_src_data = Memory::GetPointer(addr);
    u8* const texture_src_data = Memory::GetPointer(GetCpuAddr());
    if (texture_src_data == nullptr)
        return;

    if (gl_buffer == nullptr) {
        gl_buffer_size = width * height * GetGLBytesPerPixel(pixel_format);
        gl_buffer_size = GetActualWidth() * GetActualHeight() * GetGLBytesPerPixel(pixel_format);
        gl_buffer.reset(new u8[gl_buffer_size]);
    }

@@ -479,14 +488,15 @@ void CachedSurface::LoadGLBuffer(VAddr load_start, VAddr load_end) {
        std::memcpy(&gl_buffer[start_offset], texture_src_data + start_offset,
                    bytes_per_pixel * width * height);
    } else {
        morton_to_gl_fns[static_cast<size_t>(pixel_format)](
            stride, block_height, height, &gl_buffer[0], addr, load_start, load_end);
        morton_to_gl_fns[static_cast<size_t>(pixel_format)](GetActualWidth(), block_height,
                                                            GetActualHeight(), &gl_buffer[0], addr,
                                                            load_start, load_end);
    }
}

MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64));
void CachedSurface::FlushGLBuffer(VAddr flush_start, VAddr flush_end) {
    u8* const dst_buffer = Memory::GetPointer(addr);
void CachedSurface::FlushGLBuffer(Tegra::GPUVAddr flush_start, Tegra::GPUVAddr flush_end) {
    u8* const dst_buffer = Memory::GetPointer(GetCpuAddr());
    if (dst_buffer == nullptr)
        return;

@@ -536,7 +546,8 @@ void CachedSurface::UploadGLTexture(const MathUtil::Rectangle<u32>& rect, GLuint

    MICROPROFILE_SCOPE(OpenGL_TextureUL);

    ASSERT(gl_buffer_size == width * height * GetGLBytesPerPixel(pixel_format));
    ASSERT(gl_buffer_size ==
           GetActualWidth() * GetActualHeight() * GetGLBytesPerPixel(pixel_format));

    // Load data from memory to the surface
    GLint x0 = static_cast<GLint>(rect.left);
@@ -571,11 +582,9 @@ void CachedSurface::UploadGLTexture(const MathUtil::Rectangle<u32>& rect, GLuint
    glActiveTexture(GL_TEXTURE0);
    if (tuple.compressed) {
        glCompressedTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format,
                               static_cast<GLsizei>(rect.GetWidth()),
                               static_cast<GLsizei>(rect.GetHeight()), 0,
                               rect.GetWidth() * rect.GetHeight() *
                                   GetGLBytesPerPixel(pixel_format) / tuple.compression_factor,
                               &gl_buffer[buffer_offset]);
                               static_cast<GLsizei>(rect.GetWidth() * GetCompresssionFactor()),
                               static_cast<GLsizei>(rect.GetHeight() * GetCompresssionFactor()), 0,
                               size, &gl_buffer[buffer_offset]);
    } else {
        glTexSubImage2D(GL_TEXTURE_2D, 0, x0, y0, static_cast<GLsizei>(rect.GetWidth()),
                        static_cast<GLsizei>(rect.GetHeight()), tuple.format, tuple.type,
@@ -945,6 +954,33 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, ScaleMatc
    return surface;
}

boost::optional<Tegra::GPUVAddr> RasterizerCacheOpenGL::TryFindFramebufferGpuAddress(
    VAddr cpu_addr) const {
    // Tries to find the GPU address of a framebuffer based on the CPU address. This is because
    // final output framebuffers are specified by CPU address, but internally our GPU cache uses GPU
    // addresses. We iterate through all cached framebuffers, and compare their starting CPU address
    // to the one provided. This is obviously not great, and won't work if the framebuffer overlaps
    // surfaces.

    std::vector<Tegra::GPUVAddr> gpu_addresses;
    for (const auto& pair : surface_cache) {
        for (const auto& surface : pair.second) {
            const VAddr surface_cpu_addr = surface->GetCpuAddr();
            if (cpu_addr >= surface_cpu_addr && cpu_addr < (surface_cpu_addr + surface->size)) {
                ASSERT_MSG(cpu_addr == surface_cpu_addr, "overlapping surfaces are unsupported");
                gpu_addresses.push_back(surface->addr);
            }
        }
    }

    if (gpu_addresses.empty()) {
        return {};
    }

    ASSERT_MSG(gpu_addresses.size() == 1, ">1 surface is unsupported");
    return gpu_addresses[0];
}
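Review note: the only caller so far is AccelerateDisplay() above, which collapses the optional with get_value_or(0). If more callers appear, a shape like the following avoids treating address 0 as a sentinel (a sketch, not part of this change):

    const boost::optional<Tegra::GPUVAddr> gpu_addr =
        res_cache.TryFindFramebufferGpuAddress(framebuffer_addr);
    if (!gpu_addr) {
        return false; // the GPU never wrote this framebuffer; fall back to the CPU path
    }
    src_params.addr = *gpu_addr;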
SurfaceRect_Tuple RasterizerCacheOpenGL::GetSurfaceSubRect(const SurfaceParams& params,
                                                           ScaleMatch match_res_scale,
                                                           bool load_if_create) {
@@ -1028,11 +1064,11 @@ Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextu
    auto& gpu = Core::System::GetInstance().GPU();

    SurfaceParams params;
    params.addr = gpu.memory_manager->PhysicalToVirtualAddress(config.tic.Address());
    params.width = config.tic.Width();
    params.height = config.tic.Height();
    params.addr = config.tic.Address();
    params.is_tiled = config.tic.IsTiled();
    params.pixel_format = SurfaceParams::PixelFormatFromTextureFormat(config.tic.format);
    params.width = config.tic.Width() / params.GetCompresssionFactor();
    params.height = config.tic.Height() / params.GetCompresssionFactor();

    // TODO(Subv): Different types per component are not supported.
    ASSERT(config.tic.r_type.Value() == config.tic.g_type.Value() &&
@@ -1045,7 +1081,7 @@ Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextu
        params.block_height = config.tic.BlockHeight();
    } else {
        // Use the texture-provided stride value if the texture isn't tiled.
        params.stride = params.PixelsInBytes(config.tic.Pitch());
        params.stride = static_cast<u32>(params.PixelsInBytes(config.tic.Pitch()));
    }

    params.UpdateParams();
@@ -1073,11 +1109,10 @@ Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextu
SurfaceSurfaceRect_Tuple RasterizerCacheOpenGL::GetFramebufferSurfaces(
    bool using_color_fb, bool using_depth_fb, const MathUtil::Rectangle<s32>& viewport) {
    const auto& regs = Core::System().GetInstance().GPU().Maxwell3D().regs;
    const auto& memory_manager = Core::System().GetInstance().GPU().memory_manager;
    const auto& config = regs.rt[0];

    // TODO(bunnei): This is hard coded to use just the first render buffer
    LOG_WARNING(Render_OpenGL, "hard-coded for render target 0!");
    NGLOG_WARNING(Render_OpenGL, "hard-coded for render target 0!");

    // update resolution_scale_factor and reset cache if changed
    // TODO (bunnei): This code was ported as-is from Citra, and is technically not thread-safe. We
@@ -1106,7 +1141,7 @@ SurfaceSurfaceRect_Tuple RasterizerCacheOpenGL::GetFramebufferSurfaces(
    color_params.block_height = Tegra::Texture::TICEntry::DefaultBlockHeight;
    SurfaceParams depth_params = color_params;

    color_params.addr = memory_manager->PhysicalToVirtualAddress(config.Address());
    color_params.addr = config.Address();
    color_params.pixel_format = SurfaceParams::PixelFormatFromRenderTargetFormat(config.format);
    color_params.component_type = SurfaceParams::ComponentTypeFromRenderTarget(config.format);
    color_params.UpdateParams();
@@ -1122,8 +1157,8 @@ SurfaceSurfaceRect_Tuple RasterizerCacheOpenGL::GetFramebufferSurfaces(
    // Make sure that framebuffers don't overlap if both color and depth are being used
    if (using_color_fb && using_depth_fb &&
        boost::icl::length(color_vp_interval & depth_vp_interval)) {
        LOG_CRITICAL(Render_OpenGL, "Color and depth framebuffer memory regions overlap; "
                                    "overlapping framebuffers not supported!");
        NGLOG_CRITICAL(Render_OpenGL, "Color and depth framebuffer memory regions overlap; "
                                      "overlapping framebuffers not supported!");
        using_depth_fb = false;
    }

@@ -1222,7 +1257,8 @@ void RasterizerCacheOpenGL::DuplicateSurface(const Surface& src_surface,
    }
}

void RasterizerCacheOpenGL::ValidateSurface(const Surface& surface, VAddr addr, u64 size) {
void RasterizerCacheOpenGL::ValidateSurface(const Surface& surface, Tegra::GPUVAddr addr,
                                            u64 size) {
    if (size == 0)
        return;

@@ -1261,7 +1297,7 @@ void RasterizerCacheOpenGL::ValidateSurface(const Surface& surface, VAddr addr,
    }
}

void RasterizerCacheOpenGL::FlushRegion(VAddr addr, u64 size, Surface flush_surface) {
void RasterizerCacheOpenGL::FlushRegion(Tegra::GPUVAddr addr, u64 size, Surface flush_surface) {
    if (size == 0)
        return;

@@ -1297,7 +1333,8 @@ void RasterizerCacheOpenGL::FlushAll() {
    FlushRegion(0, Kernel::VMManager::MAX_ADDRESS);
}

void RasterizerCacheOpenGL::InvalidateRegion(VAddr addr, u64 size, const Surface& region_owner) {
void RasterizerCacheOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size,
                                             const Surface& region_owner) {
    if (size == 0)
        return;

@@ -1390,10 +1427,10 @@ void RasterizerCacheOpenGL::UnregisterSurface(const Surface& surface) {
    surface_cache.subtract({surface->GetInterval(), SurfaceSet{surface}});
}

void RasterizerCacheOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
    const u64 num_pages =
        ((addr + size - 1) >> Memory::PAGE_BITS) - (addr >> Memory::PAGE_BITS) + 1;
    const u64 page_start = addr >> Memory::PAGE_BITS;
void RasterizerCacheOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {
    const u64 num_pages = ((addr + size - 1) >> Tegra::MemoryManager::PAGE_BITS) -
                          (addr >> Tegra::MemoryManager::PAGE_BITS) + 1;
    const u64 page_start = addr >> Tegra::MemoryManager::PAGE_BITS;
    const u64 page_end = page_start + num_pages;

    // Interval maps will erase segments if count reaches 0, so if delta is negative we have to
@@ -1406,8 +1443,10 @@ void RasterizerCacheOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int del
        const auto interval = pair.first & pages_interval;
        const int count = pair.second;

        const VAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS;
        const VAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS;
        const Tegra::GPUVAddr interval_start_addr = boost::icl::first(interval)
                                                    << Tegra::MemoryManager::PAGE_BITS;
        const Tegra::GPUVAddr interval_end_addr = boost::icl::last_next(interval)
                                                  << Tegra::MemoryManager::PAGE_BITS;
        const u64 interval_size = interval_end_addr - interval_start_addr;

        if (delta > 0 && count == delta)
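Review note: the page bookkeeping now counts 64 KiB GPU pages (PAGE_BITS = 16) rather than CPU pages. A worked example of the num_pages expression for a region that straddles one page boundary:

    // addr = 0x1FFF0, size = 0x20, PAGE_BITS = 16:
    //   (addr + size - 1) >> 16  ->  0x2000F >> 16  ->  2
    //   addr >> 16               ->  0x1FFF0 >> 16  ->  1
    //   num_pages = 2 - 1 + 1    ->  2 pages touched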
@@ -17,12 +17,14 @@
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#include <boost/optional.hpp>
#include <glad/glad.h>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/math_util.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/textures/texture.h"

@@ -30,9 +32,9 @@ struct CachedSurface;
using Surface = std::shared_ptr<CachedSurface>;
using SurfaceSet = std::set<Surface>;

using SurfaceRegions = boost::icl::interval_set<VAddr>;
using SurfaceMap = boost::icl::interval_map<VAddr, Surface>;
using SurfaceCache = boost::icl::interval_map<VAddr, SurfaceSet>;
using SurfaceRegions = boost::icl::interval_set<Tegra::GPUVAddr>;
using SurfaceMap = boost::icl::interval_map<Tegra::GPUVAddr, Surface>;
using SurfaceCache = boost::icl::interval_map<Tegra::GPUVAddr, SurfaceSet>;

using SurfaceInterval = SurfaceCache::interval_type;
static_assert(std::is_same<SurfaceRegions::interval_type, SurfaceCache::interval_type>() &&
@@ -82,23 +84,49 @@ struct SurfaceParams {
        Invalid = 4,
    };

    static constexpr unsigned int GetFormatBpp(PixelFormat format) {
    /**
     * Gets the compression factor for the specified PixelFormat. This applies to just the
     * "compressed width" and "compressed height", not the overall compression factor of a
     * compressed image. This is used for maintaining proper surface sizes for compressed texture
     * formats.
     */
    static constexpr u32 GetCompresssionFactor(PixelFormat format) {
        if (format == PixelFormat::Invalid)
            return 0;

        constexpr std::array<unsigned int, MaxPixelFormat> bpp_table = {
        constexpr std::array<u32, MaxPixelFormat> compression_factor_table = {{
            1, // ABGR8
            1, // B5G6R5
            1, // A2B10G10R10
            4, // DXT1
            4, // DXT23
            4, // DXT45
        }};

        ASSERT(static_cast<size_t>(format) < compression_factor_table.size());
        return compression_factor_table[static_cast<size_t>(format)];
    }
    u32 GetCompresssionFactor() const {
        return GetCompresssionFactor(pixel_format);
    }

    static constexpr u32 GetFormatBpp(PixelFormat format) {
        if (format == PixelFormat::Invalid)
            return 0;

        constexpr std::array<u32, MaxPixelFormat> bpp_table = {{
            32,  // ABGR8
            16,  // B5G6R5
            32,  // A2B10G10R10
            64,  // DXT1
            128, // DXT23
            128, // DXT45
        };
        }};

        ASSERT(static_cast<size_t>(format) < bpp_table.size());
        return bpp_table[static_cast<size_t>(format)];
    }
    unsigned int GetFormatBpp() const {
    u32 GetFormatBpp() const {
        return GetFormatBpp(pixel_format);
    }
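Review note: after this split, GetFormatBpp() means bits per stored (possibly compressed) pixel, and the two tables have to be read together. A sanity check with BytesInPixels() for DXT1, assuming the S3TC sizes above:

    // A row of 128 compressed DXT1 pixels:
    //   BytesInPixels(128) = 128 * GetFormatBpp(DXT1) / CHAR_BIT
    //                      = 128 * 64 / 8 = 1024 bytes per row of 4x4 blocks
    // while GetActualWidth() reports 128 * 4 = 512 texels for the same row.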
@@ -253,6 +281,24 @@ struct SurfaceParams {
    // Returns the region of the biggest valid rectangle within interval
    SurfaceInterval GetCopyableInterval(const Surface& src_surface) const;

    /**
     * Gets the actual width (in pixels) of the surface. This is provided because `width` is used
     * for tracking the surface region in memory, which may be compressed for certain formats. In
     * this scenario, `width` is actually the compressed width.
     */
    u32 GetActualWidth() const {
        return width * GetCompresssionFactor();
    }

    /**
     * Gets the actual height (in pixels) of the surface. This is provided because `height` is used
     * for tracking the surface region in memory, which may be compressed for certain formats. In
     * this scenario, `height` is actually the compressed height.
     */
    u32 GetActualHeight() const {
        return height * GetCompresssionFactor();
    }

    u32 GetScaledWidth() const {
        return width * res_scale;
    }
@@ -277,6 +323,8 @@ struct SurfaceParams {
        return pixels * GetFormatBpp(pixel_format) / CHAR_BIT;
    }

    VAddr GetCpuAddr() const;

    bool ExactMatch(const SurfaceParams& other_surface) const;
    bool CanSubRect(const SurfaceParams& sub_surface) const;
    bool CanExpand(const SurfaceParams& expanded_surface) const;
@@ -285,8 +333,9 @@ struct SurfaceParams {
    MathUtil::Rectangle<u32> GetSubRect(const SurfaceParams& sub_surface) const;
    MathUtil::Rectangle<u32> GetScaledSubRect(const SurfaceParams& sub_surface) const;

    VAddr addr = 0;
    VAddr end = 0;
    Tegra::GPUVAddr addr = 0;
    Tegra::GPUVAddr end = 0;
    boost::optional<VAddr> cpu_addr;
    u64 size = 0;

    u32 width = 0;
@@ -325,15 +374,15 @@ struct CachedSurface : SurfaceParams {
        if (format == PixelFormat::Invalid)
            return 0;

        return SurfaceParams::GetFormatBpp(format) / 8;
        return SurfaceParams::GetFormatBpp(format) / CHAR_BIT;
    }

    std::unique_ptr<u8[]> gl_buffer;
    size_t gl_buffer_size = 0;

    // Read/Write data in Switch memory to/from gl_buffer
    void LoadGLBuffer(VAddr load_start, VAddr load_end);
    void FlushGLBuffer(VAddr flush_start, VAddr flush_end);
    void LoadGLBuffer(Tegra::GPUVAddr load_start, Tegra::GPUVAddr load_end);
    void FlushGLBuffer(Tegra::GPUVAddr flush_start, Tegra::GPUVAddr flush_end);

    // Upload/Download data in gl_buffer in/to this surface's texture
    void UploadGLTexture(const MathUtil::Rectangle<u32>& rect, GLuint read_fb_handle,
@@ -362,6 +411,9 @@ public:
    Surface GetSurface(const SurfaceParams& params, ScaleMatch match_res_scale,
                       bool load_if_create);

    /// Tries to find a framebuffer GPU address based on the provided CPU address
    boost::optional<Tegra::GPUVAddr> TryFindFramebufferGpuAddress(VAddr cpu_addr) const;

    /// Attempt to find a subrect (resolution scaled) of a surface, otherwise loads a texture from
    /// Switch memory to OpenGL and caches it (if not already cached)
    SurfaceRect_Tuple GetSurfaceSubRect(const SurfaceParams& params, ScaleMatch match_res_scale,
@@ -381,10 +433,10 @@ public:
    SurfaceRect_Tuple GetTexCopySurface(const SurfaceParams& params);

    /// Write any cached resources overlapping the region back to memory (if dirty)
    void FlushRegion(VAddr addr, u64 size, Surface flush_surface = nullptr);
    void FlushRegion(Tegra::GPUVAddr addr, u64 size, Surface flush_surface = nullptr);

    /// Mark region as being invalidated by region_owner (nullptr if Switch memory)
    void InvalidateRegion(VAddr addr, u64 size, const Surface& region_owner);
    void InvalidateRegion(Tegra::GPUVAddr addr, u64 size, const Surface& region_owner);

    /// Flush all cached resources tracked by this cache manager
    void FlushAll();
@@ -393,7 +445,7 @@ private:
    void DuplicateSurface(const Surface& src_surface, const Surface& dest_surface);

    /// Update surface's texture for given region when necessary
    void ValidateSurface(const Surface& surface, VAddr addr, u64 size);
    void ValidateSurface(const Surface& surface, Tegra::GPUVAddr addr, u64 size);

    /// Create a new surface
    Surface CreateSurface(const SurfaceParams& params);
@@ -405,7 +457,7 @@ private:
    void UnregisterSurface(const Surface& surface);

    /// Increase/decrease the number of surfaces in pages touching the specified region
    void UpdatePagesCachedCount(VAddr addr, u64 size, int delta);
    void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta);

    SurfaceCache surface_cache;
    PageMap cached_pages;
@@ -152,7 +152,8 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf
    screen_info.display_texture = screen_info.texture.resource.handle;
    screen_info.display_texcoords = MathUtil::Rectangle<float>(0.f, 0.f, 1.f, 1.f);

    Rasterizer()->FlushRegion(framebuffer_addr, size_in_bytes);
    Memory::RasterizerFlushVirtualRegion(framebuffer_addr, size_in_bytes,
                                         Memory::FlushMode::Flush);

    VideoCore::MortonCopyPixels128(framebuffer.width, framebuffer.height, bytes_per_pixel, 4,
                                   Memory::GetPointer(framebuffer_addr),
@@ -269,10 +270,9 @@ void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture,
    GLint internal_format;
    switch (framebuffer.pixel_format) {
    case Tegra::FramebufferConfig::PixelFormat::ABGR8:
        // Use RGBA8 and swap in the fragment shader
        internal_format = GL_RGBA;
        texture.gl_format = GL_RGBA;
        texture.gl_type = GL_UNSIGNED_INT_8_8_8_8;
        texture.gl_type = GL_UNSIGNED_INT_8_8_8_8_REV;
        gl_framebuffer_data.resize(texture.width * texture.height * 4);
        break;
    default:
@@ -302,8 +302,8 @@ void RendererOpenGL::DrawScreenTriangles(const ScreenInfo& screen_info, float x,
        right = texcoords.left;
    } else {
        // Other transformations are unsupported
        LOG_CRITICAL(Render_OpenGL, "Unsupported framebuffer_transform_flags=%d",
                     framebuffer_transform_flags);
        NGLOG_CRITICAL(Render_OpenGL, "Unsupported framebuffer_transform_flags={}",
                       static_cast<u32>(framebuffer_transform_flags));
        UNIMPLEMENTED();
    }
}
@@ -428,9 +428,9 @@ bool RendererOpenGL::Init() {
    const char* gpu_vendor{reinterpret_cast<char const*>(glGetString(GL_VENDOR))};
    const char* gpu_model{reinterpret_cast<char const*>(glGetString(GL_RENDERER))};

    LOG_INFO(Render_OpenGL, "GL_VERSION: %s", gl_version);
    LOG_INFO(Render_OpenGL, "GL_VENDOR: %s", gpu_vendor);
    LOG_INFO(Render_OpenGL, "GL_RENDERER: %s", gpu_model);
    NGLOG_INFO(Render_OpenGL, "GL_VERSION: {}", gl_version);
    NGLOG_INFO(Render_OpenGL, "GL_VENDOR: {}", gpu_vendor);
    NGLOG_INFO(Render_OpenGL, "GL_RENDERER: {}", gpu_model);

    Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Vendor", gpu_vendor);
    Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model);
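Review note: swapping GL_UNSIGNED_INT_8_8_8_8 for GL_UNSIGNED_INT_8_8_8_8_REV changes which end of the packed u32 OpenGL reads the first RGBA component from, which, assuming a little-endian host, lines the components up with the guest's ABGR8 byte layout. Illustration only:

    // Guest framebuffer texel, bytes in memory order: R, G, B, A
    // (read as a little-endian u32: 0xAABBGGRR)
    // GL_UNSIGNED_INT_8_8_8_8     takes components from the MSB down -> A, B, G, R
    // GL_UNSIGNED_INT_8_8_8_8_REV takes components from the LSB up   -> R, G, B, A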
@@ -4,6 +4,7 @@

#include <cstring>
#include "common/assert.h"
#include "core/memory.h"
#include "video_core/textures/decoders.h"
#include "video_core/textures/texture.h"


@@ -24,9 +24,9 @@ bool Init(EmuWindow* emu_window) {
    g_renderer = std::make_unique<RendererOpenGL>();
    g_renderer->SetWindow(g_emu_window);
    if (g_renderer->Init()) {
        LOG_DEBUG(Render, "initialized OK");
        NGLOG_DEBUG(Render, "initialized OK");
    } else {
        LOG_CRITICAL(Render, "initialization failed !");
        NGLOG_CRITICAL(Render, "initialization failed !");
        return false;
    }
    return true;
@@ -36,7 +36,7 @@ bool Init(EmuWindow* emu_window) {
void Shutdown() {
    g_renderer.reset();

    LOG_DEBUG(Render, "shutdown OK");
    NGLOG_DEBUG(Render, "shutdown OK");
}

} // namespace VideoCore
@@ -378,10 +378,10 @@ void GraphicsSurfaceWidget::OnUpdate() {
    // TODO: Implement a good way to visualize alpha components!

    QImage decoded_image(surface_width, surface_height, QImage::Format_ARGB32);
    VAddr address = gpu.memory_manager->PhysicalToVirtualAddress(surface_address);
    boost::optional<VAddr> address = gpu.memory_manager->GpuToCpuAddress(surface_address);

    auto unswizzled_data =
        Tegra::Texture::UnswizzleTexture(address, surface_format, surface_width, surface_height);
        Tegra::Texture::UnswizzleTexture(*address, surface_format, surface_width, surface_height);

    auto texture_data = Tegra::Texture::DecodeTexture(unswizzled_data, surface_format,
                                                      surface_width, surface_height);
@@ -437,9 +437,9 @@ void GraphicsSurfaceWidget::SaveSurface() {
        pixmap->save(&file, "PNG");
    } else if (selectedFilter == bin_filter) {
        auto& gpu = Core::System::GetInstance().GPU();
        VAddr address = gpu.memory_manager->PhysicalToVirtualAddress(surface_address);
        boost::optional<VAddr> address = gpu.memory_manager->GpuToCpuAddress(surface_address);

        const u8* buffer = Memory::GetPointer(address);
        const u8* buffer = Memory::GetPointer(*address);
        ASSERT_MSG(buffer != nullptr, "Memory not accessible");

        QFile file(filename);