Compare commits

...

79 Commits

Author SHA1 Message Date
Lioncash
1913cf4783 kernel/shared_memory: Remove unnecessary semicolon at end of ConvertPermissions()
Functions don't need to be terminated by semicolons.
2018-04-25 20:32:16 -04:00
Lioncash
40dee76c57 kernel: Migrate logging macros to fmt-compatible ones 2018-04-25 20:32:09 -04:00
bunnei
23d68a07dc Merge pull request #390 from mailwl/pctl-module
Service/PCTL: convert to module, add services, stub
2018-04-25 15:51:43 -04:00
bunnei
60746e4e52 Merge pull request #397 from lioncash/core
core/memory: Move logging macros over to the new fmt-capable ones
2018-04-25 15:25:03 -04:00
Lioncash
6d00780045 core/memory: Amend address widths in asserts
Addresses are 64-bit; these formatting specifiers are simply holdovers from citra. Adjust them to the correct width.
2018-04-25 12:17:26 -04:00
Lioncash
59dae03dbe core/memory: Move logging macros over to new fmt-capable ones
While we're at it, correct addresses to print all 64 bits where applicable; the narrower specifiers were holdovers from citra.
2018-04-25 12:16:33 -04:00
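A minimal sketch of the width fix described in the two commits above, assuming the fmt-backed NGLOG macros seen in the diffs further down ultimately forward to fmt formatting; the sample value and the direct use of fmt::print are illustrative only:

    #include <cstdint>
    #include <fmt/format.h>

    int main() {
        std::uint64_t addr = 0x0000000123456789ULL;
        // A 32-bit printf-style "%08X" silently truncates the upper half of the address.
        fmt::print("truncated 32-bit view: 0x{:08X}\n", static_cast<std::uint32_t>(addr));
        // "{:016X}" prints the full 64-bit address, zero-padded to 16 hex digits.
        fmt::print("full 64-bit address : 0x{:016X}\n", addr);
    }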
bunnei
22420612db Merge pull request #394 from lioncash/video-core
video-core: Move logging macros over to new fmt-capable ones
2018-04-25 11:42:59 -04:00
Lioncash
b7551e457b video-core: Move logging macros over to new fmt-capable ones 2018-04-25 09:13:57 -04:00
mailwl
2ba4e2263c Service/PCTL: convert to module, add services, stub
PCTL::CreateServiceWithoutInitialize and IParentalControlService::Initialize, required by Kirby Star Allies
2018-04-25 09:02:27 +03:00
bunnei
ea3151f475 Merge pull request #388 from bunnei/refactor-rasterizer-cache
Refactor rasterizer cache
2018-04-24 23:22:24 -04:00
bunnei
6c9ca8cbca Merge pull request #393 from lioncash/loader
loader: Move old logging macros over to new fmt-capable ones
2018-04-24 22:44:18 -04:00
bunnei
c30cd898fc renderer_opengl: Use correct byte order for framebuffer pixel format ABGR8. 2018-04-24 22:31:46 -04:00
bunnei
f1a4a004fb gl_rasterizer_cache: Use CHAR_BIT for bpp conversions instead of 8. 2018-04-24 22:31:46 -04:00
bunnei
0a023cfb4f gl_rasterizer_cache: Use GPU PAGE_BITS/SIZE, not CPU. 2018-04-24 22:31:46 -04:00
bunnei
9022d926eb gl_rasterizer_cache: Use new logger. 2018-04-24 22:31:46 -04:00
bunnei
fbb3cd110c gl_rasterizer_cache: Add a function for finding framebuffer GPU address. 2018-04-24 22:31:46 -04:00
bunnei
bc0f1896fc gl_rasterizer_cache: Handle compressed texture sizes. 2018-04-24 22:31:46 -04:00
bunnei
4415e00181 gl_rasterizer_cache: Update to be based on GPU addresses, not CPU addresses. 2018-04-24 22:31:45 -04:00
Lioncash
cc2e14ec2a loader: Move old logging macros over to new fmt-capable ones 2018-04-24 20:22:32 -04:00
bunnei
10c6d89119 memory_manager: Add and implement CpuToGpuAddress. 2018-04-24 17:49:20 -04:00
bunnei
239ac8abe2 memory_manager: Make GpuToCpuAddress return an optional. 2018-04-24 17:49:19 -04:00
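A hedged sketch of what an optional-returning translation might look like; the type aliases and the extra parameter are hypothetical stand-ins for the real page-table lookup:

    #include <cstdint>
    #include <optional>

    using GPUVAddr = std::uint64_t; // illustrative aliases, not yuzu's definitions
    using VAddr = std::uint64_t;

    std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr, bool mapped) {
        if (!mapped)              // stand-in for checking the GPU page table
            return std::nullopt;  // callers can no longer mistake "unmapped" for address 0
        return VAddr{gpu_addr};   // placeholder identity translation
    }

    // Usage: if (auto cpu_addr = GpuToCpuAddress(addr, true)) { /* use *cpu_addr */ }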
bunnei
9e11a76e92 memory_manager: Use GPUVAddr, not PAddr, for GPU addresses. 2018-04-24 17:40:43 -04:00
bunnei
e8c2bb24b2 Merge pull request #386 from Subv/gpu_query
GPU: Added asserts to our code for handling the QUERY_GET GPU command.
2018-04-24 16:13:51 -04:00
bunnei
b7953d2ebf Merge pull request #392 from lioncash/log
service: Move logging macros over to the new fmt-compatible ones
2018-04-24 14:13:54 -04:00
Lioncash
d08cfb55fe service: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:32 -04:00
Lioncash
88eb612718 vi: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:31 -04:00
Lioncash
bd9c2aa51f time: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:31 -04:00
Lioncash
bfe49edb2a ssl: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:31 -04:00
Lioncash
82413a6c89 spl: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:31 -04:00
Lioncash
62c69f4a1e sockets: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:31 -04:00
Lioncash
2a3f3bf977 sm: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:31 -04:00
Lioncash
32ece18bb6 set: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:30 -04:00
Lioncash
13f9cf2bd0 pctl: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:30 -04:00
Lioncash
72b497e876 nvflinger: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:30 -04:00
Lioncash
285d8d8b7d nvdrv: Move logging macros over to new fmt-compatible ones 2018-04-24 12:01:27 -04:00
Lioncash
022fc59dcd ns: Move logging macros over to new fmt-compatible ones 2018-04-24 12:00:56 -04:00
Lioncash
47054327c2 nifm: Move logging macros over to new fmt-compatible ones 2018-04-24 12:00:56 -04:00
Lioncash
28b92db7fd nfp: Move logging macros over to new fmt-compatible ones 2018-04-24 12:00:56 -04:00
Lioncash
dabfd90dfe lm: Move logging macros over to new fmt-compatible ones 2018-04-24 12:00:56 -04:00
Lioncash
9cd7485cd7 hid: Move logging macros over to new fmt-compatible ones 2018-04-24 12:00:56 -04:00
Lioncash
8fc4003dab friend: Move logging macros over to new fmt-compatible ones 2018-04-24 12:00:56 -04:00
Lioncash
b5b613ea29 filesystem: Move logging macros over to new fmt-compatible ones 2018-04-24 12:00:52 -04:00
bunnei
299f943202 Merge pull request #391 from lioncash/video
renderer_opengl: Silence a -Wdangling-else warning in DrawScreenTriangles()
2018-04-24 11:54:44 -04:00
Lioncash
d1b23b2b51 renderer_opengl: Silence a -Wdangling-else warning in DrawScreenTriangles() 2018-04-24 11:13:08 -04:00
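An illustrative example of the kind of construct -Wdangling-else flags (not the exact yuzu code; the helper functions are hypothetical):

    void DrawRightEye();  // hypothetical helpers
    void DrawFallback();

    // Without braces, the "else" visually lines up with the outer "if" but actually binds
    // to the inner one, which is what the warning points out:
    //
    //   if (flipped)
    //       if (stereo)
    //           DrawRightEye();
    //   else
    //       DrawFallback();
    //
    // Braces make the intended pairing explicit and silence the warning:
    void Draw(bool flipped, bool stereo) {
        if (flipped) {
            if (stereo)
                DrawRightEye();
        } else {
            DrawFallback();
        }
    }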
Lioncash
c6a740d7c2 fatal: Move logging macros over to new fmt-compatible ones 2018-04-24 10:18:58 -04:00
Lioncash
8d32bf9a96 audio: Move logging macros over to new fmt-compatible ones 2018-04-24 10:18:09 -04:00
Lioncash
d652e41365 apm: Move logging macros over to new fmt-compatible ones 2018-04-24 10:16:03 -04:00
Lioncash
e74dbfc572 aoc: Move logging macros over to new fmt-compatible ones 2018-04-24 10:14:52 -04:00
Lioncash
5483c08b44 am: Move logging macros over to new fmt-compatible ones 2018-04-24 10:14:11 -04:00
Lioncash
f85d880ac6 acc: Move logging macros over to new fmt-compatible ones 2018-04-24 10:04:22 -04:00
bunnei
7495142688 Merge pull request #389 from mailwl/fs-renamefile
Service/FS: implement IFileSystem::RenameFile
2018-04-24 08:57:46 -04:00
mailwl
a0179e5ca5 Service/FS: implement IFileSystem::RenameFile 2018-04-24 10:56:05 +03:00
bunnei
07dc0bbf3e Merge pull request #379 from Subv/multi_buffers
GPU: Support multiple enabled vertex arrays.
2018-04-24 01:09:02 -04:00
Subv
f208953585 GPU: Added asserts to our code for handling the QUERY_GET GPU command.
This is based on research from nouveau. Many things are currently unknown and will require hwtests in the future.
This commit also stubs QueryMode::Write2 to do the same as Write. Nouveau code treats them interchangeably; it is currently unknown what the difference is.
2018-04-23 17:06:57 -05:00
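A hedged sketch of the QUERY_GET handling described above; the enum values, parameter types, and helper calls are illustrative, not yuzu's actual definitions:

    #include <cstdint>

    enum class QueryMode { Write, Sync, Write2 }; // illustrative; the real encodings differ

    void ProcessQueryGet(QueryMode mode, std::uint64_t query_address, std::uint64_t payload) {
        switch (mode) {
        case QueryMode::Write:
        case QueryMode::Write2: // stubbed to behave like Write; nouveau treats them the same
            // WriteQueryResult(query_address, payload);  // placeholder for the memory write
            break;
        default:
            // UNIMPLEMENTED_MSG("Unimplemented query mode");  // placeholder assert
            break;
        }
    }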
bunnei
0214351f4f Merge pull request #370 from Subv/sync_primitives
Kernel: Reworked the new kernel synchronization primitives.
2018-04-23 16:33:00 -04:00
bunnei
bf25299272 Merge pull request #384 from Subv/nvhost-remap
Nvdrv/nvhost-as-gpu: Implemented the ioctl REMAP command.
2018-04-23 15:23:55 -04:00
bunnei
d4f87e9af4 Merge pull request #385 from Subv/unimpl_ioctls
Nvdrv: Assert when receiving an unimplemented ioctl in the nv* handlers.
2018-04-23 15:22:49 -04:00
bunnei
3967f9c6ef Merge pull request #383 from Subv/gpu_mmu
GPU: Make the GPU virtual memory manager use 16 page bits and 10 pagetable bits.
2018-04-23 14:00:52 -04:00
bunnei
55d0b0609d Merge pull request #382 from Subv/a2rgb10_rt
GPU: Implement the RGB10_A2 RenderTarget format
2018-04-23 13:16:48 -04:00
Subv
9531a29283 GPU: Support multiple enabled vertex arrays.
The vertex arrays will be copied to the stream buffer one after the other, and the attributes will be set using the ARB_vertex_attrib_binding extension.

yuzu therefore now requires OpenGL 4.3 or the ARB_vertex_attrib_binding extension.
2018-04-23 11:34:50 -05:00
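A hedged sketch of the ARB_vertex_attrib_binding usage the commit describes (core in OpenGL 4.3): each vertex array gets its own binding slot, and attributes reference a binding index rather than whatever buffer was bound at pointer-setup time. The loader header and function/parameter names here are assumptions, not yuzu's code:

    #include <glad/glad.h>  // assumes an OpenGL 4.3 loader such as glad

    void SetupVertexArray(GLuint binding_index, GLuint stream_buffer, GLintptr offset,
                          GLsizei stride, GLuint attrib_index, GLint components, GLenum type) {
        // Describe the attribute layout once, independently of any buffer.
        glVertexAttribFormat(attrib_index, components, type, GL_FALSE, 0);
        // Route the attribute to a binding slot...
        glVertexAttribBinding(attrib_index, binding_index);
        // ...and attach the region of the stream buffer holding this vertex array to that slot.
        glBindVertexBuffer(binding_index, stream_buffer, offset, stride);
        glEnableVertexAttribArray(attrib_index);
    }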
Subv
46572d027d Kernel: Implemented mutex priority inheritance.
Verified with a hwtest and implemented based on reverse engineering.

Thread A's priority will get bumped to the highest priority among all the threads that are waiting for a mutex that A holds.
Once A releases the mutex and ownership is transferred to B, A's priority will return to normal and B's priority will be bumped.
2018-04-23 11:23:44 -05:00
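A minimal sketch of the inheritance rule described above: a thread's effective priority is the best (numerically lowest) priority among its own and those of every thread waiting on a mutex it holds. The types and fields are illustrative, not yuzu's:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Thread {
        std::uint32_t nominal_priority;
        std::uint32_t current_priority;
        std::vector<Thread*> mutex_waiters; // threads blocked on mutexes this thread holds

        void UpdatePriority() {
            std::uint32_t best = nominal_priority;
            for (const Thread* waiter : mutex_waiters)
                best = std::min(best, waiter->current_priority);
            current_priority = best; // bumped while holding, restored once ownership moves on
        }
    };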
Subv
0d6eafe11a NvDrv/nvhost-as-gpu: Ensure that the object passed to MapBufferEx has already been allocated.
Also added a consistency check and a comment for the case when the object id is different from its handle. The real nvservices doesn't make a distinction between ids and handles; each object gets a unique handle, which doubles as its id.
2018-04-23 11:21:46 -05:00
Subv
e4bd0bddea Nvdrv/nvhost-as-gpu: Implemented the ioctl REMAP command.
It takes a previously-reserved (AllocateSpace) GPU memory address and maps it to the address of the nvmap object passed to Remap.
2018-04-23 11:21:46 -05:00
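A heavily hedged pseudocode sketch of the Remap flow described above; the entry layout and helper calls are assumptions for illustration, not the real nvhost-as-gpu interface:

    #include <cstdint>

    struct RemapEntry {              // hypothetical ioctl entry
        std::uint64_t gpu_offset;    // GPU VA previously reserved via AllocateSpace
        std::uint32_t nvmap_handle;  // nvmap object whose backing memory gets mapped there
        std::uint64_t size;
    };

    void HandleRemap(const RemapEntry& entry) {
        // Placeholders standing in for the nvmap lookup and the GPU MMU mapping:
        // auto object = nvmap_dev->GetObject(entry.nvmap_handle);
        // memory_manager->MapBufferEx(object->address, entry.gpu_offset, entry.size);
        (void)entry;
    }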
Subv
e862c50a70 Nvdrv: Assert when receiving an unimplemented ioctl in the nv* handlers. 2018-04-23 11:13:53 -05:00
Subv
f823c1d599 GPU: Make the GPU virtual memory manager use 16 page bits and 10 page table bits.
Also removed some dead code and added memory map consistency asserts.
2018-04-23 10:57:12 -05:00
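The arithmetic behind that change: 16 page bits give 64 KiB GPU pages, and 10 page-table bits give 1024 entries per table. The constant names below are illustrative:

    #include <cstddef>

    constexpr std::size_t GPU_PAGE_BITS = 16;
    constexpr std::size_t GPU_PAGE_SIZE = std::size_t{1} << GPU_PAGE_BITS;     // 0x10000 = 64 KiB
    constexpr std::size_t PAGE_TABLE_BITS = 10;
    constexpr std::size_t PAGE_TABLE_SIZE = std::size_t{1} << PAGE_TABLE_BITS; // 1024 entries

    static_assert(GPU_PAGE_SIZE == 0x10000, "16 page bits correspond to 64 KiB pages");
    static_assert(PAGE_TABLE_SIZE == 1024, "10 page-table bits correspond to 1024 entries");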
Subv
010227e149 GPU: Implement the RGB10_A2 RenderTarget format; it will use the same format as the A2BGR10 texture format. 2018-04-23 10:50:28 -05:00
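A hedged sketch of the shared mapping this implies: both the RGB10_A2 render target and the A2BGR10 texture decode to the same 32-bit 2:10:10:10 layout, so one OpenGL format tuple can serve both. The exact tuple yuzu uses is an assumption here:

    #include <glad/glad.h>  // assumes an OpenGL loader such as glad

    struct FormatTuple {
        GLint internal_format;
        GLenum format;
        GLenum type;
    };

    constexpr FormatTuple abgr2_10_10_10 = {
        GL_RGB10_A2,                    // internal format
        GL_RGBA,                        // client data format
        GL_UNSIGNED_INT_2_10_10_10_REV, // component packing
    };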
bunnei
ebb8e06df0 Merge pull request #378 from Subv/a2bgr10
GPU: Implement the A2BGR10 texture format.
2018-04-21 21:43:17 -04:00
Subv
c079cf4eec GPU: Implement the A2BGR10 texture format. 2018-04-21 17:32:25 -05:00
bunnei
62937798a0 Merge pull request #377 from adityaruplaha/sdl2-fullscreen
SDL2: Implement fullscreen. (Original PR: citra-emu/citra#3607)
2018-04-21 13:53:55 -04:00
adityaruplaha
f48d5e4c4c SDL2: Implement fullscreen. (Original PR: citra-emu/citra#3607) 2018-04-21 13:24:33 +05:30
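A hedged sketch of fullscreen toggling with the standard SDL2 API; this is not necessarily the exact approach taken in the linked PR:

    #include <SDL.h>

    void ToggleFullscreen(SDL_Window* window, bool fullscreen) {
        // SDL_WINDOW_FULLSCREEN_DESKTOP switches to a borderless window at desktop resolution;
        // passing 0 returns to windowed mode.
        SDL_SetWindowFullscreen(window, fullscreen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
    }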
bunnei
f8764bb5d3 Merge pull request #376 from bunnei/shader-decoder
Shader opcode decoding
2018-04-21 00:04:51 -04:00
bunnei
f8a037ead4 Merge pull request #375 from lioncash/header
opengl: Remove unnecessary header inclusions
2018-04-20 23:08:47 -04:00
Subv
a70ed9c8ae Kernel: Use 0x2C as default main thread priority for homebrew and lone NRO/NSOs 2018-04-20 21:04:35 -05:00
Subv
013778aa21 Qt: Update the WaitTree widget to show info about the current mutex of each thread. 2018-04-20 21:04:34 -05:00
Subv
be155f4d9d Kernel: Remove unused ConditionVariable class. 2018-04-20 21:04:33 -05:00
Subv
5fdfbfe25a Kernel: Remove old and unused Mutex code. 2018-04-20 21:04:32 -05:00
Subv
b18ccf9399 Kernel: Properly implemented svcWaitProcessWideKey and svcSignalProcessWideKey
They work in tandem with guest code and svcArbitrateLock/Unlock to provide synchronization primitives.
2018-04-20 21:04:27 -05:00
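A hedged sketch of how guest code is expected to combine these services, mirroring the pthread condition-variable pattern; the C++ wrapper signatures are assumptions, though the parameter order follows the svc handlers in the diffs below:

    #include <cstdint>

    using Handle = std::uint32_t;
    using VAddr = std::uint64_t;

    // Hypothetical thin wrappers over the supervisor calls:
    void svcArbitrateLock(Handle owner_handle, VAddr mutex_addr, Handle self_handle);
    void svcArbitrateUnlock(VAddr mutex_addr);
    void svcWaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condvar_addr, Handle self_handle,
                                     std::int64_t timeout_ns);
    void svcSignalProcessWideKey(VAddr condvar_addr, std::int32_t count);

    void CondvarWait(VAddr mutex_addr, VAddr condvar_addr, Handle self_handle) {
        // Atomically releases the held mutex, sleeps on the key, and re-acquires the mutex on
        // wakeup, analogous to pthread_cond_wait().
        svcWaitProcessWideKeyAtomic(mutex_addr, condvar_addr, self_handle, -1);
    }

    void CondvarNotifyOne(VAddr condvar_addr) {
        svcSignalProcessWideKey(condvar_addr, 1); // a count of -1 wakes every waiter
    }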
Subv
e81a2080eb Kernel: Corrected the implementation of svcArbitrateLock and svcArbitrateUnlock.
Switch mutexes are no longer kernel objects, they are managed in userland and only use the kernel to handle the contention case.
Mutex addresses store a special flag value (0x40000000) to notify the guest code that there are still some threads waiting for the mutex to be released. This flag is updated when a thread calls ArbitrateUnlock.

TODO:
* Fix svcWaitProcessWideKey
* Fix svcSignalProcessWideKey
* Remove the Mutex class.
2018-04-20 21:04:25 -05:00
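A sketch of the guest mutex word itself, matching the GuestState bitfield and mask constants visible in the diffs below: bits 0-29 hold the owner's thread handle and bit 30 (0x40000000) is the has-waiters flag that ArbitrateUnlock maintains:

    #include <cstdint>

    constexpr std::uint32_t MutexHasWaitersFlag = 0x40000000;
    constexpr std::uint32_t MutexOwnerMask = 0xBFFFFFFF; // everything except the waiters bit

    constexpr std::uint32_t OwnerHandle(std::uint32_t mutex_word) {
        return mutex_word & MutexOwnerMask; // 0 means the mutex is currently free
    }

    constexpr bool HasWaiters(std::uint32_t mutex_word) {
        return (mutex_word & MutexHasWaitersFlag) != 0;
    }

    static_assert(!HasWaiters(0), "an all-zero word is a free, uncontended mutex");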
Lioncash
eafdcc1b8a opengl: Remove unnecessary header inclusions 2018-04-20 20:19:37 -04:00
96 changed files with 1477 additions and 1304 deletions

View File

@@ -42,8 +42,6 @@ add_library(core STATIC
hle/kernel/client_port.h
hle/kernel/client_session.cpp
hle/kernel/client_session.h
hle/kernel/condition_variable.cpp
hle/kernel/condition_variable.h
hle/kernel/errors.h
hle/kernel/event.cpp
hle/kernel/event.h
@@ -183,10 +181,10 @@ add_library(core STATIC
hle/service/nvflinger/buffer_queue.h
hle/service/nvflinger/nvflinger.cpp
hle/service/nvflinger/nvflinger.h
hle/service/pctl/module.cpp
hle/service/pctl/module.h
hle/service/pctl/pctl.cpp
hle/service/pctl/pctl.h
hle/service/pctl/pctl_a.cpp
hle/service/pctl/pctl_a.h
hle/service/service.cpp
hle/service/service.h
hle/service/set/set.cpp

View File

@@ -67,10 +67,16 @@ ResultCode Disk_FileSystem::DeleteFile(const std::string& path) const {
return RESULT_SUCCESS;
}
ResultCode Disk_FileSystem::RenameFile(const Path& src_path, const Path& dest_path) const {
LOG_WARNING(Service_FS, "(STUBBED) called");
ResultCode Disk_FileSystem::RenameFile(const std::string& src_path,
const std::string& dest_path) const {
const std::string full_src_path = base_directory + src_path;
const std::string full_dest_path = base_directory + dest_path;
if (!FileUtil::Exists(full_src_path)) {
return ERROR_PATH_NOT_FOUND;
}
// TODO(wwylele): Use correct error code
return ResultCode(-1);
return FileUtil::Rename(full_src_path, full_dest_path) ? RESULT_SUCCESS : ResultCode(-1);
}
ResultCode Disk_FileSystem::DeleteDirectory(const Path& path) const {

View File

@@ -26,7 +26,7 @@ public:
ResultVal<std::unique_ptr<StorageBackend>> OpenFile(const std::string& path,
Mode mode) const override;
ResultCode DeleteFile(const std::string& path) const override;
ResultCode RenameFile(const Path& src_path, const Path& dest_path) const override;
ResultCode RenameFile(const std::string& src_path, const std::string& dest_path) const override;
ResultCode DeleteDirectory(const Path& path) const override;
ResultCode DeleteDirectoryRecursively(const Path& path) const override;
ResultCode CreateFile(const std::string& path, u64 size) const override;

View File

@@ -126,7 +126,8 @@ public:
* @param dest_path Destination path relative to the archive
* @return Result of the operation
*/
virtual ResultCode RenameFile(const Path& src_path, const Path& dest_path) const = 0;
virtual ResultCode RenameFile(const std::string& src_path,
const std::string& dest_path) const = 0;
/**
* Rename a Directory specified by its path

View File

@@ -27,7 +27,8 @@ ResultCode RomFS_FileSystem::DeleteFile(const std::string& path) const {
return ResultCode(-1);
}
ResultCode RomFS_FileSystem::RenameFile(const Path& src_path, const Path& dest_path) const {
ResultCode RomFS_FileSystem::RenameFile(const std::string& src_path,
const std::string& dest_path) const {
LOG_CRITICAL(Service_FS, "Attempted to rename a file within an ROMFS archive (%s).",
GetName().c_str());
// TODO(wwylele): Use correct error code

View File

@@ -32,7 +32,7 @@ public:
ResultVal<std::unique_ptr<StorageBackend>> OpenFile(const std::string& path,
Mode mode) const override;
ResultCode DeleteFile(const std::string& path) const override;
ResultCode RenameFile(const Path& src_path, const Path& dest_path) const override;
ResultCode RenameFile(const std::string& src_path, const std::string& dest_path) const override;
ResultCode DeleteDirectory(const Path& path) const override;
ResultCode DeleteDirectoryRecursively(const Path& path) const override;
ResultCode CreateFile(const std::string& path, u64 size) const override;

View File

@@ -1,64 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "core/hle/kernel/condition_variable.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object_address_table.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
ConditionVariable::ConditionVariable() {}
ConditionVariable::~ConditionVariable() {}
ResultVal<SharedPtr<ConditionVariable>> ConditionVariable::Create(VAddr guest_addr,
std::string name) {
SharedPtr<ConditionVariable> condition_variable(new ConditionVariable);
condition_variable->name = std::move(name);
condition_variable->guest_addr = guest_addr;
condition_variable->mutex_addr = 0;
// Condition variables are referenced by guest address, so track this in the kernel
g_object_address_table.Insert(guest_addr, condition_variable);
return MakeResult<SharedPtr<ConditionVariable>>(std::move(condition_variable));
}
bool ConditionVariable::ShouldWait(Thread* thread) const {
return GetAvailableCount() <= 0;
}
void ConditionVariable::Acquire(Thread* thread) {
if (GetAvailableCount() <= 0)
return;
SetAvailableCount(GetAvailableCount() - 1);
}
ResultCode ConditionVariable::Release(s32 target) {
if (target == -1) {
// When -1, wake up all waiting threads
SetAvailableCount(static_cast<s32>(GetWaitingThreads().size()));
WakeupAllWaitingThreads();
} else {
// Otherwise, wake up just a single thread
SetAvailableCount(target);
WakeupWaitingThread(GetHighestPriorityReadyThread());
}
return RESULT_SUCCESS;
}
s32 ConditionVariable::GetAvailableCount() const {
return Memory::Read32(guest_addr);
}
void ConditionVariable::SetAvailableCount(s32 value) const {
Memory::Write32(guest_addr, value);
}
} // namespace Kernel

View File

@@ -1,63 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <queue>
#include "common/common_types.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/result.h"
namespace Kernel {
class ConditionVariable final : public WaitObject {
public:
/**
* Creates a condition variable.
* @param guest_addr Address of the object tracking the condition variable in guest memory. If
* specified, this condition variable will update the guest object when its state changes.
* @param name Optional name of condition variable.
* @return The created condition variable.
*/
static ResultVal<SharedPtr<ConditionVariable>> Create(VAddr guest_addr,
std::string name = "Unknown");
std::string GetTypeName() const override {
return "ConditionVariable";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::ConditionVariable;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
s32 GetAvailableCount() const;
void SetAvailableCount(s32 value) const;
std::string name; ///< Name of condition variable (optional)
VAddr guest_addr; ///< Address of the guest condition variable value
VAddr mutex_addr; ///< (optional) Address of guest mutex value associated with this condition
///< variable, used for implementing events
bool ShouldWait(Thread* thread) const override;
void Acquire(Thread* thread) override;
/**
* Releases a slot from a condition variable.
* @param target The number of threads to wakeup, -1 is all.
* @return ResultCode indicating if the operation succeeded.
*/
ResultCode Release(s32 target);
private:
ConditionVariable();
~ConditionVariable() override;
};
} // namespace Kernel

View File

@@ -20,6 +20,7 @@ enum {
MaxConnectionsReached = 52,
// Confirmed Switch OS error codes
MisalignedAddress = 102,
InvalidHandle = 114,
Timeout = 117,
SynchronizationCanceled = 118,

View File

@@ -26,7 +26,7 @@ ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) {
u16 slot = next_free_slot;
if (slot >= generations.size()) {
LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
NGLOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
return ERR_OUT_OF_HANDLES;
}
next_free_slot = generations[slot];
@@ -48,7 +48,7 @@ ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) {
ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
SharedPtr<Object> object = GetGeneric(handle);
if (object == nullptr) {
LOG_ERROR(Kernel, "Tried to duplicate invalid handle: %08X", handle);
NGLOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
return ERR_INVALID_HANDLE;
}
return Create(std::move(object));

View File

@@ -118,7 +118,7 @@ void HLERequestContext::ParseCommandBuffer(u32_le* src_cmdbuf, bool incoming) {
std::make_shared<IPC::DomainMessageHeader>(rp.PopRaw<IPC::DomainMessageHeader>());
} else {
if (Session()->IsDomain())
LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
NGLOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
}
}
@@ -270,7 +270,8 @@ size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size) const {
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[0].Size()};
const size_t buffer_size{GetWriteBufferSize()};
if (size > buffer_size) {
LOG_CRITICAL(Core, "size (%016zx) is greater than buffer_size (%016zx)", size, buffer_size);
NGLOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
buffer_size);
size = buffer_size; // TODO(bunnei): This needs to be HW tested
}

View File

@@ -18,12 +18,10 @@ using Handle = u32;
enum class HandleType : u32 {
Unknown,
Event,
Mutex,
SharedMemory,
Thread,
Process,
AddressArbiter,
ConditionVariable,
Timer,
ResourceLimit,
CodeSet,
@@ -63,9 +61,7 @@ public:
bool IsWaitable() const {
switch (GetHandleType()) {
case HandleType::Event:
case HandleType::Mutex:
case HandleType::Thread:
case HandleType::ConditionVariable:
case HandleType::Timer:
case HandleType::ServerPort:
case HandleType::ServerSession:

View File

@@ -7,6 +7,7 @@
#include <boost/range/algorithm_ext/erase.hpp>
#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
@@ -15,124 +16,120 @@
namespace Kernel {
void ReleaseThreadMutexes(Thread* thread) {
for (auto& mtx : thread->held_mutexes) {
mtx->SetHasWaiters(false);
mtx->SetHoldingThread(nullptr);
mtx->WakeupAllWaitingThreads();
}
thread->held_mutexes.clear();
}
/// Returns the number of threads that are waiting for a mutex, and the highest priority one among
/// those.
static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
SharedPtr<Thread> current_thread, VAddr mutex_addr) {
Mutex::Mutex() {}
Mutex::~Mutex() {}
SharedPtr<Thread> highest_priority_thread;
u32 num_waiters = 0;
SharedPtr<Mutex> Mutex::Create(SharedPtr<Kernel::Thread> holding_thread, VAddr guest_addr,
std::string name) {
SharedPtr<Mutex> mutex(new Mutex);
for (auto& thread : current_thread->wait_mutex_threads) {
if (thread->mutex_wait_address != mutex_addr)
continue;
mutex->guest_addr = guest_addr;
mutex->name = std::move(name);
ASSERT(thread->status == THREADSTATUS_WAIT_MUTEX);
// If mutex was initialized with a holding thread, acquire it by the holding thread
if (holding_thread) {
mutex->Acquire(holding_thread.get());
++num_waiters;
if (highest_priority_thread == nullptr ||
thread->GetPriority() < highest_priority_thread->GetPriority()) {
highest_priority_thread = thread;
}
}
// Mutexes are referenced by guest address, so track this in the kernel
g_object_address_table.Insert(guest_addr, mutex);
return mutex;
return {highest_priority_thread, num_waiters};
}
bool Mutex::ShouldWait(Thread* thread) const {
auto holding_thread = GetHoldingThread();
return holding_thread != nullptr && thread != holding_thread;
/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_thread,
SharedPtr<Thread> new_owner) {
auto threads = current_thread->wait_mutex_threads;
for (auto& thread : threads) {
if (thread->mutex_wait_address != mutex_addr)
continue;
ASSERT(thread->lock_owner == current_thread);
current_thread->RemoveMutexWaiter(thread);
if (new_owner != thread)
new_owner->AddMutexWaiter(thread);
}
}
void Mutex::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
Handle requesting_thread_handle) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
return ResultCode(ErrorModule::Kernel, ErrCodes::MisalignedAddress);
}
priority = thread->current_priority;
thread->held_mutexes.insert(this);
SetHoldingThread(thread);
thread->UpdatePriority();
Core::System::GetInstance().PrepareReschedule();
}
SharedPtr<Thread> holding_thread = g_handle_table.Get<Thread>(holding_thread_handle);
SharedPtr<Thread> requesting_thread = g_handle_table.Get<Thread>(requesting_thread_handle);
ResultCode Mutex::Release(Thread* thread) {
auto holding_thread = GetHoldingThread();
ASSERT(holding_thread);
// TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another
// thread.
ASSERT(requesting_thread == GetCurrentThread());
// We can only release the mutex if it's held by the calling thread.
ASSERT(thread == holding_thread);
u32 addr_value = Memory::Read32(address);
// If the mutex isn't being held, just return success.
if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
return RESULT_SUCCESS;
}
if (holding_thread == nullptr)
return ERR_INVALID_HANDLE;
// Wait until the mutex is released
GetCurrentThread()->mutex_wait_address = address;
GetCurrentThread()->wait_handle = requesting_thread_handle;
GetCurrentThread()->status = THREADSTATUS_WAIT_MUTEX;
GetCurrentThread()->wakeup_callback = nullptr;
// Update the lock holder thread's priority to prevent priority inversion.
holding_thread->AddMutexWaiter(GetCurrentThread());
holding_thread->held_mutexes.erase(this);
holding_thread->UpdatePriority();
SetHoldingThread(nullptr);
SetHasWaiters(!GetWaitingThreads().empty());
WakeupAllWaitingThreads();
Core::System::GetInstance().PrepareReschedule();
return RESULT_SUCCESS;
}
void Mutex::AddWaitingThread(SharedPtr<Thread> thread) {
WaitObject::AddWaitingThread(thread);
thread->pending_mutexes.insert(this);
SetHasWaiters(true);
UpdatePriority();
}
void Mutex::RemoveWaitingThread(Thread* thread) {
WaitObject::RemoveWaitingThread(thread);
thread->pending_mutexes.erase(this);
if (!GetHasWaiters())
SetHasWaiters(!GetWaitingThreads().empty());
UpdatePriority();
}
void Mutex::UpdatePriority() {
if (!GetHoldingThread())
return;
u32 best_priority = THREADPRIO_LOWEST;
for (auto& waiter : GetWaitingThreads()) {
if (waiter->current_priority < best_priority)
best_priority = waiter->current_priority;
ResultCode Mutex::Release(VAddr address) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
return ResultCode(ErrorModule::Kernel, ErrCodes::MisalignedAddress);
}
if (best_priority != priority) {
priority = best_priority;
GetHoldingThread()->UpdatePriority();
auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address);
// There are no more threads waiting for the mutex, release it completely.
if (thread == nullptr) {
ASSERT(GetCurrentThread()->wait_mutex_threads.empty());
Memory::Write32(address, 0);
return RESULT_SUCCESS;
}
}
Handle Mutex::GetOwnerHandle() const {
GuestState guest_state{Memory::Read32(guest_addr)};
return guest_state.holding_thread_handle;
}
// Transfer the ownership of the mutex from the previous owner to the new one.
TransferMutexOwnership(address, GetCurrentThread(), thread);
SharedPtr<Thread> Mutex::GetHoldingThread() const {
GuestState guest_state{Memory::Read32(guest_addr)};
return g_handle_table.Get<Thread>(guest_state.holding_thread_handle);
}
u32 mutex_value = thread->wait_handle;
void Mutex::SetHoldingThread(SharedPtr<Thread> thread) {
GuestState guest_state{Memory::Read32(guest_addr)};
guest_state.holding_thread_handle.Assign(thread ? thread->guest_handle : 0);
Memory::Write32(guest_addr, guest_state.raw);
}
if (num_waiters >= 2) {
// Notify the guest that there are still some threads waiting for the mutex
mutex_value |= Mutex::MutexHasWaitersFlag;
}
bool Mutex::GetHasWaiters() const {
GuestState guest_state{Memory::Read32(guest_addr)};
return guest_state.has_waiters != 0;
}
// Grant the mutex to the next waiting thread and resume it.
Memory::Write32(address, mutex_value);
void Mutex::SetHasWaiters(bool has_waiters) {
GuestState guest_state{Memory::Read32(guest_addr)};
guest_state.has_waiters.Assign(has_waiters ? 1 : 0);
Memory::Write32(guest_addr, guest_state.raw);
}
ASSERT(thread->status == THREADSTATUS_WAIT_MUTEX);
thread->ResumeFromWait();
thread->lock_owner = nullptr;
thread->condvar_wait_address = 0;
thread->mutex_wait_address = 0;
thread->wait_handle = 0;
return RESULT_SUCCESS;
}
} // namespace Kernel

View File

@@ -15,87 +15,23 @@ namespace Kernel {
class Thread;
class Mutex final : public WaitObject {
class Mutex final {
public:
/**
* Creates a mutex.
* @param holding_thread Specifies a thread already holding the mutex. If not nullptr, this
* thread will acquire the mutex.
* @param guest_addr Address of the object tracking the mutex in guest memory. If specified,
* this mutex will update the guest object when its state changes.
* @param name Optional name of mutex
* @return Pointer to new Mutex object
*/
static SharedPtr<Mutex> Create(SharedPtr<Kernel::Thread> holding_thread, VAddr guest_addr = 0,
std::string name = "Unknown");
/// Flag that indicates that a mutex still has threads waiting for it.
static constexpr u32 MutexHasWaitersFlag = 0x40000000;
/// Mask of the bits in a mutex address value that contain the mutex owner.
static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
std::string GetTypeName() const override {
return "Mutex";
}
std::string GetName() const override {
return name;
}
/// Attempts to acquire a mutex at the specified address.
static ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
Handle requesting_thread_handle);
static const HandleType HANDLE_TYPE = HandleType::Mutex;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
u32 priority; ///< The priority of the mutex, used for priority inheritance.
std::string name; ///< Name of mutex (optional)
VAddr guest_addr; ///< Address of the guest mutex value
/**
* Elevate the mutex priority to the best priority
* among the priorities of all its waiting threads.
*/
void UpdatePriority();
bool ShouldWait(Thread* thread) const override;
void Acquire(Thread* thread) override;
void AddWaitingThread(SharedPtr<Thread> thread) override;
void RemoveWaitingThread(Thread* thread) override;
/**
* Attempts to release the mutex from the specified thread.
* @param thread Thread that wants to release the mutex.
* @returns The result code of the operation.
*/
ResultCode Release(Thread* thread);
/// Gets the handle to the holding process stored in the guest state.
Handle GetOwnerHandle() const;
/// Gets the Thread pointed to by the owner handle
SharedPtr<Thread> GetHoldingThread() const;
/// Sets the holding process handle in the guest state.
void SetHoldingThread(SharedPtr<Thread> thread);
/// Returns the has_waiters bit in the guest state.
bool GetHasWaiters() const;
/// Sets the has_waiters bit in the guest state.
void SetHasWaiters(bool has_waiters);
/// Releases the mutex at the specified address.
static ResultCode Release(VAddr address);
private:
Mutex();
~Mutex() override;
/// Object in guest memory used to track the mutex state
union GuestState {
u32_le raw;
/// Handle of the thread that currently holds the mutex, 0 if available
BitField<0, 30, u32_le> holding_thread_handle;
/// 1 when there are threads waiting for this mutex, otherwise 0
BitField<30, 1, u32_le> has_waiters;
};
static_assert(sizeof(GuestState) == 4, "GuestState size is incorrect");
Mutex() = default;
~Mutex() = default;
};
/**
* Releases all the mutexes held by the specified thread
* @param thread Thread that is holding the mutexes
*/
void ReleaseThreadMutexes(Thread* thread);
} // namespace Kernel

View File

@@ -54,7 +54,7 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
continue;
} else if ((type & 0xF00) == 0xE00) { // 0x0FFF
// Allowed interrupts list
LOG_WARNING(Loader, "ExHeader allowed interrupts list ignored");
NGLOG_WARNING(Loader, "ExHeader allowed interrupts list ignored");
} else if ((type & 0xF80) == 0xF00) { // 0x07FF
// Allowed syscalls mask
unsigned int index = ((descriptor >> 24) & 7) * 24;
@@ -74,7 +74,7 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
} else if ((type & 0xFFE) == 0xFF8) { // 0x001F
// Mapped memory range
if (i + 1 >= len || ((kernel_caps[i + 1] >> 20) & 0xFFE) != 0xFF8) {
LOG_WARNING(Loader, "Incomplete exheader memory range descriptor ignored.");
NGLOG_WARNING(Loader, "Incomplete exheader memory range descriptor ignored.");
continue;
}
u32 end_desc = kernel_caps[i + 1];
@@ -109,9 +109,9 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
int minor = kernel_version & 0xFF;
int major = (kernel_version >> 8) & 0xFF;
LOG_INFO(Loader, "ExHeader kernel version: %d.%d", major, minor);
NGLOG_INFO(Loader, "ExHeader kernel version: {}.{}", major, minor);
} else {
LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x%08X", descriptor);
NGLOG_ERROR(Loader, "Unhandled kernel caps descriptor: {:#010X}", descriptor);
}
}
}

View File

@@ -29,7 +29,7 @@ SharedPtr<ResourceLimit> ResourceLimit::GetForCategory(ResourceLimitCategory cat
case ResourceLimitCategory::OTHER:
return resource_limits[static_cast<u8>(category)];
default:
LOG_CRITICAL(Kernel, "Unknown resource limit category");
NGLOG_CRITICAL(Kernel, "Unknown resource limit category");
UNREACHABLE();
}
}
@@ -55,7 +55,7 @@ s32 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
case ResourceType::CPUTime:
return current_cpu_time;
default:
LOG_ERROR(Kernel, "Unknown resource type=%08X", static_cast<u32>(resource));
NGLOG_ERROR(Kernel, "Unknown resource type={:08X}", static_cast<u32>(resource));
UNIMPLEMENTED();
return 0;
}
@@ -84,7 +84,7 @@ u32 ResourceLimit::GetMaxResourceValue(ResourceType resource) const {
case ResourceType::CPUTime:
return max_cpu_time;
default:
LOG_ERROR(Kernel, "Unknown resource type=%08X", static_cast<u32>(resource));
NGLOG_ERROR(Kernel, "Unknown resource type={:08X}", static_cast<u32>(resource));
UNIMPLEMENTED();
return 0;
}

View File

@@ -94,11 +94,11 @@ void Scheduler::Reschedule() {
Thread* next = PopNextReadyThread();
if (cur && next) {
LOG_TRACE(Kernel, "context switch %u -> %u", cur->GetObjectId(), next->GetObjectId());
NGLOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
} else if (cur) {
LOG_TRACE(Kernel, "context switch %u -> idle", cur->GetObjectId());
NGLOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
} else if (next) {
LOG_TRACE(Kernel, "context switch idle -> %u", next->GetObjectId());
NGLOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
}
SwitchContext(next);

View File

@@ -68,7 +68,7 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
return domain_request_handlers[object_id - 1]->HandleSyncRequest(context);
case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x%08X", object_id);
NGLOG_DEBUG(IPC, "CloseVirtualHandle, object_id={:#010X}", object_id);
domain_request_handlers[object_id - 1] = nullptr;
@@ -78,8 +78,8 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
}
}
LOG_CRITICAL(IPC, "Unknown domain command=%d",
static_cast<int>(domain_message_header->command.Value()));
NGLOG_CRITICAL(IPC, "Unknown domain command={}",
static_cast<int>(domain_message_header->command.Value()));
ASSERT(false);
}

View File

@@ -107,16 +107,16 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
// Error out if the requested permissions don't match what the creator process allows.
if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) {
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%lx name=%s, permissions don't match",
GetObjectId(), address, name.c_str());
NGLOG_ERROR(Kernel, "cannot map id={}, address={:#X} name={}, permissions don't match",
GetObjectId(), address, name);
return ERR_INVALID_COMBINATION;
}
// Error out if the provided permissions are not compatible with what the creator process needs.
if (other_permissions != MemoryPermission::DontCare &&
static_cast<u32>(this->permissions) & ~static_cast<u32>(other_permissions)) {
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%lx name=%s, permissions don't match",
GetObjectId(), address, name.c_str());
NGLOG_ERROR(Kernel, "cannot map id={}, address={:#X} name={}, permissions don't match",
GetObjectId(), address, name);
return ERR_WRONG_PERMISSION;
}
@@ -131,9 +131,10 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
auto result = target_process->vm_manager.MapMemoryBlock(
target_address, backing_block, backing_block_offset, size, MemoryState::Shared);
if (result.Failed()) {
LOG_ERROR(Kernel,
"cannot map id=%u, target_address=0x%lx name=%s, error mapping to virtual memory",
GetObjectId(), target_address, name.c_str());
NGLOG_ERROR(
Kernel,
"cannot map id={}, target_address={:#X} name={}, error mapping to virtual memory",
GetObjectId(), target_address, name);
return result.Code();
}
@@ -151,7 +152,7 @@ VMAPermission SharedMemory::ConvertPermissions(MemoryPermission permission) {
u32 masked_permissions =
static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute);
return static_cast<VMAPermission>(masked_permissions);
};
}
u8* SharedMemory::GetPointer(u32 offset) {
return backing_block->data() + backing_block_offset + offset;

View File

@@ -13,7 +13,6 @@
#include "core/core_timing.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/condition_variable.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/mutex.h"
@@ -32,7 +31,7 @@ namespace Kernel {
/// Set the process heap to a given Size. It can both extend and shrink the heap.
static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
LOG_TRACE(Kernel_SVC, "called, heap_size=0x%llx", heap_size);
NGLOG_TRACE(Kernel_SVC, "called, heap_size={:#X}", heap_size);
auto& process = *Core::CurrentProcess();
CASCADE_RESULT(*heap_addr,
process.HeapAllocate(Memory::HEAP_VADDR, heap_size, VMAPermission::ReadWrite));
@@ -40,21 +39,21 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
}
static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state1) {
LOG_WARNING(Kernel_SVC, "(STUBBED) called, addr=0x%lx", addr);
NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, addr={:#X}", addr);
return RESULT_SUCCESS;
}
/// Maps a memory range into a different range.
static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x%llx, src_addr=0x%llx, size=0x%llx", dst_addr,
src_addr, size);
NGLOG_TRACE(Kernel_SVC, "called, dst_addr={:#X}, src_addr={:#X}, size={:#X}", dst_addr,
src_addr, size);
return Core::CurrentProcess()->MirrorMemory(dst_addr, src_addr, size);
}
/// Unmaps a region that was previously mapped with svcMapMemory
static ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x%llx, src_addr=0x%llx, size=0x%llx", dst_addr,
src_addr, size);
NGLOG_TRACE(Kernel_SVC, "called, dst_addr={:#X}, src_addr={:#X}, size={:#X}", dst_addr,
src_addr, size);
return Core::CurrentProcess()->UnmapMemory(dst_addr, src_addr, size);
}
@@ -69,11 +68,11 @@ static ResultCode ConnectToNamedPort(Handle* out_handle, VAddr port_name_address
if (port_name.size() > PortNameMaxLength)
return ERR_PORT_NAME_TOO_LONG;
LOG_TRACE(Kernel_SVC, "called port_name=%s", port_name.c_str());
NGLOG_TRACE(Kernel_SVC, "called port_name={}", port_name);
auto it = Service::g_kernel_named_ports.find(port_name);
if (it == Service::g_kernel_named_ports.end()) {
LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: %s", port_name.c_str());
NGLOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
return ERR_NOT_FOUND;
}
@@ -91,11 +90,11 @@ static ResultCode ConnectToNamedPort(Handle* out_handle, VAddr port_name_address
static ResultCode SendSyncRequest(Handle handle) {
SharedPtr<ClientSession> session = g_handle_table.Get<ClientSession>(handle);
if (!session) {
LOG_ERROR(Kernel_SVC, "called with invalid handle=0x%08X", handle);
NGLOG_ERROR(Kernel_SVC, "called with invalid handle={:#010X}", handle);
return ERR_INVALID_HANDLE;
}
LOG_TRACE(Kernel_SVC, "called handle=0x%08X(%s)", handle, session->GetName().c_str());
NGLOG_TRACE(Kernel_SVC, "called handle={:#010X}({})", handle, session->GetName());
Core::System::GetInstance().PrepareReschedule();
@@ -106,7 +105,7 @@ static ResultCode SendSyncRequest(Handle handle) {
/// Get the ID for the specified thread.
static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) {
LOG_TRACE(Kernel_SVC, "called thread=0x%08X", thread_handle);
NGLOG_TRACE(Kernel_SVC, "called thread={:#010X}", thread_handle);
const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
if (!thread) {
@@ -119,7 +118,7 @@ static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) {
/// Get the ID of the specified process
static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
LOG_TRACE(Kernel_SVC, "called process=0x%08X", process_handle);
NGLOG_TRACE(Kernel_SVC, "called process={:#010X}", process_handle);
const SharedPtr<Process> process = g_handle_table.Get<Process>(process_handle);
if (!process) {
@@ -179,8 +178,8 @@ static ResultCode WaitSynchronization1(
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64 handle_count,
s64 nano_seconds) {
LOG_TRACE(Kernel_SVC, "called handles_address=0x%llx, handle_count=%d, nano_seconds=%d",
handles_address, handle_count, nano_seconds);
NGLOG_TRACE(Kernel_SVC, "called handles_address={:#X}, handle_count={}, nano_seconds={}",
handles_address, handle_count, nano_seconds);
if (!Memory::IsValidVirtualAddress(handles_address))
return ERR_INVALID_POINTER;
@@ -240,7 +239,7 @@ static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64
/// Resumes a thread waiting on WaitSynchronization
static ResultCode CancelSynchronization(Handle thread_handle) {
LOG_TRACE(Kernel_SVC, "called thread=0x%08X", thread_handle);
NGLOG_TRACE(Kernel_SVC, "called thread={:#X}", thread_handle);
const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
if (!thread) {
@@ -257,56 +256,38 @@ static ResultCode CancelSynchronization(Handle thread_handle) {
/// Attempts to locks a mutex, creating it if it does not already exist
static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
Handle requesting_thread_handle) {
LOG_TRACE(Kernel_SVC,
"called holding_thread_handle=0x%08X, mutex_addr=0x%llx, "
"requesting_current_thread_handle=0x%08X",
holding_thread_handle, mutex_addr, requesting_thread_handle);
NGLOG_TRACE(Kernel_SVC,
"called holding_thread_handle={:#010X}, mutex_addr={:#X}, "
"requesting_current_thread_handle={:#010X}",
holding_thread_handle, mutex_addr, requesting_thread_handle);
SharedPtr<Thread> holding_thread = g_handle_table.Get<Thread>(holding_thread_handle);
SharedPtr<Thread> requesting_thread = g_handle_table.Get<Thread>(requesting_thread_handle);
ASSERT(requesting_thread);
ASSERT(requesting_thread == GetCurrentThread());
SharedPtr<Mutex> mutex = g_object_address_table.Get<Mutex>(mutex_addr);
if (!mutex) {
// Create a new mutex for the specified address if one does not already exist
mutex = Mutex::Create(holding_thread, mutex_addr);
mutex->name = Common::StringFromFormat("mutex-%llx", mutex_addr);
}
ASSERT(holding_thread == mutex->GetHoldingThread());
return WaitSynchronization1(mutex, requesting_thread.get());
return Mutex::TryAcquire(mutex_addr, holding_thread_handle, requesting_thread_handle);
}
/// Unlock a mutex
static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
LOG_TRACE(Kernel_SVC, "called mutex_addr=0x%llx", mutex_addr);
NGLOG_TRACE(Kernel_SVC, "called mutex_addr={:#X}", mutex_addr);
SharedPtr<Mutex> mutex = g_object_address_table.Get<Mutex>(mutex_addr);
ASSERT(mutex);
return mutex->Release(GetCurrentThread());
return Mutex::Release(mutex_addr);
}
/// Break program execution
static void Break(u64 unk_0, u64 unk_1, u64 unk_2) {
LOG_CRITICAL(Debug_Emulated, "Emulated program broke execution!");
NGLOG_CRITICAL(Debug_Emulated, "Emulated program broke execution!");
ASSERT(false);
}
/// Used to output a message on a debug hardware unit - does nothing on a retail unit
static void OutputDebugString(VAddr address, s32 len) {
std::vector<char> string(len);
Memory::ReadBlock(address, string.data(), len);
LOG_DEBUG(Debug_Emulated, "%.*s", len, string.data());
std::string str(len, '\0');
Memory::ReadBlock(address, str.data(), str.size());
NGLOG_DEBUG(Debug_Emulated, "{}", str);
}
/// Gets system/memory information for the current process
static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) {
LOG_TRACE(Kernel_SVC, "called info_id=0x%X, info_sub_id=0x%X, handle=0x%08X", info_id,
info_sub_id, handle);
NGLOG_TRACE(Kernel_SVC, "called info_id={:#X}, info_sub_id={:#X}, handle={:#010X}", info_id,
info_sub_id, handle);
auto& vm_manager = Core::CurrentProcess()->vm_manager;
@@ -357,12 +338,12 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
*result = Core::CurrentProcess()->is_virtual_address_memory_enabled;
break;
case GetInfoType::TitleId:
LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query titleid, returned 0");
NGLOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query titleid, returned 0");
*result = 0;
break;
case GetInfoType::PrivilegedProcessId:
LOG_WARNING(Kernel_SVC,
"(STUBBED) Attempted to query priviledged process id bounds, returned 0");
NGLOG_WARNING(Kernel_SVC,
"(STUBBED) Attempted to query privileged process id bounds, returned 0");
*result = 0;
break;
default:
@@ -374,13 +355,14 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
/// Sets the thread activity
static ResultCode SetThreadActivity(Handle handle, u32 unknown) {
LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x%08X, unknown=0x%08X", handle, unknown);
NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, handle={:#010X}, unknown={:#010X}", handle,
unknown);
return RESULT_SUCCESS;
}
/// Gets the thread context
static ResultCode GetThreadContext(Handle handle, VAddr addr) {
LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x%08X, addr=0x%" PRIx64, handle, addr);
NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, handle={:#010X}, addr={:#X}", handle, addr);
return RESULT_SUCCESS;
}
@@ -412,11 +394,6 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {
}
thread->SetPriority(priority);
thread->UpdatePriority();
// Update the mutexes that this thread is waiting for
for (auto& mutex : thread->pending_mutexes)
mutex->UpdatePriority();
Core::System::GetInstance().PrepareReschedule();
return RESULT_SUCCESS;
@@ -424,15 +401,15 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {
/// Get which CPU core is executing the current thread
static u32 GetCurrentProcessorNumber() {
LOG_WARNING(Kernel_SVC, "(STUBBED) called, defaulting to processor 0");
NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, defaulting to processor 0");
return 0;
}
static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size,
u32 permissions) {
LOG_TRACE(Kernel_SVC,
"called, shared_memory_handle=0x%08X, addr=0x%llx, size=0x%llx, permissions=0x%08X",
shared_memory_handle, addr, size, permissions);
NGLOG_TRACE(Kernel_SVC,
"called, shared_memory_handle={:#X}, addr={:#X}, size={:#X}, permissions={:#010X}",
shared_memory_handle, addr, size, permissions);
SharedPtr<SharedMemory> shared_memory = g_handle_table.Get<SharedMemory>(shared_memory_handle);
if (!shared_memory) {
@@ -452,16 +429,15 @@ static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 s
return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
MemoryPermission::DontCare);
default:
LOG_ERROR(Kernel_SVC, "unknown permissions=0x%08X", permissions);
NGLOG_ERROR(Kernel_SVC, "unknown permissions={:#010X}", permissions);
}
return RESULT_SUCCESS;
}
static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size) {
LOG_WARNING(Kernel_SVC,
"called, shared_memory_handle=0x%08X, addr=0x%" PRIx64 ", size=0x%" PRIx64 "",
shared_memory_handle, addr, size);
NGLOG_WARNING(Kernel_SVC, "called, shared_memory_handle={:#010X}, addr={:#X}, size={:#X}",
shared_memory_handle, addr, size);
SharedPtr<SharedMemory> shared_memory = g_handle_table.Get<SharedMemory>(shared_memory_handle);
@@ -489,19 +465,19 @@ static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_i
memory_info->type = static_cast<u32>(vma->second.meminfo_state);
}
LOG_TRACE(Kernel_SVC, "called process=0x%08X addr=%llx", process_handle, addr);
NGLOG_TRACE(Kernel_SVC, "called process={:#010X} addr={:X}", process_handle, addr);
return RESULT_SUCCESS;
}
/// Query memory
static ResultCode QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, VAddr addr) {
LOG_TRACE(Kernel_SVC, "called, addr=%llx", addr);
NGLOG_TRACE(Kernel_SVC, "called, addr={:X}", addr);
return QueryProcessMemory(memory_info, page_info, CurrentProcess, addr);
}
/// Exits the current process
static void ExitProcess() {
LOG_INFO(Kernel_SVC, "Process %u exiting", Core::CurrentProcess()->process_id);
NGLOG_INFO(Kernel_SVC, "Process {} exiting", Core::CurrentProcess()->process_id);
ASSERT_MSG(Core::CurrentProcess()->status == ProcessStatus::Running,
"Process has already exited");
@@ -558,9 +534,9 @@ static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, V
case THREADPROCESSORID_2:
case THREADPROCESSORID_3:
// TODO(bunnei): Implement support for other processor IDs
LOG_ERROR(Kernel_SVC,
"Newly created thread must run in another thread (%u), unimplemented.",
processor_id);
NGLOG_ERROR(Kernel_SVC,
"Newly created thread must run in another thread ({}), unimplemented.",
processor_id);
break;
default:
ASSERT_MSG(false, "Unsupported thread processor ID: %d", processor_id);
@@ -575,17 +551,17 @@ static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, V
Core::System::GetInstance().PrepareReschedule();
LOG_TRACE(Kernel_SVC,
"called entrypoint=0x%08X (%s), arg=0x%08X, stacktop=0x%08X, "
"threadpriority=0x%08X, processorid=0x%08X : created handle=0x%08X",
entry_point, name.c_str(), arg, stack_top, priority, processor_id, *out_handle);
NGLOG_TRACE(Kernel_SVC,
"called entrypoint={:#010X} ({}), arg={:#010X}, stacktop={:#010X}, "
"threadpriority={:#010X}, processorid={:#010X} : created handle={:#010X}",
entry_point, name, arg, stack_top, priority, processor_id, *out_handle);
return RESULT_SUCCESS;
}
/// Starts the thread for the provided handle
static ResultCode StartThread(Handle thread_handle) {
LOG_TRACE(Kernel_SVC, "called thread=0x%08X", thread_handle);
NGLOG_TRACE(Kernel_SVC, "called thread={:#010X}", thread_handle);
const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
if (!thread) {
@@ -599,7 +575,7 @@ static ResultCode StartThread(Handle thread_handle) {
/// Called when a thread exits
static void ExitThread() {
LOG_TRACE(Kernel_SVC, "called, pc=0x%08X", Core::CPU().GetPC());
NGLOG_TRACE(Kernel_SVC, "called, pc={:#010X}", Core::CPU().GetPC());
ExitCurrentThread();
Core::System::GetInstance().PrepareReschedule();
@@ -607,7 +583,7 @@ static void ExitThread() {
/// Sleep the current thread
static void SleepThread(s64 nanoseconds) {
LOG_TRACE(Kernel_SVC, "called nanoseconds=%lld", nanoseconds);
NGLOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
// Don't attempt to yield execution if there are no available threads to run,
// this way we avoid a useless reschedule to the idle thread.
@@ -626,111 +602,83 @@ static void SleepThread(s64 nanoseconds) {
/// Signal process wide key atomic
static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_variable_addr,
Handle thread_handle, s64 nano_seconds) {
LOG_TRACE(
NGLOG_TRACE(
Kernel_SVC,
"called mutex_addr=%llx, condition_variable_addr=%llx, thread_handle=0x%08X, timeout=%d",
"called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle={:#010X}, timeout={}",
mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
ASSERT(thread);
SharedPtr<Mutex> mutex = g_object_address_table.Get<Mutex>(mutex_addr);
if (!mutex) {
// Create a new mutex for the specified address if one does not already exist
mutex = Mutex::Create(thread, mutex_addr);
mutex->name = Common::StringFromFormat("mutex-%llx", mutex_addr);
}
CASCADE_CODE(Mutex::Release(mutex_addr));
SharedPtr<ConditionVariable> condition_variable =
g_object_address_table.Get<ConditionVariable>(condition_variable_addr);
if (!condition_variable) {
// Create a new condition_variable for the specified address if one does not already exist
condition_variable = ConditionVariable::Create(condition_variable_addr).Unwrap();
condition_variable->name =
Common::StringFromFormat("condition-variable-%llx", condition_variable_addr);
}
SharedPtr<Thread> current_thread = GetCurrentThread();
current_thread->condvar_wait_address = condition_variable_addr;
current_thread->mutex_wait_address = mutex_addr;
current_thread->wait_handle = thread_handle;
current_thread->status = THREADSTATUS_WAIT_MUTEX;
current_thread->wakeup_callback = nullptr;
if (condition_variable->mutex_addr) {
// Previously created the ConditionVariable using WaitProcessWideKeyAtomic, verify
// everything is correct
ASSERT(condition_variable->mutex_addr == mutex_addr);
} else {
// Previously created the ConditionVariable using SignalProcessWideKey, set the mutex
// associated with it
condition_variable->mutex_addr = mutex_addr;
}
current_thread->WakeAfterDelay(nano_seconds);
if (mutex->GetOwnerHandle()) {
// Release the mutex if the current thread is holding it
mutex->Release(thread.get());
}
auto wakeup_callback = [mutex, nano_seconds](ThreadWakeupReason reason,
SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, size_t index) {
ASSERT(thread->status == THREADSTATUS_WAIT_SYNCH_ANY);
if (reason == ThreadWakeupReason::Timeout) {
thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
return true;
}
ASSERT(reason == ThreadWakeupReason::Signal);
// Now try to acquire the mutex and don't resume if it's not available.
if (!mutex->ShouldWait(thread.get())) {
mutex->Acquire(thread.get());
thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
return true;
}
if (nano_seconds == 0) {
thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
return true;
}
thread->wait_objects = {mutex};
mutex->AddWaitingThread(thread);
thread->status = THREADSTATUS_WAIT_SYNCH_ANY;
// Create an event to wake the thread up after the
// specified nanosecond delay has passed
thread->WakeAfterDelay(nano_seconds);
thread->wakeup_callback = DefaultThreadWakeupCallback;
Core::System::GetInstance().PrepareReschedule();
return false;
};
CASCADE_CODE(
WaitSynchronization1(condition_variable, thread.get(), nano_seconds, wakeup_callback));
// Note: Deliberately don't attempt to inherit the lock owner's priority.
Core::System::GetInstance().PrepareReschedule();
return RESULT_SUCCESS;
}
/// Signal process wide key
static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target) {
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x%llx, target=0x%08x",
condition_variable_addr, target);
NGLOG_TRACE(Kernel_SVC, "called, condition_variable_addr={:#X}, target={:#010X}",
condition_variable_addr, target);
// Wakeup all or one thread - Any other value is unimplemented
ASSERT(target == -1 || target == 1);
u32 processed = 0;
auto& thread_list = Core::System::GetInstance().Scheduler().GetThreadList();
SharedPtr<ConditionVariable> condition_variable =
g_object_address_table.Get<ConditionVariable>(condition_variable_addr);
if (!condition_variable) {
// Create a new condition_variable for the specified address if one does not already exist
condition_variable = ConditionVariable::Create(condition_variable_addr).Unwrap();
condition_variable->name =
Common::StringFromFormat("condition-variable-%llx", condition_variable_addr);
}
for (auto& thread : thread_list) {
if (thread->condvar_wait_address != condition_variable_addr)
continue;
CASCADE_CODE(condition_variable->Release(target));
// Only process up to 'target' threads, unless 'target' is -1, in which case process
// them all.
if (target != -1 && processed >= target)
break;
if (condition_variable->mutex_addr) {
// If a mutex was created for this condition_variable, wait the current thread on it
SharedPtr<Mutex> mutex = g_object_address_table.Get<Mutex>(condition_variable->mutex_addr);
return WaitSynchronization1(mutex, GetCurrentThread());
// If the mutex is not yet acquired, acquire it.
u32 mutex_val = Memory::Read32(thread->mutex_wait_address);
if (mutex_val == 0) {
// We were able to acquire the mutex, resume this thread.
Memory::Write32(thread->mutex_wait_address, thread->wait_handle);
ASSERT(thread->status == THREADSTATUS_WAIT_MUTEX);
thread->ResumeFromWait();
auto lock_owner = thread->lock_owner;
if (lock_owner)
lock_owner->RemoveMutexWaiter(thread);
thread->lock_owner = nullptr;
thread->mutex_wait_address = 0;
thread->condvar_wait_address = 0;
thread->wait_handle = 0;
} else {
// Couldn't acquire the mutex, block the thread.
Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
auto owner = g_handle_table.Get<Thread>(owner_handle);
ASSERT(owner);
ASSERT(thread->status != THREADSTATUS_RUNNING);
thread->status = THREADSTATUS_WAIT_MUTEX;
thread->wakeup_callback = nullptr;
// Signal that the mutex now has a waiting thread.
Memory::Write32(thread->mutex_wait_address, mutex_val | Mutex::MutexHasWaitersFlag);
owner->AddMutexWaiter(thread);
Core::System::GetInstance().PrepareReschedule();
}
++processed;
}
return RESULT_SUCCESS;
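// A minimal standalone sketch (not the emulator's code) of the userspace mutex word that the
// new SignalProcessWideKey path above manipulates: the low bits hold the owner's handle and a
// single flag bit marks that at least one thread is waiting. The flag value below follows the
// usual Switch convention (bit 30) and is an assumption here, not something this diff states.
#include <cstdint>

using Handle = std::uint32_t;

constexpr std::uint32_t kMutexHasWaitersFlag = 0x40000000; // assumed bit layout

// Computes the value the kernel writes back when waking one condition-variable waiter.
// If the word is 0 the mutex is free and ownership is handed straight to the woken thread;
// otherwise the word is marked contended and the waiter must block on the current owner.
std::uint32_t WakeOneWaiter(std::uint32_t mutex_word, Handle waiter_handle, bool& must_block) {
    if (mutex_word == 0) {
        must_block = false;
        return waiter_handle;
    }
    must_block = true;
    return mutex_word | kMutexHasWaitersFlag;
}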
@@ -748,13 +696,13 @@ static u64 GetSystemTick() {
/// Close a handle
static ResultCode CloseHandle(Handle handle) {
LOG_TRACE(Kernel_SVC, "Closing handle 0x%08X", handle);
NGLOG_TRACE(Kernel_SVC, "Closing handle {:#010X}", handle);
return g_handle_table.Close(handle);
}
/// Reset an event
static ResultCode ResetSignal(Handle handle) {
LOG_WARNING(Kernel_SVC, "(STUBBED) called handle 0x%08X", handle);
NGLOG_WARNING(Kernel_SVC, "(STUBBED) called handle {:#010X}", handle);
auto event = g_handle_table.Get<Event>(handle);
ASSERT(event != nullptr);
event->Clear();
@@ -763,29 +711,29 @@ static ResultCode ResetSignal(Handle handle) {
/// Creates a TransferMemory object
static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32 permissions) {
LOG_WARNING(Kernel_SVC, "(STUBBED) called addr=0x%lx, size=0x%lx, perms=%08X", addr, size,
permissions);
NGLOG_WARNING(Kernel_SVC, "(STUBBED) called addr={:#X}, size={:#X}, perms={:010X}", addr, size,
permissions);
*handle = 0;
return RESULT_SUCCESS;
}
static ResultCode GetThreadCoreMask(Handle handle, u32* mask, u64* unknown) {
LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x%08X", handle);
NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, handle={:010X}", handle);
*mask = 0x0;
*unknown = 0xf;
return RESULT_SUCCESS;
}
static ResultCode SetThreadCoreMask(Handle handle, u32 mask, u64 unknown) {
LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x%08X, mask=0x%08X, unknown=0x%lx", handle,
mask, unknown);
NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, handle={:#010X}, mask={:#010X}, unknown={:#X}",
handle, mask, unknown);
return RESULT_SUCCESS;
}
static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permissions,
u32 remote_permissions) {
LOG_TRACE(Kernel_SVC, "called, size=0x%llx, localPerms=0x%08x, remotePerms=0x%08x", size,
local_permissions, remote_permissions);
NGLOG_TRACE(Kernel_SVC, "called, size={:#X}, localPerms={:#010X}, remotePerms={:#010X}", size,
local_permissions, remote_permissions);
auto sharedMemHandle =
SharedMemory::Create(g_handle_table.Get<Process>(KernelHandle::CurrentProcess), size,
static_cast<MemoryPermission>(local_permissions),
@@ -796,7 +744,7 @@ static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permiss
}
static ResultCode ClearEvent(Handle handle) {
LOG_TRACE(Kernel_SVC, "called, event=0xX", handle);
NGLOG_TRACE(Kernel_SVC, "called, event={:010X}", handle);
SharedPtr<Event> evt = g_handle_table.Get<Event>(handle);
if (evt == nullptr)
@@ -948,7 +896,7 @@ static const FunctionDef SVC_Table[] = {
static const FunctionDef* GetSVCInfo(u32 func_num) {
if (func_num >= std::size(SVC_Table)) {
LOG_ERROR(Kernel_SVC, "unknown svc=0x%02X", func_num);
NGLOG_ERROR(Kernel_SVC, "Unknown svc={:#04X}", func_num);
return nullptr;
}
return &SVC_Table[func_num];
@@ -967,10 +915,10 @@ void CallSVC(u32 immediate) {
if (info->func) {
info->func();
} else {
LOG_CRITICAL(Kernel_SVC, "unimplemented SVC function %s(..)", info->name);
NGLOG_CRITICAL(Kernel_SVC, "Unimplemented SVC function {}(..)", info->name);
}
} else {
LOG_CRITICAL(Kernel_SVC, "unknown SVC function 0x%x", immediate);
NGLOG_CRITICAL(Kernel_SVC, "Unknown SVC function {:#X}", immediate);
}
}
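// A small self-contained sketch of the formatting change behind the LOG_* -> NGLOG_* migration
// in this file: printf-style conversion specifiers become fmt brace specs. fmt is already a
// project dependency; the outputs in the comments assume fmt's documented behaviour, where
// "{:#010X}" counts the prefix in the field width and emits an uppercase "0X", unlike the
// lowercase "0x" the old "0x%08X" strings printed.
#include <cstdio>
#include <fmt/format.h>

int main() {
    const unsigned int handle = 0x1234;
    std::printf("old: 0x%08X\n", handle);                                // old: 0x00001234
    std::printf("new: %s\n", fmt::format("{:#010X}", handle).c_str());   // new: 0X00001234
    std::printf("svc: %s\n", fmt::format("svc={:#04X}", 0x2Cu).c_str()); // svc: svc=0X2C
    return 0;
}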


@@ -77,9 +77,6 @@ void Thread::Stop() {
}
wait_objects.clear();
// Release all the mutexes that this thread holds
ReleaseThreadMutexes(this);
// Mark the TLS slot in the thread's page as free.
u64 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
u64 tls_slot =
@@ -104,9 +101,10 @@ void ExitCurrentThread() {
* @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
*/
static void ThreadWakeupCallback(u64 thread_handle, int cycles_late) {
SharedPtr<Thread> thread = wakeup_callback_handle_table.Get<Thread>((Handle)thread_handle);
const auto proper_handle = static_cast<Handle>(thread_handle);
SharedPtr<Thread> thread = wakeup_callback_handle_table.Get<Thread>(proper_handle);
if (thread == nullptr) {
LOG_CRITICAL(Kernel, "Callback fired for invalid thread %08X", (Handle)thread_handle);
NGLOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
return;
}
@@ -126,6 +124,19 @@ static void ThreadWakeupCallback(u64 thread_handle, int cycles_late) {
resume = thread->wakeup_callback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
}
if (thread->mutex_wait_address != 0 || thread->condvar_wait_address != 0 ||
thread->wait_handle) {
ASSERT(thread->status == THREADSTATUS_WAIT_MUTEX);
thread->mutex_wait_address = 0;
thread->condvar_wait_address = 0;
thread->wait_handle = 0;
auto lock_owner = thread->lock_owner;
// Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
// and don't have a lock owner.
ASSERT(lock_owner == nullptr);
}
if (resume)
thread->ResumeFromWait();
}
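// A simplified sketch of the wakeup-callback contract used by ThreadWakeupCallback above: the
// scheduler invokes the callback with the reason for the wakeup, and the callback returns
// whether the thread should actually be resumed (false lets it keep waiting, as the
// WaitProcessWideKeyAtomic lambda does when it re-arms the wait). The types here are
// placeholders, not the kernel's real declarations.
#include <cstddef>
#include <functional>

enum class WakeReason { Signal, Timeout };

struct SketchThread {
    bool resumed = false;
    // Mirrors the signature seen earlier: (reason, thread, object, index) -> resume?
    std::function<bool(WakeReason, SketchThread&, void*, std::size_t)> wakeup_callback;
};

void FireWakeup(SketchThread& thread, WakeReason reason) {
    bool resume = true;
    if (thread.wakeup_callback)
        resume = thread.wakeup_callback(reason, thread, /*object=*/nullptr, /*index=*/0);
    if (resume)
        thread.resumed = true; // stand-in for ResumeFromWait()
}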
@@ -151,6 +162,7 @@ void Thread::ResumeFromWait() {
case THREADSTATUS_WAIT_HLE_EVENT:
case THREADSTATUS_WAIT_SLEEP:
case THREADSTATUS_WAIT_IPC:
case THREADSTATUS_WAIT_MUTEX:
break;
case THREADSTATUS_READY:
@@ -227,19 +239,19 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
SharedPtr<Process> owner_process) {
// Check if priority is in range. Lowest priority -> highest priority id.
if (priority > THREADPRIO_LOWEST) {
LOG_ERROR(Kernel_SVC, "Invalid thread priority: %u", priority);
NGLOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
return ERR_OUT_OF_RANGE;
}
if (processor_id > THREADPROCESSORID_MAX) {
LOG_ERROR(Kernel_SVC, "Invalid processor id: %d", processor_id);
NGLOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id);
return ERR_OUT_OF_RANGE_KERNEL;
}
// TODO(yuriks): Other checks, returning 0xD9001BEA
if (!Memory::IsValidVirtualAddress(*owner_process, entry_point)) {
LOG_ERROR(Kernel_SVC, "(name=%s): invalid entry %016" PRIx64, name.c_str(), entry_point);
NGLOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
// TODO (bunnei): Find the correct error code to use here
return ResultCode(-1);
}
@@ -256,7 +268,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
thread->last_running_ticks = CoreTiming::GetTicks();
thread->processor_id = processor_id;
thread->wait_objects.clear();
thread->wait_address = 0;
thread->mutex_wait_address = 0;
thread->condvar_wait_address = 0;
thread->wait_handle = 0;
thread->name = std::move(name);
thread->callback_handle = wakeup_callback_handle_table.Create(thread).Unwrap();
thread->owner_process = owner_process;
@@ -276,8 +290,8 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
auto& linheap_memory = memory_region->linear_heap_memory;
if (linheap_memory->size() + Memory::PAGE_SIZE > memory_region->size) {
LOG_ERROR(Kernel_SVC,
"Not enough space in region to allocate a new TLS page for thread");
NGLOG_ERROR(Kernel_SVC,
"Not enough space in region to allocate a new TLS page for thread");
return ERR_OUT_OF_MEMORY;
}
@@ -317,17 +331,8 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
void Thread::SetPriority(u32 priority) {
ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
"Invalid priority value.");
Core::System::GetInstance().Scheduler().SetThreadPriority(this, priority);
nominal_priority = current_priority = priority;
}
void Thread::UpdatePriority() {
u32 best_priority = nominal_priority;
for (auto& mutex : held_mutexes) {
if (mutex->priority < best_priority)
best_priority = mutex->priority;
}
BoostPriority(best_priority);
nominal_priority = priority;
UpdatePriority();
}
void Thread::BoostPriority(u32 priority) {
@@ -377,6 +382,38 @@ VAddr Thread::GetCommandBufferAddress() const {
return GetTLSAddress() + CommandHeaderOffset;
}
void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
thread->lock_owner = this;
wait_mutex_threads.emplace_back(std::move(thread));
UpdatePriority();
}
void Thread::RemoveMutexWaiter(SharedPtr<Thread> thread) {
boost::remove_erase(wait_mutex_threads, thread);
thread->lock_owner = nullptr;
UpdatePriority();
}
void Thread::UpdatePriority() {
// Find the highest priority among all the threads that are waiting for this thread's lock
u32 new_priority = nominal_priority;
for (const auto& thread : wait_mutex_threads) {
if (thread->nominal_priority < new_priority)
new_priority = thread->nominal_priority;
}
if (new_priority == current_priority)
return;
Core::System::GetInstance().Scheduler().SetThreadPriority(this, new_priority);
current_priority = new_priority;
// Recursively update the priority of the thread that depends on the priority of this one.
if (lock_owner)
lock_owner->UpdatePriority();
}
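// A simplified sketch of the priority-inheritance walk implemented by AddMutexWaiter,
// RemoveMutexWaiter and UpdatePriority above: a lock holder runs at the best (numerically
// lowest) priority among its own nominal priority and the nominal priorities of all threads
// blocked on locks it holds, and any change propagates recursively to the thread it is itself
// waiting on. Types are illustrative only.
#include <algorithm>
#include <cstdint>
#include <vector>

struct ToyThread {
    std::uint32_t nominal_priority;
    std::uint32_t current_priority;
    ToyThread* lock_owner = nullptr;            // thread holding the lock this one waits on
    std::vector<ToyThread*> wait_mutex_threads; // threads blocked on locks this thread holds

    void UpdatePriority() {
        std::uint32_t best = nominal_priority;
        for (const ToyThread* waiter : wait_mutex_threads)
            best = std::min(best, waiter->nominal_priority);
        if (best == current_priority)
            return;
        current_priority = best;
        if (lock_owner)
            lock_owner->UpdatePriority(); // propagate the boost up the chain of owners
    }
};
// Example: an owner with nominal priority 44 that holds a mutex a priority-24 thread is
// blocked on ends up with current_priority 24 until RemoveMutexWaiter runs.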
////////////////////////////////////////////////////////////////////////////////////////////////////
/**


@@ -18,7 +18,7 @@
enum ThreadPriority : u32 {
THREADPRIO_HIGHEST = 0, ///< Highest thread priority
THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
THREADPRIO_DEFAULT = 48, ///< Default thread priority for userland apps
THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps
THREADPRIO_LOWEST = 63, ///< Lowest thread priority
};
@@ -43,6 +43,7 @@ enum ThreadStatus {
THREADSTATUS_WAIT_IPC, ///< Waiting for the reply from an IPC request
THREADSTATUS_WAIT_SYNCH_ANY, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false
THREADSTATUS_WAIT_SYNCH_ALL, ///< Waiting due to WaitSynchronizationN with wait_all = true
THREADSTATUS_WAIT_MUTEX, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc
THREADSTATUS_DORMANT, ///< Created but not yet made ready
THREADSTATUS_DEAD ///< Run to completion, or forcefully terminated
};
@@ -54,7 +55,6 @@ enum class ThreadWakeupReason {
namespace Kernel {
class Mutex;
class Process;
class Thread final : public WaitObject {
@@ -103,18 +103,21 @@ public:
*/
void SetPriority(u32 priority);
/**
* Boosts a thread's priority to the best priority among the thread's held mutexes.
* This prevents priority inversion via priority inheritance.
*/
void UpdatePriority();
/**
* Temporarily boosts the thread's priority until the next time it is scheduled
* @param priority The new priority
*/
void BoostPriority(u32 priority);
/// Adds a thread to the list of threads that are waiting for a lock held by this thread.
void AddMutexWaiter(SharedPtr<Thread> thread);
/// Removes a thread from the list of threads that are waiting for a lock held by this thread.
void RemoveMutexWaiter(SharedPtr<Thread> thread);
/// Recalculates the current priority taking into account priority inheritance.
void UpdatePriority();
/**
* Gets the thread's thread ID
* @return The thread's ID
@@ -205,19 +208,22 @@ public:
VAddr tls_address; ///< Virtual address of the Thread Local Storage of the thread
/// Mutexes currently held by this thread, which will be released when it exits.
boost::container::flat_set<SharedPtr<Mutex>> held_mutexes;
/// Mutexes that this thread is currently waiting for.
boost::container::flat_set<SharedPtr<Mutex>> pending_mutexes;
SharedPtr<Process> owner_process; ///< Process that owns this thread
/// Objects that the thread is waiting on, in the same order as they were
// passed to WaitSynchronization1/N.
std::vector<SharedPtr<WaitObject>> wait_objects;
VAddr wait_address; ///< If waiting on an AddressArbiter, this is the arbitration address
/// List of threads that are waiting for a mutex that is held by this thread.
std::vector<SharedPtr<Thread>> wait_mutex_threads;
/// Thread that owns the lock that this thread is waiting for.
SharedPtr<Thread> lock_owner;
// If waiting on a ConditionVariable, this is the ConditionVariable address
VAddr condvar_wait_address;
VAddr mutex_wait_address; ///< If waiting on a Mutex, this is the mutex address
Handle wait_handle; ///< The handle used to wait for the mutex.
std::string name;


@@ -77,7 +77,7 @@ void Timer::WakeupAllWaitingThreads() {
}
void Timer::Signal(int cycles_late) {
LOG_TRACE(Kernel, "Timer %u fired", GetObjectId());
NGLOG_TRACE(Kernel, "Timer {} fired", GetObjectId());
signaled = true;
@@ -97,7 +97,7 @@ static void TimerCallback(u64 timer_handle, int cycles_late) {
timer_callback_handle_table.Get<Timer>(static_cast<Handle>(timer_handle));
if (timer == nullptr) {
LOG_CRITICAL(Kernel, "Callback fired for invalid timer %08" PRIx64, timer_handle);
NGLOG_CRITICAL(Kernel, "Callback fired for invalid timer {:016X}", timer_handle);
return;
}


@@ -379,22 +379,22 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
}
u64 VMManager::GetTotalMemoryUsage() {
LOG_WARNING(Kernel, "(STUBBED) called");
NGLOG_WARNING(Kernel, "(STUBBED) called");
return 0xF8000000;
}
u64 VMManager::GetTotalHeapUsage() {
LOG_WARNING(Kernel, "(STUBBED) called");
NGLOG_WARNING(Kernel, "(STUBBED) called");
return 0x0;
}
VAddr VMManager::GetAddressSpaceBaseAddr() {
LOG_WARNING(Kernel, "(STUBBED) called");
NGLOG_WARNING(Kernel, "(STUBBED) called");
return 0x8000000;
}
u64 VMManager::GetAddressSpaceSize() {
LOG_WARNING(Kernel, "(STUBBED) called");
NGLOG_WARNING(Kernel, "(STUBBED) called");
return MAX_ADDRESS;
}


@@ -47,7 +47,7 @@ public:
private:
void GetBase(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
NGLOG_WARNING(Service_ACC, "(STUBBED) called");
ProfileBase profile_base{};
IPC::ResponseBuilder rb{ctx, 16};
rb.Push(RESULT_SUCCESS);
@@ -72,14 +72,14 @@ public:
private:
void CheckAvailability(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
NGLOG_WARNING(Service_ACC, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(true); // TODO: Check when this is supposed to return true and when not
}
void GetAccountId(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
NGLOG_WARNING(Service_ACC, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(0x12345678ABCDEF);
@@ -87,14 +87,14 @@ private:
};
void Module::Interface::GetUserExistence(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
NGLOG_WARNING(Service_ACC, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(true); // TODO: Check when this is supposed to return true and when not
}
void Module::Interface::ListAllUsers(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
NGLOG_WARNING(Service_ACC, "(STUBBED) called");
constexpr std::array<u128, 10> user_ids{DEFAULT_USER_ID};
ctx.WriteBuffer(user_ids.data(), user_ids.size());
IPC::ResponseBuilder rb{ctx, 2};
@@ -102,7 +102,7 @@ void Module::Interface::ListAllUsers(Kernel::HLERequestContext& ctx) {
}
void Module::Interface::ListOpenUsers(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
NGLOG_WARNING(Service_ACC, "(STUBBED) called");
constexpr std::array<u128, 10> user_ids{DEFAULT_USER_ID};
ctx.WriteBuffer(user_ids.data(), user_ids.size());
IPC::ResponseBuilder rb{ctx, 2};
@@ -113,11 +113,11 @@ void Module::Interface::GetProfile(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IProfile>();
LOG_DEBUG(Service_ACC, "called");
NGLOG_DEBUG(Service_ACC, "called");
}
void Module::Interface::InitializeApplicationInfo(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
NGLOG_WARNING(Service_ACC, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
@@ -126,11 +126,11 @@ void Module::Interface::GetBaasAccountManagerForApplication(Kernel::HLERequestCo
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IManagerForApplication>();
LOG_DEBUG(Service_ACC, "called");
NGLOG_DEBUG(Service_ACC, "called");
}
void Module::Interface::GetLastOpenedUser(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
NGLOG_WARNING(Service_ACC, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 6};
rb.Push(RESULT_SUCCESS);
rb.PushRaw(DEFAULT_USER_ID);


@@ -28,14 +28,14 @@ IWindowController::IWindowController() : ServiceFramework("IWindowController") {
}
void IWindowController::GetAppletResourceUserId(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(0);
}
void IWindowController::AcquireForegroundRights(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
@@ -54,20 +54,20 @@ IAudioController::IAudioController() : ServiceFramework("IAudioController") {
}
void IAudioController::SetExpectedMasterVolume(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void IAudioController::GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(volume);
}
void IAudioController::GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(volume);
@@ -139,14 +139,14 @@ void ISelfController::SetFocusHandlingMode(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void ISelfController::SetRestartMessageEnabled(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void ISelfController::SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx) {
@@ -157,14 +157,14 @@ void ISelfController::SetPerformanceModeChangedNotification(Kernel::HLERequestCo
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called flag=%u", static_cast<u32>(flag));
NGLOG_WARNING(Service_AM, "(STUBBED) called flag={}", flag);
}
void ISelfController::SetScreenShotPermission(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void ISelfController::SetOperationModeChangedNotification(Kernel::HLERequestContext& ctx) {
@@ -175,7 +175,7 @@ void ISelfController::SetOperationModeChangedNotification(Kernel::HLERequestCont
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called flag=%u", static_cast<u32>(flag));
NGLOG_WARNING(Service_AM, "(STUBBED) called flag={}", flag);
}
void ISelfController::SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx) {
@@ -188,21 +188,21 @@ void ISelfController::SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext&
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called enabled=%u", static_cast<u32>(enabled));
NGLOG_WARNING(Service_AM, "(STUBBED) called enabled={}", enabled);
}
void ISelfController::LockExit(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void ISelfController::UnlockExit(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) {
@@ -212,7 +212,7 @@ void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext&
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(launchable_event);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void ISelfController::CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx) {
@@ -225,7 +225,7 @@ void ISelfController::CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx)
rb.Push(RESULT_SUCCESS);
rb.Push(layer_id);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
ICommonStateGetter::ICommonStateGetter() : ServiceFramework("ICommonStateGetter") {
@@ -269,7 +269,7 @@ void ICommonStateGetter::GetEventHandle(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(event);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void ICommonStateGetter::ReceiveMessage(Kernel::HLERequestContext& ctx) {
@@ -277,7 +277,7 @@ void ICommonStateGetter::ReceiveMessage(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(15);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void ICommonStateGetter::GetCurrentFocusState(Kernel::HLERequestContext& ctx) {
@@ -285,7 +285,7 @@ void ICommonStateGetter::GetCurrentFocusState(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.Push(static_cast<u8>(FocusState::InFocus));
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void ICommonStateGetter::GetOperationMode(Kernel::HLERequestContext& ctx) {
@@ -294,7 +294,7 @@ void ICommonStateGetter::GetOperationMode(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.Push(static_cast<u8>(use_docked_mode ? OperationMode::Docked : OperationMode::Handheld));
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void ICommonStateGetter::GetPerformanceMode(Kernel::HLERequestContext& ctx) {
@@ -304,7 +304,7 @@ void ICommonStateGetter::GetPerformanceMode(Kernel::HLERequestContext& ctx) {
rb.Push(static_cast<u32>(use_docked_mode ? APM::PerformanceMode::Docked
: APM::PerformanceMode::Handheld));
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
class ILibraryAppletAccessor final : public ServiceFramework<ILibraryAppletAccessor> {
@@ -344,7 +344,7 @@ private:
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(state_changed_event);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
Kernel::SharedPtr<Kernel::Event> state_changed_event;
@@ -368,7 +368,7 @@ void ILibraryAppletCreator::CreateLibraryApplet(Kernel::HLERequestContext& ctx)
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<AM::ILibraryAppletAccessor>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
class IStorageAccessor final : public ServiceFramework<IStorageAccessor> {
@@ -392,7 +392,7 @@ private:
rb.Push(RESULT_SUCCESS);
rb.Push(static_cast<u64>(buffer.size()));
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void Read(Kernel::HLERequestContext& ctx) {
@@ -410,7 +410,7 @@ private:
rb.Push(RESULT_SUCCESS);
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
};
@@ -434,7 +434,7 @@ private:
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<AM::IStorageAccessor>(buffer);
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
};
@@ -498,14 +498,14 @@ void IApplicationFunctions::PopLaunchParameter(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<AM::IStorage>(buffer);
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void IApplicationFunctions::EnsureSaveData(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
u128 uid = rp.PopRaw<u128>();
LOG_WARNING(Service, "(STUBBED) called uid = %016" PRIX64 "%016" PRIX64, uid[1], uid[0]);
NGLOG_WARNING(Service, "(STUBBED) called uid = {:016X}{:016X}", uid[1], uid[0]);
IPC::ResponseBuilder rb{ctx, 4};
@@ -533,27 +533,27 @@ void IApplicationFunctions::SetTerminateResult(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called, result=0x%08X", result);
NGLOG_WARNING(Service_AM, "(STUBBED) called, result={:#010}", result);
}
void IApplicationFunctions::GetDesiredLanguage(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(SystemLanguage::English);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void IApplicationFunctions::InitializeGamePlayRecording(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void IApplicationFunctions::SetGamePlayRecordingState(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void IApplicationFunctions::NotifyRunning(Kernel::HLERequestContext& ctx) {
@@ -561,7 +561,7 @@ void IApplicationFunctions::NotifyRunning(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.Push<u8>(0); // Unknown, seems to be ignored by official processes
LOG_WARNING(Service_AM, "(STUBBED) called");
NGLOG_WARNING(Service_AM, "(STUBBED) called");
}
void InstallInterfaces(SM::ServiceManager& service_manager,


@@ -33,56 +33,56 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ICommonStateGetter>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetSelfController(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ISelfController>(nvflinger);
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetWindowController(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IWindowController>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetAudioController(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IAudioController>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetDisplayController(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IDisplayController>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetDebugFunctions(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IDebugFunctions>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetLibraryAppletCreator(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ILibraryAppletCreator>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetApplicationFunctions(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IApplicationFunctions>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
std::shared_ptr<NVFlinger::NVFlinger> nvflinger;
@@ -92,7 +92,7 @@ void AppletAE::OpenLibraryAppletProxyOld(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ILibraryAppletProxy>(nvflinger);
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
AppletAE::AppletAE(std::shared_ptr<NVFlinger::NVFlinger> nvflinger)


@@ -33,56 +33,56 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IAudioController>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetDisplayController(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IDisplayController>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetDebugFunctions(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IDebugFunctions>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetWindowController(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IWindowController>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetSelfController(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ISelfController>(nvflinger);
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetCommonStateGetter(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ICommonStateGetter>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetLibraryAppletCreator(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ILibraryAppletCreator>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
void GetApplicationFunctions(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IApplicationFunctions>();
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
std::shared_ptr<NVFlinger::NVFlinger> nvflinger;
@@ -92,7 +92,7 @@ void AppletOE::OpenApplicationProxy(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IApplicationProxy>(nvflinger);
LOG_DEBUG(Service_AM, "called");
NGLOG_DEBUG(Service_AM, "called");
}
AppletOE::AppletOE(std::shared_ptr<NVFlinger::NVFlinger> nvflinger)


@@ -27,14 +27,14 @@ void AOC_U::CountAddOnContent(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(0);
LOG_WARNING(Service_AOC, "(STUBBED) called");
NGLOG_WARNING(Service_AOC, "(STUBBED) called");
}
void AOC_U::ListAddOnContent(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(0);
LOG_WARNING(Service_AOC, "(STUBBED) called");
NGLOG_WARNING(Service_AOC, "(STUBBED) called");
}
void InstallInterfaces(SM::ServiceManager& service_manager) {


@@ -29,8 +29,8 @@ private:
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_APM, "(STUBBED) called mode=%u config=%u", static_cast<u32>(mode),
config);
NGLOG_WARNING(Service_APM, "(STUBBED) called mode={} config={}", static_cast<u32>(mode),
config);
}
void GetPerformanceConfiguration(Kernel::HLERequestContext& ctx) {
@@ -42,7 +42,7 @@ private:
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(0); // Performance configuration
LOG_WARNING(Service_APM, "(STUBBED) called mode=%u", static_cast<u32>(mode));
NGLOG_WARNING(Service_APM, "(STUBBED) called mode={}", static_cast<u32>(mode));
}
};


@@ -60,14 +60,14 @@ public:
private:
void GetAudioOutState(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_Audio, "called");
NGLOG_DEBUG(Service_Audio, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(static_cast<u32>(audio_out_state));
}
void StartAudioOut(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
// Start audio
audio_out_state = AudioState::Started;
@@ -77,7 +77,7 @@ private:
}
void StopAudioOut(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
// Stop audio
audio_out_state = AudioState::Stopped;
@@ -89,7 +89,7 @@ private:
}
void RegisterBufferEvent(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
@@ -97,7 +97,7 @@ private:
}
void AppendAudioOutBuffer(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const u64 key{rp.Pop<u64>()};
@@ -108,7 +108,7 @@ private:
}
void GetReleasedAudioOutBuffer(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
// TODO(st4rk): This is how libtransistor currently implements the
// GetReleasedAudioOutBuffer, it should return the key (a VAddr) to the app and this address
@@ -164,7 +164,7 @@ private:
};
void AudOutU::ListAudioOuts(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const std::string audio_interface = "AudioInterface";
@@ -180,7 +180,7 @@ void AudOutU::ListAudioOuts(Kernel::HLERequestContext& ctx) {
}
void AudOutU::OpenAudioOut(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
if (!audio_out_interface) {
audio_out_interface = std::make_shared<IAudioOut>();


@@ -56,7 +56,7 @@ private:
}
void RequestUpdateAudioRenderer(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_Audio, "%s", ctx.Description().c_str());
NGLOG_DEBUG(Service_Audio, "{}", ctx.Description());
AudioRendererResponseData response_data{};
response_data.section_0_size =
@@ -79,7 +79,7 @@ private:
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
}
void StartAudioRenderer(Kernel::HLERequestContext& ctx) {
@@ -87,7 +87,7 @@ private:
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
}
void StopAudioRenderer(Kernel::HLERequestContext& ctx) {
@@ -95,7 +95,7 @@ private:
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
}
void QuerySystemEvent(Kernel::HLERequestContext& ctx) {
@@ -105,7 +105,7 @@ private:
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(system_event);
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
}
struct AudioRendererStateEntry {
@@ -176,7 +176,7 @@ public:
private:
void ListAudioDeviceName(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const std::string audio_interface = "AudioInterface";
@@ -188,7 +188,7 @@ private:
}
void SetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
IPC::RequestParser rp{ctx};
f32 volume = static_cast<f32>(rp.Pop<u32>());
@@ -201,7 +201,7 @@ private:
}
void GetActiveAudioDeviceName(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const std::string audio_interface = "AudioDevice";
@@ -213,7 +213,7 @@ private:
}
void QueryAudioDeviceSystemEvent(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
buffer_event->Signal();
@@ -223,7 +223,7 @@ private:
}
void GetActiveChannelCount(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(1);
@@ -250,7 +250,7 @@ void AudRenU::OpenAudioRenderer(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<Audio::IAudioRenderer>();
LOG_DEBUG(Service_Audio, "called");
NGLOG_DEBUG(Service_Audio, "called");
}
void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) {
@@ -259,7 +259,7 @@ void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(0x400);
LOG_WARNING(Service_Audio, "(STUBBED) called");
NGLOG_WARNING(Service_Audio, "(STUBBED) called");
}
void AudRenU::GetAudioDevice(Kernel::HLERequestContext& ctx) {
@@ -268,7 +268,7 @@ void AudRenU::GetAudioDevice(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<Audio::IAudioDevice>();
LOG_DEBUG(Service_Audio, "called");
NGLOG_DEBUG(Service_Audio, "called");
}
} // namespace Service::Audio


@@ -16,13 +16,13 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
void Module::Interface::FatalSimple(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp(ctx);
u32 error_code = rp.Pop<u32>();
LOG_WARNING(Service_Fatal, "(STUBBED) called, error_code=0x%X", error_code);
NGLOG_WARNING(Service_Fatal, "(STUBBED) called, error_code={:#X}", error_code);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Module::Interface::TransitionToFatalError(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Fatal, "(STUBBED) called");
NGLOG_WARNING(Service_Fatal, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}


@@ -25,14 +25,14 @@ ResultCode RegisterFileSystem(std::unique_ptr<FileSys::FileSystemFactory>&& fact
ASSERT_MSG(inserted, "Tried to register more than one system with same id code");
auto& filesystem = result.first->second;
LOG_DEBUG(Service_FS, "Registered file system %s with id code 0x%08X",
filesystem->GetName().c_str(), static_cast<u32>(type));
NGLOG_DEBUG(Service_FS, "Registered file system {} with id code {:#010X}",
filesystem->GetName(), static_cast<u32>(type));
return RESULT_SUCCESS;
}
ResultVal<std::unique_ptr<FileSys::FileSystemBackend>> OpenFileSystem(Type type,
FileSys::Path& path) {
LOG_TRACE(Service_FS, "Opening FileSystem with type=%d", type);
NGLOG_TRACE(Service_FS, "Opening FileSystem with type={}", static_cast<u32>(type));
auto itr = filesystem_map.find(type);
if (itr == filesystem_map.end()) {
@@ -44,7 +44,7 @@ ResultVal<std::unique_ptr<FileSys::FileSystemBackend>> OpenFileSystem(Type type,
}
ResultCode FormatFileSystem(Type type) {
LOG_TRACE(Service_FS, "Formatting FileSystem with type=%d", type);
NGLOG_TRACE(Service_FS, "Formatting FileSystem with type={}", static_cast<u32>(type));
auto itr = filesystem_map.find(type);
if (itr == filesystem_map.end()) {


@@ -35,7 +35,7 @@ private:
const s64 offset = rp.Pop<s64>();
const s64 length = rp.Pop<s64>();
LOG_DEBUG(Service_FS, "called, offset=0x%ld, length=0x%ld", offset, length);
NGLOG_DEBUG(Service_FS, "called, offset={:#X}, length={}", offset, length);
// Error checking
if (length < 0) {
@@ -87,7 +87,7 @@ private:
const s64 offset = rp.Pop<s64>();
const s64 length = rp.Pop<s64>();
LOG_DEBUG(Service_FS, "called, offset=0x%ld, length=0x%ld", offset, length);
NGLOG_DEBUG(Service_FS, "called, offset={:#X}, length={}", offset, length);
// Error checking
if (length < 0) {
@@ -124,7 +124,7 @@ private:
const s64 offset = rp.Pop<s64>();
const s64 length = rp.Pop<s64>();
LOG_DEBUG(Service_FS, "called, offset=0x%ld, length=0x%ld", offset, length);
NGLOG_DEBUG(Service_FS, "called, offset={:#X}, length={}", offset, length);
// Error checking
if (length < 0) {
@@ -152,7 +152,7 @@ private:
}
void Flush(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_FS, "called");
NGLOG_DEBUG(Service_FS, "called");
backend->Flush();
IPC::ResponseBuilder rb{ctx, 2};
@@ -163,7 +163,7 @@ private:
IPC::RequestParser rp{ctx};
const u64 size = rp.Pop<u64>();
backend->SetSize(size);
LOG_DEBUG(Service_FS, "called, size=%" PRIu64, size);
NGLOG_DEBUG(Service_FS, "called, size={}", size);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
@@ -171,7 +171,7 @@ private:
void GetSize(Kernel::HLERequestContext& ctx) {
const u64 size = backend->GetSize();
LOG_DEBUG(Service_FS, "called, size=%" PRIu64, size);
NGLOG_DEBUG(Service_FS, "called, size={}", size);
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
@@ -197,7 +197,7 @@ private:
IPC::RequestParser rp{ctx};
const u64 unk = rp.Pop<u64>();
LOG_DEBUG(Service_FS, "called, unk=0x%llx", unk);
NGLOG_DEBUG(Service_FS, "called, unk={:#X}", unk);
// Calculate how many entries we can fit in the output buffer
u64 count_entries = ctx.GetWriteBufferSize() / sizeof(FileSys::Entry);
@@ -219,7 +219,7 @@ private:
}
void GetEntryCount(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_FS, "called");
NGLOG_DEBUG(Service_FS, "called");
u64 count = backend->GetEntryCount();
@@ -239,7 +239,7 @@ public:
{2, &IFileSystem::CreateDirectory, "CreateDirectory"},
{3, nullptr, "DeleteDirectory"},
{4, nullptr, "DeleteDirectoryRecursively"},
{5, nullptr, "RenameFile"},
{5, &IFileSystem::RenameFile, "RenameFile"},
{6, nullptr, "RenameDirectory"},
{7, &IFileSystem::GetEntryType, "GetEntryType"},
{8, &IFileSystem::OpenFile, "OpenFile"},
@@ -265,8 +265,7 @@ public:
u64 mode = rp.Pop<u64>();
u32 size = rp.Pop<u32>();
LOG_DEBUG(Service_FS, "called file %s mode 0x%" PRIX64 " size 0x%08X", name.c_str(), mode,
size);
NGLOG_DEBUG(Service_FS, "called file {} mode {:#X} size {:#010X}", name, mode, size);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(backend->CreateFile(name, size));
@@ -280,7 +279,7 @@ public:
std::string name(file_buffer.begin(), end);
LOG_DEBUG(Service_FS, "called file %s", name.c_str());
NGLOG_DEBUG(Service_FS, "called file {}", name);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(backend->DeleteFile(name));
@@ -294,12 +293,32 @@ public:
std::string name(file_buffer.begin(), end);
LOG_DEBUG(Service_FS, "called directory %s", name.c_str());
NGLOG_DEBUG(Service_FS, "called directory {}", name);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(backend->CreateDirectory(name));
}
void RenameFile(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
std::vector<u8> buffer;
buffer.resize(ctx.BufferDescriptorX()[0].Size());
Memory::ReadBlock(ctx.BufferDescriptorX()[0].Address(), buffer.data(), buffer.size());
auto end = std::find(buffer.begin(), buffer.end(), '\0');
std::string src_name(buffer.begin(), end);
buffer.resize(ctx.BufferDescriptorX()[1].Size());
Memory::ReadBlock(ctx.BufferDescriptorX()[1].Address(), buffer.data(), buffer.size());
end = std::find(buffer.begin(), buffer.end(), '\0');
std::string dst_name(buffer.begin(), end);
NGLOG_DEBUG(Service_FS, "called file '{}' to file '{}'", src_name, dst_name);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(backend->RenameFile(src_name, dst_name));
}
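// A hedged refactor sketch, not part of this change: DeleteFile, CreateDirectory and the new
// RenameFile above all read a NUL-terminated path out of an IPC X-buffer descriptor the same
// way, so the duplicated block could be factored into a hypothetical helper that reuses only
// the calls already shown (BufferDescriptorX, Memory::ReadBlock, std::find).
static std::string ReadPathFromXBuffer(Kernel::HLERequestContext& ctx, std::size_t index) {
    std::vector<u8> buffer(ctx.BufferDescriptorX()[index].Size());
    Memory::ReadBlock(ctx.BufferDescriptorX()[index].Address(), buffer.data(), buffer.size());
    const auto end = std::find(buffer.begin(), buffer.end(), '\0');
    return std::string(buffer.begin(), end);
}
// RenameFile would then reduce to:
//   const std::string src_name = ReadPathFromXBuffer(ctx, 0);
//   const std::string dst_name = ReadPathFromXBuffer(ctx, 1);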
void OpenFile(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
@@ -310,7 +329,7 @@ public:
auto mode = static_cast<FileSys::Mode>(rp.Pop<u32>());
LOG_DEBUG(Service_FS, "called file %s mode %u", name.c_str(), static_cast<u32>(mode));
NGLOG_DEBUG(Service_FS, "called file {} mode {}", name, static_cast<u32>(mode));
auto result = backend->OpenFile(name, mode);
if (result.Failed()) {
@@ -337,7 +356,7 @@ public:
// TODO(Subv): Implement this filter.
u32 filter_flags = rp.Pop<u32>();
LOG_DEBUG(Service_FS, "called directory %s filter %u", name.c_str(), filter_flags);
NGLOG_DEBUG(Service_FS, "called directory {} filter {}", name, filter_flags);
auto result = backend->OpenDirectory(name);
if (result.Failed()) {
@@ -361,7 +380,7 @@ public:
std::string name(file_buffer.begin(), end);
LOG_DEBUG(Service_FS, "called file %s", name.c_str());
NGLOG_DEBUG(Service_FS, "called file {}", name);
auto result = backend->GetEntryType(name);
if (result.Failed()) {
@@ -376,7 +395,7 @@ public:
}
void Commit(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_FS, "(STUBBED) called");
NGLOG_WARNING(Service_FS, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
@@ -492,14 +511,14 @@ void FSP_SRV::TryLoadRomFS() {
}
void FSP_SRV::Initialize(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_FS, "(STUBBED) called");
NGLOG_WARNING(Service_FS, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void FSP_SRV::MountSdCard(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_FS, "called");
NGLOG_DEBUG(Service_FS, "called");
FileSys::Path unused;
auto filesystem = OpenFileSystem(Type::SDMC, unused).Unwrap();
@@ -516,14 +535,14 @@ void FSP_SRV::CreateSaveData(Kernel::HLERequestContext& ctx) {
auto save_create_struct = rp.PopRaw<std::array<u8, 0x40>>();
u128 uid = rp.PopRaw<u128>();
LOG_WARNING(Service_FS, "(STUBBED) called uid = %016" PRIX64 "%016" PRIX64, uid[1], uid[0]);
NGLOG_WARNING(Service_FS, "(STUBBED) called uid = {:016X}{:016X}", uid[1], uid[0]);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void FSP_SRV::MountSaveData(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_FS, "(STUBBED) called");
NGLOG_WARNING(Service_FS, "(STUBBED) called");
FileSys::Path unused;
auto filesystem = OpenFileSystem(Type::SaveData, unused).Unwrap();
@@ -534,7 +553,7 @@ void FSP_SRV::MountSaveData(Kernel::HLERequestContext& ctx) {
}
void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_FS, "(STUBBED) called");
NGLOG_WARNING(Service_FS, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
@@ -542,12 +561,12 @@ void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
}
void FSP_SRV::OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_FS, "called");
NGLOG_DEBUG(Service_FS, "called");
TryLoadRomFS();
if (!romfs) {
// TODO (bunnei): Find the right error code to use here
LOG_CRITICAL(Service_FS, "no file system interface available!");
NGLOG_CRITICAL(Service_FS, "no file system interface available!");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultCode(-1));
return;
@@ -556,7 +575,7 @@ void FSP_SRV::OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx) {
// Attempt to open a StorageBackend interface to the RomFS
auto storage = romfs->OpenFile({}, {});
if (storage.Failed()) {
LOG_CRITICAL(Service_FS, "no storage interface available!");
NGLOG_CRITICAL(Service_FS, "no storage interface available!");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(storage.Code());
return;
@@ -568,7 +587,7 @@ void FSP_SRV::OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx) {
}
void FSP_SRV::OpenRomStorage(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_FS, "(STUBBED) called, using OpenDataStorageByCurrentProcess");
NGLOG_WARNING(Service_FS, "(STUBBED) called, using OpenDataStorageByCurrentProcess");
OpenDataStorageByCurrentProcess(ctx);
}


@@ -13,7 +13,7 @@ namespace Service::Friend {
void Module::Interface::CreateFriendService(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_Friend, "(STUBBED) called");
NGLOG_WARNING(Service_Friend, "(STUBBED) called");
}
Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)


@@ -53,7 +53,7 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(shared_mem);
LOG_DEBUG(Service_HID, "called");
NGLOG_DEBUG(Service_HID, "called");
}
void LoadInputDevices() {
@@ -184,7 +184,7 @@ private:
void ActivateVibrationDevice(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
};
@@ -286,144 +286,144 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IAppletResource>(applet_resource);
LOG_DEBUG(Service_HID, "called");
NGLOG_DEBUG(Service_HID, "called");
}
void ActivateDebugPad(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void ActivateTouchScreen(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void ActivateMouse(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void ActivateKeyboard(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void StartSixAxisSensor(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void SetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void SetSupportedNpadStyleSet(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void GetSupportedNpadStyleSet(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(0);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void SetSupportedNpadIdType(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void ActivateNpad(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void AcquireNpadStyleSetUpdateEventHandle(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(event);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void GetPlayerLedPattern(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void SetNpadJoyHoldType(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void GetNpadJoyHoldType(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(joy_hold_type);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void SetNpadJoyAssignmentModeSingleByDefault(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void SendVibrationValue(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void GetActualVibrationValue(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void SetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(0);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
void CreateActiveVibrationDeviceList(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IActiveVibrationDeviceList>();
LOG_DEBUG(Service_HID, "called");
NGLOG_DEBUG(Service_HID, "called");
}
void SendVibrationValues(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_HID, "(STUBBED) called");
NGLOG_WARNING(Service_HID, "(STUBBED) called");
}
};


@@ -141,19 +141,19 @@ private:
if (header.IsTailLog()) {
switch (header.severity) {
case MessageHeader::Severity::Trace:
LOG_TRACE(Debug_Emulated, "%s", log_stream.str().c_str());
NGLOG_TRACE(Debug_Emulated, "{}", log_stream.str());
break;
case MessageHeader::Severity::Info:
LOG_INFO(Debug_Emulated, "%s", log_stream.str().c_str());
NGLOG_INFO(Debug_Emulated, "{}", log_stream.str());
break;
case MessageHeader::Severity::Warning:
LOG_WARNING(Debug_Emulated, "%s", log_stream.str().c_str());
NGLOG_WARNING(Debug_Emulated, "{}", log_stream.str());
break;
case MessageHeader::Severity::Error:
LOG_ERROR(Debug_Emulated, "%s", log_stream.str().c_str());
NGLOG_ERROR(Debug_Emulated, "{}", log_stream.str());
break;
case MessageHeader::Severity::Critical:
LOG_CRITICAL(Debug_Emulated, "%s", log_stream.str().c_str());
NGLOG_CRITICAL(Debug_Emulated, "{}", log_stream.str());
break;
}
}
@@ -178,7 +178,7 @@ void LM::Initialize(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<Logger>();
LOG_DEBUG(Service_LM, "called");
NGLOG_DEBUG(Service_LM, "called");
}
LM::LM() : ServiceFramework("lm") {


@@ -13,7 +13,7 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
: ServiceFramework(name), module(std::move(module)) {}
void Module::Interface::Unknown(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NFP, "(STUBBED) called");
NGLOG_WARNING(Service_NFP, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}


@@ -62,24 +62,24 @@ public:
private:
void GetRequestState(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NIFM, "(STUBBED) called");
NGLOG_WARNING(Service_NIFM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(0);
}
void GetResult(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NIFM, "(STUBBED) called");
NGLOG_WARNING(Service_NIFM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void GetSystemEventReadableHandles(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NIFM, "(STUBBED) called");
NGLOG_WARNING(Service_NIFM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2, 2};
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(event1, event2);
}
void Cancel(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NIFM, "(STUBBED) called");
NGLOG_WARNING(Service_NIFM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
@@ -105,7 +105,7 @@ public:
private:
void GetClientId(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NIFM, "(STUBBED) called");
NGLOG_WARNING(Service_NIFM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(0);
@@ -116,7 +116,7 @@ private:
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IScanRequest>();
LOG_DEBUG(Service_NIFM, "called");
NGLOG_DEBUG(Service_NIFM, "called");
}
void CreateRequest(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
@@ -124,10 +124,10 @@ private:
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IRequest>();
LOG_DEBUG(Service_NIFM, "called");
NGLOG_DEBUG(Service_NIFM, "called");
}
void RemoveNetworkProfile(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NIFM, "(STUBBED) called");
NGLOG_WARNING(Service_NIFM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
@@ -137,7 +137,7 @@ private:
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<INetworkProfile>();
LOG_DEBUG(Service_NIFM, "called");
NGLOG_DEBUG(Service_NIFM, "called");
}
};
@@ -187,14 +187,14 @@ void Module::Interface::CreateGeneralServiceOld(Kernel::HLERequestContext& ctx)
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IGeneralService>();
LOG_DEBUG(Service_NIFM, "called");
NGLOG_DEBUG(Service_NIFM, "called");
}
void Module::Interface::CreateGeneralService(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IGeneralService>();
LOG_DEBUG(Service_NIFM, "called");
NGLOG_DEBUG(Service_NIFM, "called");
}
Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)

View File

@@ -52,7 +52,7 @@ PL_U::PL_U() : ServiceFramework("pl:u") {
ASSERT(file.GetSize() == SHARED_FONT_MEM_SIZE);
file.ReadBytes(shared_font->data(), shared_font->size());
} else {
LOG_WARNING(Service_NS, "Unable to load shared font: %s", filepath.c_str());
NGLOG_WARNING(Service_NS, "Unable to load shared font: {}", filepath);
}
}
@@ -60,7 +60,7 @@ void PL_U::RequestLoad(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const u32 shared_font_type{rp.Pop<u32>()};
LOG_DEBUG(Service_NS, "called, shared_font_type=%d", shared_font_type);
NGLOG_DEBUG(Service_NS, "called, shared_font_type={}", shared_font_type);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
@@ -69,7 +69,7 @@ void PL_U::GetLoadState(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const u32 font_id{rp.Pop<u32>()};
LOG_DEBUG(Service_NS, "called, font_id=%d", font_id);
NGLOG_DEBUG(Service_NS, "called, font_id={}", font_id);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(static_cast<u32>(LoadState::Done));
@@ -79,7 +79,7 @@ void PL_U::GetSize(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const u32 font_id{rp.Pop<u32>()};
LOG_DEBUG(Service_NS, "called, font_id=%d", font_id);
NGLOG_DEBUG(Service_NS, "called, font_id={}", font_id);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(SHARED_FONT_REGIONS[font_id].size);
@@ -89,7 +89,7 @@ void PL_U::GetSharedMemoryAddressOffset(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const u32 font_id{rp.Pop<u32>()};
LOG_DEBUG(Service_NS, "called, font_id=%d", font_id);
NGLOG_DEBUG(Service_NS, "called, font_id={}", font_id);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(SHARED_FONT_REGIONS[font_id].offset);
@@ -110,7 +110,7 @@ void PL_U::GetSharedMemoryNativeHandle(Kernel::HLERequestContext& ctx) {
Kernel::MemoryPermission::Read, SHARED_FONT_MEM_VADDR, Kernel::MemoryRegion::BASE,
"PL_U:shared_font_mem");
LOG_DEBUG(Service_NS, "called");
NGLOG_DEBUG(Service_NS, "called");
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(shared_font_mem);

View File

@@ -13,16 +13,16 @@
namespace Service::Nvidia::Devices {
u32 nvdisp_disp0::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
UNIMPLEMENTED();
UNIMPLEMENTED_MSG("Unimplemented ioctl");
return 0;
}
void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height,
u32 stride, NVFlinger::BufferQueue::BufferTransformFlags transform) {
VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle);
LOG_WARNING(Service,
"Drawing from address %lx offset %08X Width %u Height %u Stride %u Format %u", addr,
offset, width, height, stride, format);
NGLOG_WARNING(Service,
"Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
addr, offset, width, height, stride, format);
using PixelFormat = Tegra::FramebufferConfig::PixelFormat;
const Tegra::FramebufferConfig framebuffer{

View File

@@ -12,8 +12,8 @@
namespace Service::Nvidia::Devices {
u32 nvhost_as_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called, command=0x%08x, input_size=0x%zx, output_size=0x%zx",
command.raw, input.size(), output.size());
NGLOG_DEBUG(Service_NVDRV, "called, command={:#010X}, input_size={:#X}, output_size={:#X}",
command.raw, input.size(), output.size());
switch (static_cast<IoctlCommand>(command.raw)) {
case IoctlCommand::IocInitalizeExCommand:
@@ -27,13 +27,18 @@ u32 nvhost_as_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vecto
case IoctlCommand::IocGetVaRegionsCommand:
return GetVARegions(input, output);
}
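// The raw ioctl value encodes the payload size, which varies with the number of remap
// entries, so (presumably for that reason) only the command number is compared for Remap below.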
if (static_cast<IoctlCommand>(command.cmd.Value()) == IoctlCommand::IocRemapCommand)
return Remap(input, output);
UNIMPLEMENTED_MSG("Unimplemented ioctl command");
return 0;
}
u32 nvhost_as_gpu::InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlInitalizeEx params{};
std::memcpy(&params, input.data(), input.size());
LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x%x", params.big_page_size);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size={:#X}", params.big_page_size);
std::memcpy(output.data(), &params, output.size());
return 0;
}
@@ -41,8 +46,8 @@ u32 nvhost_as_gpu::InitalizeEx(const std::vector<u8>& input, std::vector<u8>& ou
u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlAllocSpace params{};
std::memcpy(&params, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "called, pages=%x, page_size=%x, flags=%x", params.pages,
params.page_size, params.flags);
NGLOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
params.page_size, params.flags);
auto& gpu = Core::System::GetInstance().GPU();
const u64 size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
@@ -56,15 +61,45 @@ u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>&
return 0;
}
u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) {
size_t num_entries = input.size() / sizeof(IoctlRemapEntry);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, num_entries=0x{:X}", num_entries);
std::vector<IoctlRemapEntry> entries(num_entries);
std::memcpy(entries.data(), input.data(), input.size());
auto& gpu = Core::System::GetInstance().GPU();
for (const auto& entry : entries) {
NGLOG_WARNING(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
entry.offset, entry.nvmap_handle, entry.pages);
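// Remap entries appear to address GPU memory in 64 KiB (0x10000-byte) units, hence the
// << 0x10 applied to both the target offset and the page count below.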
Tegra::GPUVAddr offset = static_cast<Tegra::GPUVAddr>(entry.offset) << 0x10;
auto object = nvmap_dev->GetObject(entry.nvmap_handle);
ASSERT(object);
ASSERT(object->status == nvmap::Object::Status::Allocated);
u64 size = static_cast<u64>(entry.pages) << 0x10;
ASSERT(size <= object->size);
Tegra::GPUVAddr returned = gpu.memory_manager->MapBufferEx(object->addr, offset, size);
ASSERT(returned == offset);
}
std::memcpy(output.data(), entries.data(), output.size());
return 0;
}
u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlMapBufferEx params{};
std::memcpy(&params, input.data(), input.size());
LOG_DEBUG(Service_NVDRV,
"called, flags=%x, nvmap_handle=%x, buffer_offset=%" PRIu64 ", mapping_size=%" PRIu64
", offset=%" PRIu64,
params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
params.offset);
NGLOG_DEBUG(Service_NVDRV,
"called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
", offset={}",
params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
params.offset);
if (!params.nvmap_handle) {
return 0;
@@ -73,6 +108,16 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
auto object = nvmap_dev->GetObject(params.nvmap_handle);
ASSERT(object);
// We can only map objects that have already been assigned a CPU address.
ASSERT(object->status == nvmap::Object::Status::Allocated);
ASSERT(params.buffer_offset == 0);
// The real nvservices doesn't make a distinction between handles and ids: an
// object can only have one handle, and it will be the same as its id. Assert that this is the
// case to prevent unexpected behavior.
ASSERT(object->id == params.nvmap_handle);
auto& gpu = Core::System::GetInstance().GPU();
if (params.flags & 1) {
@@ -88,7 +133,7 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
u32 nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlBindChannel params{};
std::memcpy(&params, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "called, fd=%x", params.fd);
NGLOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
channel = params.fd;
std::memcpy(output.data(), &params, output.size());
return 0;
@@ -97,8 +142,8 @@ u32 nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& ou
u32 nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlGetVaRegions params{};
std::memcpy(&params, input.data(), input.size());
LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr=%" PRIu64 ", buf_size=%x",
params.buf_addr, params.buf_size);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
params.buf_size);
params.buf_size = 0x30;
params.regions[0].offset = 0x04000000;

View File

@@ -26,6 +26,7 @@ private:
enum class IoctlCommand : u32_le {
IocInitalizeExCommand = 0x40284109,
IocAllocateSpaceCommand = 0xC0184102,
IocRemapCommand = 0x00000014,
IocMapBufferExCommand = 0xC0284106,
IocBindChannelCommand = 0x40044101,
IocGetVaRegionsCommand = 0xC0404108,
@@ -54,6 +55,16 @@ private:
};
static_assert(sizeof(IoctlAllocSpace) == 24, "IoctlAllocSpace is incorrect size");
struct IoctlRemapEntry {
u16_le flags;
u16_le kind;
u32_le nvmap_handle;
INSERT_PADDING_WORDS(1);
u32_le offset;
u32_le pages;
};
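// Field layout: 2 + 2 + 4 + 4 (padding word) + 4 + 4 = 20 bytes, as checked below.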
static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size");
struct IoctlMapBufferEx {
u32_le flags; // bit0: fixed_offset, bit2: cacheable
u32_le kind; // -1 is default
@@ -91,6 +102,7 @@ private:
u32 InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output);
u32 AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output);
u32 Remap(const std::vector<u8>& input, std::vector<u8>& output);
u32 MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output);
u32 BindChannel(const std::vector<u8>& input, std::vector<u8>& output);
u32 GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);

View File

@@ -9,8 +9,8 @@
namespace Service::Nvidia::Devices {
u32 nvhost_ctrl::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called, command=0x%08x, input_size=0x%zx, output_size=0x%zx",
command.raw, input.size(), output.size());
NGLOG_DEBUG(Service_NVDRV, "called, command={:#010X}, input_size={:#X}, output_size={:#X}",
command.raw, input.size(), output.size());
switch (static_cast<IoctlCommand>(command.raw)) {
case IoctlCommand::IocGetConfigCommand:
@@ -18,15 +18,15 @@ u32 nvhost_ctrl::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<
case IoctlCommand::IocCtrlEventWaitCommand:
return IocCtrlEventWait(input, output);
}
UNIMPLEMENTED();
UNIMPLEMENTED_MSG("Unimplemented ioctl");
return 0;
}
u32 nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
IocGetConfigParams params{};
std::memcpy(&params, input.data(), sizeof(params));
LOG_DEBUG(Service_NVDRV, "called, setting=%s!%s", params.domain_str.data(),
params.param_str.data());
NGLOG_DEBUG(Service_NVDRV, "called, setting={}!{}", params.domain_str.data(),
params.param_str.data());
if (!strcmp(params.domain_str.data(), "nv")) {
if (!strcmp(params.param_str.data(), "NV_MEMORY_PROFILER")) {
@@ -48,8 +48,8 @@ u32 nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>&
u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output) {
IocCtrlEventWaitParams params{};
std::memcpy(&params, input.data(), sizeof(params));
LOG_WARNING(Service_NVDRV, "(STUBBED) called, syncpt_id=%u threshold=%u timeout=%d",
params.syncpt_id, params.threshold, params.timeout);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, syncpt_id={} threshold={} timeout={}",
params.syncpt_id, params.threshold, params.timeout);
// TODO(Subv): Implement actual syncpt waiting.
params.value = 0;

View File

@@ -10,8 +10,8 @@
namespace Service::Nvidia::Devices {
u32 nvhost_ctrl_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called, command=0x%08x, input_size=0x%zx, output_size=0x%zx",
command.raw, input.size(), output.size());
NGLOG_DEBUG(Service_NVDRV, "called, command={:#010X}, input_size={:#X}, output_size={:#X}",
command.raw, input.size(), output.size());
switch (static_cast<IoctlCommand>(command.raw)) {
case IoctlCommand::IocGetCharacteristicsCommand:
@@ -25,12 +25,12 @@ u32 nvhost_ctrl_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vec
case IoctlCommand::IocZcullGetInfo:
return ZCullGetInfo(input, output);
}
UNIMPLEMENTED();
UNIMPLEMENTED_MSG("Unimplemented ioctl");
return 0;
}
u32 nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
NGLOG_DEBUG(Service_NVDRV, "called");
IoctlCharacteristics params{};
std::memcpy(&params, input.data(), input.size());
params.gc.arch = 0x120;
@@ -77,14 +77,14 @@ u32 nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input, std::vecto
u32 nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlGpuGetTpcMasksArgs params{};
std::memcpy(&params, input.data(), input.size());
LOG_WARNING(Service_NVDRV, "(STUBBED) called, mask=0x%x, mask_buf_addr=0x%" PRIx64,
params.mask_buf_size, params.mask_buf_addr);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, mask={:#X}, mask_buf_addr={:#X}",
params.mask_buf_size, params.mask_buf_addr);
std::memcpy(output.data(), &params, sizeof(params));
return 0;
}
u32 nvhost_ctrl_gpu::GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
NGLOG_DEBUG(Service_NVDRV, "called");
IoctlActiveSlotMask params{};
std::memcpy(&params, input.data(), input.size());
params.slot = 0x07;
@@ -94,7 +94,7 @@ u32 nvhost_ctrl_gpu::GetActiveSlotMask(const std::vector<u8>& input, std::vector
}
u32 nvhost_ctrl_gpu::ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
NGLOG_DEBUG(Service_NVDRV, "called");
IoctlZcullGetCtxSize params{};
std::memcpy(&params, input.data(), input.size());
params.size = 0x1;
@@ -103,7 +103,7 @@ u32 nvhost_ctrl_gpu::ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u
}
u32 nvhost_ctrl_gpu::ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
NGLOG_DEBUG(Service_NVDRV, "called");
IoctlNvgpuGpuZcullGetInfoArgs params{};
std::memcpy(&params, input.data(), input.size());
params.width_align_pixels = 0x20;

View File

@@ -12,8 +12,8 @@
namespace Service::Nvidia::Devices {
u32 nvhost_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called, command=0x%08x, input_size=0x%zx, output_size=0x%zx",
command.raw, input.size(), output.size());
NGLOG_DEBUG(Service_NVDRV, "called, command={:#010X}, input_size={:#X}, output_size={:#X}",
command.raw, input.size(), output.size());
switch (static_cast<IoctlCommand>(command.raw)) {
case IoctlCommand::IocSetNVMAPfdCommand:
@@ -40,21 +40,21 @@ u32 nvhost_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u
}
}
UNIMPLEMENTED();
UNIMPLEMENTED_MSG("Unimplemented ioctl");
return 0;
};
u32 nvhost_gpu::SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlSetNvmapFD params{};
std::memcpy(&params, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "called, fd=%x", params.nvmap_fd);
NGLOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
nvmap_fd = params.nvmap_fd;
std::memcpy(output.data(), &params, output.size());
return 0;
}
u32 nvhost_gpu::SetClientData(const std::vector<u8>& input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
NGLOG_DEBUG(Service_NVDRV, "called");
IoctlClientData params{};
std::memcpy(&params, input.data(), input.size());
user_data = params.data;
@@ -63,7 +63,7 @@ u32 nvhost_gpu::SetClientData(const std::vector<u8>& input, std::vector<u8>& out
}
u32 nvhost_gpu::GetClientData(const std::vector<u8>& input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
NGLOG_DEBUG(Service_NVDRV, "called");
IoctlClientData params{};
std::memcpy(&params, input.data(), input.size());
params.data = user_data;
@@ -73,8 +73,8 @@ u32 nvhost_gpu::GetClientData(const std::vector<u8>& input, std::vector<u8>& out
u32 nvhost_gpu::ZCullBind(const std::vector<u8>& input, std::vector<u8>& output) {
std::memcpy(&zcull_params, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "called, gpu_va=%" PRIx64 ", mode=%x", zcull_params.gpu_va,
zcull_params.mode);
NGLOG_DEBUG(Service_NVDRV, "called, gpu_va={:X}, mode={:X}", zcull_params.gpu_va,
zcull_params.mode);
std::memcpy(output.data(), &zcull_params, output.size());
return 0;
}
@@ -82,15 +82,15 @@ u32 nvhost_gpu::ZCullBind(const std::vector<u8>& input, std::vector<u8>& output)
u32 nvhost_gpu::SetErrorNotifier(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlSetErrorNotifier params{};
std::memcpy(&params, input.data(), input.size());
LOG_WARNING(Service_NVDRV, "(STUBBED) called, offset=%" PRIx64 ", size=%" PRIx64 ", mem=%x",
params.offset, params.size, params.mem);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, offset={:X}, size={:X}, mem={:X}",
params.offset, params.size, params.mem);
std::memcpy(output.data(), &params, output.size());
return 0;
}
u32 nvhost_gpu::SetChannelPriority(const std::vector<u8>& input, std::vector<u8>& output) {
std::memcpy(&channel_priority, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority=%x", channel_priority);
NGLOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority);
std::memcpy(output.data(), &channel_priority, output.size());
return 0;
}
@@ -98,10 +98,11 @@ u32 nvhost_gpu::SetChannelPriority(const std::vector<u8>& input, std::vector<u8>
u32 nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlAllocGpfifoEx2 params{};
std::memcpy(&params, input.data(), input.size());
LOG_WARNING(Service_NVDRV,
"(STUBBED) called, num_entries=%x, flags=%x, unk0=%x, unk1=%x, unk2=%x, unk3=%x",
params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
params.unk3);
NGLOG_WARNING(Service_NVDRV,
"(STUBBED) called, num_entries={:X}, flags={:X}, unk0={:X}, "
"unk1={:X}, unk2={:X}, unk3={:X}",
params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
params.unk3);
params.fence_out.id = 0;
params.fence_out.value = 0;
std::memcpy(output.data(), &params, output.size());
@@ -111,8 +112,8 @@ u32 nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& ou
u32 nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlAllocObjCtx params{};
std::memcpy(&params, input.data(), input.size());
LOG_WARNING(Service_NVDRV, "(STUBBED) called, class_num=%x, flags=%x", params.class_num,
params.flags);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, class_num={:X}, flags={:X}", params.class_num,
params.flags);
params.obj_id = 0x0;
std::memcpy(output.data(), &params, output.size());
return 0;
@@ -123,8 +124,8 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp
UNIMPLEMENTED();
IoctlSubmitGpfifo params{};
std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo));
LOG_WARNING(Service_NVDRV, "(STUBBED) called, gpfifo=%" PRIx64 ", num_entries=%x, flags=%x",
params.gpfifo, params.num_entries, params.flags);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, gpfifo={:X}, num_entries={:X}, flags={:X}",
params.gpfifo, params.num_entries, params.flags);
auto entries = std::vector<IoctlGpfifoEntry>();
entries.resize(params.num_entries);

View File

@@ -32,7 +32,7 @@ u32 nvmap::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& o
return IocParam(input, output);
}
UNIMPLEMENTED();
UNIMPLEMENTED_MSG("Unimplemented ioctl");
return 0;
}
@@ -49,7 +49,7 @@ u32 nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
u32 handle = next_handle++;
handles[handle] = std::move(object);
LOG_DEBUG(Service_NVDRV, "size=0x%08X", params.size);
NGLOG_DEBUG(Service_NVDRV, "size={:#010X}", params.size);
params.handle = handle;
@@ -70,7 +70,7 @@ u32 nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) {
object->addr = params.addr;
object->status = Object::Status::Allocated;
LOG_DEBUG(Service_NVDRV, "called, addr=0x%" PRIx64, params.addr);
NGLOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr);
std::memcpy(output.data(), &params, sizeof(params));
return 0;
@@ -80,7 +80,7 @@ u32 nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) {
IocGetIdParams params;
std::memcpy(&params, input.data(), sizeof(params));
LOG_WARNING(Service_NVDRV, "called");
NGLOG_WARNING(Service_NVDRV, "called");
auto object = GetObject(params.handle);
ASSERT(object);
@@ -95,7 +95,7 @@ u32 nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output) {
IocFromIdParams params;
std::memcpy(&params, input.data(), sizeof(params));
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called");
auto itr = std::find_if(handles.begin(), handles.end(),
[&](const auto& entry) { return entry.second->id == params.id; });
@@ -114,7 +114,7 @@ u32 nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output) {
IocParamParams params;
std::memcpy(&params, input.data(), sizeof(params));
LOG_WARNING(Service_NVDRV, "(STUBBED) called type=%u", params.type);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called type={}", params.type);
auto object = GetObject(params.handle);
ASSERT(object);

View File

@@ -12,7 +12,7 @@
namespace Service::Nvidia {
void NVDRV::Open(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_NVDRV, "called");
NGLOG_DEBUG(Service_NVDRV, "called");
const auto& buffer = ctx.ReadBuffer();
std::string device_name(buffer.begin(), buffer.end());
@@ -25,7 +25,7 @@ void NVDRV::Open(Kernel::HLERequestContext& ctx) {
}
void NVDRV::Ioctl(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_NVDRV, "called");
NGLOG_DEBUG(Service_NVDRV, "called");
IPC::RequestParser rp{ctx};
u32 fd = rp.Pop<u32>();
@@ -41,7 +41,7 @@ void NVDRV::Ioctl(Kernel::HLERequestContext& ctx) {
}
void NVDRV::Close(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_NVDRV, "called");
NGLOG_DEBUG(Service_NVDRV, "called");
IPC::RequestParser rp{ctx};
u32 fd = rp.Pop<u32>();
@@ -53,7 +53,7 @@ void NVDRV::Close(Kernel::HLERequestContext& ctx) {
}
void NVDRV::Initialize(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(0);
@@ -63,7 +63,7 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
u32 fd = rp.Pop<u32>();
u32 event_id = rp.Pop<u32>();
LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd=%x, event_id=%x", fd, event_id);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}, event_id={:X}", fd, event_id);
IPC::ResponseBuilder rb{ctx, 3, 1};
rb.Push(RESULT_SUCCESS);
@@ -75,14 +75,14 @@ void NVDRV::SetClientPID(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
pid = rp.Pop<u64>();
LOG_WARNING(Service_NVDRV, "(STUBBED) called, pid=0x%" PRIx64, pid);
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, pid={:#X}", pid);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(0);
}
void NVDRV::FinishInitialize(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
NGLOG_WARNING(Service_NVDRV, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}

View File

@@ -9,7 +9,8 @@
#include "core/core_timing.h"
#include "core/hle/service/nvflinger/buffer_queue.h"
namespace Service::NVFlinger {
namespace Service {
namespace NVFlinger {
BufferQueue::BufferQueue(u32 id, u64 layer_id) : id(id), layer_id(layer_id) {
native_handle = Kernel::Event::Create(Kernel::ResetType::OneShot, "BufferQueue NativeHandle");
@@ -22,7 +23,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, IGBPBuffer& igbp_buffer) {
buffer.igbp_buffer = igbp_buffer;
buffer.status = Buffer::Status::Free;
LOG_WARNING(Service, "Adding graphics buffer %u", slot);
NGLOG_WARNING(Service, "Adding graphics buffer {}", slot);
queue.emplace_back(buffer);
@@ -93,7 +94,7 @@ void BufferQueue::ReleaseBuffer(u32 slot) {
}
u32 BufferQueue::Query(QueryType type) {
LOG_WARNING(Service, "(STUBBED) called type=%u", static_cast<u32>(type));
NGLOG_WARNING(Service, "(STUBBED) called type={}", static_cast<u32>(type));
switch (type) {
case QueryType::NativeWindowFormat:
// TODO(Subv): Use an enum for this
@@ -110,4 +111,5 @@ void BufferQueue::SetBufferWaitEvent(Kernel::SharedPtr<Kernel::Event>&& wait_eve
buffer_wait_event = std::move(wait_event);
}
} // namespace Service::NVFlinger
} // namespace NVFlinger
} // namespace Service

View File

@@ -13,7 +13,8 @@ namespace CoreTiming {
struct EventType;
}
namespace Service::NVFlinger {
namespace Service {
namespace NVFlinger {
struct IGBPBuffer {
u32_le magic;
@@ -97,4 +98,5 @@ private:
Kernel::SharedPtr<Kernel::Event> buffer_wait_event;
};
} // namespace Service::NVFlinger
} // namespace NVFlinger
} // namespace Service

View File

@@ -48,7 +48,7 @@ NVFlinger::~NVFlinger() {
}
u64 NVFlinger::OpenDisplay(const std::string& name) {
LOG_WARNING(Service, "Opening display %s", name.c_str());
NGLOG_WARNING(Service, "Opening display {}", name);
// TODO(Subv): Currently we only support the Default display.
ASSERT(name == "Default");

View File

@@ -4,7 +4,8 @@
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/service/pctl/pctl_a.h"
#include "core/hle/service/pctl/module.h"
#include "core/hle/service/pctl/pctl.h"
namespace Service::PCTL {
@@ -12,7 +13,7 @@ class IParentalControlService final : public ServiceFramework<IParentalControlSe
public:
IParentalControlService() : ServiceFramework("IParentalControlService") {
static const FunctionInfo functions[] = {
{1, nullptr, "Initialize"},
{1, &IParentalControlService::Initialize, "Initialize"},
{1001, nullptr, "CheckFreeCommunicationPermission"},
{1002, nullptr, "ConfirmLaunchApplicationPermission"},
{1003, nullptr, "ConfirmResumeApplicationPermission"},
@@ -108,20 +109,38 @@ public:
};
RegisterHandlers(functions);
}
private:
void Initialize(Kernel::HLERequestContext& ctx) {
NGLOG_WARNING(Service_PCTL, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2, 0, 0};
rb.Push(RESULT_SUCCESS);
}
};
void PCTL_A::CreateService(Kernel::HLERequestContext& ctx) {
void Module::Interface::CreateService(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IParentalControlService>();
LOG_DEBUG(Service_PCTL, "called");
NGLOG_DEBUG(Service_PCTL, "called");
}
PCTL_A::PCTL_A() : ServiceFramework("pctl:a") {
static const FunctionInfo functions[] = {
{0, &PCTL_A::CreateService, "CreateService"},
{1, nullptr, "CreateServiceWithoutInitialize"},
};
RegisterHandlers(functions);
void Module::Interface::CreateServiceWithoutInitialize(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IParentalControlService>();
NGLOG_DEBUG(Service_PCTL, "called");
}
Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
: ServiceFramework(name), module(std::move(module)) {}
void InstallInterfaces(SM::ServiceManager& service_manager) {
auto module = std::make_shared<Module>();
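// A single Module instance is shared by all four PCTL service names registered below.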
std::make_shared<PCTL>(module, "pctl")->InstallAsService(service_manager);
std::make_shared<PCTL>(module, "pctl:a")->InstallAsService(service_manager);
std::make_shared<PCTL>(module, "pctl:r")->InstallAsService(service_manager);
std::make_shared<PCTL>(module, "pctl:s")->InstallAsService(service_manager);
}
} // namespace Service::PCTL

View File

@@ -0,0 +1,28 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/hle/service/service.h"
namespace Service::PCTL {
class Module final {
public:
class Interface : public ServiceFramework<Interface> {
public:
Interface(std::shared_ptr<Module> module, const char* name);
void CreateService(Kernel::HLERequestContext& ctx);
void CreateServiceWithoutInitialize(Kernel::HLERequestContext& ctx);
protected:
std::shared_ptr<Module> module;
};
};
/// Registers all PCTL services with the specified service manager.
void InstallInterfaces(SM::ServiceManager& service_manager);
} // namespace Service::PCTL

View File

@@ -3,12 +3,15 @@
// Refer to the license.txt file included.
#include "core/hle/service/pctl/pctl.h"
#include "core/hle/service/pctl/pctl_a.h"
namespace Service::PCTL {
void InstallInterfaces(SM::ServiceManager& service_manager) {
std::make_shared<PCTL_A>()->InstallAsService(service_manager);
PCTL::PCTL(std::shared_ptr<Module> module, const char* name)
: Module::Interface(std::move(module), name) {
static const FunctionInfo functions[] = {
{0, &PCTL::CreateService, "CreateService"},
{1, &PCTL::CreateServiceWithoutInitialize, "CreateServiceWithoutInitialize"},
};
RegisterHandlers(functions);
}
} // namespace Service::PCTL

View File

@@ -4,11 +4,13 @@
#pragma once
#include "core/hle/service/service.h"
#include "core/hle/service/pctl/module.h"
namespace Service::PCTL {
/// Registers all PCTL services with the specified service manager.
void InstallInterfaces(SM::ServiceManager& service_manager);
class PCTL final : public Module::Interface {
public:
explicit PCTL(std::shared_ptr<Module> module, const char* name);
};
} // namespace Service::PCTL

View File

@@ -1,20 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/hle/service/service.h"
namespace Service::PCTL {
class PCTL_A final : public ServiceFramework<PCTL_A> {
public:
PCTL_A();
~PCTL_A() = default;
private:
void CreateService(Kernel::HLERequestContext& ctx);
};
} // namespace Service::PCTL

View File

@@ -29,7 +29,7 @@
#include "core/hle/service/nifm/nifm.h"
#include "core/hle/service/ns/ns.h"
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/hle/service/pctl/pctl.h"
#include "core/hle/service/pctl/module.h"
#include "core/hle/service/service.h"
#include "core/hle/service/set/settings.h"
#include "core/hle/service/sm/controller.h"
@@ -120,7 +120,7 @@ void ServiceFrameworkBase::ReportUnimplementedFunction(Kernel::HLERequestContext
}
buf.push_back('}');
LOG_ERROR(Service, "unknown / unimplemented %s", fmt::to_string(buf).c_str());
NGLOG_ERROR(Service, "unknown / unimplemented {}", fmt::to_string(buf));
UNIMPLEMENTED();
}
@@ -131,8 +131,8 @@ void ServiceFrameworkBase::InvokeRequest(Kernel::HLERequestContext& ctx) {
return ReportUnimplementedFunction(ctx, info);
}
LOG_TRACE(
Service, "%s",
NGLOG_TRACE(
Service, "{}",
MakeFunctionString(info->name, GetServiceName().c_str(), ctx.CommandBuffer()).c_str());
handler_invoker(this, info->handler_callback, ctx);
}
@@ -199,12 +199,12 @@ void Init(std::shared_ptr<SM::ServiceManager>& sm) {
VI::InstallInterfaces(*sm, nv_flinger);
Set::InstallInterfaces(*sm);
LOG_DEBUG(Service, "initialized OK");
NGLOG_DEBUG(Service, "initialized OK");
}
/// Shutdown ServiceManager
void Shutdown() {
g_kernel_named_ports.clear();
LOG_DEBUG(Service, "shutdown OK");
NGLOG_DEBUG(Service, "shutdown OK");
}
} // namespace Service

View File

@@ -22,7 +22,7 @@ void SET::GetAvailableLanguageCodes(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_SET, "(STUBBED) called");
NGLOG_WARNING(Service_SET, "(STUBBED) called");
}
SET::SET() : ServiceFramework("set") {

View File

@@ -16,7 +16,7 @@ void SET_SYS::GetColorSetId(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(0);
LOG_WARNING(Service_SET, "(STUBBED) called");
NGLOG_WARNING(Service_SET, "(STUBBED) called");
}
SET_SYS::SET_SYS() : ServiceFramework("set:sys") {

View File

@@ -17,7 +17,7 @@ void Controller::ConvertSessionToDomain(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(1); // Converted sessions start with 1 request handler
LOG_DEBUG(Service, "called, server_session=%d", ctx.Session()->GetObjectId());
NGLOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetObjectId());
}
void Controller::DuplicateSession(Kernel::HLERequestContext& ctx) {
@@ -29,11 +29,11 @@ void Controller::DuplicateSession(Kernel::HLERequestContext& ctx) {
Kernel::SharedPtr<Kernel::ClientSession> session{ctx.Session()->parent->client};
rb.PushMoveObjects(session);
LOG_DEBUG(Service, "called, session=%u", session->GetObjectId());
NGLOG_DEBUG(Service, "called, session={}", session->GetObjectId());
}
void Controller::DuplicateSessionEx(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service, "(STUBBED) called, using DuplicateSession");
NGLOG_WARNING(Service, "(STUBBED) called, using DuplicateSession");
DuplicateSession(ctx);
}
@@ -43,7 +43,7 @@ void Controller::QueryPointerBufferSize(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(0x500);
LOG_WARNING(Service, "(STUBBED) called");
NGLOG_WARNING(Service, "(STUBBED) called");
}
Controller::Controller() : ServiceFramework("IpcController") {

View File

@@ -86,7 +86,7 @@ SM::~SM() = default;
void SM::Initialize(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_DEBUG(Service_SM, "called");
NGLOG_DEBUG(Service_SM, "called");
}
void SM::GetService(Kernel::HLERequestContext& ctx) {
@@ -102,8 +102,8 @@ void SM::GetService(Kernel::HLERequestContext& ctx) {
if (client_port.Failed()) {
IPC::ResponseBuilder rb = rp.MakeBuilder(2, 0, 0);
rb.Push(client_port.Code());
LOG_ERROR(Service_SM, "called service=%s -> error 0x%08X", name.c_str(),
client_port.Code().raw);
NGLOG_ERROR(Service_SM, "called service={} -> error {:#010X}", name,
client_port.Code().raw);
if (name.length() == 0)
return; // LibNX Fix
UNIMPLEMENTED();
@@ -113,8 +113,7 @@ void SM::GetService(Kernel::HLERequestContext& ctx) {
auto session = client_port.Unwrap()->Connect();
ASSERT(session.Succeeded());
if (session.Succeeded()) {
LOG_DEBUG(Service_SM, "called service=%s -> session=%u", name.c_str(),
(*session)->GetObjectId());
NGLOG_DEBUG(Service_SM, "called service={} -> session={}", name, (*session)->GetObjectId());
IPC::ResponseBuilder rb =
rp.MakeBuilder(2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles);
rb.Push(session.Code());

View File

@@ -8,7 +8,7 @@
namespace Service::Sockets {
void BSD::RegisterClient(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service, "(STUBBED) called");
NGLOG_WARNING(Service, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
@@ -17,7 +17,7 @@ void BSD::RegisterClient(Kernel::HLERequestContext& ctx) {
}
void BSD::StartMonitoring(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service, "(STUBBED) called");
NGLOG_WARNING(Service, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
@@ -32,7 +32,8 @@ void BSD::Socket(Kernel::HLERequestContext& ctx) {
u32 type = rp.Pop<u32>();
u32 protocol = rp.Pop<u32>();
LOG_WARNING(Service, "(STUBBED) called domain=%u type=%u protocol=%u", domain, type, protocol);
NGLOG_WARNING(Service, "(STUBBED) called domain={} type={} protocol={}", domain, type,
protocol);
u32 fd = next_fd++;
@@ -44,7 +45,7 @@ void BSD::Socket(Kernel::HLERequestContext& ctx) {
}
void BSD::Connect(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service, "(STUBBED) called");
NGLOG_WARNING(Service, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 4};
@@ -54,7 +55,7 @@ void BSD::Connect(Kernel::HLERequestContext& ctx) {
}
void BSD::SendTo(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service, "(STUBBED) called");
NGLOG_WARNING(Service, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 4};
@@ -64,7 +65,7 @@ void BSD::SendTo(Kernel::HLERequestContext& ctx) {
}
void BSD::Close(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service, "(STUBBED) called");
NGLOG_WARNING(Service, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 4};

View File

@@ -10,7 +10,7 @@ namespace Service::Sockets {
void SFDNSRES::GetAddrInfo(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
LOG_WARNING(Service, "(STUBBED) called");
NGLOG_WARNING(Service, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};

View File

@@ -28,7 +28,7 @@ void Module::Interface::GetRandomBytes(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
LOG_DEBUG(Service_SPL, "called");
NGLOG_DEBUG(Service_SPL, "called");
}
void InstallInterfaces(SM::ServiceManager& service_manager) {

View File

@@ -65,7 +65,7 @@ public:
private:
void SetOption(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_SSL, "(STUBBED) called");
NGLOG_WARNING(Service_SSL, "(STUBBED) called");
IPC::RequestParser rp{ctx};
IPC::ResponseBuilder rb = rp.MakeBuilder(2, 0, 0);
@@ -73,7 +73,7 @@ private:
}
void CreateConnection(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_SSL, "(STUBBED) called");
NGLOG_WARNING(Service_SSL, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
@@ -82,7 +82,7 @@ private:
};
void SSL::CreateContext(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_SSL, "(STUBBED) called");
NGLOG_WARNING(Service_SSL, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);

View File

@@ -32,14 +32,14 @@ private:
const s64 time_since_epoch{std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now().time_since_epoch())
.count()};
LOG_DEBUG(Service_Time, "called");
NGLOG_DEBUG(Service_Time, "called");
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(time_since_epoch);
}
void GetSystemClockContext(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Time, "(STUBBED) called");
NGLOG_WARNING(Service_Time, "(STUBBED) called");
SystemClockContext system_clock_context{};
IPC::ResponseBuilder rb{ctx, (sizeof(SystemClockContext) / 4) + 2};
rb.Push(RESULT_SUCCESS);
@@ -58,7 +58,7 @@ public:
private:
void GetCurrentTimePoint(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_Time, "called");
NGLOG_DEBUG(Service_Time, "called");
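// cyclesToMs() yields milliseconds, so the division by 1000 below gives the time point in
// seconds.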
SteadyClockTimePoint steady_clock_time_point{cyclesToMs(CoreTiming::GetTicks()) / 1000};
IPC::ResponseBuilder rb{ctx, (sizeof(SteadyClockTimePoint) / 4) + 2};
rb.Push(RESULT_SUCCESS);
@@ -86,7 +86,7 @@ public:
private:
void GetDeviceLocationName(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Time, "(STUBBED) called");
NGLOG_WARNING(Service_Time, "(STUBBED) called");
LocationName location_name{};
IPC::ResponseBuilder rb{ctx, (sizeof(LocationName) / 4) + 2};
rb.Push(RESULT_SUCCESS);
@@ -94,14 +94,14 @@ private:
}
void GetTotalLocationNameCount(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Time, "(STUBBED) called");
NGLOG_WARNING(Service_Time, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(0);
}
void LoadTimeZoneRule(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Time, "(STUBBED) called");
NGLOG_WARNING(Service_Time, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
@@ -110,7 +110,7 @@ private:
IPC::RequestParser rp{ctx};
u64 posix_time = rp.Pop<u64>();
LOG_WARNING(Service_Time, "(STUBBED) called, posix_time=0x%016lX", posix_time);
NGLOG_WARNING(Service_Time, "(STUBBED) called, posix_time={:#018X}", posix_time);
CalendarTime calendar_time{2018, 1, 1, 0, 0, 0};
CalendarAdditionalInfo additional_info{};
@@ -125,35 +125,35 @@ void Module::Interface::GetStandardUserSystemClock(Kernel::HLERequestContext& ct
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ISystemClock>();
LOG_DEBUG(Service_Time, "called");
NGLOG_DEBUG(Service_Time, "called");
}
void Module::Interface::GetStandardNetworkSystemClock(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ISystemClock>();
LOG_DEBUG(Service_Time, "called");
NGLOG_DEBUG(Service_Time, "called");
}
void Module::Interface::GetStandardSteadyClock(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ISteadyClock>();
LOG_DEBUG(Service_Time, "called");
NGLOG_DEBUG(Service_Time, "called");
}
void Module::Interface::GetTimeZoneService(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ITimeZoneService>();
LOG_DEBUG(Service_Time, "called");
NGLOG_DEBUG(Service_Time, "called");
}
void Module::Interface::GetStandardLocalSystemClock(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ISystemClock>();
LOG_DEBUG(Service_Time, "called");
NGLOG_DEBUG(Service_Time, "called");
}
Module::Interface::Interface(std::shared_ptr<Module> time, const char* name)

View File

@@ -470,7 +470,7 @@ private:
u32 flags = rp.Pop<u32>();
auto buffer_queue = nv_flinger->GetBufferQueue(id);
LOG_DEBUG(Service_VI, "called, transaction=%x", static_cast<u32>(transaction));
NGLOG_DEBUG(Service_VI, "called, transaction={:X}", static_cast<u32>(transaction));
if (transaction == TransactionId::Connect) {
IGBPConnectRequestParcel request{ctx.ReadBuffer()};
@@ -532,7 +532,7 @@ private:
IGBPQueryResponseParcel response{value};
ctx.WriteBuffer(response.Serialize());
} else if (transaction == TransactionId::CancelBuffer) {
LOG_WARNING(Service_VI, "(STUBBED) called, transaction=CancelBuffer");
NGLOG_WARNING(Service_VI, "(STUBBED) called, transaction=CancelBuffer");
} else {
ASSERT_MSG(false, "Unimplemented");
}
@@ -547,7 +547,8 @@ private:
s32 addval = rp.PopRaw<s32>();
u32 type = rp.Pop<u32>();
LOG_WARNING(Service_VI, "(STUBBED) called id=%u, addval=%08X, type=%08X", id, addval, type);
NGLOG_WARNING(Service_VI, "(STUBBED) called id={}, addval={:08X}, type={:08X}", id, addval,
type);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
@@ -561,7 +562,7 @@ private:
// TODO(Subv): Find out what this actually is.
LOG_WARNING(Service_VI, "(STUBBED) called id=%u, unknown=%08X", id, unknown);
NGLOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown);
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(buffer_queue->GetNativeHandle());
@@ -624,7 +625,7 @@ public:
private:
void SetLayerZ(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::RequestParser rp{ctx};
u64 layer_id = rp.Pop<u64>();
u64 z_value = rp.Pop<u64>();
@@ -639,8 +640,8 @@ private:
bool visibility = rp.Pop<bool>();
IPC::ResponseBuilder rb = rp.MakeBuilder(2, 0, 0);
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_VI, "(STUBBED) called, layer_id=0x%x, visibility=%u", layer_id,
visibility);
NGLOG_WARNING(Service_VI, "(STUBBED) called, layer_id={:#010X}, visibility={}", layer_id,
visibility);
}
};
@@ -722,7 +723,7 @@ public:
private:
void CloseDisplay(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::RequestParser rp{ctx};
u64 display = rp.Pop<u64>();
@@ -731,7 +732,7 @@ private:
}
void CreateManagedLayer(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::RequestParser rp{ctx};
u32 unknown = rp.Pop<u32>();
rp.Skip(1, false);
@@ -746,7 +747,7 @@ private:
}
void AddToLayerStack(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::RequestParser rp{ctx};
u32 stack = rp.Pop<u32>();
u64 layer_id = rp.Pop<u64>();
@@ -761,8 +762,8 @@ private:
bool visibility = rp.Pop<bool>();
IPC::ResponseBuilder rb = rp.MakeBuilder(2, 0, 0);
rb.Push(RESULT_SUCCESS);
LOG_WARNING(Service_VI, "(STUBBED) called, layer_id=0x%x, visibility=%u", layer_id,
visibility);
NGLOG_WARNING(Service_VI, "(STUBBED) called, layer_id={:#X}, visibility={}", layer_id,
visibility);
}
std::shared_ptr<NVFlinger::NVFlinger> nv_flinger;
@@ -775,7 +776,7 @@ public:
private:
void GetRelayService(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
@@ -783,7 +784,7 @@ private:
}
void GetSystemDisplayService(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
@@ -791,7 +792,7 @@ private:
}
void GetManagerDisplayService(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
@@ -799,7 +800,7 @@ private:
}
void GetIndirectDisplayTransactionService(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
@@ -807,7 +808,7 @@ private:
}
void OpenDisplay(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::RequestParser rp{ctx};
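// The display name arrives as a fixed 0x40-byte buffer; only the bytes before the first
// NUL terminator are treated as the name.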
auto name_buf = rp.PopRaw<std::array<u8, 0x40>>();
auto end = std::find(name_buf.begin(), name_buf.end(), '\0');
@@ -822,7 +823,7 @@ private:
}
void CloseDisplay(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::RequestParser rp{ctx};
u64 display_id = rp.Pop<u64>();
@@ -831,7 +832,7 @@ private:
}
void GetDisplayResolution(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::RequestParser rp{ctx};
u64 display_id = rp.Pop<u64>();
@@ -848,7 +849,7 @@ private:
}
void SetLayerScalingMode(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::RequestParser rp{ctx};
u32 scaling_mode = rp.Pop<u32>();
u64 unknown = rp.Pop<u64>();
@@ -864,11 +865,11 @@ private:
IPC::ResponseBuilder rb = rp.MakeBuilder(4, 0, 0);
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(1);
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
}
void OpenLayer(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_VI, "called");
NGLOG_DEBUG(Service_VI, "called");
IPC::RequestParser rp{ctx};
auto name_buf = rp.PopRaw<std::array<u8, 0x40>>();
auto end = std::find(name_buf.begin(), name_buf.end(), '\0');
@@ -888,7 +889,7 @@ private:
}
void CreateStrayLayer(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_VI, "called");
NGLOG_DEBUG(Service_VI, "called");
IPC::RequestParser rp{ctx};
u32 flags = rp.Pop<u32>();
@@ -908,7 +909,7 @@ private:
}
void DestroyStrayLayer(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::RequestParser rp{ctx};
u64 layer_id = rp.Pop<u64>();
@@ -918,7 +919,7 @@ private:
}
void GetDisplayVsyncEvent(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::RequestParser rp{ctx};
u64 display_id = rp.Pop<u64>();
@@ -967,7 +968,7 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name,
: ServiceFramework(name), module(std::move(module)), nv_flinger(std::move(nv_flinger)) {}
void Module::Interface::GetDisplayService(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_VI, "(STUBBED) called");
NGLOG_WARNING(Service_VI, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);

View File

@@ -132,7 +132,7 @@ ResultStatus AppLoader_DeconstructedRomDirectory::Load(
const VAddr load_addr = next_load_addr;
next_load_addr = AppLoader_NSO::LoadModule(path, load_addr);
if (next_load_addr) {
LOG_DEBUG(Loader, "loaded module %s @ 0x%" PRIx64, module, load_addr);
NGLOG_DEBUG(Loader, "loaded module {} @ {:#X}", module, load_addr);
} else {
next_load_addr = load_addr;
}
@@ -163,7 +163,7 @@ ResultStatus AppLoader_DeconstructedRomDirectory::ReadRomFS(
std::shared_ptr<FileUtil::IOFile>& romfs_file, u64& offset, u64& size) {
if (filepath_romfs.empty()) {
LOG_DEBUG(Loader, "No RomFS available");
NGLOG_DEBUG(Loader, "No RomFS available");
return ResultStatus::ErrorNotUsed;
}
@@ -176,8 +176,8 @@ ResultStatus AppLoader_DeconstructedRomDirectory::ReadRomFS(
offset = 0;
size = romfs_file->GetSize();
LOG_DEBUG(Loader, "RomFS offset: 0x%016" PRIX64, offset);
LOG_DEBUG(Loader, "RomFS size: 0x%016" PRIX64, size);
NGLOG_DEBUG(Loader, "RomFS offset: {:#018X}", offset);
NGLOG_DEBUG(Loader, "RomFS size: {:#018X}", size);
// Reset read pointer
file.Seek(0, SEEK_SET);

View File

@@ -273,18 +273,18 @@ const char* ElfReader::GetSectionName(int section) const {
}
SharedPtr<CodeSet> ElfReader::LoadInto(u32 vaddr) {
LOG_DEBUG(Loader, "String section: %i", header->e_shstrndx);
NGLOG_DEBUG(Loader, "String section: {}", header->e_shstrndx);
// Should we relocate?
relocate = (header->e_type != ET_EXEC);
if (relocate) {
LOG_DEBUG(Loader, "Relocatable module");
NGLOG_DEBUG(Loader, "Relocatable module");
entryPoint += vaddr;
} else {
LOG_DEBUG(Loader, "Prerelocated executable");
NGLOG_DEBUG(Loader, "Prerelocated executable");
}
LOG_DEBUG(Loader, "%i segments:", header->e_phnum);
NGLOG_DEBUG(Loader, "{} segments:", header->e_phnum);
// First pass: get the bits into RAM
u32 base_addr = relocate ? vaddr : 0;
@@ -304,8 +304,8 @@ SharedPtr<CodeSet> ElfReader::LoadInto(u32 vaddr) {
for (unsigned int i = 0; i < header->e_phnum; ++i) {
Elf32_Phdr* p = &segments[i];
LOG_DEBUG(Loader, "Type: %i Vaddr: %08X Filesz: %8X Memsz: %8X ", p->p_type, p->p_vaddr,
p->p_filesz, p->p_memsz);
NGLOG_DEBUG(Loader, "Type: {} Vaddr: {:08X} Filesz: {:08X} Memsz: {:08X} ", p->p_type,
p->p_vaddr, p->p_filesz, p->p_memsz);
if (p->p_type == PT_LOAD) {
CodeSet::Segment* codeset_segment;
@@ -317,16 +317,16 @@ SharedPtr<CodeSet> ElfReader::LoadInto(u32 vaddr) {
} else if (permission_flags == (PF_R | PF_W)) {
codeset_segment = &codeset->data;
} else {
LOG_ERROR(Loader, "Unexpected ELF PT_LOAD segment id %u with flags %X", i,
p->p_flags);
NGLOG_ERROR(Loader, "Unexpected ELF PT_LOAD segment id {} with flags {:X}", i,
p->p_flags);
continue;
}
if (codeset_segment->size != 0) {
LOG_ERROR(Loader,
"ELF has more than one segment of the same type. Skipping extra "
"segment (id %i)",
i);
NGLOG_ERROR(Loader,
"ELF has more than one segment of the same type. Skipping extra "
"segment (id {})",
i);
continue;
}
@@ -345,7 +345,7 @@ SharedPtr<CodeSet> ElfReader::LoadInto(u32 vaddr) {
codeset->entrypoint = base_addr + header->e_entry;
codeset->memory = std::make_shared<std::vector<u8>>(std::move(program_image));
LOG_DEBUG(Loader, "Done loading.");
NGLOG_DEBUG(Loader, "Done loading.");
return codeset;
}

View File

@@ -84,7 +84,7 @@ void Linker::WriteRelocations(std::vector<u8>& program_image, const std::vector<
}
break;
default:
LOG_CRITICAL(Loader, "Unknown relocation type: %d", static_cast<int>(rela.type));
NGLOG_CRITICAL(Loader, "Unknown relocation type: {}", static_cast<int>(rela.type));
break;
}
}
@@ -141,7 +141,7 @@ void Linker::ResolveImports() {
if (search != exports.end()) {
Memory::Write64(import.second.ea, search->second + import.second.addend);
} else {
LOG_ERROR(Loader, "Unresolved import: %s", import.first.c_str());
NGLOG_ERROR(Loader, "Unresolved import: {}", import.first);
}
}
}

View File

@@ -41,7 +41,7 @@ FileType IdentifyFile(FileUtil::IOFile& file, const std::string& filepath) {
FileType IdentifyFile(const std::string& file_name) {
FileUtil::IOFile file(file_name, "rb");
if (!file.IsOpen()) {
LOG_ERROR(Loader, "Failed to load file %s", file_name.c_str());
NGLOG_ERROR(Loader, "Failed to load file {}", file_name);
return FileType::Unknown;
}
@@ -116,7 +116,7 @@ static std::unique_ptr<AppLoader> GetFileLoader(FileUtil::IOFile&& file, FileTyp
std::unique_ptr<AppLoader> GetLoader(const std::string& filename) {
FileUtil::IOFile file(filename, "rb");
if (!file.IsOpen()) {
LOG_ERROR(Loader, "Failed to load file %s", filename.c_str());
NGLOG_ERROR(Loader, "Failed to load file {}", filename);
return nullptr;
}
@@ -127,12 +127,12 @@ std::unique_ptr<AppLoader> GetLoader(const std::string& filename) {
FileType filename_type = GuessFromExtension(filename_extension);
if (type != filename_type) {
LOG_WARNING(Loader, "File %s has a different type than its extension.", filename.c_str());
NGLOG_WARNING(Loader, "File {} has a different type than its extension.", filename);
if (FileType::Unknown == type)
type = filename_type;
}
LOG_DEBUG(Loader, "Loading file %s as %s...", filename.c_str(), GetFileTypeString(type));
NGLOG_DEBUG(Loader, "Loading file {} as {}...", filename, GetFileTypeString(type));
return GetFileLoader(std::move(file), type, filename_filename, filename);
}

View File

@@ -137,7 +137,7 @@ ResultStatus AppLoader_NRO::Load(Kernel::SharedPtr<Kernel::Process>& process) {
process->address_mappings = default_address_mappings;
process->resource_limit =
Kernel::ResourceLimit::GetForCategory(Kernel::ResourceLimitCategory::APPLICATION);
process->Run(base_addr, 48, Memory::DEFAULT_STACK_SIZE);
process->Run(base_addr, THREADPRIO_DEFAULT, Memory::DEFAULT_STACK_SIZE);
is_loaded = true;
return ResultStatus::Success;

View File

@@ -73,7 +73,7 @@ static std::vector<u8> ReadSegment(FileUtil::IOFile& file, const NsoSegmentHeade
file.Seek(header.offset, SEEK_SET);
if (compressed_size != file.ReadBytes(compressed_data.data(), compressed_size)) {
LOG_CRITICAL(Loader, "Failed to read %d NSO LZ4 compressed bytes", compressed_size);
NGLOG_CRITICAL(Loader, "Failed to read {} NSO LZ4 compressed bytes", compressed_size);
return {};
}
@@ -158,14 +158,13 @@ ResultStatus AppLoader_NSO::Load(Kernel::SharedPtr<Kernel::Process>& process) {
// Load module
LoadModule(filepath, Memory::PROCESS_IMAGE_VADDR);
LOG_DEBUG(Loader, "loaded module %s @ 0x%" PRIx64, filepath.c_str(),
Memory::PROCESS_IMAGE_VADDR);
NGLOG_DEBUG(Loader, "loaded module {} @ {:#X}", filepath, Memory::PROCESS_IMAGE_VADDR);
process->svc_access_mask.set();
process->address_mappings = default_address_mappings;
process->resource_limit =
Kernel::ResourceLimit::GetForCategory(Kernel::ResourceLimitCategory::APPLICATION);
process->Run(Memory::PROCESS_IMAGE_VADDR, 48, Memory::DEFAULT_STACK_SIZE);
process->Run(Memory::PROCESS_IMAGE_VADDR, THREADPRIO_DEFAULT, Memory::DEFAULT_STACK_SIZE);
is_loaded = true;
return ResultStatus::Success;

View File

@@ -39,8 +39,8 @@ PageTable* GetCurrentPageTable() {
}
static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, PageType type) {
LOG_DEBUG(HW_Memory, "Mapping %p onto %016" PRIX64 "-%016" PRIX64, memory, base * PAGE_SIZE,
(base + size) * PAGE_SIZE);
NGLOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
(base + size) * PAGE_SIZE);
RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE,
FlushMode::FlushAndInvalidate);
@@ -169,10 +169,10 @@ T Read(const VAddr vaddr) {
PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
switch (type) {
case PageType::Unmapped:
LOG_ERROR(HW_Memory, "unmapped Read%lu @ 0x%08X", sizeof(T) * 8, vaddr);
NGLOG_ERROR(HW_Memory, "Unmapped Read{} @ {:#010X}", sizeof(T) * 8, vaddr);
return 0;
case PageType::Memory:
ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr);
ASSERT_MSG(false, "Mapped memory page without a pointer @ %016" PRIX64, vaddr);
break;
case PageType::RasterizerCachedMemory: {
RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Flush);
@@ -201,11 +201,11 @@ void Write(const VAddr vaddr, const T data) {
PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
switch (type) {
case PageType::Unmapped:
LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data,
vaddr);
NGLOG_ERROR(HW_Memory, "Unmapped Write{} {:#010X} @ {:#018X}", sizeof(data) * 8, (u32)data,
vaddr);
return;
case PageType::Memory:
ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr);
ASSERT_MSG(false, "Mapped memory page without a pointer @ %016" PRIX64, vaddr);
break;
case PageType::RasterizerCachedMemory: {
RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Invalidate);
@@ -251,7 +251,7 @@ u8* GetPointer(const VAddr vaddr) {
return GetPointerFromVMA(vaddr);
}
LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr);
NGLOG_ERROR(HW_Memory, "Unknown GetPointer @ {:#018X}", vaddr);
return nullptr;
}
@@ -288,13 +288,12 @@ u8* GetPhysicalPointer(PAddr address) {
});
if (area == std::end(memory_areas)) {
LOG_ERROR(HW_Memory, "unknown GetPhysicalPointer @ 0x%016" PRIX64, address);
NGLOG_ERROR(HW_Memory, "Unknown GetPhysicalPointer @ {:#018X}", address);
return nullptr;
}
if (area->paddr_base == IO_AREA_PADDR) {
LOG_ERROR(HW_Memory, "MMIO mappings are not supported yet. phys_addr=0x%016" PRIX64,
address);
NGLOG_ERROR(HW_Memory, "MMIO mappings are not supported yet. phys_addr={:018X}", address);
return nullptr;
}
@@ -325,15 +324,29 @@ u8* GetPhysicalPointer(PAddr address) {
return target_pointer;
}
void RasterizerMarkRegionCached(VAddr start, u64 size, bool cached) {
if (start == 0) {
void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached) {
if (gpu_addr == 0) {
return;
}
u64 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
VAddr vaddr = start;
// Iterate over a contiguous CPU address space, which corresponds to the specified GPU address
// space, marking the region as un/cached. The region is marked un/cached at a granularity of
// CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This
// assumes the specified GPU address region is contiguous as well.
u64 num_pages = ((gpu_addr + size - 1) >> PAGE_BITS) - (gpu_addr >> PAGE_BITS) + 1;
for (unsigned i = 0; i < num_pages; ++i, gpu_addr += PAGE_SIZE) {
boost::optional<VAddr> maybe_vaddr =
Core::System::GetInstance().GPU().memory_manager->GpuToCpuAddress(gpu_addr);
// The GPU <-> CPU virtual memory mapping is not 1:1
if (!maybe_vaddr) {
NGLOG_ERROR(HW_Memory,
"Trying to flush a cached region to an invalid physical address {:016X}",
gpu_addr);
continue;
}
VAddr vaddr = *maybe_vaddr;
for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
if (cached) {
@@ -347,6 +360,10 @@ void RasterizerMarkRegionCached(VAddr start, u64 size, bool cached) {
page_type = PageType::RasterizerCachedMemory;
current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
break;
case PageType::RasterizerCachedMemory:
// There can be more than one GPU region mapped per CPU region, so it's common that
// this area is already marked as cached.
break;
default:
UNREACHABLE();
}
@@ -357,6 +374,10 @@ void RasterizerMarkRegionCached(VAddr start, u64 size, bool cached) {
// It is not necessary for a process to have this region mapped into its address
// space, for example, a system module need not have a VRAM mapping.
break;
case PageType::Memory:
// There can be more than one GPU region mapped per CPU region, so it's common that
// this area is already unmarked as cached.
break;
case PageType::RasterizerCachedMemory: {
u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
if (pointer == nullptr) {
@@ -394,19 +415,29 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
VAddr overlap_start = std::max(start, region_start);
VAddr overlap_end = std::min(end, region_end);
std::vector<Tegra::GPUVAddr> gpu_addresses =
Core::System::GetInstance().GPU().memory_manager->CpuToGpuAddress(overlap_start);
if (gpu_addresses.empty()) {
return;
}
u64 overlap_size = overlap_end - overlap_start;
auto* rasterizer = VideoCore::g_renderer->Rasterizer();
switch (mode) {
case FlushMode::Flush:
rasterizer->FlushRegion(overlap_start, overlap_size);
break;
case FlushMode::Invalidate:
rasterizer->InvalidateRegion(overlap_start, overlap_size);
break;
case FlushMode::FlushAndInvalidate:
rasterizer->FlushAndInvalidateRegion(overlap_start, overlap_size);
break;
for (const auto& gpu_address : gpu_addresses) {
auto* rasterizer = VideoCore::g_renderer->Rasterizer();
switch (mode) {
case FlushMode::Flush:
rasterizer->FlushRegion(gpu_address, overlap_size);
break;
case FlushMode::Invalidate:
rasterizer->InvalidateRegion(gpu_address, overlap_size);
break;
case FlushMode::FlushAndInvalidate:
rasterizer->FlushAndInvalidateRegion(gpu_address, overlap_size);
break;
}
}
};
@@ -445,8 +476,9 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_
switch (page_table.attributes[page_index]) {
case PageType::Unmapped: {
LOG_ERROR(HW_Memory, "unmapped ReadBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
current_vaddr, src_addr, size);
NGLOG_ERROR(HW_Memory,
"Unmapped ReadBlock @ {:#018X} (start address = {:#018X}, size = {})",
current_vaddr, src_addr, size);
std::memset(dest_buffer, 0, copy_amount);
break;
}
@@ -508,9 +540,9 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi
switch (page_table.attributes[page_index]) {
case PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"unmapped WriteBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
current_vaddr, dest_addr, size);
NGLOG_ERROR(HW_Memory,
"Unmapped WriteBlock @ {:#018X} (start address = {:#018X}, size = {})",
current_vaddr, dest_addr, size);
break;
}
case PageType::Memory: {
@@ -556,8 +588,9 @@ void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const size
switch (page_table.attributes[page_index]) {
case PageType::Unmapped: {
LOG_ERROR(HW_Memory, "unmapped ZeroBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
current_vaddr, dest_addr, size);
NGLOG_ERROR(HW_Memory,
"Unmapped ZeroBlock @ {:#018X} (start address = {#:018X}, size = {})",
current_vaddr, dest_addr, size);
break;
}
case PageType::Memory: {
@@ -596,8 +629,9 @@ void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,
switch (page_table.attributes[page_index]) {
case PageType::Unmapped: {
LOG_ERROR(HW_Memory, "unmapped CopyBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
current_vaddr, src_addr, size);
NGLOG_ERROR(HW_Memory,
"Unmapped CopyBlock @ {:#018X} (start address = {:#018X}, size = {})",
current_vaddr, src_addr, size);
ZeroBlock(process, dest_addr, copy_amount);
break;
}
@@ -646,7 +680,7 @@ boost::optional<PAddr> TryVirtualToPhysicalAddress(const VAddr addr) {
PAddr VirtualToPhysicalAddress(const VAddr addr) {
auto paddr = TryVirtualToPhysicalAddress(addr);
if (!paddr) {
LOG_ERROR(HW_Memory, "Unknown virtual address @ 0x%016" PRIX64, addr);
NGLOG_ERROR(HW_Memory, "Unknown virtual address @ {:#018X}", addr);
// To help with debugging, set bit on address so that it's obviously invalid.
return addr | 0x80000000;
}
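The reworked RasterizerFlushVirtualRegion above relies on the GPU <-> CPU mapping not being 1:1: one CPU address can be aliased by several GPU mappings, so a flush of a CPU range has to be fanned out to every alias. A rough standalone sketch of that idea (toy types and a generic flush callback, not the actual MemoryManager or rasterizer interfaces):

#include <cstdint>
#include <functional>
#include <vector>

using VAddr = std::uint64_t;
using GPUVAddr = std::uint64_t;

struct Mapping {
    VAddr cpu_addr;
    GPUVAddr gpu_addr;
    std::uint64_t size;
};

// Mirrors CpuToGpuAddress: collect every GPU address that maps the CPU address.
std::vector<GPUVAddr> CpuToGpu(const std::vector<Mapping>& regions, VAddr cpu_addr) {
    std::vector<GPUVAddr> results;
    for (const auto& r : regions) {
        if (cpu_addr >= r.cpu_addr && cpu_addr < r.cpu_addr + r.size) {
            results.push_back(r.gpu_addr + (cpu_addr - r.cpu_addr));
        }
    }
    return results;
}

// Mirrors the new loop: the same overlap is flushed once per GPU alias.
void FlushAliases(const std::vector<Mapping>& regions, VAddr overlap_start,
                  std::uint64_t overlap_size,
                  const std::function<void(GPUVAddr, std::uint64_t)>& flush) {
    for (const GPUVAddr gpu_address : CpuToGpu(regions, overlap_start)) {
        flush(gpu_address, overlap_size);
    }
}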

View File

@@ -14,6 +14,7 @@
#include <boost/optional.hpp>
#include "common/common_types.h"
#include "core/memory_hook.h"
#include "video_core/memory_manager.h"
namespace Kernel {
class Process;
@@ -258,7 +259,7 @@ enum class FlushMode {
/**
* Mark each page touching the region as cached.
*/
void RasterizerMarkRegionCached(VAddr start, u64 size, bool cached);
void RasterizerMarkRegionCached(Tegra::GPUVAddr start, u64 size, bool cached);
/**
* Flushes and invalidates any externally cached rasterizer resources touching the given virtual

View File

@@ -31,12 +31,14 @@ enum class BufferMethods {
};
void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params) {
LOG_WARNING(HW_GPU, "Processing method %08X on subchannel %u value %08X remaining params %u",
method, subchannel, value, remaining_params);
NGLOG_WARNING(HW_GPU,
"Processing method {:08X} on subchannel {} value "
"{:08X} remaining params {}",
method, subchannel, value, remaining_params);
if (method == static_cast<u32>(BufferMethods::SetGraphMacroEntry)) {
// Prepare to upload a new macro, reset the upload counter.
LOG_DEBUG(HW_GPU, "Uploading GPU macro %08X", value);
NGLOG_DEBUG(HW_GPU, "Uploading GPU macro {:08X}", value);
current_macro_entry = value;
current_macro_code.clear();
return;
@@ -58,7 +60,7 @@ void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params)
if (method == static_cast<u32>(BufferMethods::BindObject)) {
// Bind the current subchannel to the desired engine id.
LOG_DEBUG(HW_GPU, "Binding subchannel %u to engine %u", subchannel, value);
NGLOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", subchannel, value);
ASSERT(bound_engines.find(subchannel) == bound_engines.end());
bound_engines[subchannel] = static_cast<EngineID>(value);
return;
@@ -66,7 +68,7 @@ void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params)
if (method < static_cast<u32>(BufferMethods::CountBufferMethods)) {
// TODO(Subv): Research and implement these methods.
LOG_ERROR(HW_GPU, "Special buffer methods other than Bind are not implemented");
NGLOG_ERROR(HW_GPU, "Special buffer methods other than Bind are not implemented");
return;
}
@@ -90,11 +92,9 @@ void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params)
}
void GPU::ProcessCommandList(GPUVAddr address, u32 size) {
// TODO(Subv): PhysicalToVirtualAddress is a misnomer, it converts a GPU VAddr into an
// application VAddr.
const VAddr head_address = memory_manager->PhysicalToVirtualAddress(address);
VAddr current_addr = head_address;
while (current_addr < head_address + size * sizeof(CommandHeader)) {
const boost::optional<VAddr> head_address = memory_manager->GpuToCpuAddress(address);
VAddr current_addr = *head_address;
while (current_addr < *head_address + size * sizeof(CommandHeader)) {
const CommandHeader header = {Memory::Read32(current_addr)};
current_addr += sizeof(u32);
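Note that GpuToCpuAddress returns boost::optional, and the hunk above dereferences it immediately, which assumes the command list address is always CPU-mapped. A checked variant would look roughly like this (std::optional is used for brevity, and the toy translation function is a stand-in, not the emulator's API):

#include <cstdint>
#include <optional>

using VAddr = std::uint64_t;
using GPUVAddr = std::uint64_t;

// Toy translation: identity mapping below 1 MiB, unmapped above.
std::optional<VAddr> GpuToCpu(GPUVAddr gpu_addr) {
    if (gpu_addr < 0x100000) {
        return static_cast<VAddr>(gpu_addr);
    }
    return std::nullopt;
}

bool ProcessList(GPUVAddr address) {
    const std::optional<VAddr> head_address = GpuToCpu(address);
    if (!head_address) {
        return false;  // allocated but never mapped to CPU memory
    }
    VAddr current_addr = *head_address;  // safe to dereference after the check
    // ... walk the command headers starting at current_addr ...
    return current_addr != 0;
}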

View File

@@ -145,13 +145,38 @@ void Maxwell3D::ProcessQueryGet() {
GPUVAddr sequence_address = regs.query.QueryAddress();
// Since the sequence address is given as a GPU VAddr, we have to convert it to an application
// VAddr before writing.
VAddr address = memory_manager.PhysicalToVirtualAddress(sequence_address);
boost::optional<VAddr> address = memory_manager.GpuToCpuAddress(sequence_address);
// TODO(Subv): Support the other query units.
ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop,
"Units other than CROP are unimplemented");
ASSERT_MSG(regs.query.query_get.short_query,
"Writing the entire query result structure is unimplemented");
u32 value = Memory::Read32(*address);
u32 result = 0;
// TODO(Subv): Support the other query variables
switch (regs.query.query_get.select) {
case Regs::QuerySelect::Zero:
result = 0;
break;
default:
UNIMPLEMENTED_MSG("Unimplemented query select type %u",
static_cast<u32>(regs.query.query_get.select.Value()));
}
// TODO(Subv): Research and implement how query sync conditions work.
switch (regs.query.query_get.mode) {
case Regs::QueryMode::Write: {
case Regs::QueryMode::Write:
case Regs::QueryMode::Write2: {
// Write the current query sequence to the sequence address.
u32 sequence = regs.query.query_sequence;
Memory::Write32(address, sequence);
Memory::Write32(*address, sequence);
// TODO(Subv): Write the proper query response structure to the address when not using short
// mode.
break;
}
default:
@@ -161,8 +186,8 @@ void Maxwell3D::ProcessQueryGet() {
}
void Maxwell3D::DrawArrays() {
LOG_DEBUG(HW_GPU, "called, topology=%d, count=%d", regs.draw.topology.Value(),
regs.vertex_buffer.count);
NGLOG_DEBUG(HW_GPU, "called, topology={}, count={}",
static_cast<u32>(regs.draw.topology.Value()), regs.vertex_buffer.count);
ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");
auto debug_context = Core::System::GetInstance().GetGPUDebugContext();
@@ -200,10 +225,10 @@ void Maxwell3D::ProcessCBData(u32 value) {
// Don't allow writing past the end of the buffer.
ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size);
VAddr address =
memory_manager.PhysicalToVirtualAddress(buffer_address + regs.const_buffer.cb_pos);
boost::optional<VAddr> address =
memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos);
Memory::Write32(address, value);
Memory::Write32(*address, value);
// Increment the current buffer position.
regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4;
@@ -213,10 +238,10 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
GPUVAddr tic_base_address = regs.tic.TICAddress();
GPUVAddr tic_address_gpu = tic_base_address + tic_index * sizeof(Texture::TICEntry);
VAddr tic_address_cpu = memory_manager.PhysicalToVirtualAddress(tic_address_gpu);
boost::optional<VAddr> tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu);
Texture::TICEntry tic_entry;
Memory::ReadBlock(tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry));
Memory::ReadBlock(*tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry));
ASSERT_MSG(tic_entry.header_version == Texture::TICHeaderVersion::BlockLinear ||
tic_entry.header_version == Texture::TICHeaderVersion::Pitch,
@@ -243,10 +268,10 @@ Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const {
GPUVAddr tsc_base_address = regs.tsc.TSCAddress();
GPUVAddr tsc_address_gpu = tsc_base_address + tsc_index * sizeof(Texture::TSCEntry);
VAddr tsc_address_cpu = memory_manager.PhysicalToVirtualAddress(tsc_address_gpu);
boost::optional<VAddr> tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu);
Texture::TSCEntry tsc_entry;
Memory::ReadBlock(tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry));
Memory::ReadBlock(*tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry));
return tsc_entry;
}
@@ -268,7 +293,7 @@ std::vector<Texture::FullTextureInfo> Maxwell3D::GetStageTextures(Regs::ShaderSt
current_texture < tex_info_buffer_end; current_texture += sizeof(Texture::TextureHandle)) {
Texture::TextureHandle tex_handle{
Memory::Read32(memory_manager.PhysicalToVirtualAddress(current_texture))};
Memory::Read32(*memory_manager.GpuToCpuAddress(current_texture))};
Texture::FullTextureInfo tex_info{};
// TODO(Subv): Use the shader to determine which textures are actually accessed.
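ProcessQueryGet above now translates the GPU sequence address before the 32-bit write. A rough standalone sketch of that translate-then-write pattern (the byte-vector memory and the helper names are hypothetical, not the emulator's Memory API):

#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

using VAddr = std::uint64_t;

void Write32(std::vector<std::uint8_t>& memory, VAddr addr, std::uint32_t value) {
    std::memcpy(memory.data() + addr, &value, sizeof(value));
}

// Writes `sequence` to the CPU address backing a GPU query address, if mapped.
bool WriteQuerySequence(std::vector<std::uint8_t>& memory,
                        std::optional<VAddr> cpu_addr, std::uint32_t sequence) {
    if (!cpu_addr || *cpu_addr + sizeof(sequence) > memory.size()) {
        return false;  // unmapped or out of range: nothing sensible to write
    }
    Write32(memory, *cpu_addr, sequence);
    return true;
}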

View File

@@ -46,6 +46,29 @@ public:
enum class QueryMode : u32 {
Write = 0,
Sync = 1,
// TODO(Subv): It is currently unknown what the difference between method 2 and method 0
// is.
Write2 = 2,
};
enum class QueryUnit : u32 {
VFetch = 1,
VP = 2,
Rast = 4,
StrmOut = 5,
GP = 6,
ZCull = 7,
Prop = 10,
Crop = 15,
};
enum class QuerySelect : u32 {
Zero = 0,
};
enum class QuerySyncCondition : u32 {
NotEqual = 0,
GreaterThan = 1,
};
enum class ShaderProgram : u32 {
@@ -476,7 +499,10 @@ public:
u32 raw;
BitField<0, 2, QueryMode> mode;
BitField<4, 1, u32> fence;
BitField<12, 4, u32> unit;
BitField<12, 4, QueryUnit> unit;
BitField<16, 1, QuerySyncCondition> sync_cond;
BitField<23, 5, QuerySelect> select;
BitField<28, 1, u32> short_query;
} query_get;
GPUVAddr QueryAddress() const {
@@ -500,6 +526,11 @@ public:
return static_cast<GPUVAddr>((static_cast<GPUVAddr>(start_high) << 32) |
start_low);
}
bool IsEnabled() const {
return enable != 0 && StartAddress() != 0;
}
} vertex_array[NumVertexArrays];
Blend blend;
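The new query_get fields pack into a single 32-bit register via BitField. A manual decode equivalent to the declared layout (bit positions and widths copied from the declarations above; the helper itself is only illustrative):

#include <cstdint>

struct QueryGetFields {
    std::uint32_t mode;         // bits 0-1  (QueryMode)
    std::uint32_t fence;        // bit 4
    std::uint32_t unit;         // bits 12-15 (QueryUnit)
    std::uint32_t sync_cond;    // bit 16     (QuerySyncCondition)
    std::uint32_t select;       // bits 23-27 (QuerySelect)
    std::uint32_t short_query;  // bit 28
};

constexpr std::uint32_t Bits(std::uint32_t raw, unsigned pos, unsigned count) {
    return (raw >> pos) & ((1u << count) - 1u);
}

constexpr QueryGetFields DecodeQueryGet(std::uint32_t raw) {
    return {Bits(raw, 0, 2),  Bits(raw, 4, 1),  Bits(raw, 12, 4),
            Bits(raw, 16, 1), Bits(raw, 23, 5), Bits(raw, 28, 1)};
}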

View File

@@ -2,109 +2,118 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/alignment.h"
#include "common/assert.h"
#include "video_core/memory_manager.h"
namespace Tegra {
PAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
boost::optional<PAddr> paddr = FindFreeBlock(size, align);
ASSERT(paddr);
GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
boost::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, align);
ASSERT(gpu_addr);
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
PageSlot(*paddr + offset) = static_cast<u64>(PageStatus::Allocated);
for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
ASSERT(PageSlot(*gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
PageSlot(*gpu_addr + offset) = static_cast<u64>(PageStatus::Allocated);
}
return *paddr;
return *gpu_addr;
}
PAddr MemoryManager::AllocateSpace(PAddr paddr, u64 size, u64 align) {
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
if (IsPageMapped(paddr + offset)) {
return AllocateSpace(size, align);
}
GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) {
for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
ASSERT(PageSlot(gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
PageSlot(gpu_addr + offset) = static_cast<u64>(PageStatus::Allocated);
}
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
PageSlot(paddr + offset) = static_cast<u64>(PageStatus::Allocated);
}
return paddr;
return gpu_addr;
}
PAddr MemoryManager::MapBufferEx(VAddr vaddr, u64 size) {
vaddr &= ~Memory::PAGE_MASK;
GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
boost::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, PAGE_SIZE);
ASSERT(gpu_addr);
boost::optional<PAddr> paddr = FindFreeBlock(size);
ASSERT(paddr);
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
PageSlot(*paddr + offset) = vaddr + offset;
for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
ASSERT(PageSlot(*gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
PageSlot(*gpu_addr + offset) = cpu_addr + offset;
}
return *paddr;
MappedRegion region{cpu_addr, *gpu_addr, size};
mapped_regions.push_back(region);
return *gpu_addr;
}
PAddr MemoryManager::MapBufferEx(VAddr vaddr, PAddr paddr, u64 size) {
vaddr &= ~Memory::PAGE_MASK;
paddr &= ~Memory::PAGE_MASK;
GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) {
ASSERT((gpu_addr & PAGE_MASK) == 0);
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
if (PageSlot(paddr + offset) != static_cast<u64>(PageStatus::Allocated)) {
return MapBufferEx(vaddr, size);
}
for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
ASSERT(PageSlot(gpu_addr + offset) == static_cast<u64>(PageStatus::Allocated));
PageSlot(gpu_addr + offset) = cpu_addr + offset;
}
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
PageSlot(paddr + offset) = vaddr + offset;
}
MappedRegion region{cpu_addr, gpu_addr, size};
mapped_regions.push_back(region);
return paddr;
return gpu_addr;
}
boost::optional<PAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
PAddr paddr{};
u64 free_space{};
align = (align + Memory::PAGE_MASK) & ~Memory::PAGE_MASK;
boost::optional<GPUVAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
GPUVAddr gpu_addr = 0;
u64 free_space = 0;
align = (align + PAGE_MASK) & ~PAGE_MASK;
while (paddr + free_space < MAX_ADDRESS) {
if (!IsPageMapped(paddr + free_space)) {
free_space += Memory::PAGE_SIZE;
while (gpu_addr + free_space < MAX_ADDRESS) {
if (!IsPageMapped(gpu_addr + free_space)) {
free_space += PAGE_SIZE;
if (free_space >= size) {
return paddr;
return gpu_addr;
}
} else {
paddr += free_space + Memory::PAGE_SIZE;
gpu_addr += free_space + PAGE_SIZE;
free_space = 0;
const u64 remainder{paddr % align};
if (!remainder) {
paddr = (paddr - remainder) + align;
}
gpu_addr = Common::AlignUp(gpu_addr, align);
}
}
return {};
}
VAddr MemoryManager::PhysicalToVirtualAddress(PAddr paddr) {
VAddr base_addr = PageSlot(paddr);
boost::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) {
VAddr base_addr = PageSlot(gpu_addr);
ASSERT(base_addr != static_cast<u64>(PageStatus::Unmapped));
return base_addr + (paddr & Memory::PAGE_MASK);
if (base_addr == static_cast<u64>(PageStatus::Allocated)) {
return {};
}
return base_addr + (gpu_addr & PAGE_MASK);
}
bool MemoryManager::IsPageMapped(PAddr paddr) {
return PageSlot(paddr) != static_cast<u64>(PageStatus::Unmapped);
std::vector<GPUVAddr> MemoryManager::CpuToGpuAddress(VAddr cpu_addr) const {
std::vector<GPUVAddr> results;
for (const auto& region : mapped_regions) {
if (cpu_addr >= region.cpu_addr && cpu_addr < (region.cpu_addr + region.size)) {
u64 offset = cpu_addr - region.cpu_addr;
results.push_back(region.gpu_addr + offset);
}
}
return results;
}
VAddr& MemoryManager::PageSlot(PAddr paddr) {
auto& block = page_table[(paddr >> (Memory::PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
bool MemoryManager::IsPageMapped(GPUVAddr gpu_addr) {
return PageSlot(gpu_addr) != static_cast<u64>(PageStatus::Unmapped);
}
VAddr& MemoryManager::PageSlot(GPUVAddr gpu_addr) {
auto& block = page_table[(gpu_addr >> (PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
if (!block) {
block = std::make_unique<PageBlock>();
for (unsigned index = 0; index < PAGE_BLOCK_SIZE; index++) {
(*block)[index] = static_cast<u64>(PageStatus::Unmapped);
}
}
return (*block)[(paddr >> Memory::PAGE_BITS) & PAGE_BLOCK_MASK];
return (*block)[(gpu_addr >> PAGE_BITS) & PAGE_BLOCK_MASK];
}
} // namespace Tegra
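PageSlot above implements a lazily allocated two-level table: the upper bits of the page number select a block, the lower bits select a slot within it, and a sentinel value marks unmapped pages. A simplified standalone version using the same constants as the header that follows (everything else is a toy stand-in):

#include <array>
#include <cstdint>
#include <memory>

using VAddr = std::uint64_t;
using GPUVAddr = std::uint64_t;

constexpr std::uint64_t PAGE_BITS = 16;
constexpr std::uint64_t PAGE_TABLE_BITS = 10;
constexpr std::uint64_t PAGE_TABLE_SIZE = 1ULL << PAGE_TABLE_BITS;
constexpr std::uint64_t PAGE_TABLE_MASK = PAGE_TABLE_SIZE - 1;
constexpr std::uint64_t PAGE_BLOCK_BITS = 14;
constexpr std::uint64_t PAGE_BLOCK_SIZE = 1ULL << PAGE_BLOCK_BITS;
constexpr std::uint64_t PAGE_BLOCK_MASK = PAGE_BLOCK_SIZE - 1;
constexpr std::uint64_t UNMAPPED = 0xFFFFFFFFFFFFFFFFULL;

struct ToyPageTable {
    using PageBlock = std::array<VAddr, PAGE_BLOCK_SIZE>;
    std::array<std::unique_ptr<PageBlock>, PAGE_TABLE_SIZE> blocks{};

    VAddr& Slot(GPUVAddr gpu_addr) {
        // Top PAGE_TABLE_BITS of the page number pick the block...
        auto& block = blocks[(gpu_addr >> (PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
        if (!block) {
            block = std::make_unique<PageBlock>();
            block->fill(UNMAPPED);  // every page starts out unmapped
        }
        // ...and the bottom PAGE_BLOCK_BITS pick the slot inside that block.
        return (*block)[(gpu_addr >> PAGE_BITS) & PAGE_BLOCK_MASK];
    }
};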

View File

@@ -6,8 +6,11 @@
#include <array>
#include <memory>
#include <vector>
#include <boost/optional.hpp>
#include "common/common_types.h"
#include "core/memory.h"
namespace Tegra {
@@ -18,16 +21,21 @@ class MemoryManager final {
public:
MemoryManager() = default;
PAddr AllocateSpace(u64 size, u64 align);
PAddr AllocateSpace(PAddr paddr, u64 size, u64 align);
PAddr MapBufferEx(VAddr vaddr, u64 size);
PAddr MapBufferEx(VAddr vaddr, PAddr paddr, u64 size);
VAddr PhysicalToVirtualAddress(PAddr paddr);
GPUVAddr AllocateSpace(u64 size, u64 align);
GPUVAddr AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align);
GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size);
GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size);
boost::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr);
std::vector<GPUVAddr> CpuToGpuAddress(VAddr cpu_addr) const;
static constexpr u64 PAGE_BITS = 16;
static constexpr u64 PAGE_SIZE = 1 << PAGE_BITS;
static constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
private:
boost::optional<PAddr> FindFreeBlock(u64 size, u64 align = 1);
bool IsPageMapped(PAddr paddr);
VAddr& PageSlot(PAddr paddr);
boost::optional<GPUVAddr> FindFreeBlock(u64 size, u64 align = 1);
bool IsPageMapped(GPUVAddr gpu_addr);
VAddr& PageSlot(GPUVAddr gpu_addr);
enum class PageStatus : u64 {
Unmapped = 0xFFFFFFFFFFFFFFFFULL,
@@ -35,7 +43,7 @@ private:
};
static constexpr u64 MAX_ADDRESS{0x10000000000ULL};
static constexpr u64 PAGE_TABLE_BITS{14};
static constexpr u64 PAGE_TABLE_BITS{10};
static constexpr u64 PAGE_TABLE_SIZE{1 << PAGE_TABLE_BITS};
static constexpr u64 PAGE_TABLE_MASK{PAGE_TABLE_SIZE - 1};
static constexpr u64 PAGE_BLOCK_BITS{14};
@@ -44,6 +52,14 @@ private:
using PageBlock = std::array<VAddr, PAGE_BLOCK_SIZE>;
std::array<std::unique_ptr<PageBlock>, PAGE_TABLE_SIZE> page_table{};
struct MappedRegion {
VAddr cpu_addr;
GPUVAddr gpu_addr;
u64 size;
};
std::vector<MappedRegion> mapped_regions;
};
} // namespace Tegra
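The PAGE_TABLE_BITS change from 14 to 10 makes the two-level table span exactly MAX_ADDRESS: 16 page bits + 14 block bits + 10 table bits = 40 bits, i.e. 0x10000000000 (1 TiB). A compile-time restatement of that arithmetic, using the same constants:

#include <cstdint>

constexpr std::uint64_t PAGE_BITS = 16;        // 64 KiB GPU pages
constexpr std::uint64_t PAGE_BLOCK_BITS = 14;  // pages per block
constexpr std::uint64_t PAGE_TABLE_BITS = 10;  // blocks in the table (was 14)
constexpr std::uint64_t MAX_ADDRESS = 0x10000000000ULL;

static_assert((1ULL << (PAGE_BITS + PAGE_BLOCK_BITS + PAGE_TABLE_BITS)) == MAX_ADDRESS,
              "page table no longer spans the GPU address space exactly");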

View File

@@ -6,6 +6,7 @@
#include "common/common_types.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
struct ScreenInfo;
@@ -25,14 +26,14 @@ public:
virtual void FlushAll() = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
virtual void FlushRegion(VAddr addr, u64 size) = 0;
virtual void FlushRegion(Tegra::GPUVAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be invalidated
virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
virtual void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
/// and invalidated
virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
virtual void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;
/// Attempt to use a faster method to perform a display transfer with is_texture_copy = 0
virtual bool AccelerateDisplayTransfer(const void* config) {

View File

@@ -14,7 +14,6 @@
#include "common/math_util.h"
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/vector_math.h"
#include "core/core.h"
#include "core/hle/kernel/process.h"
#include "core/settings.h"
@@ -117,7 +116,7 @@ RasterizerOpenGL::RasterizerOpenGL() {
glEnable(GL_BLEND);
LOG_CRITICAL(Render_OpenGL, "Sync fixed function OpenGL state here!");
NGLOG_CRITICAL(Render_OpenGL, "Sync fixed function OpenGL state here!");
}
RasterizerOpenGL::~RasterizerOpenGL() {
@@ -128,7 +127,8 @@ RasterizerOpenGL::~RasterizerOpenGL() {
}
}
void RasterizerOpenGL::SetupVertexArray(u8* array_ptr, GLintptr buffer_offset) {
std::pair<u8*, GLintptr> RasterizerOpenGL::SetupVertexArrays(u8* array_ptr,
GLintptr buffer_offset) {
MICROPROFILE_SCOPE(OpenGL_VAO);
const auto& regs = Core::System().GetInstance().GPU().Maxwell3D().regs;
const auto& memory_manager = Core::System().GetInstance().GPU().memory_manager;
@@ -137,43 +137,58 @@ void RasterizerOpenGL::SetupVertexArray(u8* array_ptr, GLintptr buffer_offset) {
state.draw.vertex_buffer = stream_buffer->GetHandle();
state.Apply();
// TODO(bunnei): Add support for 1+ vertex arrays
const auto& vertex_array{regs.vertex_array[0]};
const auto& vertex_array_limit{regs.vertex_array_limit[0]};
ASSERT_MSG(vertex_array.enable, "vertex array 0 is disabled?");
ASSERT_MSG(!vertex_array.divisor, "vertex array 0 divisor is unimplemented!");
for (unsigned index = 1; index < Maxwell::NumVertexArrays; ++index) {
ASSERT_MSG(!regs.vertex_array[index].enable, "vertex array %d is unimplemented!", index);
// Upload all guest vertex arrays sequentially to our buffer
for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
const auto& vertex_array = regs.vertex_array[index];
if (!vertex_array.IsEnabled())
continue;
const Tegra::GPUVAddr start = vertex_array.StartAddress();
const Tegra::GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
ASSERT(end > start);
u64 size = end - start + 1;
// Copy vertex array data
res_cache.FlushRegion(start, size, nullptr);
Memory::ReadBlock(*memory_manager->GpuToCpuAddress(start), array_ptr, size);
// Bind the vertex array to the buffer at the current offset.
glBindVertexBuffer(index, stream_buffer->GetHandle(), buffer_offset, vertex_array.stride);
ASSERT_MSG(vertex_array.divisor == 0, "Vertex buffer divisor unimplemented");
array_ptr += size;
buffer_offset += size;
}
// Use the vertex array as-is, assumes that the data is formatted correctly for OpenGL.
// Enables the first 16 vertex attributes always, as we don't know which ones are actually used
// until shader time. Note, Tegra technically supports 32, but we're cappinig this to 16 for now
// until shader time. Note, Tegra technically supports 32, but we're capping this to 16 for now
// to avoid OpenGL errors.
// TODO(Subv): Analyze the shader to identify which attributes are actually used and don't
// assume every shader uses them all.
for (unsigned index = 0; index < 16; ++index) {
auto& attrib = regs.vertex_attrib_format[index];
NGLOG_DEBUG(HW_GPU, "vertex attrib {}, count={}, size={}, type={}, offset={}, normalize={}",
index, attrib.ComponentCount(), attrib.SizeString(), attrib.TypeString(),
attrib.offset.Value(), attrib.IsNormalized());
glVertexAttribPointer(index, attrib.ComponentCount(), MaxwellToGL::VertexType(attrib),
attrib.IsNormalized() ? GL_TRUE : GL_FALSE, vertex_array.stride,
reinterpret_cast<GLvoid*>(buffer_offset + attrib.offset));
auto& buffer = regs.vertex_array[attrib.buffer];
ASSERT(buffer.IsEnabled());
glEnableVertexAttribArray(index);
glVertexAttribFormat(index, attrib.ComponentCount(), MaxwellToGL::VertexType(attrib),
attrib.IsNormalized() ? GL_TRUE : GL_FALSE, attrib.offset);
glVertexAttribBinding(index, attrib.buffer);
hw_vao_enabled_attributes[index] = true;
}
// Copy vertex array data
const u64 data_size{vertex_array_limit.LimitAddress() - vertex_array.StartAddress() + 1};
const VAddr data_addr{memory_manager->PhysicalToVirtualAddress(vertex_array.StartAddress())};
res_cache.FlushRegion(data_addr, data_size, nullptr);
Memory::ReadBlock(data_addr, array_ptr, data_size);
array_ptr += data_size;
buffer_offset += data_size;
return {array_ptr, buffer_offset};
}
void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size_t ptr_pos) {
void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset) {
// Helper function for uploading uniform data
const auto copy_buffer = [&](GLuint handle, GLintptr offset, GLsizeiptr size) {
if (has_ARB_direct_state_access) {
@@ -191,8 +206,6 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size
u32 current_constbuffer_bindpoint = 0;
for (unsigned index = 1; index < Maxwell::MaxShaderProgram; ++index) {
ptr_pos += sizeof(GLShader::MaxwellUniformData);
auto& shader_config = gpu.regs.shader_config[index];
const Maxwell::ShaderProgram program{static_cast<Maxwell::ShaderProgram>(index)};
@@ -206,18 +219,21 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size
}
// Upload uniform data as one UBO per stage
const GLintptr ubo_offset = buffer_offset + static_cast<GLintptr>(ptr_pos);
const GLintptr ubo_offset = buffer_offset;
copy_buffer(uniform_buffers[stage].handle, ubo_offset,
sizeof(GLShader::MaxwellUniformData));
GLShader::MaxwellUniformData* ub_ptr =
reinterpret_cast<GLShader::MaxwellUniformData*>(&buffer_ptr[ptr_pos]);
reinterpret_cast<GLShader::MaxwellUniformData*>(buffer_ptr);
ub_ptr->SetFromRegs(gpu.state.shader_stages[stage]);
buffer_ptr += sizeof(GLShader::MaxwellUniformData);
buffer_offset += sizeof(GLShader::MaxwellUniformData);
// Fetch program code from memory
GLShader::ProgramCode program_code;
const u64 gpu_address{gpu.regs.code_address.CodeAddress() + shader_config.offset};
const VAddr cpu_address{gpu.memory_manager.PhysicalToVirtualAddress(gpu_address)};
Memory::ReadBlock(cpu_address, program_code.data(), program_code.size() * sizeof(u64));
const boost::optional<VAddr> cpu_address{gpu.memory_manager.GpuToCpuAddress(gpu_address)};
Memory::ReadBlock(*cpu_address, program_code.data(), program_code.size() * sizeof(u64));
GLShader::ShaderSetup setup{std::move(program_code)};
GLShader::ShaderEntries shader_resources;
@@ -236,8 +252,8 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size
break;
}
default:
LOG_CRITICAL(HW_GPU, "Unimplemented shader index=%d, enable=%d, offset=0x%08X", index,
shader_config.enable.Value(), shader_config.offset);
NGLOG_CRITICAL(HW_GPU, "Unimplemented shader index={}, enable={}, offset={:#010X}",
index, shader_config.enable.Value(), shader_config.offset);
UNREACHABLE();
}
@@ -253,6 +269,24 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size
shader_program_manager->UseTrivialGeometryShader();
}
size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
const auto& regs = Core::System().GetInstance().GPU().Maxwell3D().regs;
size_t size = 0;
for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
if (!regs.vertex_array[index].IsEnabled())
continue;
const Tegra::GPUVAddr start = regs.vertex_array[index].StartAddress();
const Tegra::GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
ASSERT(end > start);
size += end - start + 1;
}
return size;
}
bool RasterizerOpenGL::AccelerateDrawBatch(bool is_indexed) {
accelerate_draw = is_indexed ? AccelDraw::Indexed : AccelDraw::Arrays;
DrawArrays();
@@ -330,44 +364,49 @@ void RasterizerOpenGL::DrawArrays() {
const u64 index_buffer_size{regs.index_array.count * regs.index_array.FormatSizeInBytes()};
const unsigned vertex_num{is_indexed ? regs.index_array.count : regs.vertex_buffer.count};
// TODO(bunnei): Add support for 1+ vertex arrays
vs_input_size = vertex_num * regs.vertex_array[0].stride;
state.draw.vertex_buffer = stream_buffer->GetHandle();
state.Apply();
size_t buffer_size = static_cast<size_t>(vs_input_size);
size_t buffer_size = CalculateVertexArraysSize();
if (is_indexed) {
buffer_size = Common::AlignUp(buffer_size, 4) + index_buffer_size;
buffer_size = Common::AlignUp<size_t>(buffer_size, 4) + index_buffer_size;
}
// Uniform space for the 5 shader stages
buffer_size += sizeof(GLShader::MaxwellUniformData) * Maxwell::MaxShaderStage;
buffer_size = Common::AlignUp<size_t>(buffer_size, 4) +
sizeof(GLShader::MaxwellUniformData) * Maxwell::MaxShaderStage;
size_t ptr_pos = 0;
u8* buffer_ptr;
GLintptr buffer_offset;
std::tie(buffer_ptr, buffer_offset) =
stream_buffer->Map(static_cast<GLsizeiptr>(buffer_size), 4);
SetupVertexArray(buffer_ptr, buffer_offset);
ptr_pos += vs_input_size;
u8* offseted_buffer;
std::tie(offseted_buffer, buffer_offset) = SetupVertexArrays(buffer_ptr, buffer_offset);
offseted_buffer =
reinterpret_cast<u8*>(Common::AlignUp(reinterpret_cast<size_t>(offseted_buffer), 4));
buffer_offset = Common::AlignUp<size_t>(buffer_offset, 4);
// If indexed mode, copy the index buffer
GLintptr index_buffer_offset = 0;
if (is_indexed) {
ptr_pos = Common::AlignUp(ptr_pos, 4);
const auto& memory_manager = Core::System().GetInstance().GPU().memory_manager;
const VAddr index_data_addr{
memory_manager->PhysicalToVirtualAddress(regs.index_array.StartAddress())};
Memory::ReadBlock(index_data_addr, &buffer_ptr[ptr_pos], index_buffer_size);
const boost::optional<VAddr> index_data_addr{
memory_manager->GpuToCpuAddress(regs.index_array.StartAddress())};
Memory::ReadBlock(*index_data_addr, offseted_buffer, index_buffer_size);
index_buffer_offset = buffer_offset + static_cast<GLintptr>(ptr_pos);
ptr_pos += index_buffer_size;
index_buffer_offset = buffer_offset;
offseted_buffer += index_buffer_size;
buffer_offset += index_buffer_size;
}
SetupShaders(buffer_ptr, buffer_offset, ptr_pos);
offseted_buffer =
reinterpret_cast<u8*>(Common::AlignUp(reinterpret_cast<size_t>(offseted_buffer), 4));
buffer_offset = Common::AlignUp<size_t>(buffer_offset, 4);
SetupShaders(offseted_buffer, buffer_offset);
stream_buffer->Unmap();
@@ -479,17 +518,17 @@ void RasterizerOpenGL::FlushAll() {
res_cache.FlushAll();
}
void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
void RasterizerOpenGL::FlushRegion(Tegra::GPUVAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
res_cache.FlushRegion(addr, size);
}
void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
void RasterizerOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
res_cache.InvalidateRegion(addr, size, nullptr);
}
void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
void RasterizerOpenGL::FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
res_cache.FlushRegion(addr, size);
res_cache.InvalidateRegion(addr, size, nullptr);
@@ -520,7 +559,8 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& framebu
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
SurfaceParams src_params;
src_params.addr = framebuffer_addr;
src_params.cpu_addr = framebuffer_addr;
src_params.addr = res_cache.TryFindFramebufferGpuAddress(framebuffer_addr).get_value_or(0);
src_params.width = std::min(framebuffer.width, pixel_stride);
src_params.height = framebuffer.height;
src_params.stride = pixel_stride;
@@ -619,9 +659,9 @@ u32 RasterizerOpenGL::SetupConstBuffers(Maxwell::ShaderStage stage, GLuint progr
buffer_draw_state.enabled = true;
buffer_draw_state.bindpoint = current_bindpoint + bindpoint;
VAddr addr = gpu.memory_manager->PhysicalToVirtualAddress(buffer.address);
boost::optional<VAddr> addr = gpu.memory_manager->GpuToCpuAddress(buffer.address);
std::vector<u8> data(used_buffer.GetSize() * sizeof(float));
Memory::ReadBlock(addr, data.data(), data.size());
Memory::ReadBlock(*addr, data.data(), data.size());
glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer_draw_state.ssbo);
glBufferData(GL_SHADER_STORAGE_BUFFER, data.size(), data.data(), GL_DYNAMIC_DRAW);
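The rewritten DrawArrays lays the mapped stream buffer out as all enabled vertex arrays, then the index buffer when drawing indexed, then one uniform block per shader stage, with 4-byte alignment between sections. A small arithmetic sketch of that size computation (the concrete sizes are made up; AlignUp here matches Common::AlignUp's round-up-to-multiple behaviour):

#include <cstddef>

constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
    return (value + align - 1) / align * align;
}

constexpr std::size_t StreamBufferSize(std::size_t vertex_arrays_size,
                                       std::size_t index_buffer_size, bool is_indexed,
                                       std::size_t uniform_block_size,
                                       std::size_t shader_stages) {
    std::size_t size = vertex_arrays_size;            // sum of (end - start + 1) per array
    if (is_indexed) {
        size = AlignUp(size, 4) + index_buffer_size;  // index data starts 4-byte aligned
    }
    return AlignUp(size, 4) + uniform_block_size * shader_stages;  // one UBO per stage
}

// e.g. 3000 bytes of vertices, 602 bytes of indices, 5 stages of 512-byte uniforms:
// 3000 -> 3000 + 602 = 3602 -> 3604 + 5 * 512 = 6164 bytes.
static_assert(StreamBufferSize(3000, 602, true, 512, 5) == 6164, "layout arithmetic");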

View File

@@ -6,16 +6,12 @@
#include <array>
#include <cstddef>
#include <cstring>
#include <memory>
#include <unordered_map>
#include <vector>
#include <glad/glad.h>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "common/hash.h"
#include "common/vector_math.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
@@ -34,9 +30,9 @@ public:
void DrawArrays() override;
void NotifyMaxwellRegisterChanged(u32 method) override;
void FlushAll() override;
void FlushRegion(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size) override;
void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
void FlushRegion(Tegra::GPUVAddr addr, u64 size) override;
void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
bool AccelerateDisplayTransfer(const void* config) override;
bool AccelerateTextureCopy(const void* config) override;
bool AccelerateFill(const void* config) override;
@@ -153,13 +149,13 @@ private:
static constexpr size_t STREAM_BUFFER_SIZE = 4 * 1024 * 1024;
std::unique_ptr<OGLStreamBuffer> stream_buffer;
GLsizeiptr vs_input_size;
size_t CalculateVertexArraysSize() const;
void SetupVertexArray(u8* array_ptr, GLintptr buffer_offset);
std::pair<u8*, GLintptr> SetupVertexArrays(u8* array_ptr, GLintptr buffer_offset);
std::array<OGLBuffer, Tegra::Engines::Maxwell3D::Regs::MaxShaderStage> uniform_buffers;
void SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size_t ptr_pos);
void SetupShaders(u8* buffer_ptr, GLintptr buffer_offset);
enum class AccelDraw { Disabled, Arrays, Indexed };
AccelDraw accelerate_draw;

View File

@@ -7,7 +7,6 @@
#include <cstring>
#include <iterator>
#include <memory>
#include <unordered_set>
#include <utility>
#include <vector>
#include <boost/optional.hpp>
@@ -20,7 +19,6 @@
#include "common/math_util.h"
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/vector_math.h"
#include "core/core.h"
#include "core/frontend/emu_window.h"
#include "core/hle/kernel/process.h"
@@ -43,17 +41,15 @@ struct FormatTuple {
GLenum format;
GLenum type;
bool compressed;
// How many pixels in the original texture are equivalent to one pixel in the compressed
// texture.
u32 compression_factor;
};
static constexpr std::array<FormatTuple, SurfaceParams::MaxPixelFormat> tex_format_tuples = {{
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false, 1}, // ABGR8
{GL_RGB, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, false, 1}, // B5G6R5
{GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true, 16}, // DXT1
{GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true, 16}, // DXT23
{GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true, 16}, // DXT45
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false}, // ABGR8
{GL_RGB, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, false}, // B5G6R5
{GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, false}, // A2B10G10R10
{GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT1
{GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT23
{GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT45
}};
static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType component_type) {
@@ -84,38 +80,44 @@ static u16 GetResolutionScaleFactor() {
}
template <bool morton_to_gl, PixelFormat format>
void MortonCopy(u32 stride, u32 block_height, u32 height, u8* gl_buffer, VAddr base, VAddr start,
VAddr end) {
constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / 8;
void MortonCopy(u32 stride, u32 block_height, u32 height, u8* gl_buffer, Tegra::GPUVAddr base,
Tegra::GPUVAddr start, Tegra::GPUVAddr end) {
constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / CHAR_BIT;
constexpr u32 gl_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format);
const auto& gpu = Core::System::GetInstance().GPU();
if (morton_to_gl) {
auto data = Tegra::Texture::UnswizzleTexture(
base, SurfaceParams::TextureFormatFromPixelFormat(format), stride, height,
block_height);
*gpu.memory_manager->GpuToCpuAddress(base),
SurfaceParams::TextureFormatFromPixelFormat(format), stride, height, block_height);
std::memcpy(gl_buffer, data.data(), data.size());
} else {
// TODO(bunnei): Assumes the default rendering GOB size of 16 (128 lines). We should check
// the configuration for this and perform more generic un/swizzle
LOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!");
VideoCore::MortonCopyPixels128(stride, height, bytes_per_pixel, gl_bytes_per_pixel,
Memory::GetPointer(base), gl_buffer, morton_to_gl);
NGLOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!");
VideoCore::MortonCopyPixels128(
stride, height, bytes_per_pixel, gl_bytes_per_pixel,
Memory::GetPointer(*gpu.memory_manager->GpuToCpuAddress(base)), gl_buffer,
morton_to_gl);
}
}
static constexpr std::array<void (*)(u32, u32, u32, u8*, VAddr, VAddr, VAddr),
static constexpr std::array<void (*)(u32, u32, u32, u8*, Tegra::GPUVAddr, Tegra::GPUVAddr,
Tegra::GPUVAddr),
SurfaceParams::MaxPixelFormat>
morton_to_gl_fns = {
MortonCopy<true, PixelFormat::ABGR8>, MortonCopy<true, PixelFormat::B5G6R5>,
MortonCopy<true, PixelFormat::DXT1>, MortonCopy<true, PixelFormat::DXT23>,
MortonCopy<true, PixelFormat::DXT45>,
MortonCopy<true, PixelFormat::ABGR8>, MortonCopy<true, PixelFormat::B5G6R5>,
MortonCopy<true, PixelFormat::A2B10G10R10>, MortonCopy<true, PixelFormat::DXT1>,
MortonCopy<true, PixelFormat::DXT23>, MortonCopy<true, PixelFormat::DXT45>,
};
static constexpr std::array<void (*)(u32, u32, u32, u8*, VAddr, VAddr, VAddr),
static constexpr std::array<void (*)(u32, u32, u32, u8*, Tegra::GPUVAddr, Tegra::GPUVAddr,
Tegra::GPUVAddr),
SurfaceParams::MaxPixelFormat>
gl_to_morton_fns = {
MortonCopy<false, PixelFormat::ABGR8>,
MortonCopy<false, PixelFormat::B5G6R5>,
MortonCopy<false, PixelFormat::A2B10G10R10>,
// TODO(Subv): Swizzling the DXT1/DXT23/DXT45 formats is not yet supported
nullptr,
nullptr,
@@ -219,9 +221,9 @@ SurfaceParams SurfaceParams::FromInterval(SurfaceInterval interval) const {
SurfaceParams params = *this;
const u32 tiled_size = is_tiled ? 8 : 1;
const u64 stride_tiled_bytes = BytesInPixels(stride * tiled_size);
VAddr aligned_start =
Tegra::GPUVAddr aligned_start =
addr + Common::AlignDown(boost::icl::first(interval) - addr, stride_tiled_bytes);
VAddr aligned_end =
Tegra::GPUVAddr aligned_end =
addr + Common::AlignUp(boost::icl::last_next(interval) - addr, stride_tiled_bytes);
if (aligned_end - aligned_start > stride_tiled_bytes) {
@@ -342,6 +344,13 @@ bool SurfaceParams::CanTexCopy(const SurfaceParams& texcopy_params) const {
return FromInterval(texcopy_params.GetInterval()).GetInterval() == texcopy_params.GetInterval();
}
VAddr SurfaceParams::GetCpuAddr() const {
// When this function is used, only cpu_addr or (GPU) addr should be set, not both
ASSERT(!(cpu_addr && addr));
const auto& gpu = Core::System::GetInstance().GPU();
return cpu_addr.get_value_or(*gpu.memory_manager->GpuToCpuAddress(addr));
}
bool CachedSurface::CanFill(const SurfaceParams& dest_surface,
SurfaceInterval fill_interval) const {
if (type == SurfaceType::Fill && IsRegionValid(fill_interval) &&
@@ -349,9 +358,9 @@ bool CachedSurface::CanFill(const SurfaceParams& dest_surface,
boost::icl::last_next(fill_interval) <= end && // dest_surface is within our fill range
dest_surface.FromInterval(fill_interval).GetInterval() ==
fill_interval) { // make sure interval is a rectangle in dest surface
if (fill_size * 8 != dest_surface.GetFormatBpp()) {
if (fill_size * CHAR_BIT != dest_surface.GetFormatBpp()) {
// Check if bits repeat for our fill_size
const u32 dest_bytes_per_pixel = std::max(dest_surface.GetFormatBpp() / 8, 1u);
const u32 dest_bytes_per_pixel = std::max(dest_surface.GetFormatBpp() / CHAR_BIT, 1u);
std::vector<u8> fill_test(fill_size * dest_bytes_per_pixel);
for (u32 i = 0; i < dest_bytes_per_pixel; ++i)
@@ -456,15 +465,15 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac
}
MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 64, 192));
void CachedSurface::LoadGLBuffer(VAddr load_start, VAddr load_end) {
void CachedSurface::LoadGLBuffer(Tegra::GPUVAddr load_start, Tegra::GPUVAddr load_end) {
ASSERT(type != SurfaceType::Fill);
u8* const texture_src_data = Memory::GetPointer(addr);
u8* const texture_src_data = Memory::GetPointer(GetCpuAddr());
if (texture_src_data == nullptr)
return;
if (gl_buffer == nullptr) {
gl_buffer_size = width * height * GetGLBytesPerPixel(pixel_format);
gl_buffer_size = GetActualWidth() * GetActualHeight() * GetGLBytesPerPixel(pixel_format);
gl_buffer.reset(new u8[gl_buffer_size]);
}
@@ -479,14 +488,15 @@ void CachedSurface::LoadGLBuffer(VAddr load_start, VAddr load_end) {
std::memcpy(&gl_buffer[start_offset], texture_src_data + start_offset,
bytes_per_pixel * width * height);
} else {
morton_to_gl_fns[static_cast<size_t>(pixel_format)](
stride, block_height, height, &gl_buffer[0], addr, load_start, load_end);
morton_to_gl_fns[static_cast<size_t>(pixel_format)](GetActualWidth(), block_height,
GetActualHeight(), &gl_buffer[0], addr,
load_start, load_end);
}
}
MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64));
void CachedSurface::FlushGLBuffer(VAddr flush_start, VAddr flush_end) {
u8* const dst_buffer = Memory::GetPointer(addr);
void CachedSurface::FlushGLBuffer(Tegra::GPUVAddr flush_start, Tegra::GPUVAddr flush_end) {
u8* const dst_buffer = Memory::GetPointer(GetCpuAddr());
if (dst_buffer == nullptr)
return;
@@ -536,7 +546,8 @@ void CachedSurface::UploadGLTexture(const MathUtil::Rectangle<u32>& rect, GLuint
MICROPROFILE_SCOPE(OpenGL_TextureUL);
ASSERT(gl_buffer_size == width * height * GetGLBytesPerPixel(pixel_format));
ASSERT(gl_buffer_size ==
GetActualWidth() * GetActualHeight() * GetGLBytesPerPixel(pixel_format));
// Load data from memory to the surface
GLint x0 = static_cast<GLint>(rect.left);
@@ -571,11 +582,9 @@ void CachedSurface::UploadGLTexture(const MathUtil::Rectangle<u32>& rect, GLuint
glActiveTexture(GL_TEXTURE0);
if (tuple.compressed) {
glCompressedTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format,
static_cast<GLsizei>(rect.GetWidth()),
static_cast<GLsizei>(rect.GetHeight()), 0,
rect.GetWidth() * rect.GetHeight() *
GetGLBytesPerPixel(pixel_format) / tuple.compression_factor,
&gl_buffer[buffer_offset]);
static_cast<GLsizei>(rect.GetWidth() * GetCompresssionFactor()),
static_cast<GLsizei>(rect.GetHeight() * GetCompresssionFactor()), 0,
size, &gl_buffer[buffer_offset]);
} else {
glTexSubImage2D(GL_TEXTURE_2D, 0, x0, y0, static_cast<GLsizei>(rect.GetWidth()),
static_cast<GLsizei>(rect.GetHeight()), tuple.format, tuple.type,
@@ -945,6 +954,33 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, ScaleMatc
return surface;
}
boost::optional<Tegra::GPUVAddr> RasterizerCacheOpenGL::TryFindFramebufferGpuAddress(
VAddr cpu_addr) const {
// Tries to find the GPU address of a framebuffer based on the CPU address. This is because
// final output framebuffers are specified by CPU address, but internally our GPU cache uses GPU
// addresses. We iterate through all cached framebuffers, and compare their starting CPU address
// to the one provided. This is obviously not great, and won't work if the framebuffer overlaps
// surfaces.
std::vector<Tegra::GPUVAddr> gpu_addresses;
for (const auto& pair : surface_cache) {
for (const auto& surface : pair.second) {
const VAddr surface_cpu_addr = surface->GetCpuAddr();
if (cpu_addr >= surface_cpu_addr && cpu_addr < (surface_cpu_addr + surface->size)) {
ASSERT_MSG(cpu_addr == surface_cpu_addr, "overlapping surfaces are unsupported");
gpu_addresses.push_back(surface->addr);
}
}
}
if (gpu_addresses.empty()) {
return {};
}
ASSERT_MSG(gpu_addresses.size() == 1, ">1 surface is unsupported");
return gpu_addresses[0];
}
SurfaceRect_Tuple RasterizerCacheOpenGL::GetSurfaceSubRect(const SurfaceParams& params,
ScaleMatch match_res_scale,
bool load_if_create) {
@@ -1028,11 +1064,11 @@ Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextu
auto& gpu = Core::System::GetInstance().GPU();
SurfaceParams params;
params.addr = gpu.memory_manager->PhysicalToVirtualAddress(config.tic.Address());
params.width = config.tic.Width();
params.height = config.tic.Height();
params.addr = config.tic.Address();
params.is_tiled = config.tic.IsTiled();
params.pixel_format = SurfaceParams::PixelFormatFromTextureFormat(config.tic.format);
params.width = config.tic.Width() / params.GetCompresssionFactor();
params.height = config.tic.Height() / params.GetCompresssionFactor();
// TODO(Subv): Different types per component are not supported.
ASSERT(config.tic.r_type.Value() == config.tic.g_type.Value() &&
@@ -1045,7 +1081,7 @@ Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextu
params.block_height = config.tic.BlockHeight();
} else {
// Use the texture-provided stride value if the texture isn't tiled.
params.stride = params.PixelsInBytes(config.tic.Pitch());
params.stride = static_cast<u32>(params.PixelsInBytes(config.tic.Pitch()));
}
params.UpdateParams();
@@ -1073,11 +1109,10 @@ Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextu
SurfaceSurfaceRect_Tuple RasterizerCacheOpenGL::GetFramebufferSurfaces(
bool using_color_fb, bool using_depth_fb, const MathUtil::Rectangle<s32>& viewport) {
const auto& regs = Core::System().GetInstance().GPU().Maxwell3D().regs;
const auto& memory_manager = Core::System().GetInstance().GPU().memory_manager;
const auto& config = regs.rt[0];
// TODO(bunnei): This is hard-coded to use just the first render buffer
LOG_WARNING(Render_OpenGL, "hard-coded for render target 0!");
NGLOG_WARNING(Render_OpenGL, "hard-coded for render target 0!");
// update resolution_scale_factor and reset cache if changed
// TODO (bunnei): This code was ported as-is from Citra, and is technically not thread-safe. We
@@ -1106,7 +1141,7 @@ SurfaceSurfaceRect_Tuple RasterizerCacheOpenGL::GetFramebufferSurfaces(
color_params.block_height = Tegra::Texture::TICEntry::DefaultBlockHeight;
SurfaceParams depth_params = color_params;
color_params.addr = memory_manager->PhysicalToVirtualAddress(config.Address());
color_params.addr = config.Address();
color_params.pixel_format = SurfaceParams::PixelFormatFromRenderTargetFormat(config.format);
color_params.component_type = SurfaceParams::ComponentTypeFromRenderTarget(config.format);
color_params.UpdateParams();
@@ -1122,8 +1157,8 @@ SurfaceSurfaceRect_Tuple RasterizerCacheOpenGL::GetFramebufferSurfaces(
// Make sure that framebuffers don't overlap if both color and depth are being used
if (using_color_fb && using_depth_fb &&
boost::icl::length(color_vp_interval & depth_vp_interval)) {
LOG_CRITICAL(Render_OpenGL, "Color and depth framebuffer memory regions overlap; "
"overlapping framebuffers not supported!");
NGLOG_CRITICAL(Render_OpenGL, "Color and depth framebuffer memory regions overlap; "
"overlapping framebuffers not supported!");
using_depth_fb = false;
}
@@ -1222,7 +1257,8 @@ void RasterizerCacheOpenGL::DuplicateSurface(const Surface& src_surface,
}
}
void RasterizerCacheOpenGL::ValidateSurface(const Surface& surface, VAddr addr, u64 size) {
void RasterizerCacheOpenGL::ValidateSurface(const Surface& surface, Tegra::GPUVAddr addr,
u64 size) {
if (size == 0)
return;
@@ -1261,7 +1297,7 @@ void RasterizerCacheOpenGL::ValidateSurface(const Surface& surface, VAddr addr,
}
}
void RasterizerCacheOpenGL::FlushRegion(VAddr addr, u64 size, Surface flush_surface) {
void RasterizerCacheOpenGL::FlushRegion(Tegra::GPUVAddr addr, u64 size, Surface flush_surface) {
if (size == 0)
return;
@@ -1297,7 +1333,8 @@ void RasterizerCacheOpenGL::FlushAll() {
FlushRegion(0, Kernel::VMManager::MAX_ADDRESS);
}
void RasterizerCacheOpenGL::InvalidateRegion(VAddr addr, u64 size, const Surface& region_owner) {
void RasterizerCacheOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size,
const Surface& region_owner) {
if (size == 0)
return;
@@ -1390,10 +1427,10 @@ void RasterizerCacheOpenGL::UnregisterSurface(const Surface& surface) {
surface_cache.subtract({surface->GetInterval(), SurfaceSet{surface}});
}
void RasterizerCacheOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
const u64 num_pages =
((addr + size - 1) >> Memory::PAGE_BITS) - (addr >> Memory::PAGE_BITS) + 1;
const u64 page_start = addr >> Memory::PAGE_BITS;
void RasterizerCacheOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {
const u64 num_pages = ((addr + size - 1) >> Tegra::MemoryManager::PAGE_BITS) -
(addr >> Tegra::MemoryManager::PAGE_BITS) + 1;
const u64 page_start = addr >> Tegra::MemoryManager::PAGE_BITS;
const u64 page_end = page_start + num_pages;
// Interval maps will erase segments if count reaches 0, so if delta is negative we have to
@@ -1406,8 +1443,10 @@ void RasterizerCacheOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int del
const auto interval = pair.first & pages_interval;
const int count = pair.second;
const VAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS;
const VAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS;
const Tegra::GPUVAddr interval_start_addr = boost::icl::first(interval)
<< Tegra::MemoryManager::PAGE_BITS;
const Tegra::GPUVAddr interval_end_addr = boost::icl::last_next(interval)
<< Tegra::MemoryManager::PAGE_BITS;
const u64 interval_size = interval_end_addr - interval_start_addr;
if (delta > 0 && count == delta)
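UpdatePagesCachedCount now counts pages with the 64 KiB GPU page size (Tegra::MemoryManager::PAGE_BITS = 16) rather than the CPU one. A quick check of the num_pages expression above with example addresses:

#include <cstdint>

constexpr std::uint64_t GPU_PAGE_BITS = 16;  // Tegra::MemoryManager::PAGE_BITS

constexpr std::uint64_t NumPages(std::uint64_t addr, std::uint64_t size) {
    return ((addr + size - 1) >> GPU_PAGE_BITS) - (addr >> GPU_PAGE_BITS) + 1;
}

// A 32-byte region that straddles a 64 KiB boundary touches two pages...
static_assert(NumPages(0x1FFF0, 0x20) == 2, "straddles one page boundary");
// ...while the same size fully inside a page touches just one.
static_assert(NumPages(0x20000, 0x20) == 1, "fits in a single page");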

View File

@@ -17,12 +17,14 @@
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#include <boost/optional.hpp>
#include <glad/glad.h>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/math_util.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/textures/texture.h"
@@ -30,9 +32,9 @@ struct CachedSurface;
using Surface = std::shared_ptr<CachedSurface>;
using SurfaceSet = std::set<Surface>;
using SurfaceRegions = boost::icl::interval_set<VAddr>;
using SurfaceMap = boost::icl::interval_map<VAddr, Surface>;
using SurfaceCache = boost::icl::interval_map<VAddr, SurfaceSet>;
using SurfaceRegions = boost::icl::interval_set<Tegra::GPUVAddr>;
using SurfaceMap = boost::icl::interval_map<Tegra::GPUVAddr, Surface>;
using SurfaceCache = boost::icl::interval_map<Tegra::GPUVAddr, SurfaceSet>;
using SurfaceInterval = SurfaceCache::interval_type;
static_assert(std::is_same<SurfaceRegions::interval_type, SurfaceCache::interval_type>() &&
@@ -54,9 +56,10 @@ struct SurfaceParams {
enum class PixelFormat {
ABGR8 = 0,
B5G6R5 = 1,
DXT1 = 2,
DXT23 = 3,
DXT45 = 4,
A2B10G10R10 = 2,
DXT1 = 3,
DXT23 = 4,
DXT45 = 5,
Max,
Invalid = 255,
@@ -81,22 +84,49 @@ struct SurfaceParams {
Invalid = 4,
};
static constexpr unsigned int GetFormatBpp(PixelFormat format) {
/**
* Gets the compression factor for the specified PixelFormat. This applies to just the
* "compressed width" and "compressed height", not the overall compression factor of a
* compressed image. This is used for maintaining proper surface sizes for compressed texture
* formats.
*/
static constexpr u32 GetCompresssionFactor(PixelFormat format) {
if (format == PixelFormat::Invalid)
return 0;
constexpr std::array<unsigned int, MaxPixelFormat> bpp_table = {
constexpr std::array<u32, MaxPixelFormat> compression_factor_table = {{
1, // ABGR8
1, // B5G6R5
1, // A2B10G10R10
4, // DXT1
4, // DXT23
4, // DXT45
}};
ASSERT(static_cast<size_t>(format) < compression_factor_table.size());
return compression_factor_table[static_cast<size_t>(format)];
}
u32 GetCompresssionFactor() const {
return GetCompresssionFactor(pixel_format);
}
static constexpr u32 GetFormatBpp(PixelFormat format) {
if (format == PixelFormat::Invalid)
return 0;
constexpr std::array<u32, MaxPixelFormat> bpp_table = {{
32, // ABGR8
16, // B5G6R5
32, // A2B10G10R10
64, // DXT1
128, // DXT23
128, // DXT45
};
}};
ASSERT(static_cast<size_t>(format) < bpp_table.size());
return bpp_table[static_cast<size_t>(format)];
}
unsigned int GetFormatBpp() const {
u32 GetFormatBpp() const {
return GetFormatBpp(pixel_format);
}
@@ -104,6 +134,8 @@ struct SurfaceParams {
switch (format) {
case Tegra::RenderTargetFormat::RGBA8_UNORM:
return PixelFormat::ABGR8;
case Tegra::RenderTargetFormat::RGB10_A2_UNORM:
return PixelFormat::A2B10G10R10;
default:
NGLOG_CRITICAL(HW_GPU, "Unimplemented format={}", static_cast<u32>(format));
UNREACHABLE();
@@ -127,6 +159,8 @@ struct SurfaceParams {
return PixelFormat::ABGR8;
case Tegra::Texture::TextureFormat::B5G6R5:
return PixelFormat::B5G6R5;
case Tegra::Texture::TextureFormat::A2B10G10R10:
return PixelFormat::A2B10G10R10;
case Tegra::Texture::TextureFormat::DXT1:
return PixelFormat::DXT1;
case Tegra::Texture::TextureFormat::DXT23:
@@ -146,6 +180,8 @@ struct SurfaceParams {
return Tegra::Texture::TextureFormat::A8R8G8B8;
case PixelFormat::B5G6R5:
return Tegra::Texture::TextureFormat::B5G6R5;
case PixelFormat::A2B10G10R10:
return Tegra::Texture::TextureFormat::A2B10G10R10;
case PixelFormat::DXT1:
return Tegra::Texture::TextureFormat::DXT1;
case PixelFormat::DXT23:
@@ -245,6 +281,24 @@ struct SurfaceParams {
// Returns the region of the biggest valid rectangle within interval
SurfaceInterval GetCopyableInterval(const Surface& src_surface) const;
/**
* Gets the actual width (in pixels) of the surface. This is provided because `width` is used
* for tracking the surface region in memory, which may be compressed for certain formats. In
* this scenario, `width` is actually the compressed width.
*/
u32 GetActualWidth() const {
return width * GetCompresssionFactor();
}
/**
* Gets the actual height (in pixels) of the surface. This is provided because `height` is used
* for tracking the surface region in memory, which may be compressed for certain formats. In
* this scenario, `height` is actually the compressed height.
*/
u32 GetActualHeight() const {
return height * GetCompressionFactor();
}
u32 GetScaledWidth() const {
return width * res_scale;
}
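To keep the two notions of "width" apart, a tiny sketch with assumed numbers (res_scale is the internal resolution multiplier that GetScaledWidth() applies): GetActualWidth() undoes the block compression to recover real pixels, while GetScaledWidth() scales the tracked size for the resolution-scaled GL texture.
// Sketch with assumed values: a DXT1 surface tracked as 128x128 blocks,
// rendered at an assumed res_scale of 2.
#include <cstdint>

constexpr std::uint32_t tracked_width = 128;     // SurfaceParams::width (compressed units)
constexpr std::uint32_t compression_factor = 4;  // GetCompressionFactor(PixelFormat::DXT1)
constexpr std::uint32_t res_scale = 2;           // internal resolution multiplier (assumed)

constexpr std::uint32_t actual_width = tracked_width * compression_factor; // GetActualWidth() == 512
constexpr std::uint32_t scaled_width = tracked_width * res_scale;          // GetScaledWidth() == 256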
@@ -269,6 +323,8 @@ struct SurfaceParams {
return pixels * GetFormatBpp(pixel_format) / CHAR_BIT;
}
VAddr GetCpuAddr() const;
bool ExactMatch(const SurfaceParams& other_surface) const;
bool CanSubRect(const SurfaceParams& sub_surface) const;
bool CanExpand(const SurfaceParams& expanded_surface) const;
@@ -277,8 +333,9 @@ struct SurfaceParams {
MathUtil::Rectangle<u32> GetSubRect(const SurfaceParams& sub_surface) const;
MathUtil::Rectangle<u32> GetScaledSubRect(const SurfaceParams& sub_surface) const;
VAddr addr = 0;
VAddr end = 0;
Tegra::GPUVAddr addr = 0;
Tegra::GPUVAddr end = 0;
boost::optional<VAddr> cpu_addr;
u64 size = 0;
u32 width = 0;
@@ -317,15 +374,15 @@ struct CachedSurface : SurfaceParams {
if (format == PixelFormat::Invalid)
return 0;
return SurfaceParams::GetFormatBpp(format) / 8;
return SurfaceParams::GetFormatBpp(format) / CHAR_BIT;
}
std::unique_ptr<u8[]> gl_buffer;
size_t gl_buffer_size = 0;
// Read/Write data in Switch memory to/from gl_buffer
void LoadGLBuffer(VAddr load_start, VAddr load_end);
void FlushGLBuffer(VAddr flush_start, VAddr flush_end);
void LoadGLBuffer(Tegra::GPUVAddr load_start, Tegra::GPUVAddr load_end);
void FlushGLBuffer(Tegra::GPUVAddr flush_start, Tegra::GPUVAddr flush_end);
// Upload/Download data in gl_buffer in/to this surface's texture
void UploadGLTexture(const MathUtil::Rectangle<u32>& rect, GLuint read_fb_handle,
@@ -354,6 +411,9 @@ public:
Surface GetSurface(const SurfaceParams& params, ScaleMatch match_res_scale,
bool load_if_create);
/// Tries to find a framebuffer GPU address based on the provided CPU address
boost::optional<Tegra::GPUVAddr> TryFindFramebufferGpuAddress(VAddr cpu_addr) const;
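A hypothetical caller sketch (the cache class name and the fallback path here are assumptions, not part of this change) showing why the return type is an optional: the CPU address may not correspond to any cached framebuffer surface, in which case the caller has to fall back to a plain virtual-memory flush.
// Hypothetical usage sketch; "RasterizerCacheOpenGL" is assumed for illustration only.
void FlushFramebuffer(RasterizerCacheOpenGL& cache, VAddr cpu_addr, u64 size) {
    if (const auto gpu_addr = cache.TryFindFramebufferGpuAddress(cpu_addr)) {
        // The framebuffer is backed by a cached surface: flush via its GPU address.
        cache.FlushRegion(*gpu_addr, size);
    } else {
        // No cached surface covers this address: flush the CPU-side region instead.
        Memory::RasterizerFlushVirtualRegion(cpu_addr, size, Memory::FlushMode::Flush);
    }
}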
/// Attempt to find a subrect (resolution scaled) of a surface, otherwise loads a texture from
/// Switch memory to OpenGL and caches it (if not already cached)
SurfaceRect_Tuple GetSurfaceSubRect(const SurfaceParams& params, ScaleMatch match_res_scale,
@@ -373,10 +433,10 @@ public:
SurfaceRect_Tuple GetTexCopySurface(const SurfaceParams& params);
/// Write any cached resources overlapping the region back to memory (if dirty)
void FlushRegion(VAddr addr, u64 size, Surface flush_surface = nullptr);
void FlushRegion(Tegra::GPUVAddr addr, u64 size, Surface flush_surface = nullptr);
/// Mark region as being invalidated by region_owner (nullptr if Switch memory)
void InvalidateRegion(VAddr addr, u64 size, const Surface& region_owner);
void InvalidateRegion(Tegra::GPUVAddr addr, u64 size, const Surface& region_owner);
/// Flush all cached resources tracked by this cache manager
void FlushAll();
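A rough sketch of the flush/invalidate protocol these declarations imply (hypothetical helper and class name; the real call sites are elsewhere): dirty surfaces overlapping a region are written back first, then the region is invalidated so affected surfaces are re-uploaded on next use.
// Hypothetical helper, illustration only. Passing nullptr as region_owner means
// the modification comes from Switch memory rather than from another cached
// surface, per the comment above.
void FlushAndInvalidateRegion(RasterizerCacheOpenGL& cache, Tegra::GPUVAddr addr, u64 size) {
    cache.FlushRegion(addr, size);
    cache.InvalidateRegion(addr, size, nullptr);
}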
@@ -385,7 +445,7 @@ private:
void DuplicateSurface(const Surface& src_surface, const Surface& dest_surface);
/// Update surface's texture for given region when necessary
void ValidateSurface(const Surface& surface, VAddr addr, u64 size);
void ValidateSurface(const Surface& surface, Tegra::GPUVAddr addr, u64 size);
/// Create a new surface
Surface CreateSurface(const SurfaceParams& params);
@@ -397,7 +457,7 @@ private:
void UnregisterSurface(const Surface& surface);
/// Increase/decrease the number of surfaces in pages touching the specified region
void UpdatePagesCachedCount(VAddr addr, u64 size, int delta);
void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta);
SurfaceCache surface_cache;
PageMap cached_pages;

View File

@@ -9,13 +9,10 @@
#include <memory>
#include <glad/glad.h>
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/frontend/emu_window.h"
#include "core/hw/hw.h"
#include "core/hw/lcd.h"
#include "core/memory.h"
#include "core/settings.h"
#include "core/tracer/recorder.h"
@@ -155,7 +152,8 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf
screen_info.display_texture = screen_info.texture.resource.handle;
screen_info.display_texcoords = MathUtil::Rectangle<float>(0.f, 0.f, 1.f, 1.f);
Rasterizer()->FlushRegion(framebuffer_addr, size_in_bytes);
Memory::RasterizerFlushVirtualRegion(framebuffer_addr, size_in_bytes,
Memory::FlushMode::Flush);
VideoCore::MortonCopyPixels128(framebuffer.width, framebuffer.height, bytes_per_pixel, 4,
Memory::GetPointer(framebuffer_addr),
@@ -272,10 +270,9 @@ void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture,
GLint internal_format;
switch (framebuffer.pixel_format) {
case Tegra::FramebufferConfig::PixelFormat::ABGR8:
// Use RGBA8 and swap in the fragment shader
internal_format = GL_RGBA;
texture.gl_format = GL_RGBA;
texture.gl_type = GL_UNSIGNED_INT_8_8_8_8;
texture.gl_type = GL_UNSIGNED_INT_8_8_8_8_REV;
gl_framebuffer_data.resize(texture.width * texture.height * 4);
break;
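A short aside on the GL_UNSIGNED_INT_8_8_8_8_REV change (my reading of it, stated as an assumption rather than taken from the commit): with the _REV packed type, GL assigns the first GL_RGBA component to the least significant byte of the 32-bit word, which is where an ABGR8 pixel keeps its red channel on a little-endian host, so no shader-side swizzle is needed. A minimal standalone check:
// Illustration only; assumes a little-endian host, as on the platforms yuzu targets.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    // One ABGR8 pixel as it sits in framebuffer memory: bytes R, G, B, A,
    // which read back as the 32-bit word 0xAABBGGRR on a little-endian host.
    const std::uint8_t bytes[4] = {0x10 /*R*/, 0x20 /*G*/, 0x30 /*B*/, 0xFF /*A*/};
    std::uint32_t word;
    std::memcpy(&word, bytes, sizeof(word));

    // GL_RGBA + GL_UNSIGNED_INT_8_8_8_8_REV: component i is taken from bits [8*i, 8*i+7],
    // so R comes from the low byte and A from the high byte, matching the layout above.
    std::printf("R=%02X G=%02X B=%02X A=%02X\n", word & 0xFF, (word >> 8) & 0xFF,
                (word >> 16) & 0xFF, (word >> 24) & 0xFF);
    return 0;
}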
default:
@@ -298,17 +295,18 @@ void RendererOpenGL::DrawScreenTriangles(const ScreenInfo& screen_info, float x,
const auto& texcoords = screen_info.display_texcoords;
auto left = texcoords.left;
auto right = texcoords.right;
if (framebuffer_transform_flags != Tegra::FramebufferConfig::TransformFlags::Unset)
if (framebuffer_transform_flags != Tegra::FramebufferConfig::TransformFlags::Unset) {
if (framebuffer_transform_flags == Tegra::FramebufferConfig::TransformFlags::FlipV) {
// Flip the framebuffer vertically
left = texcoords.right;
right = texcoords.left;
} else {
// Other transformations are unsupported
LOG_CRITICAL(Render_OpenGL, "Unsupported framebuffer_transform_flags=%d",
framebuffer_transform_flags);
NGLOG_CRITICAL(Render_OpenGL, "Unsupported framebuffer_transform_flags={}",
static_cast<u32>(framebuffer_transform_flags));
UNIMPLEMENTED();
}
}
std::array<ScreenRectVertex, 4> vertices = {{
ScreenRectVertex(x, y, texcoords.top, left),
@@ -430,9 +428,9 @@ bool RendererOpenGL::Init() {
const char* gpu_vendor{reinterpret_cast<char const*>(glGetString(GL_VENDOR))};
const char* gpu_model{reinterpret_cast<char const*>(glGetString(GL_RENDERER))};
LOG_INFO(Render_OpenGL, "GL_VERSION: %s", gl_version);
LOG_INFO(Render_OpenGL, "GL_VENDOR: %s", gpu_vendor);
LOG_INFO(Render_OpenGL, "GL_RENDERER: %s", gpu_model);
NGLOG_INFO(Render_OpenGL, "GL_VERSION: {}", gl_version);
NGLOG_INFO(Render_OpenGL, "GL_VENDOR: {}", gpu_vendor);
NGLOG_INFO(Render_OpenGL, "GL_RENDERER: {}", gpu_model);
Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Vendor", gpu_vendor);
Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model);

View File

@@ -4,6 +4,7 @@
#include <cstring>
#include "common/assert.h"
#include "core/memory.h"
#include "video_core/textures/decoders.h"
#include "video_core/textures/texture.h"
@@ -53,6 +54,7 @@ u32 BytesPerPixel(TextureFormat format) {
// In this case a 'pixel' actually refers to a 4x4 tile.
return 16;
case TextureFormat::A8R8G8B8:
case TextureFormat::A2B10G10R10:
return 4;
case TextureFormat::B5G6R5:
return 2;
@@ -78,6 +80,7 @@ std::vector<u8> UnswizzleTexture(VAddr address, TextureFormat format, u32 width,
unswizzled_data.data(), true, block_height);
break;
case TextureFormat::A8R8G8B8:
case TextureFormat::A2B10G10R10:
case TextureFormat::B5G6R5:
CopySwizzledData(width, height, bytes_per_pixel, bytes_per_pixel, data,
unswizzled_data.data(), true, block_height);
@@ -100,6 +103,7 @@ std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat
case TextureFormat::DXT23:
case TextureFormat::DXT45:
case TextureFormat::A8R8G8B8:
case TextureFormat::A2B10G10R10:
case TextureFormat::B5G6R5:
// TODO(Subv): For the time being just forward the same data without any decoding.
rgba_data = texture_data;

View File

@@ -15,6 +15,7 @@ namespace Texture {
enum class TextureFormat : u32 {
A8R8G8B8 = 0x8,
A2B10G10R10 = 0x9,
B5G6R5 = 0x15,
DXT1 = 0x24,
DXT23 = 0x25,

View File

@@ -24,9 +24,9 @@ bool Init(EmuWindow* emu_window) {
g_renderer = std::make_unique<RendererOpenGL>();
g_renderer->SetWindow(g_emu_window);
if (g_renderer->Init()) {
LOG_DEBUG(Render, "initialized OK");
NGLOG_DEBUG(Render, "initialized OK");
} else {
LOG_CRITICAL(Render, "initialization failed !");
NGLOG_CRITICAL(Render, "initialization failed !");
return false;
}
return true;
@@ -36,7 +36,7 @@ bool Init(EmuWindow* emu_window) {
void Shutdown() {
g_renderer.reset();
LOG_DEBUG(Render, "shutdown OK");
NGLOG_DEBUG(Render, "shutdown OK");
}
} // namespace VideoCore

View File

@@ -25,6 +25,8 @@ static Tegra::Texture::TextureFormat ConvertToTextureFormat(
switch (render_target_format) {
case Tegra::RenderTargetFormat::RGBA8_UNORM:
return Tegra::Texture::TextureFormat::A8R8G8B8;
case Tegra::RenderTargetFormat::RGB10_A2_UNORM:
return Tegra::Texture::TextureFormat::A2B10G10R10;
default:
UNIMPLEMENTED_MSG("Unimplemented RT format");
}
@@ -376,10 +378,10 @@ void GraphicsSurfaceWidget::OnUpdate() {
// TODO: Implement a good way to visualize alpha components!
QImage decoded_image(surface_width, surface_height, QImage::Format_ARGB32);
VAddr address = gpu.memory_manager->PhysicalToVirtualAddress(surface_address);
boost::optional<VAddr> address = gpu.memory_manager->GpuToCpuAddress(surface_address);
auto unswizzled_data =
Tegra::Texture::UnswizzleTexture(address, surface_format, surface_width, surface_height);
Tegra::Texture::UnswizzleTexture(*address, surface_format, surface_width, surface_height);
auto texture_data = Tegra::Texture::DecodeTexture(unswizzled_data, surface_format,
surface_width, surface_height);
@@ -435,9 +437,9 @@ void GraphicsSurfaceWidget::SaveSurface() {
pixmap->save(&file, "PNG");
} else if (selectedFilter == bin_filter) {
auto& gpu = Core::System::GetInstance().GPU();
VAddr address = gpu.memory_manager->PhysicalToVirtualAddress(surface_address);
boost::optional<VAddr> address = gpu.memory_manager->GpuToCpuAddress(surface_address);
const u8* buffer = Memory::GetPointer(address);
const u8* buffer = Memory::GetPointer(*address);
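// Note (editorial): GpuToCpuAddress() returns an empty optional when the GPU address
// has no CPU mapping; dereferencing it here assumes the surface address chosen in the
// debugger is currently mapped.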
ASSERT_MSG(buffer != nullptr, "Memory not accessible");
QFile file(filename);

View File

@@ -6,8 +6,8 @@
#include "yuzu/util/util.h"
#include "core/core.h"
#include "core/hle/kernel/condition_variable.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
@@ -67,6 +67,29 @@ QString WaitTreeText::GetText() const {
return text;
}
WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address) : mutex_address(mutex_address) {
mutex_value = Memory::Read32(mutex_address);
owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Mutex::MutexOwnerMask);
owner = Kernel::g_handle_table.Get<Kernel::Thread>(owner_handle);
}
QString WaitTreeMutexInfo::GetText() const {
return tr("waiting for mutex 0x%1").arg(mutex_address, 16, 16, QLatin1Char('0'));
}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list;
bool has_waiters = (mutex_value & Kernel::Mutex::MutexHasWaitersFlag) != 0;
list.push_back(std::make_unique<WaitTreeText>(tr("has waiters: %1").arg(has_waiters)));
list.push_back(std::make_unique<WaitTreeText>(
tr("owner handle: 0x%1").arg(owner_handle, 8, 16, QLatin1Char('0'))));
if (owner != nullptr)
list.push_back(std::make_unique<WaitTreeThread>(*owner));
return list;
}
WaitTreeWaitObject::WaitTreeWaitObject(const Kernel::WaitObject& o) : object(o) {}
bool WaitTreeExpandableItem::IsExpandable() const {
@@ -84,11 +107,6 @@ std::unique_ptr<WaitTreeWaitObject> WaitTreeWaitObject::make(const Kernel::WaitO
switch (object.GetHandleType()) {
case Kernel::HandleType::Event:
return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::Event&>(object));
case Kernel::HandleType::Mutex:
return std::make_unique<WaitTreeMutex>(static_cast<const Kernel::Mutex&>(object));
case Kernel::HandleType::ConditionVariable:
return std::make_unique<WaitTreeConditionVariable>(
static_cast<const Kernel::ConditionVariable&>(object));
case Kernel::HandleType::Timer:
return std::make_unique<WaitTreeTimer>(static_cast<const Kernel::Timer&>(object));
case Kernel::HandleType::Thread:
@@ -160,6 +178,9 @@ QString WaitTreeThread::GetText() const {
case THREADSTATUS_WAIT_SYNCH_ANY:
status = tr("waiting for objects");
break;
case THREADSTATUS_WAIT_MUTEX:
status = tr("waiting for mutex");
break;
case THREADSTATUS_DORMANT:
status = tr("dormant");
break;
@@ -186,6 +207,7 @@ QColor WaitTreeThread::GetColor() const {
return QColor(Qt::GlobalColor::darkYellow);
case THREADSTATUS_WAIT_SYNCH_ALL:
case THREADSTATUS_WAIT_SYNCH_ANY:
case THREADSTATUS_WAIT_MUTEX:
return QColor(Qt::GlobalColor::red);
case THREADSTATUS_DORMANT:
return QColor(Qt::GlobalColor::darkCyan);
@@ -225,11 +247,11 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
list.push_back(std::make_unique<WaitTreeText>(
tr("last running ticks = %1").arg(thread.last_running_ticks)));
if (thread.held_mutexes.empty()) {
list.push_back(std::make_unique<WaitTreeText>(tr("not holding mutex")));
} else {
list.push_back(std::make_unique<WaitTreeMutexList>(thread.held_mutexes));
}
if (thread.mutex_wait_address != 0)
list.push_back(std::make_unique<WaitTreeMutexInfo>(thread.mutex_wait_address));
else
list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex")));
if (thread.status == THREADSTATUS_WAIT_SYNCH_ANY ||
thread.status == THREADSTATUS_WAIT_SYNCH_ALL) {
list.push_back(std::make_unique<WaitTreeObjectList>(thread.wait_objects,
@@ -250,33 +272,6 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeEvent::GetChildren() const {
return list;
}
WaitTreeMutex::WaitTreeMutex(const Kernel::Mutex& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutex::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& mutex = static_cast<const Kernel::Mutex&>(object);
if (mutex.GetHasWaiters()) {
list.push_back(std::make_unique<WaitTreeText>(tr("locked by thread:")));
list.push_back(std::make_unique<WaitTreeThread>(*mutex.GetHoldingThread()));
} else {
list.push_back(std::make_unique<WaitTreeText>(tr("free")));
}
return list;
}
WaitTreeConditionVariable::WaitTreeConditionVariable(const Kernel::ConditionVariable& object)
: WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeConditionVariable::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& condition_variable = static_cast<const Kernel::ConditionVariable&>(object);
list.push_back(std::make_unique<WaitTreeText>(
tr("available count = %1").arg(condition_variable.GetAvailableCount())));
return list;
}
WaitTreeTimer::WaitTreeTimer(const Kernel::Timer& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeTimer::GetChildren() const {
@@ -293,21 +288,6 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeTimer::GetChildren() const {
return list;
}
WaitTreeMutexList::WaitTreeMutexList(
const boost::container::flat_set<Kernel::SharedPtr<Kernel::Mutex>>& list)
: mutex_list(list) {}
QString WaitTreeMutexList::GetText() const {
return tr("holding mutexes");
}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexList::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(mutex_list.size());
std::transform(mutex_list.begin(), mutex_list.end(), list.begin(),
[](const auto& t) { return std::make_unique<WaitTreeMutex>(*t); });
return list;
}
WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::SharedPtr<Kernel::Thread>>& list)
: thread_list(list) {}

View File

@@ -16,8 +16,6 @@ class EmuThread;
namespace Kernel {
class WaitObject;
class Event;
class Mutex;
class ConditionVariable;
class Thread;
class Timer;
} // namespace Kernel
@@ -61,6 +59,20 @@ public:
bool IsExpandable() const override;
};
class WaitTreeMutexInfo : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeMutexInfo(VAddr mutex_address);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
VAddr mutex_address;
u32 mutex_value;
Kernel::Handle owner_handle;
Kernel::SharedPtr<Kernel::Thread> owner;
};
class WaitTreeWaitObject : public WaitTreeExpandableItem {
Q_OBJECT
public:
@@ -104,20 +116,6 @@ public:
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeMutex : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeMutex(const Kernel::Mutex& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeConditionVariable : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeConditionVariable(const Kernel::ConditionVariable& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeTimer : public WaitTreeWaitObject {
Q_OBJECT
public:
@@ -125,19 +123,6 @@ public:
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeMutexList : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeMutexList(
const boost::container::flat_set<Kernel::SharedPtr<Kernel::Mutex>>& list);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
const boost::container::flat_set<Kernel::SharedPtr<Kernel::Mutex>>& mutex_list;
};
class WaitTreeThreadList : public WaitTreeExpandableItem {
Q_OBJECT
public:

View File

@@ -56,7 +56,28 @@ void EmuWindow_SDL2::OnResize() {
UpdateCurrentFramebufferLayout(width, height);
}
EmuWindow_SDL2::EmuWindow_SDL2() {
void EmuWindow_SDL2::Fullscreen() {
if (SDL_SetWindowFullscreen(render_window, SDL_WINDOW_FULLSCREEN) == 0) {
return;
}
NGLOG_ERROR(Frontend, "Fullscreening failed: {}", SDL_GetError());
// Try a different fullscreening method
NGLOG_INFO(Frontend, "Attempting to use borderless fullscreen...");
if (SDL_SetWindowFullscreen(render_window, SDL_WINDOW_FULLSCREEN_DESKTOP) == 0) {
return;
}
NGLOG_ERROR(Frontend, "Borderless fullscreening failed: {}", SDL_GetError());
// Fallback algorithm: Maximise window.
// Works on all systems (unless something is seriously wrong), so no fallback for this one.
NGLOG_INFO(Frontend, "Falling back on a maximised window...");
SDL_MaximizeWindow(render_window);
}
EmuWindow_SDL2::EmuWindow_SDL2(bool fullscreen) {
InputCommon::Init();
SDL_SetMainReady();
@@ -90,6 +111,10 @@ EmuWindow_SDL2::EmuWindow_SDL2() {
exit(1);
}
if (fullscreen) {
Fullscreen();
}
gl_context = SDL_GL_CreateContext(render_window);
if (gl_context == nullptr) {

View File

@@ -12,7 +12,7 @@ struct SDL_Window;
class EmuWindow_SDL2 : public EmuWindow {
public:
EmuWindow_SDL2();
explicit EmuWindow_SDL2(bool fullscreen);
~EmuWindow_SDL2();
/// Swap buffers to display the next frame
@@ -43,6 +43,9 @@ private:
/// Called by PollEvents when any event that may cause the window to be resized occurs
void OnResize();
/// Called when the user passes the fullscreen parameter flag
void Fullscreen();
/// Called when a configuration change affects the minimal size of the window
void OnMinimalClientAreaChangeRequest(
const std::pair<unsigned, unsigned>& minimal_size) override;

View File

@@ -50,6 +50,7 @@ static void PrintHelp(const char* argv0) {
std::cout << "Usage: " << argv0
<< " [options] <filename>\n"
"-g, --gdbport=NUMBER Enable gdb stub on port NUMBER\n"
"-f, --fullscreen Start in fullscreen mode\n"
"-h, --help Display this help and exit\n"
"-v, --version Output version information and exit\n";
}
@@ -76,15 +77,18 @@ int main(int argc, char** argv) {
#endif
std::string filepath;
bool fullscreen = false;
static struct option long_options[] = {
{"gdbport", required_argument, 0, 'g'},
{"fullscreen", no_argument, 0, 'f'},
{"help", no_argument, 0, 'h'},
{"version", no_argument, 0, 'v'},
{0, 0, 0, 0},
};
while (optind < argc) {
char arg = getopt_long(argc, argv, "g:hv", long_options, &option_index);
char arg = getopt_long(argc, argv, "g:fhv", long_options, &option_index);
if (arg != -1) {
switch (arg) {
case 'g':
@@ -98,6 +102,10 @@ int main(int argc, char** argv) {
exit(1);
}
break;
case 'f':
fullscreen = true;
NGLOG_INFO(Frontend, "Starting in fullscreen mode...");
break;
case 'h':
PrintHelp(argv[0]);
return 0;
@@ -137,7 +145,7 @@ int main(int argc, char** argv) {
Settings::values.use_gdbstub = use_gdbstub;
Settings::Apply();
std::unique_ptr<EmuWindow_SDL2> emu_window{std::make_unique<EmuWindow_SDL2>()};
std::unique_ptr<EmuWindow_SDL2> emu_window{std::make_unique<EmuWindow_SDL2>(fullscreen)};
Core::System& system{Core::System::GetInstance()};