Compare commits

...

22 Commits

Author SHA1 Message Date
N00byKing
1d8b6ad13b Clang Fixes 2018-03-19 17:53:35 +01:00
N00byKing
d16e08454d oops 2018-03-19 17:43:04 +01:00
N00byKing
0e72d0d826 More Warning cleanups 2018-03-19 17:27:04 +01:00
N00byKing
ef875d6a35 Clean Warnings (?) 2018-03-19 17:07:08 +01:00
bunnei
b2d7c92cae Merge pull request #251 from Subv/tic_tsc
GPU: Added TIC and TSC registers to the Maxwell3D register structure.
2018-03-19 10:33:21 -04:00
Subv
dcae0c9a4f GPU: Added the TSC registers to the Maxwell3D register structure. 2018-03-19 00:36:25 -05:00
Subv
cff7b29bba GPU: Added the TIC registers to the Maxwell3D register structure. 2018-03-19 00:32:57 -05:00
bunnei
23a0d2d7b7 Merge pull request #193 from N00byKing/3184_2_robotic_boogaloo
Implement Pull #3184 from citra: core/arm: Improve timing accuracy before service calls in JIT (Rebased)
2018-03-18 22:35:47 -04:00
bunnei
2dc3a56e96 Merge pull request #250 from bunnei/buffer-dequeue-wait
vi: TransactParcel DequeueBuffer should wait current thread
2018-03-18 22:25:09 -04:00
bunnei
2332a44b68 Merge pull request #249 from Subv/macro_E1A
GPU: Implement macro 0xE1A BindTextureInfoBuffer in HLE.
2018-03-18 21:04:29 -04:00
bunnei
c1c92c30f9 vi: Remove DequeueBuffer and wait until next available buffer. 2018-03-18 20:56:35 -04:00
bunnei
c86af6939c hle_ipc: Add SleepClientThread to block current thread within HLE routines. 2018-03-18 20:56:34 -04:00
bunnei
2faa83ca13 hle_ipc: Use shared_ptr instead of unique_ptr to allow copies. 2018-03-18 20:56:33 -04:00
bunnei
019f1a0cf0 hle_ipc: Remove GetPointer(..) usage with WriteToOutgoingCommandBuffer. 2018-03-18 20:56:33 -04:00
bunnei
e353b9fb3d thread: Add THREADSTATUS_WAIT_HLE_EVENT, remove THREADSTATUS_WAIT_ARB. 2018-03-18 20:56:32 -04:00
Subv
03156d0c9a GPU: Implement macro 0xE1A BindTextureInfoBuffer in HLE.
This macro simply sets the current CB_ADDRESS to the texture buffer address for the input shader stage.
2018-03-18 19:03:40 -05:00
bunnei
6317a0b2ca Merge pull request #248 from Subv/cb_data
GPU: Handle writes to the CB_DATA method.
2018-03-18 19:45:40 -04:00
Subv
7b6868e908 GPU: Implement the BindStorageBuffer macro method in HLE.
This macro binds the SSBO Info Buffer as the current ConstBuffer.
This buffer is usually bound to c0 during shader execution.
Games seem to use this macro instead of directly writing the address for some reason.
2018-03-18 16:50:42 -05:00
Subv
85d820b1b4 GPU: Handle writes to the CB_DATA method.
Writing to this method will cause the written value to be stored in the currently-set ConstBuffer plus CB_POS.

This method is usually used to upload uniforms or other shader-visible data.
2018-03-18 15:23:24 -05:00
Subv
a64b936cbe GPU: Move the GPU's class constructor and destructors to a cpp file.
This should reduce recompile times when editing the Maxwell3D register structure.
2018-03-18 15:23:24 -05:00
Sebastian Valle
46f9d4b4a3 Merge pull request #246 from Subv/gpu_macro_calls
GPU: Store uploaded GPU macros and keep track of the number of method arguments.
2018-03-18 15:13:40 -05:00
N00byKing
bc88cae0c7 Implements citra-emu/citra#3184 2018-02-25 11:44:21 +01:00
31 changed files with 360 additions and 130 deletions

View File

@@ -25,19 +25,11 @@ public:
VAddr tls_address;
};
/**
* Runs the CPU for the given number of instructions
* @param num_instructions Number of instructions to run
*/
void Run(int num_instructions) {
ExecuteInstructions(num_instructions);
this->num_instructions += num_instructions;
}
/// Runs the CPU until an event happens
virtual void Run() = 0;
/// Step CPU by one instruction
void Step() {
Run(1);
}
virtual void Step() = 0;
/// Maps a backing memory region for the CPU
virtual void MapBackingMemory(VAddr address, size_t size, u8* memory,
@@ -126,19 +118,4 @@ public:
/// Prepare core for thread reschedule (if needed to correctly handle state)
virtual void PrepareReschedule() = 0;
/// Getter for num_instructions
u64 GetNumInstructions() const {
return num_instructions;
}
protected:
/**
* Executes the given number of instructions
* @param num_instructions Number of instructions to executes
*/
virtual void ExecuteInstructions(int num_instructions) = 0;
private:
u64 num_instructions = 0; ///< Number of instructions executed
};

View File

@@ -122,11 +122,22 @@ std::unique_ptr<Dynarmic::A64::Jit> MakeJit(const std::unique_ptr<ARM_Dynarmic_C
return std::make_unique<Dynarmic::A64::Jit>(config);
}
void ARM_Dynarmic::Run() {
ASSERT(Memory::GetCurrentPageTable() == current_page_table);
jit->Run();
}
void ARM_Dynarmic::Step() {
cb->InterpreterFallback(jit->GetPC(), 1);
}
ARM_Dynarmic::ARM_Dynarmic()
: cb(std::make_unique<ARM_Dynarmic_Callbacks>(*this)), jit(MakeJit(cb)) {
ARM_Interface::ThreadContext ctx;
inner_unicorn.SaveContext(ctx);
LoadContext(ctx);
PageTableChanged();
}
ARM_Dynarmic::~ARM_Dynarmic() = default;
@@ -189,13 +200,6 @@ void ARM_Dynarmic::SetTlsAddress(u64 address) {
cb->tpidrro_el0 = address;
}
void ARM_Dynarmic::ExecuteInstructions(int num_instructions) {
cb->ticks_remaining = num_instructions;
jit->Run();
CoreTiming::AddTicks(num_instructions - cb->num_interpreted_instructions);
cb->num_interpreted_instructions = 0;
}
void ARM_Dynarmic::SaveContext(ARM_Interface::ThreadContext& ctx) {
ctx.cpu_registers = jit->GetRegisters();
ctx.sp = jit->GetSP();
@@ -228,4 +232,5 @@ void ARM_Dynarmic::ClearInstructionCache() {
void ARM_Dynarmic::PageTableChanged() {
jit = MakeJit(cb);
current_page_table = Memory::GetCurrentPageTable();
}

View File

@@ -29,6 +29,8 @@ public:
u32 GetVFPReg(int index) const override;
void SetVFPReg(int index, u32 value) override;
u32 GetCPSR() const override;
void Run() override;
void Step() override;
void SetCPSR(u32 cpsr) override;
VAddr GetTlsAddress() const override;
void SetTlsAddress(VAddr address) override;
@@ -37,7 +39,6 @@ public:
void LoadContext(const ThreadContext& ctx) override;
void PrepareReschedule() override;
void ExecuteInstructions(int num_instructions) override;
void ClearInstructionCache() override;
void PageTableChanged() override;
@@ -47,4 +48,6 @@ private:
std::unique_ptr<ARM_Dynarmic_Callbacks> cb;
std::unique_ptr<Dynarmic::A64::Jit> jit;
ARM_Unicorn inner_unicorn;
Memory::PageTable* current_page_table = nullptr;
};

View File

@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <unicorn/arm64.h>
#include "common/assert.h"
#include "common/microprofile.h"
@@ -52,7 +53,7 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si
void* user_data) {
ARM_Interface::ThreadContext ctx{};
Core::CPU().SaveContext(ctx);
ASSERT_MSG(false, "Attempted to read from unmapped memory: 0x%llx, pc=0x%llx, lr=0x%llx", addr,
ASSERT_MSG(false, "Attempted to read from unmapped memory: 0x%lx, pc=0x%lx, lr=0x%lx", addr,
ctx.pc, ctx.cpu_registers[30]);
return {};
}
@@ -153,6 +154,14 @@ void ARM_Unicorn::SetTlsAddress(VAddr base) {
CHECKED(uc_reg_write(uc, UC_ARM64_REG_TPIDRRO_EL0, &base));
}
void ARM_Unicorn::Run() {
ExecuteInstructions(std::max(CoreTiming::GetDowncount(), 0));
}
void ARM_Unicorn::Step() {
ExecuteInstructions(1);
}
MICROPROFILE_DEFINE(ARM_Jit, "ARM JIT", "ARM JIT", MP_RGB(255, 64, 64));
void ARM_Unicorn::ExecuteInstructions(int num_instructions) {

View File

@@ -30,7 +30,9 @@ public:
void SaveContext(ThreadContext& ctx) override;
void LoadContext(const ThreadContext& ctx) override;
void PrepareReschedule() override;
void ExecuteInstructions(int num_instructions) override;
void ExecuteInstructions(int num_instructions);
void Run() override;
void Step() override;
void ClearInstructionCache() override;
void PageTableChanged() override{};

View File

@@ -26,7 +26,7 @@ namespace Core {
/*static*/ System System::s_instance;
System::ResultStatus System::RunLoop(int tight_loop) {
System::ResultStatus System::RunLoop(bool tight_loop) {
status = ResultStatus::Success;
if (!cpu_core) {
return ResultStatus::ErrorNotInitialized;
@@ -40,7 +40,7 @@ System::ResultStatus System::RunLoop(int tight_loop) {
if (GDBStub::GetCpuHaltFlag()) {
if (GDBStub::GetCpuStepFlag()) {
GDBStub::SetCpuStepFlag(false);
tight_loop = 1;
tight_loop = false;
} else {
return ResultStatus::Success;
}
@@ -56,7 +56,11 @@ System::ResultStatus System::RunLoop(int tight_loop) {
PrepareReschedule();
} else {
CoreTiming::Advance();
cpu_core->Run(tight_loop);
if (tight_loop) {
cpu_core->Run();
} else {
cpu_core->Step();
}
}
HW::Update();
@@ -66,7 +70,7 @@ System::ResultStatus System::RunLoop(int tight_loop) {
}
System::ResultStatus System::SingleStep() {
return RunLoop(1);
return RunLoop(false);
}
System::ResultStatus System::Load(EmuWindow* emu_window, const std::string& filepath) {
@@ -95,14 +99,15 @@ System::ResultStatus System::Load(EmuWindow* emu_window, const std::string& file
ResultStatus init_result{Init(emu_window, system_mode.first.get())};
if (init_result != ResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to initialize system (Error %i)!", init_result);
LOG_CRITICAL(Core, "Failed to initialize system (Error %i)!",
static_cast<int>(init_result));
System::Shutdown();
return init_result;
}
const Loader::ResultStatus load_result{app_loader->Load(current_process)};
if (Loader::ResultStatus::Success != load_result) {
LOG_CRITICAL(Core, "Failed to load ROM (Error %i)!", load_result);
LOG_CRITICAL(Core, "Failed to load ROM (Error %i)!", static_cast<int>(load_result));
System::Shutdown();
switch (load_result) {

View File

@@ -53,10 +53,10 @@ public:
* is not required to do a full dispatch with each instruction. NOTE: the number of instructions
* requested is not guaranteed to run, as this will be interrupted preemptively if a hardware
* update is requested (e.g. on a thread switch).
* @param tight_loop Number of instructions to execute.
* @param tight_loop If false, the CPU single-steps.
* @return Result status, indicating whether or not the operation succeeded.
*/
ResultStatus RunLoop(int tight_loop = 100000);
ResultStatus RunLoop(bool tight_loop = true);
/**
* Step the CPU one instruction
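
The hunk above changes RunLoop from taking an instruction budget to taking a simple tight-loop flag. A minimal caller sketch, assuming the existing Core::System::GetInstance() accessor and the usual "core/core.h" include (neither is shown in this diff):

// Illustrative fragment, not project code.
#include "core/core.h"

void RunEmulation(bool gdb_single_step) {
    Core::System& system = Core::System::GetInstance();
    // Normal path: the JIT runs until the next scheduled event (tight_loop = true by default).
    // Stepping path: SingleStep() forwards RunLoop(false), which calls cpu_core->Step().
    const Core::System::ResultStatus status =
        gdb_single_step ? system.SingleStep() : system.RunLoop();
    (void)status; // the frontend decides how to surface ResultStatus::Error* values
}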

View File

@@ -693,7 +693,7 @@ static void ReadMemory() {
u64 len =
HexToLong(start_offset, static_cast<u64>((command_buffer + command_length) - start_offset));
LOG_DEBUG(Debug_GDBStub, "gdb: addr: %016llx len: %016llx\n", addr, len);
LOG_DEBUG(Debug_GDBStub, "gdb: addr: %016lx len: %016lx\n", addr, len);
if (len * 2 > sizeof(reply)) {
SendReply("E01");

View File

@@ -7,6 +7,7 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/kernel.h"
@@ -26,6 +27,32 @@ void SessionRequestHandler::ClientDisconnected(SharedPtr<ServerSession> server_s
boost::range::remove_erase(connected_sessions, server_session);
}
SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
const std::string& reason, u64 timeout,
WakeupCallback&& callback) {
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
thread->wakeup_callback =
[context = *this, callback](ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, size_t index) mutable -> bool {
ASSERT(thread->status == THREADSTATUS_WAIT_HLE_EVENT);
callback(thread, context, reason);
context.WriteToOutgoingCommandBuffer(*thread);
return true;
};
auto event = Kernel::Event::Create(Kernel::ResetType::OneShot, "HLE Pause Event: " + reason);
thread->status = THREADSTATUS_WAIT_HLE_EVENT;
thread->wait_objects = {event};
event->AddWaitingThread(thread);
if (timeout > 0) {
thread->WakeAfterDelay(timeout);
}
return event;
}
HLERequestContext::HLERequestContext(SharedPtr<Kernel::ServerSession> server_session)
: server_session(std::move(server_session)) {
cmd_buf[0] = 0;
@@ -35,7 +62,7 @@ HLERequestContext::~HLERequestContext() = default;
void HLERequestContext::ParseCommandBuffer(u32_le* src_cmdbuf, bool incoming) {
IPC::RequestParser rp(src_cmdbuf);
command_header = std::make_unique<IPC::CommandHeader>(rp.PopRaw<IPC::CommandHeader>());
command_header = std::make_shared<IPC::CommandHeader>(rp.PopRaw<IPC::CommandHeader>());
if (command_header->type == IPC::CommandType::Close) {
// Close does not populate the rest of the IPC header
@@ -45,7 +72,7 @@ void HLERequestContext::ParseCommandBuffer(u32_le* src_cmdbuf, bool incoming) {
// If handle descriptor is present, add size of it
if (command_header->enable_handle_descriptor) {
handle_descriptor_header =
std::make_unique<IPC::HandleDescriptorHeader>(rp.PopRaw<IPC::HandleDescriptorHeader>());
std::make_shared<IPC::HandleDescriptorHeader>(rp.PopRaw<IPC::HandleDescriptorHeader>());
if (handle_descriptor_header->send_current_pid) {
rp.Skip(2, false);
}
@@ -88,7 +115,7 @@ void HLERequestContext::ParseCommandBuffer(u32_le* src_cmdbuf, bool incoming) {
// All outgoing domain messages have the domain header, if only incoming has it
if (incoming || domain_message_header) {
domain_message_header =
std::make_unique<IPC::DomainMessageHeader>(rp.PopRaw<IPC::DomainMessageHeader>());
std::make_shared<IPC::DomainMessageHeader>(rp.PopRaw<IPC::DomainMessageHeader>());
} else {
if (Session()->IsDomain())
LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
@@ -96,7 +123,7 @@ void HLERequestContext::ParseCommandBuffer(u32_le* src_cmdbuf, bool incoming) {
}
data_payload_header =
std::make_unique<IPC::DataPayloadHeader>(rp.PopRaw<IPC::DataPayloadHeader>());
std::make_shared<IPC::DataPayloadHeader>(rp.PopRaw<IPC::DataPayloadHeader>());
data_payload_offset = rp.GetCurrentOffset();
@@ -159,8 +186,11 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb
return RESULT_SUCCESS;
}
ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, Process& dst_process,
HandleTable& dst_table) {
ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmdbuf;
Memory::ReadBlock(*thread.owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
dst_cmdbuf.size() * sizeof(u32));
// The header was already built in the internal command buffer. Attempt to parse it to verify
// the integrity and then copy it over to the target command buffer.
ParseCommandBuffer(cmd_buf.data(), false);
@@ -171,7 +201,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, P
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
std::copy_n(cmd_buf.begin(), size, dst_cmdbuf);
std::copy_n(cmd_buf.begin(), size, dst_cmdbuf.data());
if (command_header->enable_handle_descriptor) {
ASSERT_MSG(!move_objects.empty() || !copy_objects.empty(),
@@ -213,6 +243,11 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, P
dst_cmdbuf[domain_offset++] = static_cast<u32_le>(request_handlers.size());
}
}
// Copy the translated command buffer back into the thread's command buffer area.
Memory::WriteBlock(*thread.owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
dst_cmdbuf.size() * sizeof(u32));
return RESULT_SUCCESS;
}
@@ -234,7 +269,7 @@ std::vector<u8> HLERequestContext::ReadBuffer() const {
size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size) const {
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[0].Size()};
ASSERT_MSG(size <= GetWriteBufferSize(), "Size %d is too big", size);
ASSERT_MSG(size <= GetWriteBufferSize(), "Size %lx is too big", size);
if (is_buffer_b) {
Memory::WriteBlock(BufferDescriptorB()[0].Address(), buffer, size);

View File

@@ -6,6 +6,7 @@
#include <array>
#include <memory>
#include <string>
#include <vector>
#include <boost/container/small_vector.hpp>
#include "common/common_types.h"
@@ -13,6 +14,7 @@
#include "core/hle/ipc.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/thread.h"
namespace Service {
class ServiceFrameworkBase;
@@ -24,6 +26,7 @@ class Domain;
class HandleTable;
class HLERequestContext;
class Process;
class Event;
/**
* Interface implemented by HLE Session handlers.
@@ -102,14 +105,31 @@ public:
return server_session;
}
using WakeupCallback = std::function<void(SharedPtr<Thread> thread, HLERequestContext& context,
ThreadWakeupReason reason)>;
/**
* Puts the specified guest thread to sleep until the returned event is signaled or until the
* specified timeout expires.
* @param thread Thread to be put to sleep.
* @param reason Reason for pausing the thread, to be used for debugging purposes.
* @param timeout Timeout in nanoseconds after which the thread will be awoken and the callback
* invoked with a Timeout reason.
* @param callback Callback to be invoked when the thread is resumed. This callback must write
* the entire command response once again, regardless of the state of it before this function
* was called.
* @returns Event that when signaled will resume the thread and call the callback function.
*/
SharedPtr<Event> SleepClientThread(SharedPtr<Thread> thread, const std::string& reason,
u64 timeout, WakeupCallback&& callback);
void ParseCommandBuffer(u32_le* src_cmdbuf, bool incoming);
/// Populates this context with data from the requesting process/thread.
ResultCode PopulateFromIncomingCommandBuffer(u32_le* src_cmdbuf, Process& src_process,
HandleTable& src_table);
/// Writes data from this context back to the requesting process/thread.
ResultCode WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, Process& dst_process,
HandleTable& dst_table);
ResultCode WriteToOutgoingCommandBuffer(Thread& thread);
u32_le GetCommand() const {
return command;
@@ -139,7 +159,7 @@ public:
return buffer_c_desciptors;
}
const std::unique_ptr<IPC::DomainMessageHeader>& GetDomainMessageHeader() const {
const std::shared_ptr<IPC::DomainMessageHeader>& GetDomainMessageHeader() const {
return domain_message_header;
}
@@ -212,10 +232,10 @@ private:
boost::container::small_vector<SharedPtr<Object>, 8> copy_objects;
boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects;
std::unique_ptr<IPC::CommandHeader> command_header;
std::unique_ptr<IPC::HandleDescriptorHeader> handle_descriptor_header;
std::unique_ptr<IPC::DataPayloadHeader> data_payload_header;
std::unique_ptr<IPC::DomainMessageHeader> domain_message_header;
std::shared_ptr<IPC::CommandHeader> command_header;
std::shared_ptr<IPC::HandleDescriptorHeader> handle_descriptor_header;
std::shared_ptr<IPC::DataPayloadHeader> data_payload_header;
std::shared_ptr<IPC::DomainMessageHeader> domain_message_header;
std::vector<IPC::BufferDescriptorX> buffer_x_desciptors;
std::vector<IPC::BufferDescriptorABW> buffer_a_desciptors;
std::vector<IPC::BufferDescriptorABW> buffer_b_desciptors;
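
A hedged usage sketch for the SleepClientThread API documented above. Only the SleepClientThread call, the WakeupCallback signature, and the ResponseBuilder pattern come from this change set; the service class, command name, and timeout value are illustrative (the vi service further down shows the real DequeueBuffer usage):

// Illustrative fragment inside some HLE command handler.
void ExampleService::BlockingRequest(Kernel::HLERequestContext& ctx) {
    auto event = ctx.SleepClientThread(
        Kernel::GetCurrentThread(), "ExampleService::BlockingRequest", /*timeout=*/0,
        [](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
           Kernel::ThreadWakeupReason reason) {
            // Per the doc comment above, the callback must rebuild the entire response.
            IPC::ResponseBuilder rb{ctx, 2};
            rb.Push(RESULT_SUCCESS);
        });
    // Keep `event` and Signal() it once the data the guest is waiting for is ready;
    // a timeout of 0 never schedules WakeAfterDelay, per the hle_ipc.cpp hunk above.
}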

View File

@@ -10,12 +10,12 @@ namespace Kernel {
ObjectAddressTable g_object_address_table;
void ObjectAddressTable::Insert(VAddr addr, SharedPtr<Object> obj) {
ASSERT_MSG(objects.find(addr) == objects.end(), "Object already exists with addr=0x%llx", addr);
ASSERT_MSG(objects.find(addr) == objects.end(), "Object already exists with addr=0x%lx", addr);
objects[addr] = obj;
}
void ObjectAddressTable::Close(VAddr addr) {
ASSERT_MSG(objects.find(addr) != objects.end(), "Object does not exist with addr=0x%llx", addr);
ASSERT_MSG(objects.find(addr) != objects.end(), "Object does not exist with addr=0x%lx", addr);
objects.erase(addr);
}

View File

@@ -78,7 +78,8 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
}
}
LOG_CRITICAL(IPC, "Unknown domain command=%d", domain_message_header->command.Value());
LOG_CRITICAL(IPC, "Unknown domain command=%d",
static_cast<int>(domain_message_header->command.Value()));
ASSERT(false);
}

View File

@@ -107,7 +107,7 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
// Error out if the requested permissions don't match what the creator process allows.
if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) {
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%llx name=%s, permissions don't match",
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%lx name=%s, permissions don't match",
GetObjectId(), address, name.c_str());
return ERR_INVALID_COMBINATION;
}
@@ -115,7 +115,7 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
// Error out if the provided permissions are not compatible with what the creator process needs.
if (other_permissions != MemoryPermission::DontCare &&
static_cast<u32>(this->permissions) & ~static_cast<u32>(other_permissions)) {
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%llx name=%s, permissions don't match",
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%lx name=%s, permissions don't match",
GetObjectId(), address, name.c_str());
return ERR_WRONG_PERMISSION;
}
@@ -126,7 +126,7 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
if (address != 0) {
// TODO(shinyquagsire23): Check for virtual/mappable memory here too?
if (address >= Memory::HEAP_VADDR && address < Memory::HEAP_VADDR_END) {
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%llx name=%s, invalid address",
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%lx name=%s, invalid address",
GetObjectId(), address, name.c_str());
return ERR_INVALID_ADDRESS;
}
@@ -143,10 +143,9 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
auto result = target_process->vm_manager.MapMemoryBlock(
target_address, backing_block, backing_block_offset, size, MemoryState::Shared);
if (result.Failed()) {
LOG_ERROR(
Kernel,
"cannot map id=%u, target_address=0x%llx name=%s, error mapping to virtual memory",
GetObjectId(), target_address, name.c_str());
LOG_ERROR(Kernel,
"cannot map id=%u, target_address=0x%lx name=%s, error mapping to virtual memory",
GetObjectId(), target_address, name.c_str());
return result.Code();
}

View File

@@ -39,7 +39,7 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
}
static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state1) {
LOG_WARNING(Kernel_SVC, "(STUBBED) called, addr=0x%llx", addr);
LOG_WARNING(Kernel_SVC, "(STUBBED) called, addr=0x%lx", addr);
return RESULT_SUCCESS;
}
@@ -750,7 +750,7 @@ static ResultCode ResetSignal(Handle handle) {
/// Creates a TransferMemory object
static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32 permissions) {
LOG_WARNING(Kernel_SVC, "(STUBBED) called addr=0x%llx, size=0x%llx, perms=%08X", addr, size,
LOG_WARNING(Kernel_SVC, "(STUBBED) called addr=0x%lx, size=0x%lx, perms=%08X", addr, size,
permissions);
*handle = 0;
return RESULT_SUCCESS;

View File

@@ -55,16 +55,6 @@ inline static u32 const NewThreadId() {
Thread::Thread() {}
Thread::~Thread() {}
/**
* Check if the specified thread is waiting on the specified address to be arbitrated
* @param thread The thread to test
* @param wait_address The address to test against
* @return True if the thread is waiting, false otherwise
*/
static bool CheckWait_AddressArbiter(const Thread* thread, VAddr wait_address) {
return thread->status == THREADSTATUS_WAIT_ARB && wait_address == thread->wait_address;
}
void Thread::Stop() {
// Cancel any outstanding wakeup events for this thread
CoreTiming::UnscheduleEvent(ThreadWakeupEventType, callback_handle);
@@ -102,12 +92,6 @@ void WaitCurrentThread_Sleep() {
thread->status = THREADSTATUS_WAIT_SLEEP;
}
void WaitCurrentThread_ArbitrateAddress(VAddr wait_address) {
Thread* thread = GetCurrentThread();
thread->wait_address = wait_address;
thread->status = THREADSTATUS_WAIT_ARB;
}
void ExitCurrentThread() {
Thread* thread = GetCurrentThread();
thread->Stop();
@@ -129,7 +113,8 @@ static void ThreadWakeupCallback(u64 thread_handle, int cycles_late) {
bool resume = true;
if (thread->status == THREADSTATUS_WAIT_SYNCH_ANY ||
thread->status == THREADSTATUS_WAIT_SYNCH_ALL || thread->status == THREADSTATUS_WAIT_ARB) {
thread->status == THREADSTATUS_WAIT_SYNCH_ALL ||
thread->status == THREADSTATUS_WAIT_HLE_EVENT) {
// Remove the thread from each of its waiting objects' waitlists
for (auto& object : thread->wait_objects)
@@ -163,7 +148,7 @@ void Thread::ResumeFromWait() {
switch (status) {
case THREADSTATUS_WAIT_SYNCH_ALL:
case THREADSTATUS_WAIT_SYNCH_ANY:
case THREADSTATUS_WAIT_ARB:
case THREADSTATUS_WAIT_HLE_EVENT:
case THREADSTATUS_WAIT_SLEEP:
case THREADSTATUS_WAIT_IPC:
break;

View File

@@ -38,7 +38,7 @@ enum ThreadProcessorId : s32 {
enum ThreadStatus {
THREADSTATUS_RUNNING, ///< Currently running
THREADSTATUS_READY, ///< Ready to run
THREADSTATUS_WAIT_ARB, ///< Waiting on an address arbiter
THREADSTATUS_WAIT_HLE_EVENT, ///< Waiting for hle event to finish
THREADSTATUS_WAIT_SLEEP, ///< Waiting due to a SleepThread SVC
THREADSTATUS_WAIT_IPC, ///< Waiting for the reply from an IPC request
THREADSTATUS_WAIT_SYNCH_ANY, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false

View File

@@ -39,7 +39,8 @@ SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() {
for (const auto& thread : waiting_threads) {
// The list of waiting threads must not contain threads that are not waiting to be awakened.
ASSERT_MSG(thread->status == THREADSTATUS_WAIT_SYNCH_ANY ||
thread->status == THREADSTATUS_WAIT_SYNCH_ALL,
thread->status == THREADSTATUS_WAIT_SYNCH_ALL ||
thread->status == THREADSTATUS_WAIT_HLE_EVENT,
"Inconsistent thread statuses in waiting_threads");
if (thread->current_priority >= candidate_priority)

View File

@@ -35,7 +35,7 @@ private:
const s64 offset = rp.Pop<s64>();
const s64 length = rp.Pop<s64>();
LOG_DEBUG(Service_FS, "called, offset=0x%llx, length=0x%llx", offset, length);
LOG_DEBUG(Service_FS, "called, offset=0x%ld, length=0x%ld", offset, length);
// Error checking
if (length < 0) {
@@ -86,7 +86,7 @@ private:
const s64 offset = rp.Pop<s64>();
const s64 length = rp.Pop<s64>();
LOG_DEBUG(Service_FS, "called, offset=0x%llx, length=0x%llx", offset, length);
LOG_DEBUG(Service_FS, "called, offset=0x%ld, length=0x%ld", offset, length);
// Error checking
if (length < 0) {
@@ -123,7 +123,7 @@ private:
const s64 offset = rp.Pop<s64>();
const s64 length = rp.Pop<s64>();
LOG_DEBUG(Service_FS, "called, offset=0x%llx, length=0x%llx", offset, length);
LOG_DEBUG(Service_FS, "called, offset=0x%ld, length=0x%ld", offset, length);
// Error checking
if (length < 0) {

View File

@@ -23,8 +23,8 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u3
u32 stride, NVFlinger::BufferQueue::BufferTransformFlags transform) {
VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle);
LOG_WARNING(Service,
"Drawing from address %llx offset %08X Width %u Height %u Stride %u Format %u",
addr, offset, width, height, stride, format);
"Drawing from address %lx offset %08X Width %u Height %u Stride %u Format %u", addr,
offset, width, height, stride, format);
using PixelFormat = RendererBase::FramebufferInfo::PixelFormat;
using Flags = NVFlinger::BufferQueue::BufferTransformFlags;

View File

@@ -26,24 +26,30 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, IGBPBuffer& igbp_buffer) {
LOG_WARNING(Service, "Adding graphics buffer %u", slot);
queue.emplace_back(buffer);
if (buffer_wait_event) {
buffer_wait_event->Signal();
}
}
u32 BufferQueue::DequeueBuffer(u32 pixel_format, u32 width, u32 height) {
boost::optional<u32> BufferQueue::DequeueBuffer(u32 width, u32 height) {
auto itr = std::find_if(queue.begin(), queue.end(), [&](const Buffer& buffer) {
// Only consider free buffers. Buffers become free once again after they've been Acquired
// and Released by the compositor, see the NVFlinger::Compose method.
if (buffer.status != Buffer::Status::Free)
if (buffer.status != Buffer::Status::Free) {
return false;
}
// Make sure that the parameters match.
return buffer.igbp_buffer.width == width && buffer.igbp_buffer.height == height;
});
if (itr == queue.end()) {
LOG_CRITICAL(Service_NVDRV, "no free buffers for pixel_format=%d, width=%d, height=%d",
pixel_format, width, height);
itr = queue.begin();
return boost::none;
}
buffer_wait_event = nullptr;
itr->status = Buffer::Status::Dequeued;
return itr->slot;
}
@@ -81,6 +87,10 @@ void BufferQueue::ReleaseBuffer(u32 slot) {
ASSERT(itr != queue.end());
ASSERT(itr->status == Buffer::Status::Acquired);
itr->status = Buffer::Status::Free;
if (buffer_wait_event) {
buffer_wait_event->Signal();
}
}
u32 BufferQueue::Query(QueryType type) {
@@ -96,5 +106,10 @@ u32 BufferQueue::Query(QueryType type) {
return 0;
}
void BufferQueue::SetBufferWaitEvent(Kernel::SharedPtr<Kernel::Event>&& wait_event) {
ASSERT_MSG(!buffer_wait_event, "buffer_wait_event only supports a single waiting thread!");
buffer_wait_event = std::move(wait_event);
}
} // namespace NVFlinger
} // namespace Service

View File

@@ -69,12 +69,13 @@ public:
};
void SetPreallocatedBuffer(u32 slot, IGBPBuffer& buffer);
u32 DequeueBuffer(u32 pixel_format, u32 width, u32 height);
boost::optional<u32> DequeueBuffer(u32 width, u32 height);
const IGBPBuffer& RequestBuffer(u32 slot) const;
void QueueBuffer(u32 slot, BufferTransformFlags transform);
boost::optional<const Buffer&> AcquireBuffer();
void ReleaseBuffer(u32 slot);
u32 Query(QueryType type);
void SetBufferWaitEvent(Kernel::SharedPtr<Kernel::Event>&& wait_event);
u32 GetId() const {
return id;
@@ -90,6 +91,9 @@ private:
std::vector<Buffer> queue;
Kernel::SharedPtr<Kernel::Event> native_handle;
/// Used to signal waiting thread when no buffers are available
Kernel::SharedPtr<Kernel::Event> buffer_wait_event;
};
} // namespace NVFlinger
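
For clarity, a short sketch of how a caller is expected to handle the new boost::optional return; the control flow mirrors the vi service change below, and the variable names are illustrative:

boost::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
if (slot != boost::none) {
    // A free buffer with matching dimensions exists; reply with *slot right away.
} else {
    // No buffer is free: put the guest thread to sleep and hand its wake event to the
    // queue. ReleaseBuffer() signals that event, and DequeueBuffer() clears it again,
    // which is why SetBufferWaitEvent asserts on a single waiting thread.
    buffer_queue->SetBufferWaitEvent(std::move(wait_event));
}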

View File

@@ -149,11 +149,10 @@ ResultCode ServiceFrameworkBase::HandleSyncRequest(Kernel::HLERequestContext& co
break;
}
default:
UNIMPLEMENTED_MSG("command_type=%d", context.GetCommandType());
UNIMPLEMENTED_MSG("command_type=%d", static_cast<int>(context.GetCommandType()));
}
u32* cmd_buf = (u32*)Memory::GetPointer(Kernel::GetCurrentThread()->GetTLSAddress());
context.WriteToOutgoingCommandBuffer(cmd_buf, *Core::CurrentProcess(), Kernel::g_handle_table);
context.WriteToOutgoingCommandBuffer(*Kernel::GetCurrentThread());
return RESULT_SUCCESS;
}

View File

@@ -107,7 +107,7 @@ private:
IPC::RequestParser rp{ctx};
u64 posix_time = rp.Pop<u64>();
LOG_WARNING(Service_Time, "(STUBBED) called, posix_time=0x%016llX", posix_time);
LOG_WARNING(Service_Time, "(STUBBED) called, posix_time=0x%016lX", posix_time);
CalendarTime calendar_time{2018, 1, 1, 0, 0, 0};
CalendarAdditionalInfo additional_info{};

View File

@@ -471,7 +471,7 @@ private:
u32 flags = rp.Pop<u32>();
auto buffer_queue = nv_flinger->GetBufferQueue(id);
LOG_DEBUG(Service_VI, "called, transaction=%x", transaction);
LOG_DEBUG(Service_VI, "called, transaction=%x", static_cast<u32>(transaction));
if (transaction == TransactionId::Connect) {
IGBPConnectRequestParcel request{ctx.ReadBuffer()};
@@ -486,12 +486,30 @@ private:
ctx.WriteBuffer(response.Serialize());
} else if (transaction == TransactionId::DequeueBuffer) {
IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()};
const u32 width{request.data.width};
const u32 height{request.data.height};
boost::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
u32 slot = buffer_queue->DequeueBuffer(request.data.pixel_format, request.data.width,
request.data.height);
IGBPDequeueBufferResponseParcel response{slot};
ctx.WriteBuffer(response.Serialize());
if (slot != boost::none) {
// Buffer is available
IGBPDequeueBufferResponseParcel response{*slot};
ctx.WriteBuffer(response.Serialize());
} else {
// Wait the current thread until a buffer becomes available
auto wait_event = ctx.SleepClientThread(
Kernel::GetCurrentThread(), "IHOSBinderDriver::DequeueBuffer", -1,
[=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
ThreadWakeupReason reason) {
// Repeat TransactParcel DequeueBuffer when a buffer is available
auto buffer_queue = nv_flinger->GetBufferQueue(id);
boost::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
IGBPDequeueBufferResponseParcel response{*slot};
ctx.WriteBuffer(response.Serialize());
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
});
buffer_queue->SetBufferWaitEvent(std::move(wait_event));
}
} else if (transaction == TransactionId::RequestBuffer) {
IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()};

View File

@@ -84,7 +84,7 @@ void Linker::WriteRelocations(std::vector<u8>& program_image, const std::vector<
}
break;
default:
LOG_CRITICAL(Loader, "Unknown relocation type: %d", rela.type);
LOG_CRITICAL(Loader, "Unknown relocation type: %d", static_cast<int>(rela.type));
break;
}
}

View File

@@ -7,6 +7,7 @@ add_library(video_core STATIC
engines/maxwell_3d.h
engines/maxwell_compute.cpp
engines/maxwell_compute.h
gpu.cpp
gpu.h
memory_manager.cpp
memory_manager.h

View File

@@ -12,7 +12,9 @@ namespace Engines {
constexpr u32 MacroRegistersStart = 0xE00;
const std::unordered_map<u32, Maxwell3D::MethodInfo> Maxwell3D::method_handlers = {
{0xE1A, {"BindTextureInfoBuffer", 1, &Maxwell3D::BindTextureInfoBuffer}},
{0xE24, {"SetShader", 5, &Maxwell3D::SetShader}},
{0xE2A, {"BindStorageBuffer", 1, &Maxwell3D::BindStorageBuffer}},
};
Maxwell3D::Maxwell3D(MemoryManager& memory_manager) : memory_manager(memory_manager) {}
@@ -83,6 +85,25 @@ void Maxwell3D::WriteReg(u32 method, u32 value, u32 remaining_params) {
ASSERT_MSG(regs.code_address.CodeAddress() == 0, "Unexpected CODE_ADDRESS register value.");
break;
}
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[1]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[2]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[3]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[4]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[5]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[6]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[7]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[8]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[9]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[10]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[11]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[12]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[13]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[14]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[15]): {
ProcessCBData(value);
break;
}
case MAXWELL3D_REG_INDEX(cb_bind[0].raw_config): {
ProcessCBBind(Regs::ShaderStage::Vertex);
break;
@@ -132,7 +153,8 @@ void Maxwell3D::ProcessQueryGet() {
break;
}
default:
UNIMPLEMENTED_MSG("Query mode %u not implemented", regs.query.query_get.mode.Value());
UNIMPLEMENTED_MSG("Query mode %u not implemented",
static_cast<u32>(regs.query.query_get.mode.Value()));
}
}
@@ -140,6 +162,23 @@ void Maxwell3D::DrawArrays() {
LOG_WARNING(HW_GPU, "Game requested a DrawArrays, ignoring");
}
void Maxwell3D::BindTextureInfoBuffer(const std::vector<u32>& parameters) {
/**
* Parameters description:
* [0] = Shader stage, usually 4 for FragmentShader
*/
u32 stage = parameters[0];
// Perform the same operations as the real macro code.
GPUVAddr address = static_cast<GPUVAddr>(regs.tex_info_buffers.address[stage]) << 8;
u32 size = regs.tex_info_buffers.size[stage];
regs.const_buffer.cb_size = size;
regs.const_buffer.cb_address_high = address >> 32;
regs.const_buffer.cb_address_low = address & 0xFFFFFFFF;
}
void Maxwell3D::SetShader(const std::vector<u32>& parameters) {
/**
* Parameters description:
@@ -181,6 +220,26 @@ void Maxwell3D::SetShader(const std::vector<u32>& parameters) {
ProcessCBBind(shader_stage);
}
void Maxwell3D::BindStorageBuffer(const std::vector<u32>& parameters) {
/**
* Parameters description:
* [0] = Buffer offset >> 2
*/
u32 buffer_offset = parameters[0] << 2;
// Perform the same operations as the real macro code.
// Note: This value is hardcoded in the macro's code.
static constexpr u32 DefaultCBSize = 0x5F00;
regs.const_buffer.cb_size = DefaultCBSize;
GPUVAddr address = regs.ssbo_info.BufferAddress();
regs.const_buffer.cb_address_high = address >> 32;
regs.const_buffer.cb_address_low = address & 0xFFFFFFFF;
regs.const_buffer.cb_pos = buffer_offset;
}
void Maxwell3D::ProcessCBBind(Regs::ShaderStage stage) {
// Bind the buffer currently in CB_ADDRESS to the specified index in the desired shader stage.
auto& shader = state.shader_stages[static_cast<size_t>(stage)];
@@ -194,5 +253,22 @@ void Maxwell3D::ProcessCBBind(Regs::ShaderStage stage) {
buffer.size = regs.const_buffer.cb_size;
}
void Maxwell3D::ProcessCBData(u32 value) {
// Write the input value to the current const buffer at the current position.
GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
ASSERT(buffer_address != 0);
// Don't allow writing past the end of the buffer.
ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size);
VAddr address =
memory_manager.PhysicalToVirtualAddress(buffer_address + regs.const_buffer.cb_pos);
Memory::Write32(address, value);
// Increment the current buffer position.
regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4;
}
} // namespace Engines
} // namespace Tegra
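
For context, a hedged sketch of the guest-visible write sequence that ProcessCBData and the macro handlers above emulate. The register names come from the Maxwell3D::Regs structure in this change set; driving the writes through WriteReg and the exact ordering are assumptions made for illustration:

// Point CB_ADDRESS at the destination buffer and reset CB_POS; the BindStorageBuffer /
// BindTextureInfoBuffer macros handled in HLE above do this address setup for games.
maxwell3d.WriteReg(MAXWELL3D_REG_INDEX(const_buffer.cb_address_high),
                   static_cast<u32>(buffer_address >> 32), 0);
maxwell3d.WriteReg(MAXWELL3D_REG_INDEX(const_buffer.cb_address_low),
                   static_cast<u32>(buffer_address), 0);
maxwell3d.WriteReg(MAXWELL3D_REG_INDEX(const_buffer.cb_pos), 0, 0);

// Stream shader-visible data through CB_DATA: ProcessCBData stores each word at
// cb_address + cb_pos and then advances cb_pos by 4, so the writes land sequentially.
for (const u32 word : uniform_words) {
    maxwell3d.WriteReg(MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]), word, 0);
}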

View File

@@ -62,7 +62,34 @@ public:
union {
struct {
INSERT_PADDING_WORDS(0x582);
INSERT_PADDING_WORDS(0x557);
struct {
u32 tsc_address_high;
u32 tsc_address_low;
u32 tsc_limit;
GPUVAddr TSCAddress() const {
return static_cast<GPUVAddr>(
(static_cast<GPUVAddr>(tsc_address_high) << 32) | tsc_address_low);
}
} tsc;
INSERT_PADDING_WORDS(0x3);
struct {
u32 tic_address_high;
u32 tic_address_low;
u32 tic_limit;
GPUVAddr TICAddress() const {
return static_cast<GPUVAddr>(
(static_cast<GPUVAddr>(tic_address_high) << 32) | tic_address_low);
}
} tic;
INSERT_PADDING_WORDS(0x22);
struct {
u32 code_address_high;
u32 code_address_low;
@@ -166,7 +193,26 @@ public:
u32 tex_cb_index;
INSERT_PADDING_WORDS(0x4B3);
INSERT_PADDING_WORDS(0x395);
struct {
/// Compressed address of a buffer that holds information about bound SSBOs.
/// This address is usually bound to c0 in the shaders.
u32 buffer_address;
GPUVAddr BufferAddress() const {
return static_cast<GPUVAddr>(buffer_address) << 8;
}
} ssbo_info;
INSERT_PADDING_WORDS(0x11);
struct {
u32 address[MaxShaderStage];
u32 size[MaxShaderStage];
} tex_info_buffers;
INSERT_PADDING_WORDS(0x102);
};
std::array<u32, NUM_REGS> reg_array;
};
@@ -218,6 +264,9 @@ private:
/// Handles a write to the QUERY_GET register.
void ProcessQueryGet();
/// Handles a write to the CB_DATA[i] register.
void ProcessCBData(u32 value);
/// Handles a write to the CB_BIND register.
void ProcessCBBind(Regs::ShaderStage stage);
@@ -225,7 +274,9 @@ private:
void DrawArrays();
/// Method call handlers
void BindTextureInfoBuffer(const std::vector<u32>& parameters);
void SetShader(const std::vector<u32>& parameters);
void BindStorageBuffer(const std::vector<u32>& parameters);
struct MethodInfo {
const char* name;
@@ -240,6 +291,8 @@ private:
static_assert(offsetof(Maxwell3D::Regs, field_name) == position * 4, \
"Field " #field_name " has invalid position")
ASSERT_REG_POSITION(tsc, 0x557);
ASSERT_REG_POSITION(tic, 0x55D);
ASSERT_REG_POSITION(code_address, 0x582);
ASSERT_REG_POSITION(draw, 0x585);
ASSERT_REG_POSITION(query, 0x6C0);
@@ -249,6 +302,9 @@ ASSERT_REG_POSITION(shader_config[0], 0x800);
ASSERT_REG_POSITION(const_buffer, 0x8E0);
ASSERT_REG_POSITION(cb_bind[0], 0x904);
ASSERT_REG_POSITION(tex_cb_index, 0x982);
ASSERT_REG_POSITION(ssbo_info, 0xD18);
ASSERT_REG_POSITION(tex_info_buffers.address[0], 0xD2A);
ASSERT_REG_POSITION(tex_info_buffers.size[0], 0xD2F);
#undef ASSERT_REG_POSITION

src/video_core/gpu.cpp (new file, 21 lines)
View File

@@ -0,0 +1,21 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "video_core/engines/fermi_2d.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_compute.h"
#include "video_core/gpu.h"
namespace Tegra {
GPU::GPU() {
memory_manager = std::make_unique<MemoryManager>();
maxwell_3d = std::make_unique<Engines::Maxwell3D>(*memory_manager);
fermi_2d = std::make_unique<Engines::Fermi2D>();
maxwell_compute = std::make_unique<Engines::MaxwellCompute>();
}
GPU::~GPU() = default;
} // namespace Tegra

View File

@@ -8,13 +8,16 @@
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_compute.h"
#include "video_core/memory_manager.h"
namespace Tegra {
namespace Engines {
class Fermi2D;
class Maxwell3D;
class MaxwellCompute;
} // namespace Engines
enum class EngineID {
FERMI_TWOD_A = 0x902D, // 2D Engine
MAXWELL_B = 0xB197, // 3D Engine
@@ -25,13 +28,8 @@ enum class EngineID {
class GPU final {
public:
GPU() {
memory_manager = std::make_unique<MemoryManager>();
maxwell_3d = std::make_unique<Engines::Maxwell3D>(*memory_manager);
fermi_2d = std::make_unique<Engines::Fermi2D>();
maxwell_compute = std::make_unique<Engines::MaxwellCompute>();
}
~GPU() = default;
GPU();
~GPU();
/// Processes a command list stored at the specified address in GPU memory.
void ProcessCommandList(GPUVAddr address, u32 size);
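
The recompile-time benefit the commit message mentions comes from the standard unique_ptr-to-incomplete-type idiom: gpu.h can now forward-declare the engine classes because the constructor and destructor, which need the complete types, live in gpu.cpp. A generic sketch of the pattern (not project code):

#include <memory>

class Widget;                      // forward declaration is enough in the header

class Owner {
public:
    Owner();
    ~Owner();                      // declared only; defined where Widget is complete
private:
    std::unique_ptr<Widget> impl;  // the deleter needs the full type only where
                                   // ~Owner() is instantiated, i.e. in owner.cpp
};

// owner.cpp
// #include "widget.h"
// Owner::Owner() : impl(std::make_unique<Widget>()) {}
// Owner::~Owner() = default;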

View File

@@ -150,8 +150,8 @@ QString WaitTreeThread::GetText() const {
case THREADSTATUS_READY:
status = tr("ready");
break;
case THREADSTATUS_WAIT_ARB:
status = tr("waiting for address 0x%1").arg(thread.wait_address, 8, 16, QLatin1Char('0'));
case THREADSTATUS_WAIT_HLE_EVENT:
status = tr("waiting for HLE return");
break;
case THREADSTATUS_WAIT_SLEEP:
status = tr("sleeping");
@@ -180,7 +180,7 @@ QColor WaitTreeThread::GetColor() const {
return QColor(Qt::GlobalColor::darkGreen);
case THREADSTATUS_READY:
return QColor(Qt::GlobalColor::darkBlue);
case THREADSTATUS_WAIT_ARB:
case THREADSTATUS_WAIT_HLE_EVENT:
return QColor(Qt::GlobalColor::darkRed);
case THREADSTATUS_WAIT_SLEEP:
return QColor(Qt::GlobalColor::darkYellow);