Compare commits


4 Commits

Author SHA1 Message Date
Lioncash
221613d4ea kernel/server_session: Make data members private
Makes it much nicer to locally reason about server session behavior, as
parts of its functionality are no longer spread across other classes.
2019-03-05 20:10:07 -05:00
Lioncash
7526b6fce3 kernel/client_session: Make data members private
These can be made private, as they aren't accessed in contexts that
require them to be public.
2019-03-05 20:10:03 -05:00
bunnei
cc92c054ec Merge pull request #2185 from FearlessTobi/port-4630
Port citra-emu/citra#4630: "Memory: don't lock hle mutex in memory read/write"
2019-03-04 18:44:53 -05:00
Weiyi Wang
5159f4eee8 Memory: don't lock hle mutex in memory read/write
The comment already invalidates itself: neither MMIO nor the rasterizer cache belongs to HLE kernel state. This mutex has too large a scope if MMIO or the cache is included, which is prone to deadlock when multiple threads acquire these resources at the same time. If necessary, each MMIO component or the rasterizer should have its own lock.
2019-03-02 15:20:05 +01:00
12 changed files with 95 additions and 65 deletions

src/core/core_cpu.cpp

@@ -11,7 +11,6 @@
 #endif
 #include "core/arm/exclusive_monitor.h"
 #include "core/arm/unicorn/arm_unicorn.h"
-#include "core/core.h"
 #include "core/core_cpu.h"
 #include "core/core_timing.h"
 #include "core/hle/kernel/scheduler.h"
@@ -50,9 +49,9 @@ bool CpuBarrier::Rendezvous() {
     return false;
 }
 
-Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier,
-         std::size_t core_index)
-    : cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} {
+Cpu::Cpu(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor,
+         CpuBarrier& cpu_barrier, std::size_t core_index)
+    : cpu_barrier{cpu_barrier}, core_timing{core_timing}, core_index{core_index} {
     if (Settings::values.use_cpu_jit) {
 #ifdef ARCHITECTURE_x86_64
         arm_interface = std::make_unique<ARM_Dynarmic>(core_timing, exclusive_monitor, core_index);
@@ -64,7 +63,7 @@ Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_ba
         arm_interface = std::make_unique<ARM_Unicorn>(core_timing);
     }
 
-    scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface);
+    scheduler = std::make_unique<Kernel::Scheduler>(*arm_interface);
 }
 
 Cpu::~Cpu() = default;

src/core/core_cpu.h

@@ -15,10 +15,6 @@ namespace Kernel {
 class Scheduler;
 }
 
-namespace Core {
-class System;
-}
-
 namespace Core::Timing {
 class CoreTiming;
 }
@@ -49,8 +45,8 @@ private:
 class Cpu {
 public:
-    Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier,
-        std::size_t core_index);
+    Cpu(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor,
+        CpuBarrier& cpu_barrier, std::size_t core_index);
     ~Cpu();
 
     void RunLoop(bool tight_loop = true);

src/core/cpu_core_manager.cpp

@@ -27,7 +27,8 @@ void CpuCoreManager::Initialize(System& system) {
     exclusive_monitor = Cpu::MakeExclusiveMonitor(cores.size());
 
     for (std::size_t index = 0; index < cores.size(); ++index) {
-        cores[index] = std::make_unique<Cpu>(system, *exclusive_monitor, *barrier, index);
+        cores[index] =
+            std::make_unique<Cpu>(system.CoreTiming(), *exclusive_monitor, *barrier, index);
     }
 
     // Create threads for CPU cores 1-3, and build thread_to_cpu map

src/core/hle/kernel/client_session.cpp

@@ -17,21 +17,11 @@ ClientSession::~ClientSession() {
     // This destructor will be called automatically when the last ClientSession handle is closed by
     // the emulated application.
 
-    // Local references to ServerSession and SessionRequestHandler are necessary to guarantee they
+    // A local reference to the ServerSession is necessary to guarantee it
     // will be kept alive until after ClientDisconnected() returns.
     SharedPtr<ServerSession> server = parent->server;
     if (server) {
-        std::shared_ptr<SessionRequestHandler> hle_handler = server->hle_handler;
-        if (hle_handler)
-            hle_handler->ClientDisconnected(server);
-
-        // TODO(Subv): Force a wake up of all the ServerSession's waiting threads and set
-        // their WaitSynchronization result to 0xC920181A.
-
-        // Clean up the list of client threads with pending requests, they are unneeded now that the
-        // client endpoint is closed.
-        server->pending_requesting_threads.clear();
-        server->currently_handling = nullptr;
+        server->ClientDisconnected();
     }
 
     parent->client = nullptr;
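The idiom at work here (and inside ServerSession::ClientDisconnected() further down) is the usual shared_ptr keep-alive trick: copy the pointer into a local before invoking a callback that may clear the member, so the callee outlives the call. A minimal sketch with invented stand-in types, not the actual kernel classes:

#include <memory>

struct Handler {
    // Stub; in the real code the callback ends up resetting the
    // session's handler member while it runs.
    void ClientDisconnected() {}
};

struct Session {
    std::shared_ptr<Handler> handler;

    void Disconnect() {
        // The local copy keeps the Handler alive even if `handler`
        // is nulled out during the callback.
        std::shared_ptr<Handler> local = handler;
        if (local) {
            local->ClientDisconnected();
        }
    }
};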

src/core/hle/kernel/client_session.h

@@ -36,14 +36,15 @@
     ResultCode SendSyncRequest(SharedPtr<Thread> thread);
 
-    std::string name; ///< Name of client port (optional)
+private:
+    explicit ClientSession(KernelCore& kernel);
+    ~ClientSession() override;
 
     /// The parent session, which links to the server endpoint.
     std::shared_ptr<Session> parent;
 
-private:
-    explicit ClientSession(KernelCore& kernel);
-    ~ClientSession() override;
+    /// Name of the client session (optional)
+    std::string name;
 };
 
 } // namespace Kernel
} // namespace Kernel

src/core/hle/kernel/hle_ipc.cpp

@@ -264,11 +264,11 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
         // Write the domain objects to the command buffer, these go after the raw untranslated data.
         // TODO(Subv): This completely ignores C buffers.
         std::size_t domain_offset = size - domain_message_header->num_objects;
-        auto& request_handlers = server_session->domain_request_handlers;
-
-        for (auto& object : domain_objects) {
-            request_handlers.emplace_back(object);
-            dst_cmdbuf[domain_offset++] = static_cast<u32_le>(request_handlers.size());
+        for (const auto& object : domain_objects) {
+            server_session->AppendDomainRequestHandler(object);
+            dst_cmdbuf[domain_offset++] =
+                static_cast<u32_le>(server_session->NumDomainRequestHandlers());
         }
     }
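Compressed to its essentials, this translate step appends each domain object to the session's handler list and writes the 1-based index of the appended handler (the list size immediately after the push) into the outgoing command buffer as the object id. A sketch with stand-in types, not the real HLERequestContext API:

#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

struct SessionRequestHandlerStub {};
using Handler = std::shared_ptr<SessionRequestHandlerStub>;

std::vector<Handler> handlers;           // stands in for domain_request_handlers
std::vector<std::uint32_t> cmdbuf(0x40); // stands in for dst_cmdbuf

void TranslateDomainObjects(const std::vector<Handler>& domain_objects,
                            std::size_t domain_offset) {
    for (const auto& object : domain_objects) {
        handlers.push_back(object);
        // size() right after push_back is the 1-based index of `object`,
        // which is the id the client later uses to address this handler.
        cmdbuf[domain_offset++] = static_cast<std::uint32_t>(handlers.size());
    }
}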

src/core/hle/kernel/scheduler.cpp

@@ -19,8 +19,7 @@ namespace Kernel {
 
 std::mutex Scheduler::scheduler_mutex;
 
-Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core)
-    : cpu_core{cpu_core}, system{system} {}
+Scheduler::Scheduler(Core::ARM_Interface& cpu_core) : cpu_core(cpu_core) {}
 
 Scheduler::~Scheduler() {
     for (auto& thread : thread_list) {
@@ -62,7 +61,7 @@ Thread* Scheduler::PopNextReadyThread() {
 
 void Scheduler::SwitchContext(Thread* new_thread) {
     Thread* const previous_thread = GetCurrentThread();
-    Process* const previous_process = system.Kernel().CurrentProcess();
+    Process* const previous_process = Core::CurrentProcess();
 
     UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -95,8 +94,8 @@ void Scheduler::SwitchContext(Thread* new_thread) {
         auto* const thread_owner_process = current_thread->GetOwnerProcess();
         if (previous_process != thread_owner_process) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
-            SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
+            Core::System::GetInstance().Kernel().MakeCurrentProcess(thread_owner_process);
+            SetCurrentPageTable(&Core::CurrentProcess()->VMManager().page_table);
         }
 
         cpu_core.LoadContext(new_thread->GetContext());
@@ -112,7 +111,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
 void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
     const u64 prev_switch_ticks = last_context_switch_time;
-    const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks();
+    const u64 most_recent_switch_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
     const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
 
     if (thread != nullptr) {
@@ -224,7 +223,8 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) {
         // Take the first non-nullptr one
         for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) {
             const auto res =
-                system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority);
+                Core::System::GetInstance().CpuCore(cur_core).Scheduler().GetNextSuggestedThread(
+                    core, priority);
 
             // If scheduler provides a suggested thread
             if (res != nullptr) {
src/core/hle/kernel/scheduler.h

@@ -13,8 +13,7 @@
 namespace Core {
 class ARM_Interface;
-class System;
-} // namespace Core
+}
 
 namespace Kernel {
@@ -22,7 +21,7 @@ class Process;
 
 class Scheduler final {
 public:
-    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core);
+    explicit Scheduler(Core::ARM_Interface& cpu_core);
     ~Scheduler();
 
     /// Returns whether there are any threads that are ready to run.
@@ -163,7 +162,6 @@ private:
     Core::ARM_Interface& cpu_core;
     u64 last_context_switch_time = 0;
-    Core::System& system;
 
     static std::mutex scheduler_mutex;
 };
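Taken together, the scheduler hunks trade a constructor-injected Core::System& for Core::System::GetInstance() lookups at each call site. Reduced to a hedged sketch (System here is a toy stand-in, not yuzu's class), the two wirings look like this:

#include <cstdint>

struct System {
    // Meyers singleton, analogous to Core::System::GetInstance().
    static System& GetInstance() {
        static System instance;
        return instance;
    }
    std::uint64_t GetTicks() const { return 0; } // stub
};

// Injected style (the removed lines): the dependency is explicit in the
// interface and easy to substitute in tests, but every owner in the
// chain has to forward it.
class SchedulerInjected {
public:
    explicit SchedulerInjected(System& system) : system{system} {}
    std::uint64_t Now() const { return system.GetTicks(); }
private:
    System& system; // must outlive this object
};

// Singleton style (the added lines): no plumbing through constructors,
// at the cost of an invisible global dependency.
class SchedulerSingleton {
public:
    std::uint64_t Now() const { return System::GetInstance().GetTicks(); }
};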

src/core/hle/kernel/server_session.cpp

@@ -63,6 +63,34 @@ void ServerSession::Acquire(Thread* thread) {
     pending_requesting_threads.pop_back();
 }
 
+void ServerSession::ClientDisconnected() {
+    // We keep a shared pointer to the hle handler to keep it alive throughout
+    // the call to ClientDisconnected, as ClientDisconnected invalidates the
+    // hle_handler member itself during the course of the function executing.
+    std::shared_ptr<SessionRequestHandler> handler = hle_handler;
+    if (handler) {
+        // Note that after this returns, this server session's hle_handler is
+        // invalidated (set to null).
+        handler->ClientDisconnected(this);
+    }
+
+    // TODO(Subv): Force a wake up of all the ServerSession's waiting threads and set
+    // their WaitSynchronization result to 0xC920181A.
+
+    // Clean up the list of client threads with pending requests, they are unneeded now that the
+    // client endpoint is closed.
+    pending_requesting_threads.clear();
+    currently_handling = nullptr;
+}
+
+void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) {
+    domain_request_handlers.push_back(std::move(handler));
+}
+
+std::size_t ServerSession::NumDomainRequestHandlers() const {
+    return domain_request_handlers.size();
+}
+
 ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
     auto* const domain_message_header = context.GetDomainMessageHeader();
     if (domain_message_header) {

src/core/hle/kernel/server_session.h

@@ -46,6 +46,14 @@
         return HANDLE_TYPE;
     }
 
+    Session* GetParent() {
+        return parent.get();
+    }
+
+    const Session* GetParent() const {
+        return parent.get();
+    }
+
     using SessionPair = std::tuple<SharedPtr<ServerSession>, SharedPtr<ClientSession>>;
 
     /**
@@ -78,23 +86,16 @@
     void Acquire(Thread* thread) override;
 
-    std::string name; ///< The name of this session (optional)
-    std::shared_ptr<Session> parent; ///< The parent session, which links to the client endpoint.
-    std::shared_ptr<SessionRequestHandler>
-        hle_handler; ///< This session's HLE request handler (applicable when not a domain)
+    /// Called when a client disconnection occurs.
+    void ClientDisconnected();
 
-    /// This is the list of domain request handlers (after conversion to a domain)
-    std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
+    /// Adds a new domain request handler to the collection of request handlers within
+    /// this ServerSession instance.
+    void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler);
 
-    /// List of threads that are pending a response after a sync request. This list is processed in
-    /// a LIFO manner, thus, the last request will be dispatched first.
-    /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
-    std::vector<SharedPtr<Thread>> pending_requesting_threads;
-
-    /// Thread whose request is currently being handled. A request is considered "handled" when a
-    /// response is sent via svcReplyAndReceive.
-    /// TODO(Subv): Find a better name for this.
-    SharedPtr<Thread> currently_handling;
+    /// Retrieves the total number of domain request handlers that have been
+    /// appended to this ServerSession instance.
+    std::size_t NumDomainRequestHandlers() const;
 
     /// Returns true if the session has been converted to a domain, otherwise False
     bool IsDomain() const {
@@ -129,8 +130,30 @@
     /// object handle.
     ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);
 
+    /// The parent session, which links to the client endpoint.
+    std::shared_ptr<Session> parent;
+
+    /// This session's HLE request handler (applicable when not a domain)
+    std::shared_ptr<SessionRequestHandler> hle_handler;
+
+    /// This is the list of domain request handlers (after conversion to a domain)
+    std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
+
+    /// List of threads that are pending a response after a sync request. This list is processed in
+    /// a LIFO manner, thus, the last request will be dispatched first.
+    /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
+    std::vector<SharedPtr<Thread>> pending_requesting_threads;
+
+    /// Thread whose request is currently being handled. A request is considered "handled" when a
+    /// response is sent via svcReplyAndReceive.
+    /// TODO(Subv): Find a better name for this.
+    SharedPtr<Thread> currently_handling;
+
     /// When set to True, converts the session to a domain at the end of the command
     bool convert_to_domain{};
+
+    /// The name of this session (optional)
+    std::string name;
 };
 
 } // namespace Kernel

src/core/hle/service/sm/controller.cpp

@@ -30,7 +30,7 @@ void Controller::DuplicateSession(Kernel::HLERequestContext& ctx) {
     IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
     rb.Push(RESULT_SUCCESS);
-    Kernel::SharedPtr<Kernel::ClientSession> session{ctx.Session()->parent->client};
+    Kernel::SharedPtr<Kernel::ClientSession> session{ctx.Session()->GetParent()->client};
     rb.PushMoveObjects(session);
 
     LOG_DEBUG(Service, "session={}", session->GetObjectId());

src/core/memory.cpp

@@ -171,9 +171,6 @@ T Read(const VAddr vaddr) {
         return value;
     }
 
-    // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
-
     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
@@ -204,9 +201,6 @@ void Write(const VAddr vaddr, const T data) {
         return;
     }
 
-    // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
-
    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
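The deadlock risk named in 5159f4eee8's message is a classic lock-order inversion, which this removal sidesteps. A hedged sketch of the failure mode (MMIODevice and TimerCallback are invented for illustration; only the g_hle_lock name is modeled on HLE::g_hle_lock):

#include <cstdint>
#include <mutex>

std::recursive_mutex g_hle_lock; // formerly acquired on every Read/Write

struct MMIODevice {
    std::mutex device_lock; // the per-component lock the message suggests

    // Thread A: Memory::Read takes g_hle_lock, dispatches to MMIO, and
    // then wants device_lock.
    std::uint32_t Read32() {
        std::scoped_lock lk{device_lock};
        return 0;
    }

    // Thread B: holds device_lock, then touches guest memory, which wants
    // g_hle_lock -- the opposite acquisition order, so A and B can block
    // each other forever.
    void TimerCallback() {
        std::scoped_lock lk{device_lock};
        std::lock_guard<std::recursive_mutex> hle{g_hle_lock}; // as Memory::Write did
    }
};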