Compare commits


4 Commits

Author SHA1 Message Date
Morph
25429998e3 bounded_threadsafe_queue: Use constexpr capacity and mask
While this is the primary change, we also:
- Remove the mpsc namespace and rename Queue to MPSCQueue
- Make Slot a private struct within MPSCQueue
- Remove the AlignedAllocator template argument, as we use std::allocator
- Replace instances of mask + 1 with capacity, and mask + 2 with capacity + 1
2022-06-15 16:59:13 -04:00
liamwhite
bd3bfe411d Merge pull request #8458 from lat9nq/no-constexpr-flow-block
structured_control_flow: Remove constexpr Flow::Block
2022-06-13 20:06:38 -04:00
bunnei
741da9c8bf Merge pull request #8388 from liamwhite/simpler-pause
CpuManager: simplify pausing
2022-06-13 15:48:03 -07:00
Liam
af022294dd CpuManager: simplify pausing
2022-06-08 21:47:29 -04:00
5 changed files with 110 additions and 182 deletions
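
The first commit above removes the runtime capacity argument: the queue's size is now a template parameter, so the power-of-two requirement moves from a thrown exception into a static_assert and the index mask becomes a compile-time constant (which is also why gpu_thread.h below can drop the queue{512} initializer). A minimal sketch of that idea, not the real queue; the class and names here are illustrative only:

#include <bit>
#include <cstddef>

// Sketch: the capacity is a template parameter, so the power-of-two check
// happens at compile time and the mask is a constant, not a runtime member.
template <typename T, std::size_t capacity = 0x400>
class RingIndex {
    static_assert(std::has_single_bit(capacity), "capacity must be an integer power of 2");

public:
    // i % capacity reduces to a single AND because capacity is a power of two
    static constexpr std::size_t idx(std::size_t i) noexcept {
        return i & mask;
    }

private:
    static constexpr std::size_t mask = capacity - 1;
};

// Usage: RingIndex<int, 8>::idx(9) == 1, while RingIndex<int, 6> fails to compile.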

View File

@@ -1,10 +1,7 @@
// SPDX-FileCopyrightText: Copyright (c) 2020 Erik Rigtorp <erik@rigtorp.se>
// SPDX-License-Identifier: MIT
#pragma once
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4324)
#endif
#include <atomic>
#include <bit>
@@ -12,105 +9,63 @@
#include <memory>
#include <mutex>
#include <new>
#include <stdexcept>
#include <stop_token>
#include <type_traits>
#include <utility>
namespace Common {
namespace mpsc {
#if defined(__cpp_lib_hardware_interference_size)
constexpr size_t hardware_interference_size = std::hardware_destructive_interference_size;
#else
constexpr size_t hardware_interference_size = 64;
#endif
template <typename T>
using AlignedAllocator = std::allocator<T>;
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4324)
#endif
template <typename T>
struct Slot {
~Slot() noexcept {
if (turn.test()) {
destroy();
}
}
template <typename... Args>
void construct(Args&&... args) noexcept {
static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
"T must be nothrow constructible with Args&&...");
std::construct_at(reinterpret_cast<T*>(&storage), std::forward<Args>(args)...);
}
void destroy() noexcept {
static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
std::destroy_at(reinterpret_cast<T*>(&storage));
}
T&& move() noexcept {
return reinterpret_cast<T&&>(storage);
}
// Align to avoid false sharing between adjacent slots
alignas(hardware_interference_size) std::atomic_flag turn{};
struct aligned_store {
struct type {
alignas(T) unsigned char data[sizeof(T)];
};
};
typename aligned_store::type storage;
};
template <typename T, typename Allocator = AlignedAllocator<Slot<T>>>
class Queue {
template <typename T, size_t capacity = 0x400>
class MPSCQueue {
public:
explicit Queue(const size_t capacity, const Allocator& allocator = Allocator())
: allocator_(allocator) {
if (capacity < 1) {
throw std::invalid_argument("capacity < 1");
}
// Ensure that the queue length is an integer power of 2
// This is so that idx(i) can be a simple i & mask_ instead of i % capacity
// https://github.com/rigtorp/MPMCQueue/pull/36
if (!std::has_single_bit(capacity)) {
throw std::invalid_argument("capacity must be an integer power of 2");
}
mask_ = capacity - 1;
explicit MPSCQueue() : allocator{std::allocator<Slot<T>>()} {
// Allocate one extra slot to prevent false sharing on the last slot
slots_ = allocator_.allocate(mask_ + 2);
slots = allocator.allocate(capacity + 1);
// Allocators are not required to honor alignment for over-aligned types
// (see http://eel.is/c++draft/allocator.requirements#10) so we verify
// alignment here
if (reinterpret_cast<uintptr_t>(slots_) % alignof(Slot<T>) != 0) {
allocator_.deallocate(slots_, mask_ + 2);
if (reinterpret_cast<uintptr_t>(slots) % alignof(Slot<T>) != 0) {
allocator.deallocate(slots, capacity + 1);
throw std::bad_alloc();
}
for (size_t i = 0; i < mask_ + 1; ++i) {
std::construct_at(&slots_[i]);
for (size_t i = 0; i < capacity; ++i) {
std::construct_at(&slots[i]);
}
static_assert(std::has_single_bit(capacity), "capacity must be an integer power of 2");
static_assert(alignof(Slot<T>) == hardware_interference_size,
"Slot must be aligned to cache line boundary to prevent false sharing");
static_assert(sizeof(Slot<T>) % hardware_interference_size == 0,
"Slot size must be a multiple of cache line size to prevent "
"false sharing between adjacent slots");
static_assert(sizeof(Queue) % hardware_interference_size == 0,
static_assert(sizeof(MPSCQueue) % hardware_interference_size == 0,
"Queue size must be a multiple of cache line size to "
"prevent false sharing between adjacent queues");
}
~Queue() noexcept {
for (size_t i = 0; i < mask_ + 1; ++i) {
slots_[i].~Slot();
~MPSCQueue() noexcept {
for (size_t i = 0; i < capacity; ++i) {
std::destroy_at(&slots[i]);
}
allocator_.deallocate(slots_, mask_ + 2);
allocator.deallocate(slots, capacity + 1);
}
// non-copyable and non-movable
Queue(const Queue&) = delete;
Queue& operator=(const Queue&) = delete;
// The queue must be both non-copyable and non-movable
MPSCQueue(const MPSCQueue&) = delete;
MPSCQueue& operator=(const MPSCQueue&) = delete;
MPSCQueue(MPSCQueue&&) = delete;
MPSCQueue& operator=(MPSCQueue&&) = delete;
void Push(const T& v) noexcept {
static_assert(std::is_nothrow_copy_constructible_v<T>,
@@ -125,8 +80,8 @@ public:
void Pop(T& v, std::stop_token stop) noexcept {
auto const tail = tail_.fetch_add(1);
auto& slot = slots_[idx(tail)];
if (false == slot.turn.test()) {
auto& slot = slots[idx(tail)];
if (!slot.turn.test()) {
std::unique_lock lock{cv_mutex};
cv.wait(lock, stop, [&slot] { return slot.turn.test(); });
}
@@ -137,12 +92,46 @@ public:
}
private:
template <typename U = T>
struct Slot {
~Slot() noexcept {
if (turn.test()) {
destroy();
}
}
template <typename... Args>
void construct(Args&&... args) noexcept {
static_assert(std::is_nothrow_constructible_v<U, Args&&...>,
"T must be nothrow constructible with Args&&...");
std::construct_at(reinterpret_cast<U*>(&storage), std::forward<Args>(args)...);
}
void destroy() noexcept {
static_assert(std::is_nothrow_destructible_v<U>, "T must be nothrow destructible");
std::destroy_at(reinterpret_cast<U*>(&storage));
}
U&& move() noexcept {
return reinterpret_cast<U&&>(storage);
}
// Align to avoid false sharing between adjacent slots
alignas(hardware_interference_size) std::atomic_flag turn{};
struct aligned_store {
struct type {
alignas(U) unsigned char data[sizeof(U)];
};
};
typename aligned_store::type storage;
};
template <typename... Args>
void emplace(Args&&... args) noexcept {
static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
"T must be nothrow constructible with Args&&...");
auto const head = head_.fetch_add(1);
auto& slot = slots_[idx(head)];
auto& slot = slots[idx(head)];
slot.turn.wait(true);
slot.construct(std::forward<Args>(args)...);
slot.turn.test_and_set();
@@ -150,31 +139,29 @@ private:
}
constexpr size_t idx(size_t i) const noexcept {
return i & mask_;
return i & mask;
}
std::conditional_t<true, std::condition_variable_any, std::condition_variable> cv;
std::mutex cv_mutex;
size_t mask_;
Slot<T>* slots_;
[[no_unique_address]] Allocator allocator_;
static constexpr size_t mask = capacity - 1;
// Align to avoid false sharing between head_ and tail_
alignas(hardware_interference_size) std::atomic<size_t> head_{0};
alignas(hardware_interference_size) std::atomic<size_t> tail_{0};
std::mutex cv_mutex;
std::condition_variable_any cv;
Slot<T>* slots;
[[no_unique_address]] std::allocator<Slot<T>> allocator;
static_assert(std::is_nothrow_copy_assignable_v<T> || std::is_nothrow_move_assignable_v<T>,
"T must be nothrow copy or move assignable");
static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
};
} // namespace mpsc
template <typename T, typename Allocator = mpsc::AlignedAllocator<mpsc::Slot<T>>>
using MPSCQueue = mpsc::Queue<T, Allocator>;
} // namespace Common
#ifdef _MSC_VER
#pragma warning(pop)
#endif
} // namespace Common
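
The rewritten header keeps the Rigtorp-style layout decisions visible in the diff above: each Slot is cache-line aligned so adjacent slots never share a line, one extra slot is allocated so the last live slot cannot share a line with whatever follows the array, and the pointer returned by the allocator is verified because allocators are not required to honor extended alignment. A standalone sketch of just that allocation step, with illustrative names:

#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>

#if defined(__cpp_lib_hardware_interference_size)
inline constexpr std::size_t cache_line = std::hardware_destructive_interference_size;
#else
inline constexpr std::size_t cache_line = 64;
#endif

struct alignas(cache_line) PaddedSlot {
    unsigned char data[cache_line];
};

PaddedSlot* AllocateSlots(std::size_t count) {
    std::allocator<PaddedSlot> allocator;
    // One extra slot so the final live slot cannot share a cache line with
    // unrelated data placed directly after the array.
    PaddedSlot* slots = allocator.allocate(count + 1);
    // Allocators are not required to honor alignment for over-aligned types
    // (see http://eel.is/c++draft/allocator.requirements#10), so verify it here.
    if (reinterpret_cast<std::uintptr_t>(slots) % alignof(PaddedSlot) != 0) {
        allocator.deallocate(slots, count + 1);
        throw std::bad_alloc();
    }
    return slots;
}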

View File

@@ -16,7 +16,8 @@
namespace Core {
CpuManager::CpuManager(System& system_) : system{system_} {}
CpuManager::CpuManager(System& system_)
: pause_barrier{std::make_unique<Common::Barrier>(1)}, system{system_} {}
CpuManager::~CpuManager() = default;
void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
@@ -30,8 +31,10 @@ void CpuManager::Initialize() {
for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
}
pause_barrier = std::make_unique<Common::Barrier>(Core::Hardware::NUM_CPU_CORES + 1);
} else {
core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
pause_barrier = std::make_unique<Common::Barrier>(2);
}
}
@@ -138,51 +141,14 @@ void CpuManager::MultiCoreRunSuspendThread() {
auto core = kernel.CurrentPhysicalCoreIndex();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
current_thread->DisableDispatch();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.CurrentPhysicalCoreIndex());
scheduler.RescheduleCurrentCore();
}
}
void CpuManager::MultiCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
all_not_barrier = true;
for (const auto& data : core_data) {
all_not_barrier &= !data.is_running.load() && data.initialized.load();
}
}
for (auto& data : core_data) {
data.enter_barrier->Set();
}
if (paused_state.load()) {
bool all_barrier = false;
while (!all_barrier) {
all_barrier = true;
for (const auto& data : core_data) {
all_barrier &= data.is_paused.load() && data.initialized.load();
}
}
for (auto& data : core_data) {
data.exit_barrier->Set();
}
}
} else {
/// Wait until all cores are paused.
bool all_barrier = false;
while (!all_barrier) {
all_barrier = true;
for (const auto& data : core_data) {
all_barrier &= data.is_paused.load() && data.initialized.load();
}
}
/// Don't release the barrier
}
paused_state = paused;
}
///////////////////////////////////////////////////////////////////////////////
/// SingleCore ///
///////////////////////////////////////////////////////////////////////////////
@@ -235,8 +201,9 @@ void CpuManager::SingleCoreRunSuspendThread() {
auto core = kernel.GetCurrentHostThreadID();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
current_thread->DisableDispatch();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
scheduler.RescheduleCurrentCore();
}
@@ -274,37 +241,21 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
}
}
void CpuManager::SingleCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
}
core_data[0].enter_barrier->Set();
if (paused_state.load()) {
bool all_barrier = false;
while (!all_barrier) {
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
core_data[0].exit_barrier->Set();
}
} else {
/// Wait until all cores are paused.
bool all_barrier = false;
while (!all_barrier) {
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
/// Don't release the barrier
}
paused_state = paused;
}
void CpuManager::Pause(bool paused) {
if (is_multicore) {
MultiCorePause(paused);
} else {
SingleCorePause(paused);
std::scoped_lock lk{pause_lock};
if (pause_state == paused) {
return;
}
// Set the new state
pause_state.store(paused);
// Wake up any waiting threads
pause_state.notify_all();
// Wait for all threads to successfully change state before returning
pause_barrier->Sync();
}
void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
@@ -320,27 +271,29 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
Common::SetCurrentThreadName(name.c_str());
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
auto& data = core_data[core];
data.enter_barrier = std::make_unique<Common::Event>();
data.exit_barrier = std::make_unique<Common::Event>();
data.host_context = Common::Fiber::ThreadToFiber();
data.is_running = false;
data.initialized = true;
const bool sc_sync = !is_async_gpu && !is_multicore;
bool sc_sync_first_use = sc_sync;
// Cleanup
SCOPE_EXIT({
data.host_context->Exit();
data.enter_barrier.reset();
data.exit_barrier.reset();
data.initialized = false;
MicroProfileOnThreadExit();
});
/// Running
while (running_mode) {
data.is_running = false;
data.enter_barrier->Wait();
if (pause_state.load(std::memory_order_relaxed)) {
// Wait for caller to acknowledge pausing
pause_barrier->Sync();
// Wait until unpaused
pause_state.wait(true, std::memory_order_relaxed);
// Wait for caller to acknowledge unpausing
pause_barrier->Sync();
}
if (sc_sync_first_use) {
system.GPU().ObtainContext();
sc_sync_first_use = false;
@@ -352,12 +305,7 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
}
auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
data.is_running = true;
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
data.is_running = false;
data.is_paused = true;
data.exit_barrier->Wait();
data.is_paused = false;
}
}
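
The new pause path shown above replaces the per-core enter/exit events with one atomic flag and one barrier: Pause() flips pause_state, wakes any sleeping worker, and then both sides meet at pause_barrier so the caller knows every core has acknowledged the transition, while RunThread performs the mirrored sequence. A minimal sketch of the same handshake using std::barrier and std::atomic::wait in place of yuzu's Common::Barrier (names and worker count are illustrative):

#include <atomic>
#include <barrier>
#include <cstddef>
#include <stop_token>
#include <thread>

constexpr std::ptrdiff_t num_workers = 4; // stand-in for Core::Hardware::NUM_CPU_CORES

std::atomic<bool> pause_state{false};
std::barrier pause_barrier{num_workers + 1}; // every worker plus the controlling thread

void WorkerLoop(std::stop_token stop) {
    while (!stop.stop_requested()) {
        if (pause_state.load(std::memory_order_relaxed)) {
            pause_barrier.arrive_and_wait();                   // acknowledge the pause
            pause_state.wait(true, std::memory_order_relaxed); // sleep until unpaused
            pause_barrier.arrive_and_wait();                   // acknowledge the unpause
        }
        // ... run one slice of guest work here ...
    }
}

// Assumes a single controlling thread; the real code additionally guards this
// with a mutex and early-returns when the state does not change.
void Pause(bool paused) {
    if (pause_state.load() == paused) {
        return;
    }
    pause_state.store(paused);
    pause_state.notify_all();        // wake any worker sleeping in pause_state.wait()
    pause_barrier.arrive_and_wait(); // wait until every worker has acknowledged
}

Usage would be to spawn num_workers threads as std::jthread{WorkerLoop} and call Pause(true)/Pause(false) from the controlling thread; each call returns only once every worker has reached the barrier.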

View File

@@ -69,13 +69,11 @@ private:
void MultiCoreRunGuestLoop();
void MultiCoreRunIdleThread();
void MultiCoreRunSuspendThread();
void MultiCorePause(bool paused);
void SingleCoreRunGuestThread();
void SingleCoreRunGuestLoop();
void SingleCoreRunIdleThread();
void SingleCoreRunSuspendThread();
void SingleCorePause(bool paused);
static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
@@ -83,16 +81,13 @@ private:
struct CoreData {
std::shared_ptr<Common::Fiber> host_context;
std::unique_ptr<Common::Event> enter_barrier;
std::unique_ptr<Common::Event> exit_barrier;
std::atomic<bool> is_running;
std::atomic<bool> is_paused;
std::atomic<bool> initialized;
std::jthread host_thread;
};
std::atomic<bool> running_mode{};
std::atomic<bool> paused_state{};
std::atomic<bool> pause_state{};
std::unique_ptr<Common::Barrier> pause_barrier{};
std::mutex pause_lock{};
std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};

View File

@@ -252,6 +252,7 @@ struct KernelCore::Impl {
core_id)
.IsSuccess());
suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
suspend_threads[core_id]->DisableDispatch();
}
}
@@ -1073,9 +1074,6 @@ void KernelCore::Suspend(bool in_suspention) {
impl->suspend_threads[core_id]->SetState(state);
impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
ThreadWaitReasonForDebugging::Suspended);
if (!should_suspend) {
impl->suspend_threads[core_id]->DisableDispatch();
}
}
}
}

View File

@@ -98,7 +98,7 @@ struct CommandDataContainer {
struct SynchState final {
using CommandQueue = Common::MPSCQueue<CommandDataContainer>;
std::mutex write_lock;
CommandQueue queue{512}; // size must be 2^n
CommandQueue queue;
u64 last_fence{};
std::atomic<u64> signaled_fence{};
std::condition_variable_any cv;