Compare commits

...

13 Commits

Author SHA1 Message Date
Fernando Sahmkow
db42bcb306 Fixes and corrections on formatting. 2019-03-27 14:49:43 -04:00
Fernando Sahmkow
f35e09fe0d Fixes to multilevelqueue's iterator. 2019-03-27 14:34:33 -04:00
Fernando Sahmkow
dde0814837 Use MultiLevelQueue instead of old ThreadQueueList 2019-03-27 14:34:32 -04:00
Fernando Sahmkow
9dbba9240b Add MultiLevelQueue Tests 2019-03-27 14:34:31 -04:00
Fernando Sahmkow
3bc815a5dc Implement intrinsics CountTrailingZeroes and test it. 2019-03-27 14:34:29 -04:00
Fernando Sahmkow
522957f9f3 Implement a MultiLevelQueue 2019-03-27 14:33:44 -04:00
bunnei
47f2405ab1 Merge pull request #2285 from lioncash/unused-struct
kernel/process: Remove unused AddressMapping struct
2019-03-26 11:17:03 -04:00
bunnei
595511876e Merge pull request #2287 from lioncash/coretiming-cb
core/core_timing: Make callback parameters consistent
2019-03-25 21:06:33 -04:00
bunnei
8a24a804c5 Merge pull request #2286 from lioncash/fwd
kernel/kernel: Remove unnecessary forward declaration
2019-03-25 21:05:33 -04:00
bunnei
b93a8a368f Merge pull request #2288 from lioncash/linkage
core/cheat_engine: Make MemoryReadImpl and MemoryWriteImpl internally linked
2019-03-25 21:02:25 -04:00
Lioncash
b26481c94b core/cheat_engine: Make MemoryReadImpl and MemoryWriteImpl internally linked
These don't need to be visible outside of the translation unit, so they
can be enclosed within an anonymous namespace.
2019-03-24 18:34:42 -04:00
Lioncash
c5d41fd812 core/core_timing: Make callback parameters consistent
In some cases, our callbacks were using s64 as a parameter, and in other
cases, they were using an int, which is inconsistent.

To make all callbacks consistent, we can just use an s64 as the type for
late cycles, given it gets rid of the need to cast internally.

While we're at it, also resolve some signed/unsigned conversions that
were occurring related to the callback registration.
2019-03-24 18:12:17 -04:00
Lioncash
7c4bc7b883 kernel/process: Remove unused AddressMapping struct
Another leftover from citra that's now no longer necessary.
2019-03-24 17:40:11 -04:00
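
As commit c5d41fd812 above explains, the core timing callbacks now take the late-cycle count as an s64. Below is a minimal, illustrative sketch of the consistent signature; the integer aliases stand in for common/common_types.h, and the callback body is invented for illustration rather than taken from the codebase.

// Illustrative sketch only -- not part of the diffs below.
#include <cstdint>
#include <functional>

using u64 = std::uint64_t; // stand-ins for the aliases in common/common_types.h
using s64 = std::int64_t;

// The callback type standardized by this change: the late-cycle count is always
// s64, so the scheduler can pass (global_timer - event_time) without a cast.
using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>;

// A hypothetical callback written against the new signature.
TimedCallback example_callback = [](u64 /*userdata*/, s64 cycles_late) {
    // Reschedule the event, compensating for how late it fired, e.g.:
    // core_timing.ScheduleEvent(interval_ticks - cycles_late, event);
};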
17 changed files with 498 additions and 39 deletions

View File

@@ -38,7 +38,7 @@ Stream::Stream(Core::Timing::CoreTiming& core_timing, u32 sample_rate, Format fo
sink_stream{sink_stream}, core_timing{core_timing}, name{std::move(name_)} {
release_event = core_timing.RegisterEvent(
name, [this](u64 userdata, int cycles_late) { ReleaseActiveBuffer(); });
name, [this](u64 userdata, s64 cycles_late) { ReleaseActiveBuffer(); });
}
void Stream::Play() {

View File

@@ -98,6 +98,7 @@ add_library(common STATIC
microprofile.h
microprofileui.h
misc.cpp
multi_level_queue.h
page_table.cpp
page_table.h
param_package.cpp

View File

@@ -58,4 +58,43 @@ inline u64 CountLeadingZeroes64(u64 value) {
return __builtin_clzll(value);
}
#endif
#ifdef _MSC_VER
inline u32 CountTrailingZeroes32(u32 value) {
unsigned long trailing_zero = 0;
if (_BitScanForward(&trailing_zero, value) != 0) {
return trailing_zero;
}
return 32;
}
inline u64 CountTrailingZeroes64(u64 value) {
unsigned long trailing_zero = 0;
if (_BitScanForward64(&trailing_zero, value) != 0) {
return trailing_zero;
}
return 64;
}
#else
inline u32 CountTrailingZeroes32(u32 value) {
if (value == 0) {
return 32;
}
return __builtin_ctz(value);
}
inline u64 CountTrailingZeroes64(u64 value) {
if (value == 0) {
return 64;
}
return __builtin_ctzll(value);
}
#endif
} // namespace Common
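
For context on how these helpers are used elsewhere in this change, here is a small sketch: the new MultiLevelQueue below keeps a u64 bitmask of non-empty priority levels and locates the highest set priority with CountTrailingZeroes64. The function name HighestSetPriority is hypothetical.

#include "common/bit_util.h"
#include "common/common_types.h"

// Bit N set in the mask means "priority level N is non-empty"; the highest
// priority (smallest index) that is set is simply the number of trailing zeroes.
inline u32 HighestSetPriority(u64 used_priorities, u32 depth) {
    if (used_priorities == 0) {
        return depth; // sentinel: no level is populated
    }
    return static_cast<u32>(Common::CountTrailingZeroes64(used_priorities));
}

// e.g. HighestSetPriority(0b0010'1000, 64) == 3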

View File

@@ -0,0 +1,337 @@
// Copyright 2019 TuxSH
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <iterator>
#include <list>
#include <utility>
#include "common/bit_util.h"
#include "common/common_types.h"
namespace Common {
/**
* A MultiLevelQueue is a type of priority queue which has the following characteristics:
* - iterable through each of its elements.
* - back can be obtained.
* - O(1) add, lookup (both front and back)
* - discrete priorities and a max of 64 priorities (limited domain)
* This type of priority queue is normally used for managing threads within a scheduler.
*/
template <typename T, std::size_t Depth>
class MultiLevelQueue {
public:
using value_type = T;
using reference = value_type&;
using const_reference = const value_type&;
using pointer = value_type*;
using const_pointer = const value_type*;
using difference_type = typename std::pointer_traits<pointer>::difference_type;
using size_type = std::size_t;
template <bool is_constant>
class iterator_impl {
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = T;
using pointer = std::conditional_t<is_constant, const T*, T*>;
using reference = std::conditional_t<is_constant, const T&, T&>;
using difference_type = typename std::pointer_traits<pointer>::difference_type;
friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
if (lhs.IsEnd() && rhs.IsEnd())
return true;
return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
}
friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
return !operator==(lhs, rhs);
}
reference operator*() const {
return *it;
}
pointer operator->() const {
return it.operator->();
}
iterator_impl& operator++() {
if (IsEnd()) {
return *this;
}
++it;
if (it == GetEndItForPrio()) {
u64 prios = mlq.used_priorities;
prios &= ~((1ULL << (current_priority + 1)) - 1);
if (prios == 0) {
current_priority = mlq.depth();
} else {
current_priority = CountTrailingZeroes64(prios);
it = GetBeginItForPrio();
}
}
return *this;
}
iterator_impl& operator--() {
if (IsEnd()) {
if (mlq.used_priorities != 0) {
current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
it = GetEndItForPrio();
--it;
}
} else if (it == GetBeginItForPrio()) {
u64 prios = mlq.used_priorities;
prios &= (1ULL << current_priority) - 1;
if (prios != 0) {
current_priority = CountTrailingZeroes64(prios);
it = GetEndItForPrio();
--it;
}
} else {
--it;
}
return *this;
}
iterator_impl operator++(int) {
const iterator_impl v{*this};
++(*this);
return v;
}
iterator_impl operator--(int) {
const iterator_impl v{*this};
--(*this);
return v;
}
// allow implicit conversion from iterator to const_iterator (and copying)
iterator_impl(const iterator_impl<false>& other)
: mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
iterator_impl(const iterator_impl<true>& other)
: mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
iterator_impl& operator=(const iterator_impl<false>& other) {
mlq = other.mlq;
it = other.it;
current_priority = other.current_priority;
return *this;
}
friend class iterator_impl<true>;
iterator_impl() = default;
private:
friend class MultiLevelQueue;
using container_ref =
std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
typename std::list<T>::iterator>;
explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
: mlq(mlq), it(it), current_priority(current_priority) {}
explicit iterator_impl(container_ref mlq, u32 current_priority)
: mlq(mlq), it(), current_priority(current_priority) {}
bool IsEnd() const {
return current_priority == mlq.depth();
}
list_iterator GetBeginItForPrio() const {
return mlq.levels[current_priority].begin();
}
list_iterator GetEndItForPrio() const {
return mlq.levels[current_priority].end();
}
container_ref mlq;
list_iterator it;
u32 current_priority;
};
using iterator = iterator_impl<false>;
using const_iterator = iterator_impl<true>;
void add(const T& element, u32 priority, bool send_back = true) {
if (send_back)
levels[priority].push_back(element);
else
levels[priority].push_front(element);
used_priorities |= 1ULL << priority;
}
void remove(const T& element, u32 priority) {
auto it = ListIterateTo(levels[priority], element);
if (it == levels[priority].end())
return;
levels[priority].erase(it);
if (levels[priority].empty()) {
used_priorities &= ~(1ULL << priority);
}
}
void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
remove(element, old_priority);
add(element, new_priority, !adjust_front);
}
void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
adjust(*it, old_priority, new_priority, adjust_front);
}
void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
ListIterateTo(levels[priority], element));
other.used_priorities |= 1ULL << priority;
if (levels[priority].empty()) {
used_priorities &= ~(1ULL << priority);
}
}
void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
transfer_to_front(*it, priority, other);
}
void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
ListIterateTo(levels[priority], element));
other.used_priorities |= 1ULL << priority;
if (levels[priority].empty()) {
used_priorities &= ~(1ULL << priority);
}
}
void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
transfer_to_back(*it, priority, other);
}
void yield(u32 priority, std::size_t n = 1) {
ListShiftForward(levels[priority], n);
}
std::size_t depth() const {
return Depth;
}
std::size_t size(u32 priority) const {
return levels[priority].size();
}
std::size_t size() const {
u64 priorities = used_priorities;
std::size_t size = 0;
while (priorities != 0) {
const u64 current_priority = CountTrailingZeroes64(priorities);
size += levels[current_priority].size();
priorities &= ~(1ULL << current_priority);
}
return size;
}
bool empty() const {
return used_priorities == 0;
}
bool empty(u32 priority) const {
return (used_priorities & (1ULL << priority)) == 0;
}
u32 highest_priority_set(u32 max_priority = 0) const {
const u64 priorities =
max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities));
}
u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
const u64 priorities = min_priority >= Depth - 1
? used_priorities
: (used_priorities & ((1ULL << (min_priority + 1)) - 1));
return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
}
const_iterator cbegin(u32 max_prio = 0) const {
const u32 priority = highest_priority_set(max_prio);
return priority == Depth ? cend()
: const_iterator{*this, levels[priority].cbegin(), priority};
}
const_iterator begin(u32 max_prio = 0) const {
return cbegin(max_prio);
}
iterator begin(u32 max_prio = 0) {
const u32 priority = highest_priority_set(max_prio);
return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
}
const_iterator cend(u32 min_prio = Depth - 1) const {
return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
}
const_iterator end(u32 min_prio = Depth - 1) const {
return cend(min_prio);
}
iterator end(u32 min_prio = Depth - 1) {
return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
}
T& front(u32 max_priority = 0) {
const u32 priority = highest_priority_set(max_priority);
return levels[priority == Depth ? 0 : priority].front();
}
const T& front(u32 max_priority = 0) const {
const u32 priority = highest_priority_set(max_priority);
return levels[priority == Depth ? 0 : priority].front();
}
T& back(u32 min_priority = Depth - 1) {
const u32 priority = lowest_priority_set(min_priority); // lowest_priority_set is intentional here
return levels[priority == Depth ? 63 : priority].back();
}
const T& back(u32 min_priority = Depth - 1) const {
const u32 priority = lowest_priority_set(min_priority); // lowest_priority_set is intentional here
return levels[priority == Depth ? 63 : priority].back();
}
private:
using const_list_iterator = typename std::list<T>::const_iterator;
static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
if (shift >= list.size()) {
return;
}
const auto begin_range = list.begin();
const auto end_range = std::next(begin_range, shift);
list.splice(list.end(), list, begin_range, end_range);
}
static void ListSplice(std::list<T>& in_list, const_list_iterator position,
std::list<T>& out_list, const_list_iterator element) {
in_list.splice(position, out_list, element);
}
static const_list_iterator ListIterateTo(const std::list<T>& list, const T& element) {
auto it = list.cbegin();
while (it != list.cend() && *it != element) {
++it;
}
return it;
}
std::array<std::list<T>, Depth> levels;
u64 used_priorities = 0;
};
} // namespace Common
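
To make the interface above concrete, here is a short usage sketch; it assumes a queue of plain ints with 64 priority levels, and the values, priorities, and the Example function are arbitrary choices for illustration.

#include <cstddef>
#include "common/multi_level_queue.h"

void Example() {
    Common::MultiLevelQueue<int, 64> mlq;

    mlq.add(10, 3);         // append 10 at priority level 3
    mlq.add(20, 3);         // 20 queues up behind 10 on the same level
    mlq.add(30, 1, false);  // push 30 to the front of level 1

    int highest = mlq.front();       // 30: level 1 is the highest priority in use
    bool unused = mlq.empty(2);      // true: nothing was ever added at priority 2
    std::size_t total = mlq.size();  // 3 elements across all levels

    mlq.yield(3);           // rotate level 3, so 10 now sits behind 20
    mlq.adjust(30, 1, 5);   // move 30 from priority 1 to priority 5
    mlq.remove(20, 3);      // erase 20 from level 3
}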

View File

@@ -186,7 +186,7 @@ void CoreTiming::Advance() {
Event evt = std::move(event_queue.front());
std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
event_queue.pop_back();
evt.type->callback(evt.userdata, static_cast<int>(global_timer - evt.time));
evt.type->callback(evt.userdata, global_timer - evt.time);
}
is_global_timer_sane = false;

View File

@@ -15,7 +15,7 @@
namespace Core::Timing {
/// A callback that may be scheduled for a particular core timing event.
using TimedCallback = std::function<void(u64 userdata, int cycles_late)>;
using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>;
/// Contains the characteristics of a particular event.
struct EventType {

View File

@@ -423,6 +423,7 @@ std::array<u8, 16> TextCheatParser::ParseSingleLineCheat(const std::string& line
return out;
}
namespace {
u64 MemoryReadImpl(u32 width, VAddr addr) {
switch (width) {
case 1:
@@ -457,6 +458,7 @@ void MemoryWriteImpl(u32 width, VAddr addr, u64 value) {
UNREACHABLE();
}
}
} // Anonymous namespace
CheatEngine::CheatEngine(Core::System& system, std::vector<CheatList> cheats_,
const std::string& build_id, VAddr code_region_start,
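
The hunk above wraps MemoryReadImpl and MemoryWriteImpl in an anonymous namespace which, as commit b26481c94b notes, gives them internal linkage so no external symbols are emitted for them. A minimal standalone sketch of the pattern (file and function names here are invented for illustration):

// internal_linkage_example.cpp -- illustrative only
#include <cstdint>

namespace {

// Visible only inside this translation unit; it cannot be referenced from,
// or collide with a same-named function in, any other translation unit.
std::uint64_t ReadImpl(std::uint64_t addr) {
    return addr; // placeholder body
}

} // Anonymous namespace

// Externally visible function that forwards to the internal helper.
std::uint64_t Read(std::uint64_t addr) {
    return ReadImpl(addr);
}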

View File

@@ -29,7 +29,7 @@ namespace Kernel {
* @param thread_handle The handle of the thread that's been awoken
* @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
*/
static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_late) {
static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
const auto proper_handle = static_cast<Handle>(thread_handle);
const auto& system = Core::System::GetInstance();

View File

@@ -35,14 +35,6 @@ class Thread;
struct CodeSet;
struct AddressMapping {
// Address and size must be page-aligned
VAddr address;
u64 size;
bool read_only;
bool unk_flag;
};
enum class MemoryRegion : u16 {
APPLICATION = 1,
SYSTEM = 2,

View File

@@ -30,7 +30,7 @@ Scheduler::~Scheduler() {
bool Scheduler::HaveReadyThreads() const {
std::lock_guard<std::mutex> lock(scheduler_mutex);
return ready_queue.get_first() != nullptr;
return !ready_queue.empty();
}
Thread* Scheduler::GetCurrentThread() const {
@@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() {
Thread* thread = GetCurrentThread();
if (thread && thread->GetStatus() == ThreadStatus::Running) {
if (ready_queue.empty()) {
return thread;
}
// We have to do better than the current thread.
// This call returns null when that's not possible.
next = ready_queue.pop_first_better(thread->GetPriority());
if (!next) {
// Otherwise just keep going with the current thread
next = ready_queue.front();
if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
next = thread;
}
} else {
next = ready_queue.pop_first();
if (ready_queue.empty()) {
return nullptr;
}
next = ready_queue.front();
}
return next;
}
void Scheduler::SwitchContext(Thread* new_thread) {
Thread* const previous_thread = GetCurrentThread();
Thread* previous_thread = GetCurrentThread();
Process* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
if (previous_thread->GetStatus() == ThreadStatus::Running) {
// This is only the case when a reschedule is triggered without the current thread
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
ready_queue.push_front(previous_thread->GetPriority(), previous_thread);
ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
previous_thread->SetStatus(ThreadStatus::Ready);
}
}
@@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
current_thread = new_thread;
ready_queue.remove(new_thread->GetPriority(), new_thread);
ready_queue.remove(new_thread, new_thread->GetPriority());
new_thread->SetStatus(ThreadStatus::Running);
auto* const thread_owner_process = current_thread->GetOwnerProcess();
@@ -147,7 +152,6 @@ void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);
thread_list.push_back(std::move(thread));
ready_queue.prepare(priority);
}
void Scheduler::RemoveThread(Thread* thread) {
@@ -161,33 +165,37 @@ void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
ready_queue.push_back(priority, thread);
ready_queue.add(thread, priority);
}
void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
ready_queue.remove(priority, thread);
ready_queue.remove(thread, priority);
}
void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);
if (thread->GetPriority() == priority) {
return;
}
// If thread was ready, adjust queues
if (thread->GetStatus() == ThreadStatus::Ready)
ready_queue.move(thread, thread->GetPriority(), priority);
else
ready_queue.prepare(priority);
ready_queue.adjust(thread, thread->GetPriority(), priority);
}
Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
std::lock_guard<std::mutex> lock(scheduler_mutex);
const u32 mask = 1U << core;
return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) {
return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority;
});
for (auto* thread : ready_queue) {
if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
return thread;
}
}
return nullptr;
}
void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {

View File

@@ -7,7 +7,7 @@
#include <mutex>
#include <vector>
#include "common/common_types.h"
#include "common/thread_queue_list.h"
#include "common/multi_level_queue.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
@@ -156,7 +156,7 @@ private:
std::vector<SharedPtr<Thread>> thread_list;
/// Lists only ready thread ids.
Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
SharedPtr<Thread> current_thread = nullptr;

View File

@@ -36,9 +36,9 @@ namespace Service::HID {
// Updating period for each HID device.
// TODO(ogniK): Find actual polling rate of hid
constexpr u64 pad_update_ticks = Core::Timing::BASE_CLOCK_RATE / 66;
constexpr u64 accelerometer_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100;
constexpr u64 gyroscope_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100;
constexpr s64 pad_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 66);
constexpr s64 accelerometer_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 100);
constexpr s64 gyroscope_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 100);
constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000;
IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") {
@@ -75,7 +75,7 @@ IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") {
// Register update callbacks
auto& core_timing = Core::System::GetInstance().CoreTiming();
pad_update_event =
core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, int cycles_late) {
core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) {
UpdateControllers(userdata, cycles_late);
});
@@ -106,7 +106,7 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
rb.PushCopyObjects(shared_mem);
}
void IAppletResource::UpdateControllers(u64 userdata, int cycles_late) {
void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) {
auto& core_timing = Core::System::GetInstance().CoreTiming();
const bool should_reload = Settings::values.is_device_reload_pending.exchange(false);

View File

@@ -65,7 +65,7 @@ private:
}
void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx);
void UpdateControllers(u64 userdata, int cycles_late);
void UpdateControllers(u64 userdata, s64 cycles_late);
Kernel::SharedPtr<Kernel::SharedMemory> shared_mem;

View File

@@ -26,7 +26,7 @@
namespace Service::NVFlinger {
constexpr std::size_t SCREEN_REFRESH_RATE = 60;
constexpr u64 frame_ticks = static_cast<u64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
constexpr s64 frame_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} {
displays.emplace_back(0, "Default");
@@ -37,7 +37,7 @@ NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_t
// Schedule the screen composition events
composition_event =
core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) {
core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, s64 cycles_late) {
Compose();
this->core_timing.ScheduleEvent(frame_ticks - cycles_late, composition_event);
});

View File

@@ -1,5 +1,7 @@
add_executable(tests
common/bit_field.cpp
common/bit_utils.cpp
common/multi_level_queue.cpp
common/param_package.cpp
common/ring_buffer.cpp
core/arm/arm_test_common.cpp

View File

@@ -0,0 +1,23 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <catch2/catch.hpp>
#include <math.h>
#include "common/bit_util.h"
namespace Common {
TEST_CASE("BitUtils::CountTrailingZeroes", "[common]") {
REQUIRE(Common::CountTrailingZeroes32(0) == 32);
REQUIRE(Common::CountTrailingZeroes64(0) == 64);
REQUIRE(Common::CountTrailingZeroes32(9) == 0);
REQUIRE(Common::CountTrailingZeroes32(8) == 3);
REQUIRE(Common::CountTrailingZeroes32(0x801000) == 12);
REQUIRE(Common::CountTrailingZeroes64(9) == 0);
REQUIRE(Common::CountTrailingZeroes64(8) == 3);
REQUIRE(Common::CountTrailingZeroes64(0x801000) == 12);
REQUIRE(Common::CountTrailingZeroes64(0x801000000000UL) == 36);
}
} // namespace Common

View File

@@ -0,0 +1,55 @@
// Copyright 2019 Yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <catch2/catch.hpp>
#include <math.h>
#include "common/common_types.h"
#include "common/multi_level_queue.h"
namespace Common {
TEST_CASE("MultiLevelQueue", "[common]") {
std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0};
Common::MultiLevelQueue<f32, 64> mlq;
REQUIRE(mlq.empty());
mlq.add(values[2], 2);
mlq.add(values[7], 7);
mlq.add(values[3], 3);
mlq.add(values[4], 4);
mlq.add(values[0], 0);
mlq.add(values[5], 5);
mlq.add(values[6], 6);
mlq.add(values[1], 1);
u32 index = 0;
bool all_set = true;
for (auto& f : mlq) {
all_set &= (f == values[index]);
index++;
}
REQUIRE(all_set);
REQUIRE(!mlq.empty());
f32 v = 8.0;
mlq.add(v, 2);
v = -7.0;
mlq.add(v, 2, false);
REQUIRE(mlq.front(2) == -7.0);
mlq.yield(2);
REQUIRE(mlq.front(2) == values[2]);
REQUIRE(mlq.back(2) == -7.0);
REQUIRE(mlq.empty(8));
v = 10.0;
mlq.add(v, 8);
mlq.adjust(v, 8, 9);
REQUIRE(mlq.front(9) == v);
REQUIRE(mlq.empty(8));
REQUIRE(!mlq.empty(9));
mlq.adjust(values[0], 0, 9);
REQUIRE(mlq.highest_priority_set() == 1);
REQUIRE(mlq.lowest_priority_set() == 9);
mlq.remove(values[1], 1);
REQUIRE(mlq.highest_priority_set() == 2);
REQUIRE(mlq.empty(1));
}
} // namespace Common