Compare commits


4 Commits

Author  SHA1  Message  Date
Fernando Sahmkow  a8d4927e29  Corrections, documenting and fixes.  2019-02-16 16:52:24 -04:00
Fernando Sahmkow  ecccfe0337  Use u128 on Clock Cycles calculation.  2019-02-15 22:57:16 -04:00
Fernando Sahmkow  3ea48e8ebe  Implement 128 bits Unsigned Integer Multiplication and Division.  2019-02-15 22:55:31 -04:00
Fernando Sahmkow  5b7ec71fb7  Correct CNTPCT to use Clock Cycles instead of Cpu Cycles.  2019-02-15 22:55:29 -04:00
36 changed files with 303 additions and 927 deletions

View File

@@ -21,7 +21,7 @@ public:
Buffer(Tag tag, std::vector<s16>&& samples) : tag{tag}, samples{std::move(samples)} {}
/// Returns the raw audio data for the buffer
std::vector<s16>& GetSamples() {
std::vector<s16>& Samples() {
return samples;
}

View File

@@ -95,7 +95,7 @@ void Stream::PlayNextBuffer() {
active_buffer = queued_buffers.front();
queued_buffers.pop();
VolumeAdjustSamples(active_buffer->GetSamples());
VolumeAdjustSamples(active_buffer->Samples());
sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples());

View File

@@ -113,6 +113,8 @@ add_library(common STATIC
threadsafe_queue.h
timer.cpp
timer.h
uint128.cpp
uint128.h
vector_math.h
web_result.h
)

View File

@@ -40,7 +40,9 @@ public:
const Impl& operator=(Impl const&) = delete;
void PushEntry(Entry e) {
std::lock_guard<std::mutex> lock(message_mutex);
message_queue.Push(std::move(e));
message_cv.notify_one();
}
void AddBackend(std::unique_ptr<Backend> backend) {
@@ -84,13 +86,15 @@ private:
}
};
while (true) {
entry = message_queue.PopWait();
if (entry.final_entry) {
{
std::unique_lock<std::mutex> lock(message_mutex);
message_cv.wait(lock, [&] { return !running || message_queue.Pop(entry); });
}
if (!running) {
break;
}
write_logs(entry);
}
// Drain the logging queue. Only writes out up to MAX_LOGS_TO_WRITE to prevent a case
// where a system is repeatedly spamming logs even on close.
const int MAX_LOGS_TO_WRITE = filter.IsDebug() ? INT_MAX : 100;
@@ -102,13 +106,14 @@ private:
}
~Impl() {
Entry entry;
entry.final_entry = true;
message_queue.Push(entry);
running = false;
message_cv.notify_one();
backend_thread.join();
}
std::mutex writing_mutex;
std::atomic_bool running{true};
std::mutex message_mutex, writing_mutex;
std::condition_variable message_cv;
std::thread backend_thread;
std::vector<std::unique_ptr<Backend>> backends;
Common::MPSCQueue<Log::Entry> message_queue;
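
For context, here is a minimal, self-contained sketch of the notify-on-push / stop-flag worker pattern the backend switches to in this hunk, replacing the PopWait()/final_entry sentinel. The class and member names are illustrative rather than yuzu's, and the locking is simplified: the stop flag is mutated under the same mutex as the queue so the wait predicate cannot miss a shutdown request, and pending entries are simply dropped on exit rather than drained as the real backend does.

#include <condition_variable>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <utility>

class BackendWorker {
public:
    BackendWorker() : thread([this] { Run(); }) {}

    ~BackendWorker() {
        {
            std::lock_guard<std::mutex> lock(mutex);
            running = false; // request shutdown under the lock so the predicate can't miss it
        }
        cv.notify_one();
        thread.join();
    }

    void Push(std::string entry) {
        {
            std::lock_guard<std::mutex> lock(mutex);
            queue.push(std::move(entry));
        }
        cv.notify_one(); // wake the consumer
    }

private:
    void Run() {
        while (true) {
            std::string entry;
            {
                std::unique_lock<std::mutex> lock(mutex);
                // Sleep until there is either work to do or a shutdown request.
                cv.wait(lock, [this] { return !running || !queue.empty(); });
                if (!running) {
                    break; // simplified: remaining entries are dropped here, not drained
                }
                entry = std::move(queue.front());
                queue.pop();
            }
            Write(entry); // do the slow part outside the lock
        }
    }

    void Write(const std::string& /*entry*/) { /* write to backends */ }

    std::mutex mutex;
    std::condition_variable cv;
    std::queue<std::string> queue;
    bool running = true;
    std::thread thread; // declared last so it starts after the other members exist
};

Construction starts the consumer and destruction joins it, mirroring the Push/notify_one flow and the stop-flag/join shutdown shown in the ~Impl() hunk above.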

View File

@@ -27,7 +27,6 @@ struct Entry {
unsigned int line_num;
std::string function;
std::string message;
bool final_entry = false;
Entry() = default;
Entry(Entry&& o) = default;

View File

@@ -8,7 +8,6 @@
// single reader, single writer queue
#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <utility>
@@ -46,7 +45,6 @@ public:
ElementPtr* new_ptr = new ElementPtr();
write_ptr->next.store(new_ptr, std::memory_order_release);
write_ptr = new_ptr;
cv.notify_one();
++size;
}
@@ -76,16 +74,6 @@ public:
return true;
}
T PopWait() {
if (Empty()) {
std::unique_lock<std::mutex> lock(cv_mutex);
cv.wait(lock, [this]() { return !Empty(); });
}
T t;
Pop(t);
return t;
}
// not thread-safe
void Clear() {
size.store(0);
@@ -113,8 +101,6 @@ private:
ElementPtr* write_ptr;
ElementPtr* read_ptr;
std::atomic_size_t size{0};
std::mutex cv_mutex;
std::condition_variable cv;
};
// a simple thread-safe,
@@ -149,10 +135,6 @@ public:
return spsc_queue.Pop(t);
}
T PopWait() {
return spsc_queue.PopWait();
}
// not thread-safe
void Clear() {
spsc_queue.Clear();

src/common/uint128.cpp (new file, 41 lines added)
View File

@@ -0,0 +1,41 @@
#ifdef _MSC_VER
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif
#include <cstring>
#include "common/uint128.h"
namespace Common {
u128 Multiply64Into128(u64 a, u64 b) {
u128 result;
#ifdef _MSC_VER
result[0] = _umul128(a, b, &result[1]);
#else
unsigned __int128 tmp = a;
tmp *= b;
std::memcpy(&result, &tmp, sizeof(u128));
#endif
return result;
}
std::pair<u64, u64> Divide128On32(u128 dividend, u32 divisor) {
u64 remainder = dividend[0] % divisor;
u64 accum = dividend[0] / divisor;
if (dividend[1] == 0)
return {accum, remainder};
// We ignore dividend[1] / divisor as that overflows
const u64 first_segment = (dividend[1] % divisor) << 32;
accum += (first_segment / divisor) << 32;
const u64 second_segment = (first_segment % divisor) << 32;
accum += (second_segment / divisor);
remainder += second_segment % divisor;
if (remainder >= divisor) {
accum++;
remainder -= divisor;
}
return {accum, remainder};
}
} // namespace Common

src/common/uint128.h (new file, 14 lines added)
View File

@@ -0,0 +1,14 @@
#include <utility>
#include "common/common_types.h"
namespace Common {
// This function multiplies 2 u64 values and produces a u128 value;
u128 Multiply64Into128(u64 a, u64 b);
// This function divides a u128 by a u32 value and produces two u64 values:
// the result of division and the remainder
std::pair<u64, u64> Divide128On32(u128 dividend, u32 divisor);
} // namespace Common
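
To make the intended composition concrete, here is a standalone sketch of a caller multiplying two u64 values and dividing the 128-bit product by a u32 divisor, which is the shape CpuCyclesToClockCycles takes further down. The helper bodies below use the portable GCC/Clang unsigned __int128 path purely for illustration; they are stand-ins for the declarations above, and u128_t here is not the array-of-two-u64 type used in uint128.cpp.

#include <cstdint>
#include <cstdio>
#include <utility>

using u32 = std::uint32_t;
using u64 = std::uint64_t;
using u128_t = unsigned __int128; // GCC/Clang extension, used here only as a stand-in

static u128_t Multiply64Into128(u64 a, u64 b) {
    return static_cast<u128_t>(a) * b;
}

static std::pair<u64, u64> Divide128On32(u128_t dividend, u32 divisor) {
    return {static_cast<u64>(dividend / divisor), static_cast<u64>(dividend % divisor)};
}

int main() {
    // ~33 minutes of emulated CPU time at ~1.02 GHz; ticks * 19'200'000 would wrap a u64.
    const u64 ticks = 2'000'000'000'000ULL;
    const u64 cntfrq = 19'200'000;        // counter frequency (CNTFREQ)
    const u32 base_clock = 1'019'215'872; // CPU clock (BASE_CLOCK_RATE)

    const u128_t product = Multiply64Into128(ticks, cntfrq);
    const auto [clock_cycles, remainder] = Divide128On32(product, base_clock);

    std::printf("clock cycles: %llu (remainder %llu)\n",
                static_cast<unsigned long long>(clock_cycles),
                static_cast<unsigned long long>(remainder));
}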

View File

@@ -400,10 +400,6 @@ add_library(core STATIC
hle/service/time/time.h
hle/service/usb/usb.cpp
hle/service/usb/usb.h
hle/service/vi/display/vi_display.cpp
hle/service/vi/display/vi_display.h
hle/service/vi/layer/vi_layer.cpp
hle/service/vi/layer/vi_layer.h
hle/service/vi/vi.cpp
hle/service/vi/vi.h
hle/service/vi/vi_m.cpp

View File

@@ -12,6 +12,7 @@
#include "core/core.h"
#include "core/core_cpu.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/gdbstub/gdbstub.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc.h"
@@ -119,7 +120,7 @@ public:
return std::max(parent.core_timing.GetDowncount(), 0);
}
u64 GetCNTPCT() override {
return parent.core_timing.GetTicks();
return Timing::CpuCyclesToClockCycles(parent.core_timing.GetTicks());
}
ARM_Dynarmic& parent;
@@ -151,7 +152,7 @@ std::unique_ptr<Dynarmic::A64::Jit> ARM_Dynarmic::MakeJit() const {
config.tpidr_el0 = &cb->tpidr_el0;
config.dczid_el0 = 4;
config.ctr_el0 = 0x8444c004;
config.cntfrq_el0 = 19200000; // Value from fusee.
config.cntfrq_el0 = Timing::CNTFREQ;
// Unpredictable instructions
config.define_unpredictable_behaviour = true;

View File

@@ -128,7 +128,7 @@ struct System::Impl {
return ResultStatus::ErrorVideoCore;
}
gpu_core = std::make_unique<Tegra::GPU>(system, renderer->Rasterizer());
gpu_core = std::make_unique<Tegra::GPU>(renderer->Rasterizer());
cpu_core_manager.Initialize(system);
is_powered_on = true;

View File

@@ -7,6 +7,7 @@
#include <cinttypes>
#include <limits>
#include "common/logging/log.h"
#include "common/uint128.h"
namespace Core::Timing {
@@ -60,4 +61,9 @@ s64 nsToCycles(u64 ns) {
return (BASE_CLOCK_RATE * static_cast<s64>(ns)) / 1000000000;
}
u64 CpuCyclesToClockCycles(u64 ticks) {
const u128 temporal = Common::Multiply64Into128(ticks, CNTFREQ);
return Common::Divide128On32(temporal, static_cast<u32>(BASE_CLOCK_RATE)).first;
}
} // namespace Core::Timing

View File

@@ -11,6 +11,7 @@ namespace Core::Timing {
// The below clock rate is based on Switch's clockspeed being widely known as 1.020GHz
// The exact value used is of course unverified.
constexpr u64 BASE_CLOCK_RATE = 1019215872; // Switch clock speed is 1020MHz un/docked
constexpr u64 CNTFREQ = 19200000; // Value from fusee.
inline s64 msToCycles(int ms) {
// since ms is int there is no way to overflow
@@ -61,4 +62,6 @@ inline u64 cyclesToMs(s64 cycles) {
return cycles * 1000 / BASE_CLOCK_RATE;
}
u64 CpuCyclesToClockCycles(u64 ticks);
} // namespace Core::Timing
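
As a quick sanity check on why the 128-bit intermediate is needed (illustrative, not part of the diff): one emulated second, i.e. BASE_CLOCK_RATE ticks, should map to exactly CNTFREQ counter ticks, yet the naive u64 product ticks * CNTFREQ already wraps after about 2^64 / 19,200,000, roughly 15 minutes of emulated time at ~1.02 GHz.

#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uint64_t BASE_CLOCK_RATE = 1'019'215'872; // from core_timing_util.h
    constexpr std::uint64_t CNTFREQ = 19'200'000;            // from core_timing_util.h

    // Largest tick count whose product with CNTFREQ still fits in 64 bits.
    constexpr std::uint64_t max_ticks = UINT64_MAX / CNTFREQ;
    std::printf("u64 product wraps after %llu ticks (~%llu s of emulated time)\n",
                static_cast<unsigned long long>(max_ticks),
                static_cast<unsigned long long>(max_ticks / BASE_CLOCK_RATE));
}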

View File

@@ -398,8 +398,7 @@ static bool ValidCryptoRevisionString(std::string_view base, size_t begin, size_
}
void KeyManager::LoadFromFile(const std::string& filename, bool is_title_keys) {
std::ifstream file;
OpenFStream(file, filename, std::ios_base::in);
std::ifstream file(filename);
if (!file.is_open())
return;

View File

@@ -17,7 +17,8 @@
#include "core/hle/result.h"
#include "core/memory.h"
namespace Kernel::AddressArbiter {
namespace Kernel {
namespace AddressArbiter {
// Performs actual address waiting logic.
static ResultCode WaitForAddress(VAddr address, s64 timeout) {
@@ -175,4 +176,5 @@ ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
return WaitForAddress(address, timeout);
}
} // namespace Kernel::AddressArbiter
} // namespace AddressArbiter
} // namespace Kernel

View File

@@ -8,8 +8,9 @@
union ResultCode;
namespace Kernel::AddressArbiter {
namespace Kernel {
namespace AddressArbiter {
enum class ArbitrationType {
WaitIfLessThan = 0,
DecrementAndWaitIfLessThan = 1,
@@ -28,5 +29,6 @@ ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 valu
ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, bool should_decrement);
ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
} // namespace AddressArbiter
} // namespace Kernel::AddressArbiter
} // namespace Kernel

View File

@@ -14,12 +14,11 @@
#include "core/core_timing_util.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/hle/service/nvflinger/buffer_queue.h"
#include "core/hle/service/nvflinger/nvflinger.h"
#include "core/hle/service/vi/display/vi_display.h"
#include "core/hle/service/vi/layer/vi_layer.h"
#include "core/perf_stats.h"
#include "video_core/renderer_base.h"
@@ -29,12 +28,6 @@ constexpr std::size_t SCREEN_REFRESH_RATE = 60;
constexpr u64 frame_ticks = static_cast<u64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} {
displays.emplace_back(0, "Default");
displays.emplace_back(1, "External");
displays.emplace_back(2, "Edid");
displays.emplace_back(3, "Internal");
displays.emplace_back(4, "Null");
// Schedule the screen composition events
composition_event =
core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) {
@@ -59,14 +52,13 @@ std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
// TODO(Subv): Currently we only support the Default display.
ASSERT(name == "Default");
const auto itr =
std::find_if(displays.begin(), displays.end(),
[&](const VI::Display& display) { return display.GetName() == name; });
const auto itr = std::find_if(displays.begin(), displays.end(),
[&](const Display& display) { return display.name == name; });
if (itr == displays.end()) {
return {};
}
return itr->GetID();
return itr->id;
}
std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
@@ -76,10 +68,13 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
return {};
}
ASSERT_MSG(display->layers.empty(), "Only one layer is supported per display at the moment");
const u64 layer_id = next_layer_id++;
const u32 buffer_queue_id = next_buffer_queue_id++;
buffer_queues.emplace_back(buffer_queue_id, layer_id);
display->CreateLayer(layer_id, buffer_queues.back());
auto buffer_queue = std::make_shared<BufferQueue>(buffer_queue_id, layer_id);
display->layers.emplace_back(layer_id, buffer_queue);
buffer_queues.emplace_back(std::move(buffer_queue));
return layer_id;
}
@@ -90,7 +85,7 @@ std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) co
return {};
}
return layer->GetBufferQueue().GetId();
return layer->buffer_queue->GetId();
}
Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id) const {
@@ -100,29 +95,20 @@ Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_i
return nullptr;
}
return display->GetVSyncEvent();
return display->vsync_event.readable;
}
BufferQueue& NVFlinger::FindBufferQueue(u32 id) {
std::shared_ptr<BufferQueue> NVFlinger::FindBufferQueue(u32 id) const {
const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(),
[id](const auto& queue) { return queue.GetId() == id; });
[&](const auto& queue) { return queue->GetId() == id; });
ASSERT(itr != buffer_queues.end());
return *itr;
}
const BufferQueue& NVFlinger::FindBufferQueue(u32 id) const {
const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(),
[id](const auto& queue) { return queue.GetId() == id; });
ASSERT(itr != buffer_queues.end());
return *itr;
}
VI::Display* NVFlinger::FindDisplay(u64 display_id) {
const auto itr =
std::find_if(displays.begin(), displays.end(),
[&](const VI::Display& display) { return display.GetID() == display_id; });
Display* NVFlinger::FindDisplay(u64 display_id) {
const auto itr = std::find_if(displays.begin(), displays.end(),
[&](const Display& display) { return display.id == display_id; });
if (itr == displays.end()) {
return nullptr;
@@ -131,10 +117,9 @@ VI::Display* NVFlinger::FindDisplay(u64 display_id) {
return &*itr;
}
const VI::Display* NVFlinger::FindDisplay(u64 display_id) const {
const auto itr =
std::find_if(displays.begin(), displays.end(),
[&](const VI::Display& display) { return display.GetID() == display_id; });
const Display* NVFlinger::FindDisplay(u64 display_id) const {
const auto itr = std::find_if(displays.begin(), displays.end(),
[&](const Display& display) { return display.id == display_id; });
if (itr == displays.end()) {
return nullptr;
@@ -143,41 +128,57 @@ const VI::Display* NVFlinger::FindDisplay(u64 display_id) const {
return &*itr;
}
VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) {
Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) {
auto* const display = FindDisplay(display_id);
if (display == nullptr) {
return nullptr;
}
return display->FindLayer(layer_id);
const auto itr = std::find_if(display->layers.begin(), display->layers.end(),
[&](const Layer& layer) { return layer.id == layer_id; });
if (itr == display->layers.end()) {
return nullptr;
}
return &*itr;
}
const VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) const {
const Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) const {
const auto* const display = FindDisplay(display_id);
if (display == nullptr) {
return nullptr;
}
return display->FindLayer(layer_id);
const auto itr = std::find_if(display->layers.begin(), display->layers.end(),
[&](const Layer& layer) { return layer.id == layer_id; });
if (itr == display->layers.end()) {
return nullptr;
}
return &*itr;
}
void NVFlinger::Compose() {
for (auto& display : displays) {
// Trigger vsync for this display at the end of drawing
SCOPE_EXIT({ display.SignalVSyncEvent(); });
SCOPE_EXIT({ display.vsync_event.writable->Signal(); });
// Don't do anything for displays without layers.
if (!display.HasLayers())
if (display.layers.empty())
continue;
// TODO(Subv): Support more than 1 layer.
VI::Layer& layer = display.GetLayer(0);
auto& buffer_queue = layer.GetBufferQueue();
ASSERT_MSG(display.layers.size() == 1, "Max 1 layer per display is supported");
Layer& layer = display.layers[0];
auto& buffer_queue = layer.buffer_queue;
// Search for a queued buffer and acquire it
auto buffer = buffer_queue.AcquireBuffer();
auto buffer = buffer_queue->AcquireBuffer();
MicroProfileFlip();
@@ -202,8 +203,19 @@ void NVFlinger::Compose() {
igbp_buffer.width, igbp_buffer.height, igbp_buffer.stride,
buffer->get().transform, buffer->get().crop_rect);
buffer_queue.ReleaseBuffer(buffer->get().slot);
buffer_queue->ReleaseBuffer(buffer->get().slot);
}
}
Layer::Layer(u64 id, std::shared_ptr<BufferQueue> queue) : id(id), buffer_queue(std::move(queue)) {}
Layer::~Layer() = default;
Display::Display(u64 id, std::string name) : id(id), name(std::move(name)) {
auto& kernel = Core::System::GetInstance().Kernel();
vsync_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Sticky,
fmt::format("Display VSync Event {}", id));
}
Display::~Display() = default;
} // namespace Service::NVFlinger

View File

@@ -4,6 +4,7 @@
#pragma once
#include <array>
#include <memory>
#include <optional>
#include <string>
@@ -25,17 +26,31 @@ class WritableEvent;
namespace Service::Nvidia {
class Module;
} // namespace Service::Nvidia
namespace Service::VI {
class Display;
class Layer;
} // namespace Service::VI
}
namespace Service::NVFlinger {
class BufferQueue;
struct Layer {
Layer(u64 id, std::shared_ptr<BufferQueue> queue);
~Layer();
u64 id;
std::shared_ptr<BufferQueue> buffer_queue;
};
struct Display {
Display(u64 id, std::string name);
~Display();
u64 id;
std::string name;
std::vector<Layer> layers;
Kernel::EventPair vsync_event;
};
class NVFlinger final {
public:
explicit NVFlinger(Core::Timing::CoreTiming& core_timing);
@@ -65,10 +80,7 @@ public:
Kernel::SharedPtr<Kernel::ReadableEvent> FindVsyncEvent(u64 display_id) const;
/// Obtains a buffer queue identified by the ID.
BufferQueue& FindBufferQueue(u32 id);
/// Obtains a buffer queue identified by the ID.
const BufferQueue& FindBufferQueue(u32 id) const;
std::shared_ptr<BufferQueue> FindBufferQueue(u32 id) const;
/// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when
/// finished.
@@ -76,21 +88,27 @@ public:
private:
/// Finds the display identified by the specified ID.
VI::Display* FindDisplay(u64 display_id);
Display* FindDisplay(u64 display_id);
/// Finds the display identified by the specified ID.
const VI::Display* FindDisplay(u64 display_id) const;
const Display* FindDisplay(u64 display_id) const;
/// Finds the layer identified by the specified ID in the desired display.
VI::Layer* FindLayer(u64 display_id, u64 layer_id);
Layer* FindLayer(u64 display_id, u64 layer_id);
/// Finds the layer identified by the specified ID in the desired display.
const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const;
const Layer* FindLayer(u64 display_id, u64 layer_id) const;
std::shared_ptr<Nvidia::Module> nvdrv;
std::vector<VI::Display> displays;
std::vector<BufferQueue> buffer_queues;
std::array<Display, 5> displays{{
{0, "Default"},
{1, "External"},
{2, "Edid"},
{3, "Internal"},
{4, "Null"},
}};
std::vector<std::shared_ptr<BufferQueue>> buffer_queues;
/// Id to use for the next layer that is created, this counter is shared among all displays.
u64 next_layer_id = 1;

View File

@@ -1,71 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <utility>
#include <fmt/format.h>
#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/service/vi/display/vi_display.h"
#include "core/hle/service/vi/layer/vi_layer.h"
namespace Service::VI {
Display::Display(u64 id, std::string name) : id{id}, name{std::move(name)} {
auto& kernel = Core::System::GetInstance().Kernel();
vsync_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Sticky,
fmt::format("Display VSync Event {}", id));
}
Display::~Display() = default;
Layer& Display::GetLayer(std::size_t index) {
return layers.at(index);
}
const Layer& Display::GetLayer(std::size_t index) const {
return layers.at(index);
}
Kernel::SharedPtr<Kernel::ReadableEvent> Display::GetVSyncEvent() const {
return vsync_event.readable;
}
void Display::SignalVSyncEvent() {
vsync_event.writable->Signal();
}
void Display::CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue) {
// TODO(Subv): Support more than 1 layer.
ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment");
layers.emplace_back(id, buffer_queue);
}
Layer* Display::FindLayer(u64 id) {
const auto itr = std::find_if(layers.begin(), layers.end(),
[id](const VI::Layer& layer) { return layer.GetID() == id; });
if (itr == layers.end()) {
return nullptr;
}
return &*itr;
}
const Layer* Display::FindLayer(u64 id) const {
const auto itr = std::find_if(layers.begin(), layers.end(),
[id](const VI::Layer& layer) { return layer.GetID() == id; });
if (itr == layers.end()) {
return nullptr;
}
return &*itr;
}
} // namespace Service::VI

View File

@@ -1,98 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/writable_event.h"
namespace Service::NVFlinger {
class BufferQueue;
}
namespace Service::VI {
class Layer;
/// Represents a single display type
class Display {
public:
/// Constructs a display with a given unique ID and name.
///
/// @param id The unique ID for this display.
/// @param name The name for this display.
///
Display(u64 id, std::string name);
~Display();
Display(const Display&) = delete;
Display& operator=(const Display&) = delete;
Display(Display&&) = default;
Display& operator=(Display&&) = default;
/// Gets the unique ID assigned to this display.
u64 GetID() const {
return id;
}
/// Gets the name of this display
const std::string& GetName() const {
return name;
}
/// Whether or not this display has any layers added to it.
bool HasLayers() const {
return !layers.empty();
}
/// Gets a layer for this display based off an index.
Layer& GetLayer(std::size_t index);
/// Gets a layer for this display based off an index.
const Layer& GetLayer(std::size_t index) const;
/// Gets the readable vsync event.
Kernel::SharedPtr<Kernel::ReadableEvent> GetVSyncEvent() const;
/// Signals the internal vsync event.
void SignalVSyncEvent();
/// Creates and adds a layer to this display with the given ID.
///
/// @param id The ID to assign to the created layer.
/// @param buffer_queue The buffer queue for the layer instance to use.
///
void CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue);
/// Attempts to find a layer with the given ID.
///
/// @param id The layer ID.
///
/// @returns If found, the Layer instance with the given ID.
/// If not found, then nullptr is returned.
///
Layer* FindLayer(u64 id);
/// Attempts to find a layer with the given ID.
///
/// @param id The layer ID.
///
/// @returns If found, the Layer instance with the given ID.
/// If not found, then nullptr is returned.
///
const Layer* FindLayer(u64 id) const;
private:
u64 id;
std::string name;
std::vector<Layer> layers;
Kernel::EventPair vsync_event;
};
} // namespace Service::VI

View File

@@ -1,13 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/service/vi/layer/vi_layer.h"
namespace Service::VI {
Layer::Layer(u64 id, NVFlinger::BufferQueue& queue) : id{id}, buffer_queue{queue} {}
Layer::~Layer() = default;
} // namespace Service::VI

View File

@@ -1,52 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
namespace Service::NVFlinger {
class BufferQueue;
}
namespace Service::VI {
/// Represents a single display layer.
class Layer {
public:
/// Constructs a layer with a given ID and buffer queue.
///
/// @param id The ID to assign to this layer.
/// @param queue The buffer queue for this layer to use.
///
Layer(u64 id, NVFlinger::BufferQueue& queue);
~Layer();
Layer(const Layer&) = delete;
Layer& operator=(const Layer&) = delete;
Layer(Layer&&) = default;
Layer& operator=(Layer&&) = delete;
/// Gets the ID for this layer.
u64 GetID() const {
return id;
}
/// Gets a reference to the buffer queue this layer is using.
NVFlinger::BufferQueue& GetBufferQueue() {
return buffer_queue;
}
/// Gets a const reference to the buffer queue this layer is using.
const NVFlinger::BufferQueue& GetBufferQueue() const {
return buffer_queue;
}
private:
u64 id;
NVFlinger::BufferQueue& buffer_queue;
};
} // namespace Service::VI

View File

@@ -525,7 +525,7 @@ private:
LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id,
static_cast<u32>(transaction), flags);
auto& buffer_queue = nv_flinger->FindBufferQueue(id);
auto buffer_queue = nv_flinger->FindBufferQueue(id);
if (transaction == TransactionId::Connect) {
IGBPConnectRequestParcel request{ctx.ReadBuffer()};
@@ -538,7 +538,7 @@ private:
} else if (transaction == TransactionId::SetPreallocatedBuffer) {
IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()};
buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer);
buffer_queue->SetPreallocatedBuffer(request.data.slot, request.buffer);
IGBPSetPreallocatedBufferResponseParcel response{};
ctx.WriteBuffer(response.Serialize());
@@ -546,7 +546,7 @@ private:
IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()};
const u32 width{request.data.width};
const u32 height{request.data.height};
std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height);
std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
if (slot) {
// Buffer is available
@@ -559,8 +559,8 @@ private:
[=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
Kernel::ThreadWakeupReason reason) {
// Repeat TransactParcel DequeueBuffer when a buffer is available
auto& buffer_queue = nv_flinger->FindBufferQueue(id);
std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height);
auto buffer_queue = nv_flinger->FindBufferQueue(id);
std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
ASSERT_MSG(slot != std::nullopt, "Could not dequeue buffer.");
IGBPDequeueBufferResponseParcel response{*slot};
@@ -568,28 +568,28 @@ private:
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
},
buffer_queue.GetWritableBufferWaitEvent());
buffer_queue->GetWritableBufferWaitEvent());
}
} else if (transaction == TransactionId::RequestBuffer) {
IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()};
auto& buffer = buffer_queue.RequestBuffer(request.slot);
auto& buffer = buffer_queue->RequestBuffer(request.slot);
IGBPRequestBufferResponseParcel response{buffer};
ctx.WriteBuffer(response.Serialize());
} else if (transaction == TransactionId::QueueBuffer) {
IGBPQueueBufferRequestParcel request{ctx.ReadBuffer()};
buffer_queue.QueueBuffer(request.data.slot, request.data.transform,
request.data.GetCropRect());
buffer_queue->QueueBuffer(request.data.slot, request.data.transform,
request.data.GetCropRect());
IGBPQueueBufferResponseParcel response{1280, 720};
ctx.WriteBuffer(response.Serialize());
} else if (transaction == TransactionId::Query) {
IGBPQueryRequestParcel request{ctx.ReadBuffer()};
const u32 value =
buffer_queue.Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type));
u32 value =
buffer_queue->Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type));
IGBPQueryResponseParcel response{value};
ctx.WriteBuffer(response.Serialize());
@@ -629,12 +629,12 @@ private:
LOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown);
const auto& buffer_queue = nv_flinger->FindBufferQueue(id);
const auto buffer_queue = nv_flinger->FindBufferQueue(id);
// TODO(Subv): Find out what this actually is.
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(buffer_queue.GetBufferWaitEvent());
rb.PushCopyObjects(buffer_queue->GetBufferWaitEvent());
}
std::shared_ptr<NVFlinger::NVFlinger> nv_flinger;

View File

@@ -105,9 +105,7 @@ if (ENABLE_VULKAN)
target_sources(video_core PRIVATE
renderer_vulkan/declarations.h
renderer_vulkan/vk_device.cpp
renderer_vulkan/vk_device.h
renderer_vulkan/vk_resource_manager.cpp
renderer_vulkan/vk_resource_manager.h)
renderer_vulkan/vk_device.h)
target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include)
target_compile_definitions(video_core PRIVATE HAS_VULKAN)

View File

@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/memory.h"
@@ -12,9 +11,9 @@
namespace Tegra::Engines {
KeplerMemory::KeplerMemory(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
KeplerMemory::KeplerMemory(VideoCore::RasterizerInterface& rasterizer,
MemoryManager& memory_manager)
: system{system}, memory_manager(memory_manager), rasterizer{rasterizer} {}
: memory_manager(memory_manager), rasterizer{rasterizer} {}
KeplerMemory::~KeplerMemory() = default;
@@ -51,7 +50,7 @@ void KeplerMemory::ProcessData(u32 data) {
rasterizer.InvalidateRegion(*dest_address, sizeof(u32));
Memory::Write32(*dest_address, data);
system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
state.write_offset++;
}

View File

@@ -5,16 +5,13 @@
#pragma once
#include <array>
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
namespace Core {
class System;
}
namespace VideoCore {
class RasterizerInterface;
}
@@ -26,8 +23,7 @@ namespace Tegra::Engines {
class KeplerMemory final {
public:
KeplerMemory(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
MemoryManager& memory_manager);
KeplerMemory(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
~KeplerMemory();
/// Write the value to the register identified by method.
@@ -80,7 +76,6 @@ public:
} state{};
private:
Core::System& system;
MemoryManager& memory_manager;
VideoCore::RasterizerInterface& rasterizer;

View File

@@ -19,10 +19,8 @@ namespace Tegra::Engines {
/// First register id that is actually a Macro call.
constexpr u32 MacroRegistersStart = 0xE00;
Maxwell3D::Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
MemoryManager& memory_manager)
: memory_manager(memory_manager), system{system}, rasterizer{rasterizer},
macro_interpreter(*this) {
Maxwell3D::Maxwell3D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
: memory_manager(memory_manager), rasterizer{rasterizer}, macro_interpreter(*this) {
InitializeRegisterDefaults();
}
@@ -105,7 +103,7 @@ void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) {
}
void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
auto debug_context = system.GetGPUDebugContext();
auto debug_context = Core::System::GetInstance().GetGPUDebugContext();
// It is an error to write to a register other than the current macro's ARG register before it
// has finished execution.
@@ -319,7 +317,7 @@ void Maxwell3D::ProcessQueryGet() {
LongQueryResult query_result{};
query_result.value = result;
// TODO(Subv): Generate a real GPU timestamp and write it here instead of CoreTiming
query_result.timestamp = system.CoreTiming().GetTicks();
query_result.timestamp = Core::System::GetInstance().CoreTiming().GetTicks();
Memory::WriteBlock(*address, &query_result, sizeof(query_result));
}
dirty_flags.OnMemoryWrite();
@@ -336,7 +334,7 @@ void Maxwell3D::DrawArrays() {
regs.vertex_buffer.count);
ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");
auto debug_context = system.GetGPUDebugContext();
auto debug_context = Core::System::GetInstance().GetGPUDebugContext();
if (debug_context) {
debug_context->OnEvent(Tegra::DebugContext::Event::IncomingPrimitiveBatch, nullptr);

View File

@@ -17,10 +17,6 @@
#include "video_core/memory_manager.h"
#include "video_core/textures/texture.h"
namespace Core {
class System;
}
namespace VideoCore {
class RasterizerInterface;
}
@@ -32,8 +28,7 @@ namespace Tegra::Engines {
class Maxwell3D final {
public:
explicit Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
MemoryManager& memory_manager);
explicit Maxwell3D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
~Maxwell3D() = default;
/// Register structure of the Maxwell3D engine.
@@ -1136,8 +1131,6 @@ public:
private:
void InitializeRegisterDefaults();
Core::System& system;
VideoCore::RasterizerInterface& rasterizer;
/// Start offsets of each macro in macro_memory

View File

@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "core/core.h"
#include "core/memory.h"
#include "video_core/engines/maxwell_3d.h"
@@ -12,9 +11,8 @@
namespace Tegra::Engines {
MaxwellDMA::MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
MemoryManager& memory_manager)
: memory_manager(memory_manager), system{system}, rasterizer{rasterizer} {}
MaxwellDMA::MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
: memory_manager(memory_manager), rasterizer{rasterizer} {}
void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
ASSERT_MSG(method_call.method < Regs::NUM_REGS,
@@ -61,7 +59,7 @@ void MaxwellDMA::HandleCopy() {
}
// All copies here update the main memory, so mark all rasterizer states as invalid.
system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
if (regs.exec.is_dst_linear && regs.exec.is_src_linear) {
// When the enable_2d bit is disabled, the copy is performed as if we were copying a 1D

View File

@@ -5,16 +5,13 @@
#pragma once
#include <array>
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
namespace Core {
class System;
}
namespace VideoCore {
class RasterizerInterface;
}
@@ -23,8 +20,7 @@ namespace Tegra::Engines {
class MaxwellDMA final {
public:
explicit MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
MemoryManager& memory_manager);
explicit MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
~MaxwellDMA() = default;
/// Write the value to the register identified by method.
@@ -141,8 +137,6 @@ public:
MemoryManager& memory_manager;
private:
Core::System& system;
VideoCore::RasterizerInterface& rasterizer;
/// Performs the copy from the source buffer to the destination buffer as configured in the

View File

@@ -28,14 +28,14 @@ u32 FramebufferConfig::BytesPerPixel(PixelFormat format) {
UNREACHABLE();
}
GPU::GPU(Core::System& system, VideoCore::RasterizerInterface& rasterizer) {
GPU::GPU(VideoCore::RasterizerInterface& rasterizer) {
memory_manager = std::make_unique<Tegra::MemoryManager>();
dma_pusher = std::make_unique<Tegra::DmaPusher>(*this);
maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager);
maxwell_3d = std::make_unique<Engines::Maxwell3D>(rasterizer, *memory_manager);
fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer, *memory_manager);
kepler_compute = std::make_unique<Engines::KeplerCompute>(*memory_manager);
maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, rasterizer, *memory_manager);
kepler_memory = std::make_unique<Engines::KeplerMemory>(system, rasterizer, *memory_manager);
maxwell_dma = std::make_unique<Engines::MaxwellDMA>(rasterizer, *memory_manager);
kepler_memory = std::make_unique<Engines::KeplerMemory>(rasterizer, *memory_manager);
}
GPU::~GPU() = default;

View File

@@ -6,15 +6,12 @@
#include <array>
#include <memory>
#include <vector>
#include "common/common_types.h"
#include "core/hle/service/nvflinger/buffer_queue.h"
#include "video_core/dma_pusher.h"
#include "video_core/memory_manager.h"
namespace Core {
class System;
}
namespace VideoCore {
class RasterizerInterface;
}
@@ -121,7 +118,7 @@ enum class EngineID {
class GPU final {
public:
explicit GPU(Core::System& system, VideoCore::RasterizerInterface& rasterizer);
explicit GPU(VideoCore::RasterizerInterface& rasterizer);
~GPU();
struct MethodCall {

View File

@@ -11,9 +11,7 @@
namespace OpenGL {
OpenGLState OpenGLState::cur_state;
bool OpenGLState::s_rgb_used;
OpenGLState::OpenGLState() {
// These all match default OpenGL values
geometry_shaders.enabled = false;
@@ -114,6 +112,7 @@ void OpenGLState::ApplyDefaultState() {
}
void OpenGLState::ApplySRgb() const {
// sRGB
if (framebuffer_srgb.enabled != cur_state.framebuffer_srgb.enabled) {
if (framebuffer_srgb.enabled) {
// Track if sRGB is used
@@ -126,20 +125,23 @@ void OpenGLState::ApplySRgb() const {
}
void OpenGLState::ApplyCulling() const {
if (cull.enabled != cur_state.cull.enabled) {
// Culling
const bool cull_changed = cull.enabled != cur_state.cull.enabled;
if (cull_changed) {
if (cull.enabled) {
glEnable(GL_CULL_FACE);
} else {
glDisable(GL_CULL_FACE);
}
}
if (cull.enabled) {
if (cull_changed || cull.mode != cur_state.cull.mode) {
glCullFace(cull.mode);
}
if (cull.mode != cur_state.cull.mode) {
glCullFace(cull.mode);
}
if (cull.front_face != cur_state.cull.front_face) {
glFrontFace(cull.front_face);
if (cull_changed || cull.front_face != cur_state.cull.front_face) {
glFrontFace(cull.front_face);
}
}
}
@@ -170,63 +172,72 @@ void OpenGLState::ApplyColorMask() const {
}
void OpenGLState::ApplyDepth() const {
if (depth.test_enabled != cur_state.depth.test_enabled) {
// Depth test
const bool depth_test_changed = depth.test_enabled != cur_state.depth.test_enabled;
if (depth_test_changed) {
if (depth.test_enabled) {
glEnable(GL_DEPTH_TEST);
} else {
glDisable(GL_DEPTH_TEST);
}
}
if (depth.test_func != cur_state.depth.test_func) {
if (depth.test_enabled &&
(depth_test_changed || depth.test_func != cur_state.depth.test_func)) {
glDepthFunc(depth.test_func);
}
// Depth mask
if (depth.write_mask != cur_state.depth.write_mask) {
glDepthMask(depth.write_mask);
}
}
void OpenGLState::ApplyPrimitiveRestart() const {
if (primitive_restart.enabled != cur_state.primitive_restart.enabled) {
const bool primitive_restart_changed =
primitive_restart.enabled != cur_state.primitive_restart.enabled;
if (primitive_restart_changed) {
if (primitive_restart.enabled) {
glEnable(GL_PRIMITIVE_RESTART);
} else {
glDisable(GL_PRIMITIVE_RESTART);
}
}
if (primitive_restart.index != cur_state.primitive_restart.index) {
if (primitive_restart_changed ||
(primitive_restart.enabled &&
primitive_restart.index != cur_state.primitive_restart.index)) {
glPrimitiveRestartIndex(primitive_restart.index);
}
}
void OpenGLState::ApplyStencilTest() const {
if (stencil.test_enabled != cur_state.stencil.test_enabled) {
const bool stencil_test_changed = stencil.test_enabled != cur_state.stencil.test_enabled;
if (stencil_test_changed) {
if (stencil.test_enabled) {
glEnable(GL_STENCIL_TEST);
} else {
glDisable(GL_STENCIL_TEST);
}
}
const auto ConfigStencil = [](GLenum face, const auto& config, const auto& prev_config) {
if (config.test_func != prev_config.test_func || config.test_ref != prev_config.test_ref ||
config.test_mask != prev_config.test_mask) {
glStencilFuncSeparate(face, config.test_func, config.test_ref, config.test_mask);
}
if (config.action_depth_fail != prev_config.action_depth_fail ||
config.action_depth_pass != prev_config.action_depth_pass ||
config.action_stencil_fail != prev_config.action_stencil_fail) {
glStencilOpSeparate(face, config.action_stencil_fail, config.action_depth_fail,
config.action_depth_pass);
}
if (config.write_mask != prev_config.write_mask) {
glStencilMaskSeparate(face, config.write_mask);
}
};
ConfigStencil(GL_FRONT, stencil.front, cur_state.stencil.front);
ConfigStencil(GL_BACK, stencil.back, cur_state.stencil.back);
if (stencil.test_enabled) {
auto config_stencil = [stencil_test_changed](GLenum face, const auto& config,
const auto& prev_config) {
if (stencil_test_changed || config.test_func != prev_config.test_func ||
config.test_ref != prev_config.test_ref ||
config.test_mask != prev_config.test_mask) {
glStencilFuncSeparate(face, config.test_func, config.test_ref, config.test_mask);
}
if (stencil_test_changed || config.action_depth_fail != prev_config.action_depth_fail ||
config.action_depth_pass != prev_config.action_depth_pass ||
config.action_stencil_fail != prev_config.action_stencil_fail) {
glStencilOpSeparate(face, config.action_stencil_fail, config.action_depth_fail,
config.action_depth_pass);
}
if (config.write_mask != prev_config.write_mask) {
glStencilMaskSeparate(face, config.write_mask);
}
};
config_stencil(GL_FRONT, stencil.front, cur_state.stencil.front);
config_stencil(GL_BACK, stencil.back, cur_state.stencil.back);
}
}
// Viewport does not affects glClearBuffer so emulate viewport using scissor test
void OpenGLState::EmulateViewportWithScissor() {
@@ -267,18 +278,19 @@ void OpenGLState::ApplyViewport() const {
updated.depth_range_far != current.depth_range_far) {
glDepthRangeIndexed(i, updated.depth_range_near, updated.depth_range_far);
}
if (updated.scissor.enabled != current.scissor.enabled) {
const bool scissor_changed = updated.scissor.enabled != current.scissor.enabled;
if (scissor_changed) {
if (updated.scissor.enabled) {
glEnablei(GL_SCISSOR_TEST, i);
} else {
glDisablei(GL_SCISSOR_TEST, i);
}
}
if (updated.scissor.x != current.scissor.x || updated.scissor.y != current.scissor.y ||
updated.scissor.width != current.scissor.width ||
updated.scissor.height != current.scissor.height) {
if (updated.scissor.enabled &&
(scissor_changed || updated.scissor.x != current.scissor.x ||
updated.scissor.y != current.scissor.y ||
updated.scissor.width != current.scissor.width ||
updated.scissor.height != current.scissor.height)) {
glScissorIndexed(i, updated.scissor.x, updated.scissor.y, updated.scissor.width,
updated.scissor.height);
}
@@ -290,23 +302,22 @@ void OpenGLState::ApplyViewport() const {
updated.height != current.height) {
glViewport(updated.x, updated.y, updated.width, updated.height);
}
if (updated.depth_range_near != current.depth_range_near ||
updated.depth_range_far != current.depth_range_far) {
glDepthRange(updated.depth_range_near, updated.depth_range_far);
}
if (updated.scissor.enabled != current.scissor.enabled) {
const bool scissor_changed = updated.scissor.enabled != current.scissor.enabled;
if (scissor_changed) {
if (updated.scissor.enabled) {
glEnable(GL_SCISSOR_TEST);
} else {
glDisable(GL_SCISSOR_TEST);
}
}
if (updated.scissor.x != current.scissor.x || updated.scissor.y != current.scissor.y ||
updated.scissor.width != current.scissor.width ||
updated.scissor.height != current.scissor.height) {
if (updated.scissor.enabled && (scissor_changed || updated.scissor.x != current.scissor.x ||
updated.scissor.y != current.scissor.y ||
updated.scissor.width != current.scissor.width ||
updated.scissor.height != current.scissor.height)) {
glScissor(updated.scissor.x, updated.scissor.y, updated.scissor.width,
updated.scissor.height);
}
@@ -316,7 +327,8 @@ void OpenGLState::ApplyViewport() const {
void OpenGLState::ApplyGlobalBlending() const {
const Blend& current = cur_state.blend[0];
const Blend& updated = blend[0];
if (updated.enabled != current.enabled) {
const bool blend_changed = updated.enabled != current.enabled;
if (blend_changed) {
if (updated.enabled) {
glEnable(GL_BLEND);
} else {
@@ -326,14 +338,15 @@ void OpenGLState::ApplyGlobalBlending() const {
if (!updated.enabled) {
return;
}
if (updated.src_rgb_func != current.src_rgb_func ||
if (blend_changed || updated.src_rgb_func != current.src_rgb_func ||
updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func ||
updated.dst_a_func != current.dst_a_func) {
glBlendFuncSeparate(updated.src_rgb_func, updated.dst_rgb_func, updated.src_a_func,
updated.dst_a_func);
}
if (updated.rgb_equation != current.rgb_equation || updated.a_equation != current.a_equation) {
if (blend_changed || updated.rgb_equation != current.rgb_equation ||
updated.a_equation != current.a_equation) {
glBlendEquationSeparate(updated.rgb_equation, updated.a_equation);
}
}
@@ -341,22 +354,26 @@ void OpenGLState::ApplyGlobalBlending() const {
void OpenGLState::ApplyTargetBlending(std::size_t target, bool force) const {
const Blend& updated = blend[target];
const Blend& current = cur_state.blend[target];
if (updated.enabled != current.enabled || force) {
const bool blend_changed = updated.enabled != current.enabled || force;
if (blend_changed) {
if (updated.enabled) {
glEnablei(GL_BLEND, static_cast<GLuint>(target));
} else {
glDisablei(GL_BLEND, static_cast<GLuint>(target));
}
}
if (updated.src_rgb_func != current.src_rgb_func ||
if (!updated.enabled) {
return;
}
if (blend_changed || updated.src_rgb_func != current.src_rgb_func ||
updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func ||
updated.dst_a_func != current.dst_a_func) {
glBlendFuncSeparatei(static_cast<GLuint>(target), updated.src_rgb_func,
updated.dst_rgb_func, updated.src_a_func, updated.dst_a_func);
}
if (updated.rgb_equation != current.rgb_equation || updated.a_equation != current.a_equation) {
if (blend_changed || updated.rgb_equation != current.rgb_equation ||
updated.a_equation != current.a_equation) {
glBlendEquationSeparatei(static_cast<GLuint>(target), updated.rgb_equation,
updated.a_equation);
}
@@ -380,7 +397,8 @@ void OpenGLState::ApplyBlending() const {
}
void OpenGLState::ApplyLogicOp() const {
if (logic_op.enabled != cur_state.logic_op.enabled) {
const bool logic_op_changed = logic_op.enabled != cur_state.logic_op.enabled;
if (logic_op_changed) {
if (logic_op.enabled) {
glEnable(GL_COLOR_LOGIC_OP);
} else {
@@ -388,12 +406,14 @@ void OpenGLState::ApplyLogicOp() const {
}
}
if (logic_op.operation != cur_state.logic_op.operation) {
if (logic_op.enabled &&
(logic_op_changed || logic_op.operation != cur_state.logic_op.operation)) {
glLogicOp(logic_op.operation);
}
}
void OpenGLState::ApplyPolygonOffset() const {
const bool fill_enable_changed =
polygon_offset.fill_enable != cur_state.polygon_offset.fill_enable;
const bool line_enable_changed =
@@ -428,7 +448,9 @@ void OpenGLState::ApplyPolygonOffset() const {
}
}
if (factor_changed || units_changed || clamp_changed) {
if ((polygon_offset.fill_enable || polygon_offset.line_enable || polygon_offset.point_enable) &&
(factor_changed || units_changed || clamp_changed)) {
if (GLAD_GL_EXT_polygon_offset_clamp && polygon_offset.clamp != 0) {
glPolygonOffsetClamp(polygon_offset.factor, polygon_offset.units, polygon_offset.clamp);
} else {
@@ -506,9 +528,9 @@ void OpenGLState::ApplyDepthClamp() const {
depth_clamp.near_plane == cur_state.depth_clamp.near_plane) {
return;
}
UNIMPLEMENTED_IF_MSG(depth_clamp.far_plane != depth_clamp.near_plane,
"Unimplemented Depth Clamp Separation!");
if (depth_clamp.far_plane != depth_clamp.near_plane) {
UNIMPLEMENTED_MSG("Unimplemented Depth Clamp Separation!");
}
if (depth_clamp.far_plane || depth_clamp.near_plane) {
glEnable(GL_DEPTH_CLAMP);
} else {

View File

@@ -380,8 +380,7 @@ void RendererOpenGL::CaptureScreenshot() {
GLuint renderbuffer;
glGenRenderbuffers(1, &renderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, state.GetsRGBUsed() ? GL_SRGB8 : GL_RGB8, layout.width,
layout.height);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB8, layout.width, layout.height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbuffer);
DrawScreen(layout);

View File

@@ -1,285 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <optional>
#include "common/assert.h"
#include "common/logging/log.h"
#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
namespace Vulkan {
// TODO(Rodrigo): Fine tune these numbers.
constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
constexpr std::size_t FENCES_GROW_STEP = 0x40;
class CommandBufferPool final : public VKFencedPool {
public:
CommandBufferPool(const VKDevice& device)
: VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
void Allocate(std::size_t begin, std::size_t end) {
const auto dev = device.GetLogical();
const auto& dld = device.GetDispatchLoader();
const u32 graphics_family = device.GetGraphicsFamily();
auto pool = std::make_unique<Pool>();
// Command buffers are going to be commited, recorded, executed every single usage cycle.
// They are also going to be reseted when commited.
const auto pool_flags = vk::CommandPoolCreateFlagBits::eTransient |
vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
const vk::CommandPoolCreateInfo cmdbuf_pool_ci(pool_flags, graphics_family);
pool->handle = dev.createCommandPoolUnique(cmdbuf_pool_ci, nullptr, dld);
const vk::CommandBufferAllocateInfo cmdbuf_ai(*pool->handle,
vk::CommandBufferLevel::ePrimary,
static_cast<u32>(COMMAND_BUFFER_POOL_SIZE));
pool->cmdbufs =
dev.allocateCommandBuffersUnique<std::allocator<UniqueCommandBuffer>>(cmdbuf_ai, dld);
pools.push_back(std::move(pool));
}
vk::CommandBuffer Commit(VKFence& fence) {
const std::size_t index = CommitResource(fence);
const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
return *pools[pool_index]->cmdbufs[sub_index];
}
private:
struct Pool {
UniqueCommandPool handle;
std::vector<UniqueCommandBuffer> cmdbufs;
};
const VKDevice& device;
std::vector<std::unique_ptr<Pool>> pools;
};
VKResource::VKResource() = default;
VKResource::~VKResource() = default;
VKFence::VKFence(const VKDevice& device, UniqueFence handle)
: device{device}, handle{std::move(handle)} {}
VKFence::~VKFence() = default;
void VKFence::Wait() {
const auto dev = device.GetLogical();
const auto& dld = device.GetDispatchLoader();
dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
}
void VKFence::Release() {
is_owned = false;
}
void VKFence::Commit() {
is_owned = true;
is_used = true;
}
bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
if (!is_used) {
// If a fence is not used it's always free.
return true;
}
if (is_owned && !owner_wait) {
// The fence is still being owned (Release has not been called) and ownership wait has
// not been asked.
return false;
}
const auto dev = device.GetLogical();
const auto& dld = device.GetDispatchLoader();
if (gpu_wait) {
// Wait for the fence if it has been requested.
dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
} else {
if (dev.getFenceStatus(*handle, dld) != vk::Result::eSuccess) {
// Vulkan fence is not ready, not much it can do here
return false;
}
}
// Broadcast resources their free state.
for (auto* resource : protected_resources) {
resource->OnFenceRemoval(this);
}
protected_resources.clear();
// Prepare fence for reusage.
dev.resetFences({*handle}, dld);
is_used = false;
return true;
}
void VKFence::Protect(VKResource* resource) {
protected_resources.push_back(resource);
}
void VKFence::Unprotect(const VKResource* resource) {
const auto it = std::find(protected_resources.begin(), protected_resources.end(), resource);
if (it != protected_resources.end()) {
protected_resources.erase(it);
}
}
VKFenceWatch::VKFenceWatch() = default;
VKFenceWatch::~VKFenceWatch() {
if (fence) {
fence->Unprotect(this);
}
}
void VKFenceWatch::Wait() {
if (!fence) {
return;
}
fence->Wait();
fence->Unprotect(this);
fence = nullptr;
}
void VKFenceWatch::Watch(VKFence& new_fence) {
Wait();
fence = &new_fence;
fence->Protect(this);
}
bool VKFenceWatch::TryWatch(VKFence& new_fence) {
if (fence) {
return false;
}
fence = &new_fence;
fence->Protect(this);
return true;
}
void VKFenceWatch::OnFenceRemoval(VKFence* signaling_fence) {
ASSERT_MSG(signaling_fence == fence, "Removing the wrong fence");
fence = nullptr;
}
VKFencedPool::VKFencedPool(std::size_t grow_step) : grow_step{grow_step} {}
VKFencedPool::~VKFencedPool() = default;
std::size_t VKFencedPool::CommitResource(VKFence& fence) {
const auto Search = [&](std::size_t begin, std::size_t end) -> std::optional<std::size_t> {
for (std::size_t iterator = begin; iterator < end; ++iterator) {
if (watches[iterator]->TryWatch(fence)) {
// The resource is now being watched, a free resource was successfully found.
return iterator;
}
}
return {};
};
// Try to find a free resource from the hinted position to the end.
auto found = Search(free_iterator, watches.size());
if (!found) {
// Search from beginning to the hinted position.
found = Search(0, free_iterator);
if (!found) {
// Both searches failed, the pool is full; handle it.
const std::size_t free_resource = ManageOverflow();
// Watch will wait for the resource to be free.
watches[free_resource]->Watch(fence);
found = free_resource;
}
}
// Free iterator is hinted to the resource after the one that's been commited.
free_iterator = (*found + 1) % watches.size();
return *found;
}
std::size_t VKFencedPool::ManageOverflow() {
const std::size_t old_capacity = watches.size();
Grow();
// The last entry is guaranted to be free, since it's the first element of the freshly
// allocated resources.
return old_capacity;
}
void VKFencedPool::Grow() {
const std::size_t old_capacity = watches.size();
watches.resize(old_capacity + grow_step);
std::generate(watches.begin() + old_capacity, watches.end(),
[]() { return std::make_unique<VKFenceWatch>(); });
Allocate(old_capacity, old_capacity + grow_step);
}
VKResourceManager::VKResourceManager(const VKDevice& device) : device{device} {
GrowFences(FENCES_GROW_STEP);
command_buffer_pool = std::make_unique<CommandBufferPool>(device);
}
VKResourceManager::~VKResourceManager() = default;
VKFence& VKResourceManager::CommitFence() {
const auto StepFences = [&](bool gpu_wait, bool owner_wait) -> VKFence* {
const auto Tick = [=](auto& fence) { return fence->Tick(gpu_wait, owner_wait); };
const auto hinted = fences.begin() + fences_iterator;
auto it = std::find_if(hinted, fences.end(), Tick);
if (it == fences.end()) {
it = std::find_if(fences.begin(), hinted, Tick);
if (it == hinted) {
return nullptr;
}
}
fences_iterator = std::distance(fences.begin(), it) + 1;
if (fences_iterator >= fences.size())
fences_iterator = 0;
auto& fence = *it;
fence->Commit();
return fence.get();
};
VKFence* found_fence = StepFences(false, false);
if (!found_fence) {
// Try again, this time waiting.
found_fence = StepFences(true, false);
if (!found_fence) {
// Allocate new fences and try again.
LOG_INFO(Render_Vulkan, "Allocating new fences {} -> {}", fences.size(),
fences.size() + FENCES_GROW_STEP);
GrowFences(FENCES_GROW_STEP);
found_fence = StepFences(true, false);
ASSERT(found_fence != nullptr);
}
}
return *found_fence;
}
vk::CommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
return command_buffer_pool->Commit(fence);
}
void VKResourceManager::GrowFences(std::size_t new_fences_count) {
const auto dev = device.GetLogical();
const auto& dld = device.GetDispatchLoader();
const vk::FenceCreateInfo fence_ci;
const std::size_t previous_size = fences.size();
fences.resize(previous_size + new_fences_count);
std::generate(fences.begin() + previous_size, fences.end(), [&]() {
return std::make_unique<VKFence>(device, dev.createFenceUnique(fence_ci, nullptr, dld));
});
}
} // namespace Vulkan

View File

@@ -1,180 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <cstddef>
#include <memory>
#include <vector>
#include "video_core/renderer_vulkan/declarations.h"
namespace Vulkan {
class VKDevice;
class VKFence;
class VKResourceManager;
class CommandBufferPool;
/// Interface for a Vulkan resource
class VKResource {
public:
explicit VKResource();
virtual ~VKResource();
/**
* Signals the object that an owning fence has been signaled.
* @param signaling_fence Fence that signals its usage end.
*/
virtual void OnFenceRemoval(VKFence* signaling_fence) = 0;
};
/**
* Fences take ownership of objects, protecting them from GPU-side or driver-side concurrent access.
* They must be commited from the resource manager. Their usage flow is: commit the fence from the
* resource manager, protect resources with it and use them, send the fence to an execution queue
* and Wait for it if needed and then call Release. Used resources will automatically be signaled
* when they are free to be reused.
* @brief Protects resources for concurrent usage and signals its release.
*/
class VKFence {
friend class VKResourceManager;
public:
explicit VKFence(const VKDevice& device, UniqueFence handle);
~VKFence();
/**
* Waits for the fence to be signaled.
* @warning You must have ownership of the fence and it has to be previously sent to a queue to
* call this function.
*/
void Wait();
/**
* Releases ownership of the fence. Pass after it has been sent to an execution queue.
* Unmanaged usage of the fence after the call will result in undefined behavior because it may
* be being used for something else.
*/
void Release();
/// Protects a resource with this fence.
void Protect(VKResource* resource);
/// Removes protection for a resource.
void Unprotect(const VKResource* resource);
/// Retreives the fence.
operator vk::Fence() const {
return *handle;
}
private:
/// Take ownership of the fence.
void Commit();
/**
* Updates the fence status.
* @warning Waiting for the owner might soft lock the execution.
* @param gpu_wait Wait for the fence to be signaled by the driver.
* @param owner_wait Wait for the owner to signal its freedom.
* @returns True if the fence is free. Waiting for gpu and owner will always return true.
*/
bool Tick(bool gpu_wait, bool owner_wait);
const VKDevice& device; ///< Device handler
UniqueFence handle; ///< Vulkan fence
std::vector<VKResource*> protected_resources; ///< List of resources protected by this fence
bool is_owned = false; ///< The fence has been commited but not released yet.
bool is_used = false; ///< The fence has been commited but it has not been checked to be free.
};
/**
* A fence watch is used to keep track of the usage of a fence and protect a resource or set of
* resources without having to inherit VKResource from their handlers.
*/
class VKFenceWatch final : public VKResource {
public:
explicit VKFenceWatch();
~VKFenceWatch();
/// Waits for the fence to be released.
void Wait();
/**
* Waits for a previous fence and watches a new one.
* @param new_fence New fence to wait to.
*/
void Watch(VKFence& new_fence);
/**
* Checks if it's currently being watched and starts watching it if it's available.
* @returns True if a watch has started, false if it's being watched.
*/
bool TryWatch(VKFence& new_fence);
void OnFenceRemoval(VKFence* signaling_fence) override;
private:
VKFence* fence{}; ///< Fence watching this resource. nullptr when the watch is free.
};
/**
* Handles a pool of resources protected by fences. Manages resource overflow allocating more
* resources.
*/
class VKFencedPool {
public:
explicit VKFencedPool(std::size_t grow_step);
virtual ~VKFencedPool();
protected:
/**
* Commits a free resource and protects it with a fence. It may allocate new resources.
* @param fence Fence that protects the commited resource.
* @returns Index of the resource commited.
*/
std::size_t CommitResource(VKFence& fence);
/// Called when a chunk of resources have to be allocated.
virtual void Allocate(std::size_t begin, std::size_t end) = 0;
private:
/// Manages pool overflow allocating new resources.
std::size_t ManageOverflow();
/// Allocates a new page of resources.
void Grow();
std::size_t grow_step = 0; ///< Number of new resources created after an overflow
std::size_t free_iterator = 0; ///< Hint to where the next free resources is likely to be found
std::vector<std::unique_ptr<VKFenceWatch>> watches; ///< Set of watched resources
};
/**
* The resource manager handles all resources that can be protected with a fence avoiding
* driver-side or GPU-side concurrent usage. Usage is documented in VKFence.
*/
class VKResourceManager final {
public:
explicit VKResourceManager(const VKDevice& device);
~VKResourceManager();
/// Commits a fence. It has to be sent to a queue and released.
VKFence& CommitFence();
/// Commits an unused command buffer and protects it with a fence.
vk::CommandBuffer CommitCommandBuffer(VKFence& fence);
private:
/// Allocates new fences.
void GrowFences(std::size_t new_fences_count);
const VKDevice& device; ///< Device handler.
std::size_t fences_iterator = 0; ///< Index where a free fence is likely to be found.
std::vector<std::unique_ptr<VKFence>> fences; ///< Pool of fences.
std::unique_ptr<CommandBufferPool> command_buffer_pool; ///< Pool of command buffers.
};
} // namespace Vulkan