Compare commits
31 Commits

| SHA1 |
|---|
| 5ca63d0675 |
| 48e6f77c03 |
| c3471bf618 |
| da1b45de34 |
| 1cffd3848b |
| 93c1630570 |
| 04d7b7e09d |
| 8250f9bb1c |
| 7cdeec20ec |
| a12f4efa2f |
| c1b2e35625 |
| be7dad5e7e |
| c07987dfab |
| c4243c07cc |
| c6170565b5 |
| 57985fb16a |
| 517933adcb |
| 030814b1cb |
| 90c780e6f3 |
| f7090bacc5 |
| d062991643 |
| 4ab978d670 |
| 92050c4d86 |
| abef11a540 |
| f546fb35ed |
| 94b27bb8a5 |
| 6dd40976d0 |
| 717394c980 |
| b675c97cdd |
| 10682ad7e0 |
| bb41683394 |
@@ -73,6 +73,7 @@ set(HASH_FILES
     "${VIDEO_CORE}/shader/decode/integer_set.cpp"
     "${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp"
     "${VIDEO_CORE}/shader/decode/memory.cpp"
+    "${VIDEO_CORE}/shader/decode/texture.cpp"
     "${VIDEO_CORE}/shader/decode/other.cpp"
     "${VIDEO_CORE}/shader/decode/predicate_set_predicate.cpp"
     "${VIDEO_CORE}/shader/decode/predicate_set_register.cpp"
externals/Vulkan-Headers (vendored submodule)
Submodule externals/Vulkan-Headers updated: 7f02d9bb81...15e5c4db75
@@ -68,8 +68,8 @@ std::vector<s16> DecodeADPCM(const u8* const data, std::size_t size, const ADPCM
         }
     }

-    state.yn1 = yn1;
-    state.yn2 = yn2;
+    state.yn1 = static_cast<s16>(yn1);
+    state.yn2 = static_cast<s16>(yn2);

     return ret;
 }
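The only functional change here is the explicit narrowing: the filter accumulators are wider integers, and assigning them straight into the s16 state fields relied on an implicit conversion. A minimal standalone sketch of the idiom (not yuzu's actual decoder; the clamping policy is illustrative):

```cpp
#include <cstdint>

// Sketch: ADPCM-style filters accumulate in a wider type to avoid
// intermediate overflow, then narrow once at the end. Spelling out the
// static_cast documents intent and silences -Wconversion-style warnings.
std::int16_t NarrowToS16(int accumulator) {
    if (accumulator > 32767) {
        return 32767; // clamp positive overflow (illustrative policy)
    }
    if (accumulator < -32768) {
        return -32768; // clamp negative overflow
    }
    return static_cast<std::int16_t>(accumulator); // explicit narrowing
}
```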
@@ -46,7 +46,7 @@ public:
         }
     }

-    ~CubebSinkStream() {
+    ~CubebSinkStream() override {
         if (!ctx) {
             return;
         }
@@ -75,11 +75,11 @@ public:
         queue.Push(samples);
     }

-    std::size_t SamplesInQueue(u32 num_channels) const override {
+    std::size_t SamplesInQueue(u32 channel_count) const override {
         if (!ctx)
             return 0;

-        return queue.Size() / num_channels;
+        return queue.Size() / channel_count;
     }

     void Flush() override {
@@ -98,7 +98,7 @@ private:
     u32 num_channels{};

     Common::RingBuffer<s16, 0x10000> queue;
-    std::array<s16, 2> last_frame;
+    std::array<s16, 2> last_frame{};
     std::atomic<bool> should_flush{};
     TimeStretcher time_stretch;
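Two small hardening changes in this class: marking the destructor `override` and value-initializing `last_frame`. A hypothetical sketch (the class names are stand-ins) of what each buys:

```cpp
#include <array>
#include <cstdint>

struct SinkStream { // hypothetical base, for illustration only
    virtual ~SinkStream() = default;
};

struct ExampleStream : SinkStream {
    // 'override' makes the compiler verify this really overrides a virtual
    // member of the base; a signature mismatch becomes a hard error.
    ~ExampleStream() override = default;

    // '{}' value-initializes the array to zeros; without it, the two s16
    // elements would hold indeterminate values until the first audio frame.
    std::array<std::int16_t, 2> last_frame{};
};
```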
@@ -47,6 +47,7 @@ add_custom_command(OUTPUT scm_rev.cpp
     "${VIDEO_CORE}/shader/decode/integer_set.cpp"
     "${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp"
     "${VIDEO_CORE}/shader/decode/memory.cpp"
+    "${VIDEO_CORE}/shader/decode/texture.cpp"
     "${VIDEO_CORE}/shader/decode/other.cpp"
     "${VIDEO_CORE}/shader/decode/predicate_set_predicate.cpp"
     "${VIDEO_CORE}/shader/decode/predicate_set_register.cpp"
@@ -28,8 +28,8 @@
 #include <cstring>
 #include "common/common_types.h"

-// GCC 4.6+
-#if __GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+// GCC
+#ifdef __GNUC__

 #if __BYTE_ORDER__ && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) && !defined(COMMON_LITTLE_ENDIAN)
 #define COMMON_LITTLE_ENDIAN 1
@@ -38,7 +38,7 @@
 #endif

 // LLVM/clang
-#elif __clang__
+#elif defined(__clang__)

 #if __LITTLE_ENDIAN__ && !defined(COMMON_LITTLE_ENDIAN)
 #define COMMON_LITTLE_ENDIAN 1
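The preprocessor fix matters because a bare `#elif __clang__` evaluates an identifier that is undefined on other compilers; the preprocessor substitutes 0, which works but trips `-Wundef`. `defined()` avoids evaluating the macro at all. A minimal sketch of the pattern (the resulting macro name is illustrative):

```cpp
// Prefer defined(X) over evaluating X directly in #if/#elif chains.
// On GCC, __clang__ is not defined, so '#elif __clang__' relies on the
// undefined-identifier-becomes-0 rule and warns under -Wundef.
#if defined(__GNUC__) && !defined(__clang__)
#define COMPILER_NAME "gcc"
#elif defined(__clang__)
#define COMPILER_NAME "clang"
#else
#define COMPILER_NAME "other"
#endif
```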
@@ -47,7 +47,7 @@ std::size_t VectorVfsFile::Write(const u8* data_, std::size_t length, std::size_
     if (offset + length > data.size())
         data.resize(offset + length);
     const auto write = std::min(length, data.size() - offset);
-    std::memcpy(data.data(), data_, write);
+    std::memcpy(data.data() + offset, data_, write);
     return write;
 }
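The one-line bug fixed above: the copy landed at the start of the backing vector instead of at `offset`, so any write at a non-zero offset clobbered the head of the file. A standalone sketch of the corrected write-at-offset pattern:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Sketch of the corrected logic: grow the backing store if needed, then
// copy into data() + offset, not data().
std::size_t WriteAt(std::vector<std::uint8_t>& data, const std::uint8_t* src,
                    std::size_t length, std::size_t offset) {
    if (offset + length > data.size()) {
        data.resize(offset + length); // grow to fit the write
    }
    const std::size_t write = std::min(length, data.size() - offset);
    std::memcpy(data.data() + offset, src, write); // the fixed destination
    return write;
}
```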
@@ -752,6 +752,7 @@ public:
             {1102, nullptr, "GetDisplayResolution"},
             {2010, &IManagerDisplayService::CreateManagedLayer, "CreateManagedLayer"},
+            {2011, nullptr, "DestroyManagedLayer"},
             {2012, nullptr, "CreateStrayLayer"},
             {2050, nullptr, "CreateIndirectLayer"},
             {2051, nullptr, "DestroyIndirectLayer"},
             {2052, nullptr, "CreateIndirectProducerEndPoint"},
@@ -74,6 +74,7 @@ add_library(video_core STATIC
     shader/decode/hfma2.cpp
     shader/decode/conversion.cpp
     shader/decode/memory.cpp
+    shader/decode/texture.cpp
     shader/decode/float_set_predicate.cpp
     shader/decode/integer_set_predicate.cpp
     shader/decode/half_set_predicate.cpp
@@ -106,8 +107,12 @@ if (ENABLE_VULKAN)
         renderer_vulkan/declarations.h
         renderer_vulkan/vk_device.cpp
         renderer_vulkan/vk_device.h
+        renderer_vulkan/vk_memory_manager.cpp
+        renderer_vulkan/vk_memory_manager.h
         renderer_vulkan/vk_resource_manager.cpp
-        renderer_vulkan/vk_resource_manager.h)
+        renderer_vulkan/vk_resource_manager.h
+        renderer_vulkan/vk_scheduler.cpp
+        renderer_vulkan/vk_scheduler.h)

     target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include)
     target_compile_definitions(video_core PRIVATE HAS_VULKAN)
@@ -33,18 +33,36 @@ void DmaPusher::DispatchCalls() {
 }

 bool DmaPusher::Step() {
-    if (dma_get != dma_put) {
-        // Push buffer non-empty, read a word
-        const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get);
-        ASSERT_MSG(address, "Invalid GPU address");
+    if (!ib_enable || dma_pushbuffer.empty()) {
+        // pushbuffer empty and IB empty or nonexistent - nothing to do
+        return false;
+    }

-        const CommandHeader command_header{Memory::Read32(*address)};
+    const CommandList& command_list{dma_pushbuffer.front()};
+    const CommandListHeader& command_list_header{command_list[dma_pushbuffer_subindex++]};
+    GPUVAddr dma_get = command_list_header.addr;
+    GPUVAddr dma_put = dma_get + command_list_header.size * sizeof(u32);
+    bool non_main = command_list_header.is_non_main;

-        dma_get += sizeof(u32);
+    if (dma_pushbuffer_subindex >= command_list.size()) {
+        // We've gone through the current list, remove it from the queue
+        dma_pushbuffer.pop();
+        dma_pushbuffer_subindex = 0;
+    }

-        if (!non_main) {
-            dma_mget = dma_get;
-        }
+    if (command_list_header.size == 0) {
+        return true;
+    }
+
+    // Push buffer non-empty, read a word
+    const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get);
+    ASSERT_MSG(address, "Invalid GPU address");
+
+    command_headers.resize(command_list_header.size);
+
+    Memory::ReadBlock(*address, command_headers.data(), command_list_header.size * sizeof(u32));
+
+    for (const CommandHeader& command_header : command_headers) {

         // now, see if we're in the middle of a command
         if (dma_state.length_pending) {
@@ -91,22 +109,11 @@ bool DmaPusher::Step() {
                 break;
             }
         }
-    } else if (ib_enable && !dma_pushbuffer.empty()) {
-        // Current pushbuffer empty, but we have more IB entries to read
-        const CommandList& command_list{dma_pushbuffer.front()};
-        const CommandListHeader& command_list_header{command_list[dma_pushbuffer_subindex++]};
-        dma_get = command_list_header.addr;
-        dma_put = dma_get + command_list_header.size * sizeof(u32);
-        non_main = command_list_header.is_non_main;
-
-        if (dma_pushbuffer_subindex >= command_list.size()) {
-            // We've gone through the current list, remove it from the queue
-            dma_pushbuffer.pop();
-            dma_pushbuffer_subindex = 0;
-        }
-    } else {
-        // Otherwise, pushbuffer empty and IB empty or nonexistent - nothing to do
-        return {};
+    }
+
+    if (!non_main) {
+        // TODO (degasus): This is dead code, as dma_mget is never read.
+        dma_mget = dma_put;
     }

     return true;
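The rewrite above changes Step() from pulling one 32-bit word out of guest memory per iteration to fetching a whole command list with a single block read, then walking it in host memory. A simplified sketch of that batching pattern; the types and the guest-memory source here are stand-ins, not yuzu's real interfaces:

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for yuzu's CommandHeader bitfield struct.
using CommandHeader = std::uint32_t;

// Sketch: size a local buffer to the whole command list, fetch it with one
// bulk copy, and decode without touching guest memory again per word.
void FetchCommandList(const std::uint8_t* guest_memory, std::size_t word_count,
                      std::vector<CommandHeader>& command_headers) {
    command_headers.resize(word_count);
    std::memcpy(command_headers.data(), guest_memory,
                word_count * sizeof(CommandHeader)); // one block read
    for (const CommandHeader header : command_headers) {
        // ... decode each header entirely from the host-side buffer
        (void)header;
    }
}
```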
@@ -75,6 +75,8 @@ private:

     GPU& gpu;

+    std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once
+
     std::queue<CommandList> dma_pushbuffer;  ///< Queue of command lists to be processed
     std::size_t dma_pushbuffer_subindex{};   ///< Index within a command list within the pushbuffer

@@ -89,11 +91,8 @@ private:
     DmaState dma_state{};
     bool dma_increment_once{};

-    GPUVAddr dma_put{};   ///< pushbuffer current end address
-    GPUVAddr dma_get{};   ///< pushbuffer current read address
     GPUVAddr dma_mget{};  ///< main pushbuffer last read address
     bool ib_enable{true}; ///< IB mode enabled
-    bool non_main{};      ///< non-main pushbuffer active
 };

 } // namespace Tegra
@@ -325,11 +325,11 @@ enum class TextureQueryType : u64 {

 enum class TextureProcessMode : u64 {
     None = 0,
-    LZ = 1,  // Unknown, appears to be the same as none.
+    LZ = 1,  // Load LOD of zero.
     LB = 2,  // Load Bias.
-    LL = 3,  // Load LOD (LevelOfDetail)
-    LBA = 6, // Load Bias. The A is unknown, does not appear to differ with LB
-    LLA = 7  // Load LOD. The A is unknown, does not appear to differ with LL
+    LL = 3,  // Load LOD.
+    LBA = 6, // Load Bias. The A is unknown, does not appear to differ with LB.
+    LLA = 7  // Load LOD. The A is unknown, does not appear to differ with LL.
 };

 enum class TextureMiscMode : u64 {
@@ -376,9 +376,9 @@ enum class R2pMode : u64 {
 };

 enum class IpaInterpMode : u64 {
-    Linear = 0,
-    Perspective = 1,
-    Flat = 2,
+    Pass = 0,
+    Multiply = 1,
+    Constant = 2,
     Sc = 3,
 };
@@ -1446,6 +1446,7 @@ public:
         Flow,
         Synch,
         Memory,
+        Texture,
         FloatSet,
         FloatSetPredicate,
         IntegerSet,
@@ -1576,14 +1577,14 @@ private:
             INST("1110111101010---", Id::ST_L, Type::Memory, "ST_L"),
             INST("1110111011010---", Id::LDG, Type::Memory, "LDG"),
             INST("1110111011011---", Id::STG, Type::Memory, "STG"),
-            INST("110000----111---", Id::TEX, Type::Memory, "TEX"),
-            INST("1101111101001---", Id::TXQ, Type::Memory, "TXQ"),
-            INST("1101-00---------", Id::TEXS, Type::Memory, "TEXS"),
-            INST("1101101---------", Id::TLDS, Type::Memory, "TLDS"),
-            INST("110010----111---", Id::TLD4, Type::Memory, "TLD4"),
-            INST("1101111100------", Id::TLD4S, Type::Memory, "TLD4S"),
-            INST("110111110110----", Id::TMML_B, Type::Memory, "TMML_B"),
-            INST("1101111101011---", Id::TMML, Type::Memory, "TMML"),
+            INST("110000----111---", Id::TEX, Type::Texture, "TEX"),
+            INST("1101111101001---", Id::TXQ, Type::Texture, "TXQ"),
+            INST("1101-00---------", Id::TEXS, Type::Texture, "TEXS"),
+            INST("1101101---------", Id::TLDS, Type::Texture, "TLDS"),
+            INST("110010----111---", Id::TLD4, Type::Texture, "TLD4"),
+            INST("1101111100------", Id::TLD4S, Type::Texture, "TLD4S"),
+            INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"),
+            INST("1101111101011---", Id::TMML, Type::Texture, "TMML"),
             INST("111000110000----", Id::EXIT, Type::Trivial, "EXIT"),
             INST("11100000--------", Id::IPA, Type::Trivial, "IPA"),
             INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"),
@@ -16,6 +16,13 @@ enum class OutputTopology : u32 {
     TriangleStrip = 7,
 };

+enum class AttributeUse : u8 {
+    Unused = 0,
+    Constant = 1,
+    Perspective = 2,
+    ScreenLinear = 3,
+};
+
 // Documentation in:
 // http://download.nvidia.com/open-gpu-doc/Shader-Program-Header/1/Shader-Program-Header.html#ImapTexture
 struct Header {
@@ -84,9 +91,15 @@ struct Header {
     } vtg;

     struct {
-        INSERT_PADDING_BYTES(3);  // ImapSystemValuesA
-        INSERT_PADDING_BYTES(1);  // ImapSystemValuesB
-        INSERT_PADDING_BYTES(32); // ImapGenericVector[32]
+        INSERT_PADDING_BYTES(3); // ImapSystemValuesA
+        INSERT_PADDING_BYTES(1); // ImapSystemValuesB
+        union {
+            BitField<0, 2, AttributeUse> x;
+            BitField<2, 2, AttributeUse> y;
+            BitField<4, 2, AttributeUse> w;
+            BitField<6, 2, AttributeUse> z;
+            u8 raw;
+        } imap_generic_vector[32];
         INSERT_PADDING_BYTES(2);  // ImapColor
         INSERT_PADDING_BYTES(2);  // ImapSystemValuesC
         INSERT_PADDING_BYTES(10); // ImapFixedFncTexture[10]
@@ -103,6 +116,28 @@ struct Header {
             const u32 bit = render_target * 4 + component;
             return omap.target & (1 << bit);
         }
+
+        AttributeUse GetAttributeIndexUse(u32 attribute, u32 index) const {
+            return static_cast<AttributeUse>(
+                (imap_generic_vector[attribute].raw >> (index * 2)) & 0x03);
+        }
+
+        AttributeUse GetAttributeUse(u32 attribute) const {
+            AttributeUse result = AttributeUse::Unused;
+            for (u32 i = 0; i < 4; i++) {
+                const auto index = GetAttributeIndexUse(attribute, i);
+                if (index == AttributeUse::Unused) {
+                    continue;
+                }
+                if (result == AttributeUse::Unused || result == index) {
+                    result = index;
+                    continue;
+                }
+                LOG_CRITICAL(HW_GPU, "Generic Attribute Conflict in Interpolation Mode");
+                if (index == AttributeUse::Perspective) {
+                    result = index;
+                }
+            }
+            return result;
+        }
     } ps;
 };
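Each byte of `imap_generic_vector` packs four 2-bit `AttributeUse` values, one per component, which is what `GetAttributeIndexUse` extracts with a shift and mask. A self-contained sketch of that decode:

```cpp
#include <cstdint>

enum class AttributeUse : std::uint8_t {
    Unused = 0,
    Constant = 1,
    Perspective = 2,
    ScreenLinear = 3,
};

// Component i of a packed byte lives in bits [2*i, 2*i + 1].
AttributeUse DecodeComponent(std::uint8_t raw, unsigned index) {
    return static_cast<AttributeUse>((raw >> (index * 2)) & 0x03);
}

// Example: raw = 0b10'01'00'11 decodes to
//   index 0 -> ScreenLinear (0b11), index 1 -> Unused (0b00),
//   index 2 -> Constant (0b01),     index 3 -> Perspective (0b10).
```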
@@ -423,7 +423,7 @@ void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params,
     for (u32 i = 0; i < params.depth; i++) {
         MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level),
                       params.MipBlockHeight(mip_level), params.MipHeight(mip_level),
-                      params.MipBlockDepth(mip_level), params.tile_width_spacing, 1,
+                      params.MipBlockDepth(mip_level), 1, params.tile_width_spacing,
                       gl_buffer.data() + offset_gl, gl_size, params.addr + offset);
         offset += layer_size;
         offset_gl += gl_size;
@@ -1257,7 +1257,11 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface,
     case SurfaceTarget::TextureCubemap:
     case SurfaceTarget::Texture2DArray:
     case SurfaceTarget::TextureCubeArray:
-        FastLayeredCopySurface(old_surface, new_surface);
+        if (old_params.pixel_format == new_params.pixel_format)
+            FastLayeredCopySurface(old_surface, new_surface);
+        else {
+            AccurateCopySurface(old_surface, new_surface);
+        }
         break;
     default:
         LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
@@ -5,7 +5,9 @@
 #include <array>
 #include <string>
+#include <string_view>
+#include <utility>
 #include <variant>
 #include <vector>

 #include <fmt/format.h>

@@ -20,6 +22,7 @@
 namespace OpenGL::GLShader {

 using Tegra::Shader::Attribute;
+using Tegra::Shader::AttributeUse;
 using Tegra::Shader::Header;
 using Tegra::Shader::IpaInterpMode;
 using Tegra::Shader::IpaMode;
@@ -288,34 +291,22 @@ private:
         code.AddNewLine();
     }

-    std::string GetInputFlags(const IpaMode& input_mode) {
-        const IpaSampleMode sample_mode = input_mode.sampling_mode;
-        const IpaInterpMode interp_mode = input_mode.interpolation_mode;
+    std::string GetInputFlags(AttributeUse attribute) {
         std::string out;

-        switch (interp_mode) {
-        case IpaInterpMode::Flat:
+        switch (attribute) {
+        case AttributeUse::Constant:
             out += "flat ";
             break;
-        case IpaInterpMode::Linear:
+        case AttributeUse::ScreenLinear:
             out += "noperspective ";
             break;
-        case IpaInterpMode::Perspective:
+        case AttributeUse::Perspective:
             // Default, Smooth
             break;
         default:
-            UNIMPLEMENTED_MSG("Unhandled IPA interp mode: {}", static_cast<u32>(interp_mode));
-        }
-        switch (sample_mode) {
-        case IpaSampleMode::Centroid:
-            // It can be implemented with the "centroid " keyword in GLSL
-            UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode centroid");
-            break;
-        case IpaSampleMode::Default:
-            // Default, n/a
-            break;
-        default:
-            UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode: {}", static_cast<u32>(sample_mode));
+            LOG_CRITICAL(HW_GPU, "Unused attribute being fetched");
+            UNREACHABLE();
         }
         return out;
     }
@@ -324,16 +315,11 @@ private:
         const auto& attributes = ir.GetInputAttributes();
         for (const auto element : attributes) {
             const Attribute::Index index = element.first;
-            const IpaMode& input_mode = *element.second.begin();
             if (index < Attribute::Index::Attribute_0 || index > Attribute::Index::Attribute_31) {
                 // Skip when it's not a generic attribute
                 continue;
             }

-            ASSERT(element.second.size() > 0);
-            UNIMPLEMENTED_IF_MSG(element.second.size() > 1,
-                                 "Multiple input flag modes are not supported in GLSL");
-
             // TODO(bunnei): Use proper number of elements for these
             u32 idx = static_cast<u32>(index) - static_cast<u32>(Attribute::Index::Attribute_0);
             if (stage != ShaderStage::Vertex) {
@@ -345,8 +331,14 @@ private:
             if (stage == ShaderStage::Geometry) {
                 attr = "gs_" + attr + "[]";
             }
-            code.AddLine("layout (location = " + std::to_string(idx) + ") " +
-                         GetInputFlags(input_mode) + "in vec4 " + attr + ';');
+            std::string suffix;
+            if (stage == ShaderStage::Fragment) {
+                const auto input_mode =
+                    header.ps.GetAttributeUse(idx - GENERIC_VARYING_START_LOCATION);
+                suffix = GetInputFlags(input_mode);
+            }
+            code.AddLine("layout (location = " + std::to_string(idx) + ") " + suffix + "in vec4 " +
+                         attr + ';');
         }
         if (!attributes.empty())
             code.AddNewLine();
@@ -727,7 +719,7 @@ private:
     }

     std::string GenerateTexture(Operation operation, const std::string& func,
-                                bool is_extra_int = false) {
+                                const std::vector<std::pair<Type, Node>>& extras) {
         constexpr std::array<const char*, 4> coord_constructors = {"float", "vec2", "vec3", "vec4"};

         const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
@@ -748,36 +740,47 @@ private:
             expr += Visit(operation[i]);

             const std::size_t next = i + 1;
-            if (next < count || has_array || has_shadow)
+            if (next < count)
                 expr += ", ";
         }
         if (has_array) {
-            expr += "float(ftoi(" + Visit(meta->array) + "))";
+            expr += ", float(ftoi(" + Visit(meta->array) + "))";
         }
         if (has_shadow) {
-            if (has_array)
-                expr += ", ";
-            expr += Visit(meta->depth_compare);
+            expr += ", " + Visit(meta->depth_compare);
         }
-        expr += ')';

-        for (const Node extra : meta->extras) {
+        for (const auto& extra_pair : extras) {
+            const auto [type, operand] = extra_pair;
+            if (operand == nullptr) {
+                continue;
+            }
             expr += ", ";
-            if (is_extra_int) {
-                if (const auto immediate = std::get_if<ImmediateNode>(extra)) {
+
+            switch (type) {
+            case Type::Int:
+                if (const auto immediate = std::get_if<ImmediateNode>(operand)) {
                     // Inline the string as an immediate integer in GLSL (some extra arguments are
                     // required to be constant)
                     expr += std::to_string(static_cast<s32>(immediate->GetValue()));
                 } else {
-                    expr += "ftoi(" + Visit(extra) + ')';
+                    expr += "ftoi(" + Visit(operand) + ')';
                 }
-            } else {
-                expr += Visit(extra);
+                break;
+            case Type::Float:
+                expr += Visit(operand);
+                break;
+            default: {
+                const auto type_int = static_cast<u32>(type);
+                UNIMPLEMENTED_MSG("Unimplemented extra type={}", type_int);
+                expr += '0';
+                break;
+            }
             }
         }

-        expr += ')';
-        return expr;
+        return expr + ')';
     }

     std::string Assign(Operation operation) {
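`GenerateTexture` now takes a typed list of optional extra operands instead of a `bool is_extra_int`, so call sites can pass `meta->bias`, `meta->lod`, or `meta->component` unconditionally and null entries are filtered in one place. A minimal sketch of the pattern with stand-in types (`Node` here is just a nullable string, not the decompiler's IR node):

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

enum class Type { Int, Float };
using Node = const char*; // stand-in: nullptr means "operand absent"

// Appends each present extra operand with a per-type conversion, then
// closes the GLSL call. Null operands are skipped centrally.
std::string AppendExtras(std::string expr,
                         const std::vector<std::pair<Type, Node>>& extras) {
    for (const auto& [type, operand] : extras) {
        if (operand == nullptr) {
            continue; // this call site has no such operand
        }
        expr += ", ";
        expr += (type == Type::Int) ? std::string("ftoi(") + operand + ")"
                                    : std::string(operand);
    }
    return expr + ')';
}

int main() {
    // A "texture" call with a float bias and no integer operand:
    std::cout << AppendExtras("texture(sampler, coords", {{Type::Float, "bias"}})
              << '\n'; // prints: texture(sampler, coords, bias)
}
```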
@@ -1156,7 +1159,7 @@ private:
         const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
         ASSERT(meta);

-        std::string expr = GenerateTexture(operation, "texture");
+        std::string expr = GenerateTexture(operation, "texture", {{Type::Float, meta->bias}});
         if (meta->sampler.IsShadow()) {
             expr = "vec4(" + expr + ')';
         }
@@ -1167,7 +1170,7 @@ private:
         const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
         ASSERT(meta);

-        std::string expr = GenerateTexture(operation, "textureLod");
+        std::string expr = GenerateTexture(operation, "textureLod", {{Type::Float, meta->lod}});
         if (meta->sampler.IsShadow()) {
             expr = "vec4(" + expr + ')';
         }
@@ -1178,7 +1181,8 @@ private:
         const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
         ASSERT(meta);

-        return GenerateTexture(operation, "textureGather", !meta->sampler.IsShadow()) +
+        const auto type = meta->sampler.IsShadow() ? Type::Float : Type::Int;
+        return GenerateTexture(operation, "textureGather", {{type, meta->component}}) +
                GetSwizzle(meta->element);
     }
@@ -1207,8 +1211,8 @@ private:
         ASSERT(meta);

         if (meta->element < 2) {
-            return "itof(int((" + GenerateTexture(operation, "textureQueryLod") + " * vec2(256))" +
-                   GetSwizzle(meta->element) + "))";
+            return "itof(int((" + GenerateTexture(operation, "textureQueryLod", {}) +
+                   " * vec2(256))" + GetSwizzle(meta->element) + "))";
         }
         return "0";
     }
@@ -1234,9 +1238,9 @@ private:
             else if (next < count)
                 expr += ", ";
         }
-        for (std::size_t i = 0; i < meta->extras.size(); ++i) {
+        if (meta->lod) {
             expr += ", ";
-            expr += CastOperand(Visit(meta->extras.at(i)), Type::Int);
+            expr += CastOperand(Visit(meta->lod), Type::Int);
         }
         expr += ')';
@@ -1584,4 +1588,4 @@ ProgramResult Decompile(const ShaderIR& ir, Maxwell::ShaderStage stage, const st
     return {decompiler.GetResult(), decompiler.GetShaderEntries()};
 }

-} // namespace OpenGL::GLShader
\ No newline at end of file
+} // namespace OpenGL::GLShader
@@ -124,7 +124,7 @@ layout (location = 5) out vec4 FragColor5;
 layout (location = 6) out vec4 FragColor6;
 layout (location = 7) out vec4 FragColor7;

-layout (location = 0) in vec4 position;
+layout (location = 0) in noperspective vec4 position;

 layout (std140, binding = EMULATION_UBO_BINDING) uniform fs_config {
     vec4 viewport_flip;
@@ -172,4 +172,4 @@ void main() {
     return {out, program.second};
 }

-} // namespace OpenGL::GLShader
\ No newline at end of file
+} // namespace OpenGL::GLShader
src/video_core/renderer_vulkan/vk_memory_manager.cpp (new file, 252 lines)
@@ -0,0 +1,252 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <optional>
+#include <tuple>
+#include <vector>
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/logging/log.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_memory_manager.h"
+
+namespace Vulkan {
+
+// TODO(Rodrigo): Fine tune this number
+constexpr u64 ALLOC_CHUNK_SIZE = 64 * 1024 * 1024;
+
+class VKMemoryAllocation final {
+public:
+    explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory,
+                                vk::MemoryPropertyFlags properties, u64 alloc_size, u32 type)
+        : device{device}, memory{memory}, properties{properties}, alloc_size{alloc_size},
+          shifted_type{ShiftType(type)}, is_mappable{properties &
+                                                     vk::MemoryPropertyFlagBits::eHostVisible} {
+        if (is_mappable) {
+            const auto dev = device.GetLogical();
+            const auto& dld = device.GetDispatchLoader();
+            base_address = static_cast<u8*>(dev.mapMemory(memory, 0, alloc_size, {}, dld));
+        }
+    }
+
+    ~VKMemoryAllocation() {
+        const auto dev = device.GetLogical();
+        const auto& dld = device.GetDispatchLoader();
+        if (is_mappable)
+            dev.unmapMemory(memory, dld);
+        dev.free(memory, nullptr, dld);
+    }
+
+    VKMemoryCommit Commit(vk::DeviceSize commit_size, vk::DeviceSize alignment) {
+        auto found = TryFindFreeSection(free_iterator, alloc_size, static_cast<u64>(commit_size),
+                                        static_cast<u64>(alignment));
+        if (!found) {
+            found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size),
+                                       static_cast<u64>(alignment));
+            if (!found) {
+                // Signal out of memory, it'll try to do more allocations.
+                return nullptr;
+            }
+        }
+        u8* address = is_mappable ? base_address + *found : nullptr;
+        auto commit = std::make_unique<VKMemoryCommitImpl>(this, memory, address, *found,
+                                                           *found + commit_size);
+        commits.push_back(commit.get());
+
+        // Last commit's address is highly probable to be free.
+        free_iterator = *found + commit_size;
+
+        return commit;
+    }
+
+    void Free(const VKMemoryCommitImpl* commit) {
+        ASSERT(commit);
+        const auto it =
+            std::find_if(commits.begin(), commits.end(),
+                         [&](const auto& stored_commit) { return stored_commit == commit; });
+        if (it == commits.end()) {
+            LOG_CRITICAL(Render_Vulkan, "Freeing unallocated commit!");
+            UNREACHABLE();
+            return;
+        }
+        commits.erase(it);
+    }
+
+    /// Returns whether this allocation is compatible with the arguments.
+    bool IsCompatible(vk::MemoryPropertyFlags wanted_properties, u32 type_mask) const {
+        return (wanted_properties & properties) != vk::MemoryPropertyFlagBits(0) &&
+               (type_mask & shifted_type) != 0;
+    }
+
+private:
+    static constexpr u32 ShiftType(u32 type) {
+        return 1U << type;
+    }
+
+    /// A memory allocator, it may return a free region between "start" and "end" with the solicited
+    /// requeriments.
+    std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const {
+        u64 iterator = start;
+        while (iterator + size < end) {
+            const u64 try_left = Common::AlignUp(iterator, alignment);
+            const u64 try_right = try_left + size;
+
+            bool overlap = false;
+            for (const auto& commit : commits) {
+                const auto [commit_left, commit_right] = commit->interval;
+                if (try_left < commit_right && commit_left < try_right) {
+                    // There's an overlap, continue the search where the overlapping commit ends.
+                    iterator = commit_right;
+                    overlap = true;
+                    break;
+                }
+            }
+            if (!overlap) {
+                // A free address has been found.
+                return try_left;
+            }
+        }
+        // No free regions where found, return an empty optional.
+        return std::nullopt;
+    }
+
+    const VKDevice& device;                   ///< Vulkan device.
+    const vk::DeviceMemory memory;            ///< Vulkan memory allocation handler.
+    const vk::MemoryPropertyFlags properties; ///< Vulkan properties.
+    const u64 alloc_size;                     ///< Size of this allocation.
+    const u32 shifted_type;                   ///< Stored Vulkan type of this allocation, shifted.
+    const bool is_mappable;                   ///< Whether the allocation is mappable.
+
+    /// Base address of the mapped pointer.
+    u8* base_address{};
+
+    /// Hints where the next free region is likely going to be.
+    u64 free_iterator{};
+
+    /// Stores all commits done from this allocation.
+    std::vector<const VKMemoryCommitImpl*> commits;
+};
+
+VKMemoryManager::VKMemoryManager(const VKDevice& device)
+    : device{device}, props{device.GetPhysical().getMemoryProperties(device.GetDispatchLoader())},
+      is_memory_unified{GetMemoryUnified(props)} {}
+
+VKMemoryManager::~VKMemoryManager() = default;
+
+VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool host_visible) {
+    ASSERT(reqs.size < ALLOC_CHUNK_SIZE);
+
+    // When a host visible commit is asked, search for host visible and coherent, otherwise search
+    // for a fast device local type.
+    const vk::MemoryPropertyFlags wanted_properties =
+        host_visible
+            ? vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent
+            : vk::MemoryPropertyFlagBits::eDeviceLocal;
+
+    const auto TryCommit = [&]() -> VKMemoryCommit {
+        for (auto& alloc : allocs) {
+            if (!alloc->IsCompatible(wanted_properties, reqs.memoryTypeBits))
+                continue;
+
+            if (auto commit = alloc->Commit(reqs.size, reqs.alignment); commit) {
+                return commit;
+            }
+        }
+        return {};
+    };
+
+    if (auto commit = TryCommit(); commit) {
+        return commit;
+    }
+
+    // Commit has failed, allocate more memory.
+    if (!AllocMemory(wanted_properties, reqs.memoryTypeBits, ALLOC_CHUNK_SIZE)) {
+        // TODO(Rodrigo): Try to use host memory.
+        LOG_CRITICAL(Render_Vulkan, "Ran out of memory!");
+        UNREACHABLE();
+    }
+
+    // Commit again, this time it won't fail since there's a fresh allocation above. If it does,
+    // there's a bug.
+    auto commit = TryCommit();
+    ASSERT(commit);
+    return commit;
+}
+
+VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) {
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+    const auto requeriments = dev.getBufferMemoryRequirements(buffer, dld);
+    auto commit = Commit(requeriments, host_visible);
+    dev.bindBufferMemory(buffer, commit->GetMemory(), commit->GetOffset(), dld);
+    return commit;
+}
+
+VKMemoryCommit VKMemoryManager::Commit(vk::Image image, bool host_visible) {
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+    const auto requeriments = dev.getImageMemoryRequirements(image, dld);
+    auto commit = Commit(requeriments, host_visible);
+    dev.bindImageMemory(image, commit->GetMemory(), commit->GetOffset(), dld);
+    return commit;
+}
+
+bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask,
+                                  u64 size) {
+    const u32 type = [&]() {
+        for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
+            const auto flags = props.memoryTypes[type_index].propertyFlags;
+            if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) {
+                // The type matches in type and in the wanted properties.
+                return type_index;
+            }
+        }
+        LOG_CRITICAL(Render_Vulkan, "Couldn't find a compatible memory type!");
+        UNREACHABLE();
+        return 0u;
+    }();
+
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+
+    // Try to allocate found type.
+    const vk::MemoryAllocateInfo memory_ai(size, type);
+    vk::DeviceMemory memory;
+    if (const vk::Result res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld);
+        res != vk::Result::eSuccess) {
+        LOG_CRITICAL(Render_Vulkan, "Device allocation failed with code {}!", vk::to_string(res));
+        return false;
+    }
+    allocs.push_back(
+        std::make_unique<VKMemoryAllocation>(device, memory, wanted_properties, size, type));
+    return true;
+}
+
+/*static*/ bool VKMemoryManager::GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props) {
+    for (u32 heap_index = 0; heap_index < props.memoryHeapCount; ++heap_index) {
+        if (!(props.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) {
+            // Memory is considered unified when heaps are device local only.
+            return false;
+        }
+    }
+    return true;
+}
+
+VKMemoryCommitImpl::VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory,
+                                       u8* data, u64 begin, u64 end)
+    : allocation{allocation}, memory{memory}, data{data}, interval(std::make_pair(begin, end)) {}
+
+VKMemoryCommitImpl::~VKMemoryCommitImpl() {
+    allocation->Free(this);
+}
+
+u8* VKMemoryCommitImpl::GetData() const {
+    ASSERT_MSG(data != nullptr, "Trying to access an unmapped commit.");
+    return data;
+}
+
+} // namespace Vulkan
src/video_core/renderer_vulkan/vk_memory_manager.h (new file, 87 lines)
@@ -0,0 +1,87 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <utility>
+#include <vector>
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKMemoryAllocation;
+class VKMemoryCommitImpl;
+
+using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>;
+
+class VKMemoryManager final {
+public:
+    explicit VKMemoryManager(const VKDevice& device);
+    ~VKMemoryManager();
+
+    /**
+     * Commits a memory with the specified requeriments.
+     * @param reqs Requeriments returned from a Vulkan call.
+     * @param host_visible Signals the allocator that it *must* use host visible and coherent
+     *                     memory. When passing false, it will try to allocate device local memory.
+     * @returns A memory commit.
+     */
+    VKMemoryCommit Commit(const vk::MemoryRequirements& reqs, bool host_visible);
+
+    /// Commits memory required by the buffer and binds it.
+    VKMemoryCommit Commit(vk::Buffer buffer, bool host_visible);
+
+    /// Commits memory required by the image and binds it.
+    VKMemoryCommit Commit(vk::Image image, bool host_visible);
+
+    /// Returns true if the memory allocations are done always in host visible and coherent memory.
+    bool IsMemoryUnified() const {
+        return is_memory_unified;
+    }
+
+private:
+    /// Allocates a chunk of memory.
+    bool AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
+
+    /// Returns true if the device uses an unified memory model.
+    static bool GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props);
+
+    const VKDevice& device;                         ///< Device handler.
+    const vk::PhysicalDeviceMemoryProperties props; ///< Physical device properties.
+    const bool is_memory_unified;                   ///< True if memory model is unified.
+    std::vector<std::unique_ptr<VKMemoryAllocation>> allocs; ///< Current allocations.
+};
+
+class VKMemoryCommitImpl final {
+    friend VKMemoryAllocation;
+
+public:
+    explicit VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory, u8* data,
+                                u64 begin, u64 end);
+    ~VKMemoryCommitImpl();
+
+    /// Returns the writeable memory map. The commit has to be mappable.
+    u8* GetData() const;
+
+    /// Returns the Vulkan memory handler.
+    vk::DeviceMemory GetMemory() const {
+        return memory;
+    }
+
+    /// Returns the start position of the commit relative to the allocation.
+    vk::DeviceSize GetOffset() const {
+        return static_cast<vk::DeviceSize>(interval.first);
+    }
+
+private:
+    std::pair<u64, u64> interval{};   ///< Interval where the commit exists.
+    vk::DeviceMemory memory;          ///< Vulkan device memory handler.
+    VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
+    u8* data{}; ///< Pointer to the host mapped memory, it has the commit offset included.
+};
+
+} // namespace Vulkan
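A usage sketch based only on the header above (the manager and buffer are assumed to already exist; error handling elided). `Commit()` binds the buffer's memory and returns an RAII commit whose destructor returns the region to its chunk:

```cpp
#include "video_core/renderer_vulkan/vk_memory_manager.h"

// Sketch only: assumes a constructed VKMemoryManager and a created vk::Buffer.
void UploadExample(Vulkan::VKMemoryManager& memory_manager, vk::Buffer buffer) {
    // Host-visible commit: the allocator searches for a mappable, coherent type.
    Vulkan::VKMemoryCommit commit = memory_manager.Commit(buffer, true);

    u8* const staging = commit->GetData(); // mapped pointer, commit offset included
    staging[0] = 0xff;                     // write through the mapping

    // When 'commit' goes out of scope, ~VKMemoryCommitImpl() frees the region
    // back to its 64 MiB allocation chunk.
}
```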
src/video_core/renderer_vulkan/vk_scheduler.cpp (new file, 60 lines)
@@ -0,0 +1,60 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+
+namespace Vulkan {
+
+VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_manager)
+    : device{device}, resource_manager{resource_manager} {
+    next_fence = &resource_manager.CommitFence();
+    AllocateNewContext();
+}
+
+VKScheduler::~VKScheduler() = default;
+
+VKExecutionContext VKScheduler::GetExecutionContext() const {
+    return VKExecutionContext(current_fence, current_cmdbuf);
+}
+
+VKExecutionContext VKScheduler::Flush(vk::Semaphore semaphore) {
+    SubmitExecution(semaphore);
+    current_fence->Release();
+    AllocateNewContext();
+    return GetExecutionContext();
+}
+
+VKExecutionContext VKScheduler::Finish(vk::Semaphore semaphore) {
+    SubmitExecution(semaphore);
+    current_fence->Wait();
+    current_fence->Release();
+    AllocateNewContext();
+    return GetExecutionContext();
+}
+
+void VKScheduler::SubmitExecution(vk::Semaphore semaphore) {
+    const auto& dld = device.GetDispatchLoader();
+    current_cmdbuf.end(dld);
+
+    const auto queue = device.GetGraphicsQueue();
+    const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, &current_cmdbuf, semaphore ? 1u : 0u,
+                                     &semaphore);
+    queue.submit({submit_info}, *current_fence, dld);
+}
+
+void VKScheduler::AllocateNewContext() {
+    current_fence = next_fence;
+    current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence);
+    next_fence = &resource_manager.CommitFence();
+
+    const auto& dld = device.GetDispatchLoader();
+    current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit}, dld);
+}
+
+} // namespace Vulkan
src/video_core/renderer_vulkan/vk_scheduler.h (new file, 69 lines)
@@ -0,0 +1,69 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKExecutionContext;
+class VKFence;
+class VKResourceManager;
+
+/// The scheduler abstracts command buffer and fence management with an interface that's able to do
+/// OpenGL-like operations on Vulkan command buffers.
+class VKScheduler {
+public:
+    explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager);
+    ~VKScheduler();
+
+    /// Gets the current execution context.
+    [[nodiscard]] VKExecutionContext GetExecutionContext() const;
+
+    /// Sends the current execution context to the GPU. It invalidates the current execution
+    /// context and returns a new one.
+    VKExecutionContext Flush(vk::Semaphore semaphore = nullptr);
+
+    /// Sends the current execution context to the GPU and waits for it to complete. It invalidates
+    /// the current execution context and returns a new one.
+    VKExecutionContext Finish(vk::Semaphore semaphore = nullptr);
+
+private:
+    void SubmitExecution(vk::Semaphore semaphore);
+
+    void AllocateNewContext();
+
+    const VKDevice& device;
+    VKResourceManager& resource_manager;
+    vk::CommandBuffer current_cmdbuf;
+    VKFence* current_fence = nullptr;
+    VKFence* next_fence = nullptr;
+};
+
+class VKExecutionContext {
+    friend class VKScheduler;
+
+public:
+    VKExecutionContext() = default;
+
+    VKFence& GetFence() const {
+        return *fence;
+    }
+
+    vk::CommandBuffer GetCommandBuffer() const {
+        return cmdbuf;
+    }
+
+private:
+    explicit VKExecutionContext(VKFence* fence, vk::CommandBuffer cmdbuf)
+        : fence{fence}, cmdbuf{cmdbuf} {}
+
+    VKFence* fence{};
+    vk::CommandBuffer cmdbuf;
+};
+
+} // namespace Vulkan
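A sketch of the contract documented above: record into the current execution context, then `Flush()` to submit and continue, or `Finish()` to submit and wait. Both invalidate the old context, so callers adopt the returned one; the names below are illustrative:

```cpp
#include "video_core/renderer_vulkan/vk_scheduler.h"

// Sketch only: assumes a constructed VKScheduler.
void SubmitExample(Vulkan::VKScheduler& scheduler) {
    auto exctx = scheduler.GetExecutionContext();
    vk::CommandBuffer cmdbuf = exctx.GetCommandBuffer();
    // ... record commands into cmdbuf ...

    // Submit and block until the GPU finishes. The previous command buffer
    // and fence pair is now invalid, so continue with the fresh context.
    exctx = scheduler.Finish();
    cmdbuf = exctx.GetCommandBuffer();
}
```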
@@ -165,6 +165,7 @@ u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) {
         {OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2},
         {OpCode::Type::Conversion, &ShaderIR::DecodeConversion},
         {OpCode::Type::Memory, &ShaderIR::DecodeMemory},
+        {OpCode::Type::Texture, &ShaderIR::DecodeTexture},
         {OpCode::Type::FloatSetPredicate, &ShaderIR::DecodeFloatSetPredicate},
         {OpCode::Type::IntegerSetPredicate, &ShaderIR::DecodeIntegerSetPredicate},
         {OpCode::Type::HalfSetPredicate, &ShaderIR::DecodeHalfSetPredicate},
@@ -17,24 +17,6 @@ using Tegra::Shader::Attribute;
 using Tegra::Shader::Instruction;
 using Tegra::Shader::OpCode;
 using Tegra::Shader::Register;
-using Tegra::Shader::TextureMiscMode;
-using Tegra::Shader::TextureProcessMode;
-using Tegra::Shader::TextureType;
-
-static std::size_t GetCoordCount(TextureType texture_type) {
-    switch (texture_type) {
-    case TextureType::Texture1D:
-        return 1;
-    case TextureType::Texture2D:
-        return 2;
-    case TextureType::Texture3D:
-    case TextureType::TextureCube:
-        return 3;
-    default:
-        UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type));
-        return 0;
-    }
-}

 u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
     const Instruction instr = {program_code[pc]};
@@ -48,7 +30,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
     UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0,
                          "Unaligned attribute loads are not supported");

-    Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Perspective,
+    Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Pass,
                                       Tegra::Shader::IpaSampleMode::Default};

     u64 next_element = instr.attribute.fmt20.element;
@@ -247,194 +229,6 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
         }
         break;
     }
-    case OpCode::Id::TEX: {
-        UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI),
-                             "AOFFI is not implemented");
-
-        if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) {
-            LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete");
-        }
-
-        const TextureType texture_type{instr.tex.texture_type};
-        const bool is_array = instr.tex.array != 0;
-        const bool depth_compare = instr.tex.UsesMiscMode(TextureMiscMode::DC);
-        const auto process_mode = instr.tex.GetTextureProcessMode();
-        WriteTexInstructionFloat(
-            bb, instr, GetTexCode(instr, texture_type, process_mode, depth_compare, is_array));
-        break;
-    }
-    case OpCode::Id::TEXS: {
-        const TextureType texture_type{instr.texs.GetTextureType()};
-        const bool is_array{instr.texs.IsArrayTexture()};
-        const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC);
-        const auto process_mode = instr.texs.GetTextureProcessMode();
-
-        if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) {
-            LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete");
-        }
-
-        const Node4 components =
-            GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array);
-
-        if (instr.texs.fp32_flag) {
-            WriteTexsInstructionFloat(bb, instr, components);
-        } else {
-            WriteTexsInstructionHalfFloat(bb, instr, components);
-        }
-        break;
-    }
-    case OpCode::Id::TLD4: {
-        ASSERT(instr.tld4.array == 0);
-        UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI),
-                             "AOFFI is not implemented");
-        UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV),
-                             "NDV is not implemented");
-        UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP),
-                             "PTP is not implemented");
-
-        if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) {
-            LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete");
-        }
-
-        const auto texture_type = instr.tld4.texture_type.Value();
-        const bool depth_compare = instr.tld4.UsesMiscMode(TextureMiscMode::DC);
-        const bool is_array = instr.tld4.array != 0;
-        WriteTexInstructionFloat(bb, instr,
-                                 GetTld4Code(instr, texture_type, depth_compare, is_array));
-        break;
-    }
-    case OpCode::Id::TLD4S: {
-        UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI),
-                             "AOFFI is not implemented");
-        if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) {
-            LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete");
-        }
-
-        const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC);
-        const Node op_a = GetRegister(instr.gpr8);
-        const Node op_b = GetRegister(instr.gpr20);
-
-        // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction.
-        std::vector<Node> coords;
-        if (depth_compare) {
-            // Note: TLD4S coordinate encoding works just like TEXS's
-            const Node op_y = GetRegister(instr.gpr8.Value() + 1);
-            coords.push_back(op_a);
-            coords.push_back(op_y);
-            coords.push_back(op_b);
-        } else {
-            coords.push_back(op_a);
-            coords.push_back(op_b);
-        }
-        std::vector<Node> extras;
-        extras.push_back(Immediate(static_cast<u32>(instr.tld4s.component)));
-
-        const auto& sampler =
-            GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare);
-
-        Node4 values;
-        for (u32 element = 0; element < values.size(); ++element) {
-            auto coords_copy = coords;
-            MetaTexture meta{sampler, {}, {}, extras, element};
-            values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
-        }
-
-        WriteTexsInstructionFloat(bb, instr, values);
-        break;
-    }
-    case OpCode::Id::TXQ: {
-        if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) {
-            LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete");
-        }
-
-        // TODO: The new commits on the texture refactor, change the way samplers work.
-        // Sadly, not all texture instructions specify the type of texture their sampler
-        // uses. This must be fixed at a later instance.
-        const auto& sampler =
-            GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false);
-
-        u32 indexer = 0;
-        switch (instr.txq.query_type) {
-        case Tegra::Shader::TextureQueryType::Dimension: {
-            for (u32 element = 0; element < 4; ++element) {
-                if (!instr.txq.IsComponentEnabled(element)) {
-                    continue;
-                }
-                MetaTexture meta{sampler, {}, {}, {}, element};
-                const Node value =
-                    Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8));
-                SetTemporal(bb, indexer++, value);
-            }
-            for (u32 i = 0; i < indexer; ++i) {
-                SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
-            }
-            break;
-        }
-        default:
-            UNIMPLEMENTED_MSG("Unhandled texture query type: {}",
-                              static_cast<u32>(instr.txq.query_type.Value()));
-        }
-        break;
-    }
-    case OpCode::Id::TMML: {
-        UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
-                             "NDV is not implemented");
-
-        if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) {
-            LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete");
-        }
-
-        auto texture_type = instr.tmml.texture_type.Value();
-        const bool is_array = instr.tmml.array != 0;
-        const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);
-
-        std::vector<Node> coords;
-
-        // TODO: Add coordinates for different samplers once other texture types are implemented.
-        switch (texture_type) {
-        case TextureType::Texture1D:
-            coords.push_back(GetRegister(instr.gpr8));
-            break;
-        case TextureType::Texture2D:
-            coords.push_back(GetRegister(instr.gpr8.Value() + 0));
-            coords.push_back(GetRegister(instr.gpr8.Value() + 1));
-            break;
-        default:
-            UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type));
-
-            // Fallback to interpreting as a 2D texture for now
-            coords.push_back(GetRegister(instr.gpr8.Value() + 0));
-            coords.push_back(GetRegister(instr.gpr8.Value() + 1));
-            texture_type = TextureType::Texture2D;
-        }
-
-        for (u32 element = 0; element < 2; ++element) {
-            auto params = coords;
-            MetaTexture meta{sampler, {}, {}, {}, element};
-            const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params));
-            SetTemporal(bb, element, value);
-        }
-        for (u32 element = 0; element < 2; ++element) {
-            SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element));
-        }
-
-        break;
-    }
-    case OpCode::Id::TLDS: {
-        const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()};
-        const bool is_array{instr.tlds.IsArrayTexture()};
-
-        UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::AOFFI),
-                             "AOFFI is not implemented");
-        UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented");
-
-        if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) {
-            LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete");
-        }
-
-        WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array));
-        break;
-    }
     default:
         UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName());
     }
@@ -442,291 +236,4 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
|
||||
return pc;
|
||||
}
|
||||
|
||||
const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type,
|
||||
bool is_array, bool is_shadow) {
|
||||
const auto offset = static_cast<std::size_t>(sampler.index.Value());
|
||||
|
||||
// If this sampler has already been used, return the existing mapping.
|
||||
const auto itr =
|
||||
std::find_if(used_samplers.begin(), used_samplers.end(),
|
||||
[&](const Sampler& entry) { return entry.GetOffset() == offset; });
|
||||
if (itr != used_samplers.end()) {
|
||||
ASSERT(itr->GetType() == type && itr->IsArray() == is_array &&
|
||||
itr->IsShadow() == is_shadow);
|
||||
return *itr;
|
||||
}
|
||||
|
||||
// Otherwise create a new mapping for this sampler
|
||||
const std::size_t next_index = used_samplers.size();
|
||||
const Sampler entry{offset, next_index, type, is_array, is_shadow};
|
||||
return *used_samplers.emplace(entry).first;
|
||||
}
|
||||
|
||||
void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {
|
||||
u32 dest_elem = 0;
|
||||
for (u32 elem = 0; elem < 4; ++elem) {
|
||||
if (!instr.tex.IsComponentEnabled(elem)) {
|
||||
// Skip disabled components
|
||||
continue;
|
||||
}
|
||||
SetTemporal(bb, dest_elem++, components[elem]);
|
||||
}
|
||||
// After writing values in temporals, move them to the real registers
|
||||
for (u32 i = 0; i < dest_elem; ++i) {
|
||||
SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
|
||||
}
|
||||
}
|
||||
|
||||
void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr,
|
||||
const Node4& components) {
|
||||
// TEXS has two destination registers and a swizzle. The first two elements in the swizzle
|
||||
// go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1
|
||||
|
||||
u32 dest_elem = 0;
|
||||
for (u32 component = 0; component < 4; ++component) {
|
||||
if (!instr.texs.IsComponentEnabled(component))
|
||||
continue;
|
||||
SetTemporal(bb, dest_elem++, components[component]);
|
||||
}
|
||||
|
||||
for (u32 i = 0; i < dest_elem; ++i) {
|
||||
if (i < 2) {
|
||||
// Write the first two swizzle components to gpr0 and gpr0+1
|
||||
SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i));
|
||||
} else {
|
||||
ASSERT(instr.texs.HasTwoDestinations());
|
||||
// Write the rest of the swizzle components to gpr28 and gpr28+1
|
||||
SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr,
|
||||
const Node4& components) {
|
||||
// TEXS.F16 destionation registers are packed in two registers in pairs (just like any half
|
||||
// float instruction).
|
||||
|
||||
Node4 values;
|
||||
u32 dest_elem = 0;
|
||||
for (u32 component = 0; component < 4; ++component) {
|
||||
if (!instr.texs.IsComponentEnabled(component))
|
||||
continue;
|
||||
values[dest_elem++] = components[component];
|
||||
}
|
||||
if (dest_elem == 0)
|
||||
return;
|
||||
|
||||
std::generate(values.begin() + dest_elem, values.end(), [&]() { return Immediate(0); });
|
||||
|
||||
const Node first_value = Operation(OperationCode::HPack2, values[0], values[1]);
|
||||
if (dest_elem <= 2) {
|
||||
SetRegister(bb, instr.gpr0, first_value);
|
||||
return;
|
||||
}
|
||||
|
||||
SetTemporal(bb, 0, first_value);
|
||||
SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3]));
|
||||
|
||||
SetRegister(bb, instr.gpr0, GetTemporal(0));
|
||||
SetRegister(bb, instr.gpr28, GetTemporal(1));
|
||||
}
|
||||
|
||||
Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
                               TextureProcessMode process_mode, std::vector<Node> coords,
                               Node array, Node depth_compare, u32 bias_offset) {
    const bool is_array = array;
    const bool is_shadow = depth_compare;

    UNIMPLEMENTED_IF_MSG((texture_type == TextureType::Texture3D && (is_array || is_shadow)) ||
                             (texture_type == TextureType::TextureCube && is_array && is_shadow),
                         "This method is not supported.");

    const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, is_shadow);

    const bool lod_needed = process_mode == TextureProcessMode::LZ ||
                            process_mode == TextureProcessMode::LL ||
                            process_mode == TextureProcessMode::LLA;

    // LOD selection (either via bias or explicit textureLod) not supported in GL for
    // sampler2DArrayShadow and samplerCubeArrayShadow.
    const bool gl_lod_supported =
        !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) ||
          (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow));

    const OperationCode read_method =
        lod_needed && gl_lod_supported ? OperationCode::TextureLod : OperationCode::Texture;

    UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported);

    std::vector<Node> extras;
    if (process_mode != TextureProcessMode::None && gl_lod_supported) {
        if (process_mode == TextureProcessMode::LZ) {
            extras.push_back(Immediate(0.0f));
        } else {
            // If present, lod or bias are always stored in the register indexed by the gpr20
            // field with an offset depending on the usage of the other registers
            extras.push_back(GetRegister(instr.gpr20.Value() + bias_offset));
        }
    }

    Node4 values;
    for (u32 element = 0; element < values.size(); ++element) {
        auto copy_coords = coords;
        MetaTexture meta{sampler, array, depth_compare, extras, element};
        values[element] = Operation(read_method, meta, std::move(copy_coords));
    }

    return values;
}

Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type,
                           TextureProcessMode process_mode, bool depth_compare, bool is_array) {
    const bool lod_bias_enabled =
        (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);

    const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
        texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5);
    // If enabled, the array index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // First coordinate index is gpr8, or gpr8 + 1 when arrays are used
    const u64 coord_register = array_register + (is_array ? 1 : 0);

    std::vector<Node> coords;
    for (std::size_t i = 0; i < coord_count; ++i) {
        coords.push_back(GetRegister(coord_register + i));
    }
    // For 1D.DC in OpenGL, the 2nd component is ignored.
    if (depth_compare && !is_array && texture_type == TextureType::Texture1D) {
        coords.push_back(Immediate(0.0f));
    }

    const Node array = is_array ? GetRegister(array_register) : nullptr;

    Node dc{};
    if (depth_compare) {
        // Depth is always stored in the register signaled by gpr20, or in the next register if
        // lod or bias are used
        const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
        dc = GetRegister(depth_register);
    }

    return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, 0);
}

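As a worked example of the register layout this function decodes (a sketch of the arithmetic described in the comments above, not emulator code): for a 2D array texture with depth compare and lod/bias, gpr8 holds the array index, gpr8+1 and gpr8+2 the coordinates, gpr20 the lod/bias, and gpr20+1 the depth reference.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Sketch: compute which registers a TEX-style encoding reads, following the
// layout described above. 'gpr8' and 'gpr20' stand in for the instruction fields.
int main() {
    const std::uint64_t gpr8 = 8, gpr20 = 20;
    const bool is_array = true, depth_compare = true, lod_bias_enabled = true;
    const std::size_t coord_count = 2; // Texture2D

    const std::uint64_t array_register = gpr8;
    const std::uint64_t coord_register = array_register + (is_array ? 1 : 0);
    const std::uint64_t depth_register = gpr20 + (lod_bias_enabled ? 1 : 0);

    std::printf("array index: r%llu\n", static_cast<unsigned long long>(array_register));
    for (std::size_t i = 0; i < coord_count; ++i) {
        std::printf("coord %zu: r%llu\n", i,
                    static_cast<unsigned long long>(coord_register + i));
    }
    if (lod_bias_enabled) {
        std::printf("lod/bias: r%llu\n", static_cast<unsigned long long>(gpr20));
    }
    if (depth_compare) {
        std::printf("depth ref: r%llu\n", static_cast<unsigned long long>(depth_register));
    }
    return 0;
}
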
Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
                            TextureProcessMode process_mode, bool depth_compare, bool is_array) {
    const bool lod_bias_enabled =
        (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);

    const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
        texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4);
    // If enabled, the array index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // First coordinate index is stored in the gpr8 field, or (gpr8 + 1) when arrays are used
    const u64 coord_register = array_register + (is_array ? 1 : 0);
    const u64 last_coord_register =
        (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2))
            ? static_cast<u64>(instr.gpr20.Value())
            : coord_register + 1;
    const u32 bias_offset = coord_count > 2 ? 1 : 0;

    std::vector<Node> coords;
    for (std::size_t i = 0; i < coord_count; ++i) {
        const bool last = (i == (coord_count - 1)) && (coord_count > 1);
        coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
    }

    const Node array = is_array ? GetRegister(array_register) : nullptr;

    Node dc{};
    if (depth_compare) {
        // Depth is always stored in the register signaled by gpr20, or in the next register if
        // lod or bias are used
        const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
        dc = GetRegister(depth_register);
    }

    return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_offset);
}

Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare,
                            bool is_array) {
    const std::size_t coord_count = GetCoordCount(texture_type);
    const std::size_t total_coord_count = coord_count + (is_array ? 1 : 0);
    const std::size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0);

    // If enabled, the array index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // First coordinate index is gpr8, or gpr8 + 1 when arrays are used
    const u64 coord_register = array_register + (is_array ? 1 : 0);

    std::vector<Node> coords;
    for (std::size_t i = 0; i < coord_count; ++i)
        coords.push_back(GetRegister(coord_register + i));

    const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare);

    Node4 values;
    for (u32 element = 0; element < values.size(); ++element) {
        auto coords_copy = coords;
        MetaTexture meta{sampler, GetRegister(array_register), {}, {}, element};
        values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
    }

    return values;
}

Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
    const std::size_t type_coord_count = GetCoordCount(texture_type);
    const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;

    // If enabled, the array index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // If array, gpr20 is used for the coordinates instead
    const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value();

    const u64 last_coord_register =
        ((type_coord_count > 2) || (type_coord_count == 2 && !lod_enabled)) && !is_array
            ? static_cast<u64>(instr.gpr20.Value())
            : coord_register + 1;

    std::vector<Node> coords;
    for (std::size_t i = 0; i < type_coord_count; ++i) {
        const bool last = (i == (type_coord_count - 1)) && (type_coord_count > 1);
        coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
    }

    const Node array = is_array ? GetRegister(array_register) : nullptr;
    // When lod is used, it is always stored in gpr20
    const Node lod = lod_enabled ? GetRegister(instr.gpr20) : Immediate(0);

    const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);

    Node4 values;
    for (u32 element = 0; element < values.size(); ++element) {
        auto coords_copy = coords;
        MetaTexture meta{sampler, array, {}, {lod}, element};
        values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
    }
    return values;
}

std::tuple<std::size_t, std::size_t> ShaderIR::ValidateAndGetCoordinateElement(
    TextureType texture_type, bool depth_compare, bool is_array, bool lod_bias_enabled,
    std::size_t max_coords, std::size_t max_inputs) {
    const std::size_t coord_count = GetCoordCount(texture_type);

    std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0);
    const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0);
    if (total_coord_count > max_coords || total_reg_count > max_inputs) {
        UNIMPLEMENTED_MSG("Unsupported Texture operation");
        total_coord_count = std::min(total_coord_count, max_coords);
    }
    // For 1D.DC, OpenGL uses a vec3, but the 2nd component is ignored later.
    total_coord_count +=
        (depth_compare && !is_array && texture_type == TextureType::Texture1D) ? 1 : 0;

    return {coord_count, total_coord_count};
}

} // namespace VideoCommon::Shader

@@ -135,7 +135,18 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
                                instr.ipa.sample_mode.Value()};

        const Node attr = GetInputAttribute(attribute.index, attribute.element, input_mode);
        const Node value = GetSaturatedFloat(attr, instr.ipa.saturate);
        Node value = attr;
        const Tegra::Shader::Attribute::Index index = attribute.index.Value();
        if (index >= Tegra::Shader::Attribute::Index::Attribute_0 &&
            index <= Tegra::Shader::Attribute::Index::Attribute_31) {
            // TODO(Blinkhawk): There are cases where a perspective attribute uses PASS.
            // In theory, by setting them as perspective, OpenGL does the perspective correction.
            // A way must be figured out to reverse the last step of it.
            if (input_mode.interpolation_mode == Tegra::Shader::IpaInterpMode::Multiply) {
                value = Operation(OperationCode::FMul, PRECISE, value, GetRegister(instr.gpr20));
            }
        }
        value = GetSaturatedFloat(value, instr.ipa.saturate);

        SetRegister(bb, instr.gpr0, value);
        break;
@@ -175,4 +186,4 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
    return pc;
}

} // namespace VideoCommon::Shader
} // namespace VideoCommon::Shader

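For context on the TODO above, a minimal sketch of standard perspective-correct interpolation, the mechanism the comment refers to (all values made up): hardware interpolates a/w together with 1/w across the primitive, and the corrected attribute is the ratio of the two, which is why a multiply by a w-related register appears in the Multiply path.

#include <cstdio>

// Sketch: perspective-correct interpolation across a primitive. Linear
// interpolation happens on a/w and on 1/w; dividing recovers the attribute.
static float lerp(float x, float y, float t) {
    return x + t * (y - x);
}

int main() {
    // Two vertices with attribute a and clip-space w (made-up values).
    const float a0 = 1.0f, w0 = 2.0f;
    const float a1 = 3.0f, w1 = 8.0f;
    const float t = 0.5f; // interpolation factor across the primitive

    const float a_over_w = lerp(a0 / w0, a1 / w1, t);
    const float one_over_w = lerp(1.0f / w0, 1.0f / w1, t);
    const float corrected = a_over_w / one_over_w; // perspective-correct result

    std::printf("perspective-correct attribute: %f\n", corrected);
    return 0;
}
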
534
src/video_core/shader/decode/texture.cpp
Normal file
@@ -0,0 +1,534 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <vector>
#include <fmt/format.h>

#include "common/assert.h"
#include "common/common_types.h"
#include "video_core/engines/shader_bytecode.h"
#include "video_core/shader/shader_ir.h"

namespace VideoCommon::Shader {

using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
using Tegra::Shader::Register;
using Tegra::Shader::TextureMiscMode;
using Tegra::Shader::TextureProcessMode;
using Tegra::Shader::TextureType;

static std::size_t GetCoordCount(TextureType texture_type) {
    switch (texture_type) {
    case TextureType::Texture1D:
        return 1;
    case TextureType::Texture2D:
        return 2;
    case TextureType::Texture3D:
    case TextureType::TextureCube:
        return 3;
    default:
        UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type));
        return 0;
    }
}

u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
    const Instruction instr = {program_code[pc]};
    const auto opcode = OpCode::Decode(instr);

    switch (opcode->get().GetId()) {
    case OpCode::Id::TEX: {
        UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI),
                             "AOFFI is not implemented");

        if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) {
            LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete");
        }

        const TextureType texture_type{instr.tex.texture_type};
        const bool is_array = instr.tex.array != 0;
        const bool depth_compare = instr.tex.UsesMiscMode(TextureMiscMode::DC);
        const auto process_mode = instr.tex.GetTextureProcessMode();
        WriteTexInstructionFloat(
            bb, instr, GetTexCode(instr, texture_type, process_mode, depth_compare, is_array));
        break;
    }
    case OpCode::Id::TEXS: {
        const TextureType texture_type{instr.texs.GetTextureType()};
        const bool is_array{instr.texs.IsArrayTexture()};
        const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC);
        const auto process_mode = instr.texs.GetTextureProcessMode();

        if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) {
            LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete");
        }

        const Node4 components =
            GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array);

        if (instr.texs.fp32_flag) {
            WriteTexsInstructionFloat(bb, instr, components);
        } else {
            WriteTexsInstructionHalfFloat(bb, instr, components);
        }
        break;
    }
    case OpCode::Id::TLD4: {
        ASSERT(instr.tld4.array == 0);
        UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI),
                             "AOFFI is not implemented");
        UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV),
                             "NDV is not implemented");
        UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP),
                             "PTP is not implemented");

        if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) {
            LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete");
        }

        const auto texture_type = instr.tld4.texture_type.Value();
        const bool depth_compare = instr.tld4.UsesMiscMode(TextureMiscMode::DC);
        const bool is_array = instr.tld4.array != 0;
        WriteTexInstructionFloat(bb, instr,
                                 GetTld4Code(instr, texture_type, depth_compare, is_array));
        break;
    }
    case OpCode::Id::TLD4S: {
        UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI),
                             "AOFFI is not implemented");
        if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) {
            LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete");
        }

        const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC);
        const Node op_a = GetRegister(instr.gpr8);
        const Node op_b = GetRegister(instr.gpr20);

        // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction.
        std::vector<Node> coords;
        if (depth_compare) {
            // Note: TLD4S coordinate encoding works just like TEXS's
            const Node op_y = GetRegister(instr.gpr8.Value() + 1);
            coords.push_back(op_a);
            coords.push_back(op_y);
            coords.push_back(op_b);
        } else {
            coords.push_back(op_a);
            coords.push_back(op_b);
        }
        const Node component = Immediate(static_cast<u32>(instr.tld4s.component));

        const auto& sampler =
            GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare);

        Node4 values;
        for (u32 element = 0; element < values.size(); ++element) {
            auto coords_copy = coords;
            MetaTexture meta{sampler, {}, {}, {}, {}, component, element};
            values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
        }

        WriteTexsInstructionFloat(bb, instr, values);
        break;
    }
    case OpCode::Id::TXQ: {
        if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) {
            LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete");
        }

        // TODO: The new commits on the texture refactor change the way samplers work.
        // Sadly, not all texture instructions specify the type of texture their sampler
        // uses. This must be fixed at a later instance.
        const auto& sampler =
            GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false);

        u32 indexer = 0;
        switch (instr.txq.query_type) {
        case Tegra::Shader::TextureQueryType::Dimension: {
            for (u32 element = 0; element < 4; ++element) {
                if (!instr.txq.IsComponentEnabled(element)) {
                    continue;
                }
                MetaTexture meta{sampler, {}, {}, {}, {}, {}, element};
                const Node value =
                    Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8));
                SetTemporal(bb, indexer++, value);
            }
            for (u32 i = 0; i < indexer; ++i) {
                SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
            }
            break;
        }
        default:
            UNIMPLEMENTED_MSG("Unhandled texture query type: {}",
                              static_cast<u32>(instr.txq.query_type.Value()));
        }
        break;
    }
    case OpCode::Id::TMML: {
        UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
                             "NDV is not implemented");

        if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) {
            LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete");
        }

        auto texture_type = instr.tmml.texture_type.Value();
        const bool is_array = instr.tmml.array != 0;
        const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);

        std::vector<Node> coords;

        // TODO: Add coordinates for different samplers once other texture types are implemented.
        switch (texture_type) {
        case TextureType::Texture1D:
            coords.push_back(GetRegister(instr.gpr8));
            break;
        case TextureType::Texture2D:
            coords.push_back(GetRegister(instr.gpr8.Value() + 0));
            coords.push_back(GetRegister(instr.gpr8.Value() + 1));
            break;
        default:
            UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type));

            // Fall back to interpreting as a 2D texture for now
            coords.push_back(GetRegister(instr.gpr8.Value() + 0));
            coords.push_back(GetRegister(instr.gpr8.Value() + 1));
            texture_type = TextureType::Texture2D;
        }

        for (u32 element = 0; element < 2; ++element) {
            auto params = coords;
            MetaTexture meta{sampler, {}, {}, {}, {}, {}, element};
            const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params));
            SetTemporal(bb, element, value);
        }
        for (u32 element = 0; element < 2; ++element) {
            SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element));
        }

        break;
    }
    case OpCode::Id::TLDS: {
        const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()};
        const bool is_array{instr.tlds.IsArrayTexture()};

        UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::AOFFI),
                             "AOFFI is not implemented");
        UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented");

        if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) {
            LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete");
        }

        WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array));
        break;
    }
    default:
        UNIMPLEMENTED_MSG("Unhandled texture instruction: {}", opcode->get().GetName());
    }

    return pc;
}

const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type,
                                    bool is_array, bool is_shadow) {
    const auto offset = static_cast<std::size_t>(sampler.index.Value());

    // If this sampler has already been used, return the existing mapping.
    const auto itr =
        std::find_if(used_samplers.begin(), used_samplers.end(),
                     [&](const Sampler& entry) { return entry.GetOffset() == offset; });
    if (itr != used_samplers.end()) {
        ASSERT(itr->GetType() == type && itr->IsArray() == is_array &&
               itr->IsShadow() == is_shadow);
        return *itr;
    }

    // Otherwise create a new mapping for this sampler
    const std::size_t next_index = used_samplers.size();
    const Sampler entry{offset, next_index, type, is_array, is_shadow};
    return *used_samplers.emplace(entry).first;
}

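GetSampler is a find-or-insert cache keyed on the sampler offset: the same offset always maps to the same dense index. A self-contained sketch of that pattern with a simplified entry type (names here are illustrative, not the emulator's):

#include <cstddef>
#include <cstdio>
#include <set>

// Simplified stand-in for the Sampler entry, ordered by offset so std::set
// can deduplicate it.
struct SamplerEntry {
    std::size_t offset;
    std::size_t index;
    bool operator<(const SamplerEntry& other) const {
        return offset < other.offset;
    }
};

static const SamplerEntry& GetOrInsert(std::set<SamplerEntry>& used, std::size_t offset) {
    // Reuse an existing mapping for this offset if there is one.
    const auto it = used.find(SamplerEntry{offset, 0});
    if (it != used.end()) {
        return *it;
    }
    // Otherwise assign the next dense index and remember the mapping.
    return *used.emplace(SamplerEntry{offset, used.size()}).first;
}

int main() {
    std::set<SamplerEntry> used;
    std::printf("%zu\n", GetOrInsert(used, 8).index);  // 0: first use of offset 8
    std::printf("%zu\n", GetOrInsert(used, 16).index); // 1: new offset, next index
    std::printf("%zu\n", GetOrInsert(used, 8).index);  // 0: deduplicated
    return 0;
}
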
void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {
    u32 dest_elem = 0;
    for (u32 elem = 0; elem < 4; ++elem) {
        if (!instr.tex.IsComponentEnabled(elem)) {
            // Skip disabled components
            continue;
        }
        SetTemporal(bb, dest_elem++, components[elem]);
    }
    // After writing values in temporals, move them to the real registers
    for (u32 i = 0; i < dest_elem; ++i) {
        SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
    }
}

void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr,
                                         const Node4& components) {
    // TEXS has two destination registers and a swizzle. The first two elements in the swizzle
    // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1

    u32 dest_elem = 0;
    for (u32 component = 0; component < 4; ++component) {
        if (!instr.texs.IsComponentEnabled(component))
            continue;
        SetTemporal(bb, dest_elem++, components[component]);
    }

    for (u32 i = 0; i < dest_elem; ++i) {
        if (i < 2) {
            // Write the first two swizzle components to gpr0 and gpr0+1
            SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i));
        } else {
            ASSERT(instr.texs.HasTwoDestinations());
            // Write the rest of the swizzle components to gpr28 and gpr28+1
            SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i));
        }
    }
}

void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr,
                                             const Node4& components) {
    // TEXS.F16 destination registers are packed in two registers in pairs (just like any half
    // float instruction).

    Node4 values;
    u32 dest_elem = 0;
    for (u32 component = 0; component < 4; ++component) {
        if (!instr.texs.IsComponentEnabled(component))
            continue;
        values[dest_elem++] = components[component];
    }
    if (dest_elem == 0)
        return;

    std::generate(values.begin() + dest_elem, values.end(), [&]() { return Immediate(0); });

    const Node first_value = Operation(OperationCode::HPack2, values[0], values[1]);
    if (dest_elem <= 2) {
        SetRegister(bb, instr.gpr0, first_value);
        return;
    }

    SetTemporal(bb, 0, first_value);
    SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3]));

    SetRegister(bb, instr.gpr0, GetTemporal(0));
    SetRegister(bb, instr.gpr28, GetTemporal(1));
}

Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
                               TextureProcessMode process_mode, std::vector<Node> coords,
                               Node array, Node depth_compare, u32 bias_offset) {
    const bool is_array = array;
    const bool is_shadow = depth_compare;

    UNIMPLEMENTED_IF_MSG((texture_type == TextureType::Texture3D && (is_array || is_shadow)) ||
                             (texture_type == TextureType::TextureCube && is_array && is_shadow),
                         "This method is not supported.");

    const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, is_shadow);

    const bool lod_needed = process_mode == TextureProcessMode::LZ ||
                            process_mode == TextureProcessMode::LL ||
                            process_mode == TextureProcessMode::LLA;

    // LOD selection (either via bias or explicit textureLod) not supported in GL for
    // sampler2DArrayShadow and samplerCubeArrayShadow.
    const bool gl_lod_supported =
        !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) ||
          (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow));

    const OperationCode read_method =
        (lod_needed && gl_lod_supported) ? OperationCode::TextureLod : OperationCode::Texture;

    UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported);

    Node bias = {};
    Node lod = {};
    if (process_mode != TextureProcessMode::None && gl_lod_supported) {
        switch (process_mode) {
        case TextureProcessMode::LZ:
            lod = Immediate(0.0f);
            break;
        case TextureProcessMode::LB:
            // If present, lod or bias are always stored in the register indexed by the gpr20
            // field with an offset depending on the usage of the other registers
            bias = GetRegister(instr.gpr20.Value() + bias_offset);
            break;
        case TextureProcessMode::LL:
            lod = GetRegister(instr.gpr20.Value() + bias_offset);
            break;
        default:
            UNIMPLEMENTED_MSG("Unimplemented process mode={}", static_cast<u32>(process_mode));
            break;
        }
    }

    Node4 values;
    for (u32 element = 0; element < values.size(); ++element) {
        auto copy_coords = coords;
        MetaTexture meta{sampler, array, depth_compare, bias, lod, {}, element};
        values[element] = Operation(read_method, meta, std::move(copy_coords));
    }

    return values;
}

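Compared to the removed version further up, which pushed either value into a generic extras vector, this version keeps bias and lod as separate MetaTexture fields, so a backend can tell a biased texture read apart from an explicit-lod read without re-decoding the process mode. A hedged sketch of how a consumer might branch on that split; the struct and emitter below are simplified stand-ins, not the emulator's types:

#include <cstdio>
#include <string>

// Simplified mirror of the MetaTexture split: at most one of bias/lod is set.
struct MetaTextureSketch {
    const char* bias = nullptr; // e.g. "r21" for TextureProcessMode::LB
    const char* lod = nullptr;  // e.g. "0.0" for LZ, "r21" for LL
};

// A backend can now choose the GLSL-style call directly from the fields.
static std::string EmitRead(const MetaTextureSketch& meta) {
    if (meta.lod != nullptr) {
        return std::string{"textureLod(sampler, coords, "} + meta.lod + ")";
    }
    if (meta.bias != nullptr) {
        return std::string{"texture(sampler, coords, "} + meta.bias + ")";
    }
    return "texture(sampler, coords)";
}

int main() {
    std::printf("%s\n", EmitRead({nullptr, "0.0"}).c_str()); // LZ
    std::printf("%s\n", EmitRead({"r21", nullptr}).c_str()); // LB
    return 0;
}
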
Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type,
                           TextureProcessMode process_mode, bool depth_compare, bool is_array) {
    const bool lod_bias_enabled =
        (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);

    const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
        texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5);
    // If enabled, the array index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // First coordinate index is gpr8, or gpr8 + 1 when arrays are used
    const u64 coord_register = array_register + (is_array ? 1 : 0);

    std::vector<Node> coords;
    for (std::size_t i = 0; i < coord_count; ++i) {
        coords.push_back(GetRegister(coord_register + i));
    }
    // For 1D.DC in OpenGL, the 2nd component is ignored.
    if (depth_compare && !is_array && texture_type == TextureType::Texture1D) {
        coords.push_back(Immediate(0.0f));
    }

    const Node array = is_array ? GetRegister(array_register) : nullptr;

    Node dc{};
    if (depth_compare) {
        // Depth is always stored in the register signaled by gpr20, or in the next register if
        // lod or bias are used
        const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
        dc = GetRegister(depth_register);
    }

    return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, 0);
}

Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
                            TextureProcessMode process_mode, bool depth_compare, bool is_array) {
    const bool lod_bias_enabled =
        (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);

    const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
        texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4);
    // If enabled, the array index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // First coordinate index is stored in the gpr8 field, or (gpr8 + 1) when arrays are used
    const u64 coord_register = array_register + (is_array ? 1 : 0);
    const u64 last_coord_register =
        (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2))
            ? static_cast<u64>(instr.gpr20.Value())
            : coord_register + 1;
    const u32 bias_offset = coord_count > 2 ? 1 : 0;

    std::vector<Node> coords;
    for (std::size_t i = 0; i < coord_count; ++i) {
        const bool last = (i == (coord_count - 1)) && (coord_count > 1);
        coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
    }

    const Node array = is_array ? GetRegister(array_register) : nullptr;

    Node dc{};
    if (depth_compare) {
        // Depth is always stored in the register signaled by gpr20, or in the next register if
        // lod or bias are used
        const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
        dc = GetRegister(depth_register);
    }

    return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_offset);
}

Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare,
                            bool is_array) {
    const std::size_t coord_count = GetCoordCount(texture_type);
    const std::size_t total_coord_count = coord_count + (is_array ? 1 : 0);
    const std::size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0);

    // If enabled, the array index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // First coordinate index is gpr8, or gpr8 + 1 when arrays are used
    const u64 coord_register = array_register + (is_array ? 1 : 0);

    std::vector<Node> coords;
    for (std::size_t i = 0; i < coord_count; ++i)
        coords.push_back(GetRegister(coord_register + i));

    const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare);

    Node4 values;
    for (u32 element = 0; element < values.size(); ++element) {
        auto coords_copy = coords;
        MetaTexture meta{sampler, GetRegister(array_register), {}, {}, {}, {}, element};
        values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
    }

    return values;
}

Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
    const std::size_t type_coord_count = GetCoordCount(texture_type);
    const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;

    // If enabled, the array index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // If array, gpr20 is used for the coordinates instead
    const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value();

    const u64 last_coord_register =
        ((type_coord_count > 2) || (type_coord_count == 2 && !lod_enabled)) && !is_array
            ? static_cast<u64>(instr.gpr20.Value())
            : coord_register + 1;

    std::vector<Node> coords;
    for (std::size_t i = 0; i < type_coord_count; ++i) {
        const bool last = (i == (type_coord_count - 1)) && (type_coord_count > 1);
        coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
    }

    const Node array = is_array ? GetRegister(array_register) : nullptr;
    // When lod is used, it is always stored in gpr20
    const Node lod = lod_enabled ? GetRegister(instr.gpr20) : Immediate(0);

    const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);

    Node4 values;
    for (u32 element = 0; element < values.size(); ++element) {
        auto coords_copy = coords;
        MetaTexture meta{sampler, array, {}, {}, lod, {}, element};
        values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
    }
    return values;
}

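TexelFetch here maps to an unfiltered read at integer texel coordinates with an explicit mipmap level, as in GLSL texelFetch. A minimal CPU-side sketch of those semantics, assuming a simple mip chain stored as arrays (all names and values below are illustrative):

#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch: texelFetch-style access. No sampling, filtering, or wrapping;
// coordinates are integers addressing one texel of the selected mip level.
struct MipLevel {
    std::uint32_t width;
    std::uint32_t height;
    std::vector<float> texels; // width * height values
};

static float TexelFetch(const std::vector<MipLevel>& mips, std::uint32_t x, std::uint32_t y,
                        std::uint32_t lod) {
    const MipLevel& level = mips.at(lod);
    return level.texels.at(y * level.width + x);
}

int main() {
    const std::vector<MipLevel> mips{
        {2, 2, {0.0f, 1.0f, 2.0f, 3.0f}}, // lod 0
        {1, 1, {9.0f}},                   // lod 1
    };
    std::printf("%f\n", TexelFetch(mips, 1, 1, 0)); // 3.0
    std::printf("%f\n", TexelFetch(mips, 0, 0, 1)); // 9.0
    return 0;
}
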
std::tuple<std::size_t, std::size_t> ShaderIR::ValidateAndGetCoordinateElement(
    TextureType texture_type, bool depth_compare, bool is_array, bool lod_bias_enabled,
    std::size_t max_coords, std::size_t max_inputs) {
    const std::size_t coord_count = GetCoordCount(texture_type);

    std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0);
    const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0);
    if (total_coord_count > max_coords || total_reg_count > max_inputs) {
        UNIMPLEMENTED_MSG("Unsupported Texture operation");
        total_coord_count = std::min(total_coord_count, max_coords);
    }
    // For 1D.DC, OpenGL uses a vec3, but the 2nd component is ignored later.
    total_coord_count +=
        (depth_compare && !is_array && texture_type == TextureType::Texture1D) ? 1 : 0;

    return {coord_count, total_coord_count};
}

} // namespace VideoCommon::Shader
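As a worked example of the validation above (a sketch of the arithmetic only, not emulator code): a cube array shadow lookup with lod/bias enabled has coord_count = 3, plus one array index and one depth reference, so total_coord_count = 5 and total_reg_count = 6; against the TEXS limits (max_coords = 4, max_inputs = 4) that trips the unimplemented path and clamps the coordinate count to 4.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
    // Cube array shadow with lod/bias enabled, validated against TEXS limits.
    const std::size_t coord_count = 3; // TextureCube
    const bool is_array = true, depth_compare = true, lod_bias_enabled = true;
    const std::size_t max_coords = 4, max_inputs = 4;

    std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0);
    const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0);
    if (total_coord_count > max_coords || total_reg_count > max_inputs) {
        total_coord_count = std::min(total_coord_count, max_coords); // clamp: 5 -> 4
    }
    std::printf("coords used: %zu (regs requested: %zu)\n", total_coord_count, total_reg_count);
    return 0;
}
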
@@ -290,7 +290,9 @@ struct MetaTexture {
    const Sampler& sampler;
    Node array{};
    Node depth_compare{};
    std::vector<Node> extras;
    Node bias{};
    Node lod{};
    Node component{};
    u32 element{};
};

@@ -614,6 +616,7 @@ private:
    u32 DecodeHfma2(NodeBlock& bb, u32 pc);
    u32 DecodeConversion(NodeBlock& bb, u32 pc);
    u32 DecodeMemory(NodeBlock& bb, u32 pc);
    u32 DecodeTexture(NodeBlock& bb, u32 pc);
    u32 DecodeFloatSetPredicate(NodeBlock& bb, u32 pc);
    u32 DecodeIntegerSetPredicate(NodeBlock& bb, u32 pc);
    u32 DecodeHalfSetPredicate(NodeBlock& bb, u32 pc);

@@ -20,9 +20,9 @@ std::pair<Node, s64> FindOperation(const NodeBlock& code, s64 cursor,
        return {node, cursor};
    }
    if (const auto conditional = std::get_if<ConditionalNode>(node)) {
        const auto& code = conditional->GetCode();
        const auto [found, internal_cursor] =
            FindOperation(code, static_cast<s64>(code.size() - 1), operation_code);
        const auto& conditional_code = conditional->GetCode();
        const auto [found, internal_cursor] = FindOperation(
            conditional_code, static_cast<s64>(conditional_code.size() - 1), operation_code);
        if (found)
            return {found, cursor};
    }
@@ -58,8 +58,8 @@ Node ShaderIR::TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) {
        return nullptr;
    }
    if (const auto conditional = std::get_if<ConditionalNode>(tracked)) {
        const auto& code = conditional->GetCode();
        return TrackCbuf(tracked, code, static_cast<s64>(code.size()));
        const auto& conditional_code = conditional->GetCode();
        return TrackCbuf(tracked, conditional_code, static_cast<s64>(conditional_code.size()));
    }
    return nullptr;
}

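Both hunks above rename a local `code` to `conditional_code` because the local shadowed the enclosing `code` parameter of the same name, which is easy to misread and trips compiler shadowing warnings. A minimal illustration of the hazard, with made-up data:

#include <cstdio>

static int Sum(const int* code, int size) {
    int total = 0;
    {
        // Shadowing: this 'code' hides the parameter. Any use inside this
        // scope that was meant for the parameter silently refers to the local.
        const int code[2] = {100, 200};
        total += code[0]; // adds 100, not the caller's data
    }
    for (int i = 0; i < size; ++i) {
        total += code[i];
    }
    return total;
}

int main() {
    const int data[3] = {1, 2, 3};
    std::printf("%d\n", Sum(data, 3)); // 106
    return 0;
}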