From 970d9e57c817028ad4c2695adc8205e303de20b9 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp
Date: Thu, 14 Mar 2019 02:17:19 -0300
Subject: [PATCH 01/11] video_core: Add sirit as optional dependency with
 Vulkan

sirit is a runtime assembler for SPIR-V
---
 .gitmodules                   | 3 +++
 externals/CMakeLists.txt      | 5 +++++
 externals/sirit               | 1 +
 src/video_core/CMakeLists.txt | 5 ++++-
 4 files changed, 13 insertions(+), 1 deletion(-)
 create mode 160000 externals/sirit

diff --git a/.gitmodules b/.gitmodules
index 26b4e5272..3a49c4874 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -43,3 +43,6 @@
 [submodule "externals/zstd"]
     path = externals/zstd
     url = https://github.com/facebook/zstd
+[submodule "sirit"]
+    path = externals/sirit
+    url = https://github.com/ReinUsesLisp/sirit
diff --git a/externals/CMakeLists.txt b/externals/CMakeLists.txt
index aa3319eb1..3f8b6cda8 100644
--- a/externals/CMakeLists.txt
+++ b/externals/CMakeLists.txt
@@ -72,6 +72,11 @@ if (USE_DISCORD_PRESENCE)
     target_include_directories(discord-rpc INTERFACE ./discord-rpc/include)
 endif()
 
+# Sirit
+if (ENABLE_VULKAN)
+    add_subdirectory(sirit)
+endif()
+
 if (ENABLE_WEB_SERVICE)
     # LibreSSL
     set(LIBRESSL_SKIP_INSTALL ON CACHE BOOL "")
diff --git a/externals/sirit b/externals/sirit
new file mode 160000
index 000000000..f7c4b07a7
--- /dev/null
+++ b/externals/sirit
@@ -0,0 +1 @@
+Subproject commit f7c4b07a7e14edb1dcd93bc9879c823423705c2e
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 242a0d1cd..fe6b6ef7d 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -132,7 +132,7 @@ if (ENABLE_VULKAN)
         renderer_vulkan/vk_swapchain.cpp
         renderer_vulkan/vk_swapchain.h)
 
-    target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include)
+    target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include)
     target_compile_definitions(video_core PRIVATE HAS_VULKAN)
 endif()
 
@@ -140,3 +140,6 @@ create_target_directory_groups(video_core)
 
 target_link_libraries(video_core PUBLIC common core)
 target_link_libraries(video_core PRIVATE glad)
+if (ENABLE_VULKAN)
+    target_link_libraries(video_core PRIVATE sirit)
+endif()

From ad53b233c58fdf90da0e05283eb8146acb50fad3 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp
Date: Thu, 14 Mar 2019 02:19:02 -0300
Subject: [PATCH 02/11] vk_shader_decompiler: Declare and stub interface for a
 SPIR-V decompiler

---
 src/video_core/CMakeLists.txt                 |  2 +
 .../renderer_vulkan/vk_shader_decompiler.cpp  | 45 +++++++++++
 .../renderer_vulkan/vk_shader_decompiler.h    | 80 +++++++++++++++++++
 3 files changed, 127 insertions(+)
 create mode 100644 src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
 create mode 100644 src/video_core/renderer_vulkan/vk_shader_decompiler.h

diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index fe6b6ef7d..21e52db61 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -127,6 +127,8 @@ if (ENABLE_VULKAN)
         renderer_vulkan/vk_sampler_cache.h
         renderer_vulkan/vk_scheduler.cpp
         renderer_vulkan/vk_scheduler.h
+        renderer_vulkan/vk_shader_decompiler.cpp
+        renderer_vulkan/vk_shader_decompiler.h
         renderer_vulkan/vk_stream_buffer.cpp
         renderer_vulkan/vk_stream_buffer.h
         renderer_vulkan/vk_swapchain.cpp
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
new file mode 100644
index 000000000..3b8793840
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -0,0 +1,45 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <memory>
+
+#include "common/assert.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/shader_header.h"
+#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace Vulkan::VKShader {
+
+using namespace VideoCommon::Shader;
+
+using ShaderStage = Tegra::Engines::Maxwell3D::Regs::ShaderStage;
+
+class SPIRVDecompiler : public Sirit::Module {
+public:
+    explicit SPIRVDecompiler(const ShaderIR& ir, ShaderStage stage)
+        : Module(0x00010300), ir{ir}, stage{stage}, header{ir.GetHeader()} {}
+
+    void Decompile() {
+        UNIMPLEMENTED();
+    }
+
+    ShaderEntries GetShaderEntries() const {
+        UNIMPLEMENTED();
+        return {};
+    }
+
+private:
+    const ShaderIR& ir;
+    const ShaderStage stage;
+    const Tegra::Shader::Header header;
+};
+
+DecompilerResult Decompile(const VideoCommon::Shader::ShaderIR& ir, Maxwell::ShaderStage stage) {
+    auto decompiler = std::make_unique<SPIRVDecompiler>(ir, stage);
+    decompiler->Decompile();
+    return {std::move(decompiler), decompiler->GetShaderEntries()};
+}
+
+} // namespace Vulkan::VKShader
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
new file mode 100644
index 000000000..329d8fa38
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
@@ -0,0 +1,80 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include <sirit/sirit.h>
+
+#include "common/common_types.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+class ShaderIR;
+}
+
+namespace Vulkan::VKShader {
+
+using Maxwell = Tegra::Engines::Maxwell3D::Regs;
+
+using SamplerEntry = VideoCommon::Shader::Sampler;
+
+constexpr u32 DESCRIPTOR_SET = 0;
+
+class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer {
+public:
+    explicit constexpr ConstBufferEntry(const VideoCommon::Shader::ConstBuffer& entry, u32 index)
+        : VideoCommon::Shader::ConstBuffer{entry}, index{index} {}
+
+    constexpr u32 GetIndex() const {
+        return index;
+    }
+
+private:
+    u32 index{};
+};
+
+class GlobalBufferEntry {
+public:
+    explicit GlobalBufferEntry(u32 cbuf_index, u32 cbuf_offset)
+        : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset} {}
+
+    u32 GetCbufIndex() const {
+        return cbuf_index;
+    }
+
+    u32 GetCbufOffset() const {
+        return cbuf_offset;
+    }
+
+private:
+    u32 cbuf_index{};
+    u32 cbuf_offset{};
+};
+
+struct ShaderEntries {
+    u32 const_buffers_base_binding{};
+    u32 global_buffers_base_binding{};
+    u32 samplers_base_binding{};
+    std::vector<ConstBufferEntry> const_buffers;
+    std::vector<GlobalBufferEntry> global_buffers;
+    std::vector<SamplerEntry> samplers;
+    std::set<u32> attributes;
+    std::array<bool, Maxwell::NumClipDistances> clip_distances{};
+    std::size_t shader_length{};
+    Sirit::Id entry_function{};
+    std::vector<Sirit::Id> interfaces;
+};
+
+using DecompilerResult = std::pair<std::unique_ptr<Sirit::Module>, ShaderEntries>;
+
+DecompilerResult Decompile(const VideoCommon::Shader::ShaderIR& ir, Maxwell::ShaderStage stage);
+
+} // namespace Vulkan::VKShader

From 13aa664f3f0e778b8b4d4b01c28eee07677431e4 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp
Date: Thu, 14 Mar 2019 02:23:37 -0300
Subject: [PATCH 03/11] vk_shader_decompiler: Implement declarations

---
 .../renderer_vulkan/vk_shader_decompiler.cpp | 460 +++++++++++++++++-
 1 file changed, 457 insertions(+), 3 deletions(-)

diff --git 
a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 3b8793840..563b3eb51 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -2,38 +2,492 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include +#include +#include + +#include + #include +#include "common/alignment.h" #include "common/assert.h" +#include "common/common_types.h" +#include "common/logging/log.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_bytecode.h" #include "video_core/engines/shader_header.h" #include "video_core/renderer_vulkan/vk_shader_decompiler.h" #include "video_core/shader/shader_ir.h" namespace Vulkan::VKShader { +using Sirit::Id; +using Tegra::Shader::Attribute; +using Tegra::Shader::AttributeUse; +using Tegra::Shader::Register; using namespace VideoCommon::Shader; +using Maxwell = Tegra::Engines::Maxwell3D::Regs; using ShaderStage = Tegra::Engines::Maxwell3D::Regs::ShaderStage; +// TODO(Rodrigo): Use rasterizer's value +constexpr u32 MAX_CONSTBUFFER_ELEMENTS = 0x1000; +constexpr u32 STAGE_BINDING_STRIDE = 0x100; + +enum class Type { Bool, Bool2, Float, Int, Uint, HalfFloat }; + +struct SamplerImage { + Id image_type; + Id sampled_image_type; + Id sampler; +}; + +namespace { + +spv::Dim GetSamplerDim(const Sampler& sampler) { + switch (sampler.GetType()) { + case Tegra::Shader::TextureType::Texture1D: + return spv::Dim::Dim1D; + case Tegra::Shader::TextureType::Texture2D: + return spv::Dim::Dim2D; + case Tegra::Shader::TextureType::Texture3D: + return spv::Dim::Dim3D; + case Tegra::Shader::TextureType::TextureCube: + return spv::Dim::Cube; + default: + UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast(sampler.GetType())); + return spv::Dim::Dim2D; + } +} + +/// Returns true if an attribute index is one of the 32 generic attributes +constexpr bool IsGenericAttribute(Attribute::Index attribute) { + return attribute >= Attribute::Index::Attribute_0 && + attribute <= Attribute::Index::Attribute_31; +} + +/// Returns the location of a generic attribute +constexpr u32 GetGenericAttributeLocation(Attribute::Index attribute) { + ASSERT(IsGenericAttribute(attribute)); + return static_cast(attribute) - static_cast(Attribute::Index::Attribute_0); +} + +} // namespace + class SPIRVDecompiler : public Sirit::Module { public: explicit SPIRVDecompiler(const ShaderIR& ir, ShaderStage stage) - : Module(0x00010300), ir{ir}, stage{stage}, header{ir.GetHeader()} {} + : Module(0x00010300), ir{ir}, stage{stage}, header{ir.GetHeader()} { + AddCapability(spv::Capability::Shader); + AddExtension("SPV_KHR_storage_buffer_storage_class"); + AddExtension("SPV_KHR_variable_pointers"); + } void Decompile() { + AllocateBindings(); + + DeclareVertex(); + DeclareGeometry(); + DeclareFragment(); + DeclareRegisters(); + DeclarePredicates(); + DeclareLocalMemory(); + DeclareInternalFlags(); + DeclareInputAttributes(); + DeclareOutputAttributes(); + DeclareConstantBuffers(); + DeclareGlobalBuffers(); + DeclareSamplers(); + UNIMPLEMENTED(); } ShaderEntries GetShaderEntries() const { - UNIMPLEMENTED(); - return {}; + ShaderEntries entries; + entries.const_buffers_base_binding = const_buffers_base_binding; + entries.global_buffers_base_binding = global_buffers_base_binding; + entries.samplers_base_binding = samplers_base_binding; + for (const auto& cbuf : ir.GetConstantBuffers()) { + 
entries.const_buffers.emplace_back(cbuf.second, cbuf.first); + } + for (const auto& gmem : ir.GetGlobalMemoryBases()) { + entries.global_buffers.emplace_back(gmem.cbuf_index, gmem.cbuf_offset); + } + for (const auto& sampler : ir.GetSamplers()) { + entries.samplers.emplace_back(sampler); + } + for (const auto& attr : ir.GetInputAttributes()) { + entries.attributes.insert(GetGenericAttributeLocation(attr.first)); + } + entries.clip_distances = ir.GetClipDistances(); + entries.shader_length = ir.GetLength(); + entries.entry_function = execute_function; + entries.interfaces = interfaces; + return entries; } private: + static constexpr auto INTERNAL_FLAGS_COUNT = static_cast(InternalFlag::Amount); + static constexpr u32 CBUF_STRIDE = 16; + + void AllocateBindings() { + const u32 binding_base = static_cast(stage) * STAGE_BINDING_STRIDE; + u32 binding_iterator = binding_base; + + const auto Allocate = [&binding_iterator](std::size_t count) { + const u32 current_binding = binding_iterator; + binding_iterator += static_cast(count); + return current_binding; + }; + const_buffers_base_binding = Allocate(ir.GetConstantBuffers().size()); + global_buffers_base_binding = Allocate(ir.GetGlobalMemoryBases().size()); + samplers_base_binding = Allocate(ir.GetSamplers().size()); + + ASSERT_MSG(binding_iterator - binding_base < STAGE_BINDING_STRIDE, + "Stage binding stride is too small"); + } + + void DeclareVertex() { + if (stage != ShaderStage::Vertex) + return; + + DeclareVertexRedeclarations(); + } + + void DeclareGeometry() { + if (stage != ShaderStage::Geometry) + return; + + UNIMPLEMENTED(); + } + + void DeclareFragment() { + if (stage != ShaderStage::Fragment) + return; + + for (u32 rt = 0; rt < static_cast(frag_colors.size()); ++rt) { + if (!IsRenderTargetUsed(rt)) { + continue; + } + + const Id id = AddGlobalVariable(OpVariable(t_out_float4, spv::StorageClass::Output)); + Name(id, fmt::format("frag_color{}", rt)); + Decorate(id, spv::Decoration::Location, rt); + + frag_colors[rt] = id; + interfaces.push_back(id); + } + + if (header.ps.omap.depth) { + frag_depth = AddGlobalVariable(OpVariable(t_out_float, spv::StorageClass::Output)); + Name(frag_depth, "frag_depth"); + Decorate(frag_depth, spv::Decoration::BuiltIn, + static_cast(spv::BuiltIn::FragDepth)); + + interfaces.push_back(frag_depth); + } + + frag_coord = DeclareBuiltIn(spv::BuiltIn::FragCoord, spv::StorageClass::Input, t_in_float4, + "frag_coord"); + front_facing = DeclareBuiltIn(spv::BuiltIn::FrontFacing, spv::StorageClass::Input, + t_in_bool, "front_facing"); + } + + void DeclareRegisters() { + for (const u32 gpr : ir.GetRegisters()) { + const Id id = OpVariable(t_prv_float, spv::StorageClass::Private, v_float_zero); + Name(id, fmt::format("gpr_{}", gpr)); + registers.emplace(gpr, AddGlobalVariable(id)); + } + } + + void DeclarePredicates() { + for (const auto pred : ir.GetPredicates()) { + const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false); + Name(id, fmt::format("pred_{}", static_cast(pred))); + predicates.emplace(pred, AddGlobalVariable(id)); + } + } + + void DeclareLocalMemory() { + if (const u64 local_memory_size = header.GetLocalMemorySize(); local_memory_size > 0) { + const auto element_count = static_cast(Common::AlignUp(local_memory_size, 4) / 4); + const Id type_array = TypeArray(t_float, Constant(t_uint, element_count)); + const Id type_pointer = TypePointer(spv::StorageClass::Private, type_array); + Name(type_pointer, "LocalMemory"); + + local_memory = + OpVariable(type_pointer, 
spv::StorageClass::Private, ConstantNull(type_array)); + AddGlobalVariable(Name(local_memory, "local_memory")); + } + } + + void DeclareInternalFlags() { + constexpr std::array names = {"zero", "sign", "carry", + "overflow"}; + for (std::size_t flag = 0; flag < INTERNAL_FLAGS_COUNT; ++flag) { + const auto flag_code = static_cast(flag); + const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false); + internal_flags[flag] = AddGlobalVariable(Name(id, names[flag])); + } + } + + void DeclareInputAttributes() { + for (const auto element : ir.GetInputAttributes()) { + const Attribute::Index index = element.first; + if (!IsGenericAttribute(index)) { + continue; + } + + UNIMPLEMENTED_IF(stage == ShaderStage::Geometry); + + const u32 location = GetGenericAttributeLocation(index); + const Id id = OpVariable(t_in_float4, spv::StorageClass::Input); + Name(AddGlobalVariable(id), fmt::format("in_attr{}", location)); + input_attributes.emplace(index, id); + interfaces.push_back(id); + + Decorate(id, spv::Decoration::Location, location); + + if (stage != ShaderStage::Fragment) { + continue; + } + switch (header.ps.GetAttributeUse(location)) { + case AttributeUse::Constant: + Decorate(id, spv::Decoration::Flat); + break; + case AttributeUse::ScreenLinear: + Decorate(id, spv::Decoration::NoPerspective); + break; + case AttributeUse::Perspective: + // Default + break; + default: + UNREACHABLE_MSG("Unused attribute being fetched"); + } + } + } + + void DeclareOutputAttributes() { + for (const auto index : ir.GetOutputAttributes()) { + if (!IsGenericAttribute(index)) { + continue; + } + const auto location = GetGenericAttributeLocation(index); + const Id id = OpVariable(t_out_float4, spv::StorageClass::Output); + Name(AddGlobalVariable(id), fmt::format("out_attr{}", location)); + output_attributes.emplace(index, id); + interfaces.push_back(id); + + Decorate(id, spv::Decoration::Location, location); + } + } + + void DeclareConstantBuffers() { + u32 binding = const_buffers_base_binding; + for (const auto& entry : ir.GetConstantBuffers()) { + const auto [index, size] = entry; + const Id id = OpVariable(t_cbuf_ubo, spv::StorageClass::Uniform); + AddGlobalVariable(Name(id, fmt::format("cbuf_{}", index))); + + Decorate(id, spv::Decoration::Binding, binding++); + Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); + constant_buffers.emplace(index, id); + } + } + + void DeclareGlobalBuffers() { + u32 binding = global_buffers_base_binding; + for (const auto& entry : ir.GetGlobalMemoryBases()) { + const Id id = OpVariable(t_gmem_ssbo, spv::StorageClass::StorageBuffer); + AddGlobalVariable( + Name(id, fmt::format("gmem_{}_{}", entry.cbuf_index, entry.cbuf_offset))); + + Decorate(id, spv::Decoration::Binding, binding++); + Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); + global_buffers.emplace(entry, id); + } + } + + void DeclareSamplers() { + u32 binding = samplers_base_binding; + for (const auto& sampler : ir.GetSamplers()) { + const auto dim = GetSamplerDim(sampler); + const int depth = sampler.IsShadow() ? 1 : 0; + const int arrayed = sampler.IsArray() ? 1 : 0; + // TODO(Rodrigo): Sampled 1 indicates that the image will be used with a sampler. When + // SULD and SUST instructions are implemented, replace this value. 
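// An aside on the Sampled operand chosen below (a sketch, not part of this patch):
// in SPIR-V's OpTypeImage, Sampled = 1 declares an image that is read through a
// combined image-sampler, while Sampled = 2 declares a storage image accessed without
// one, which is what SULD/SUST support would eventually need. A hypothetical
// storage-image declaration would differ only in that operand and in naming a
// concrete format instead of Unknown:
//
//     const Id storage_image_type =
//         TypeImage(t_float, dim, depth, arrayed, false, 2, spv::ImageFormat::Rgba8);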
+ const int sampled = 1; + const Id image_type = + TypeImage(t_float, dim, depth, arrayed, false, sampled, spv::ImageFormat::Unknown); + const Id sampled_image_type = TypeSampledImage(image_type); + const Id pointer_type = + TypePointer(spv::StorageClass::UniformConstant, sampled_image_type); + const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); + AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex()))); + + sampler_images.insert( + {static_cast(sampler.GetIndex()), {image_type, sampled_image_type, id}}); + + Decorate(id, spv::Decoration::Binding, binding++); + Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); + } + } + + void DeclareVertexRedeclarations() { + vertex_index = DeclareBuiltIn(spv::BuiltIn::VertexIndex, spv::StorageClass::Input, + t_in_uint, "vertex_index"); + instance_index = DeclareBuiltIn(spv::BuiltIn::InstanceIndex, spv::StorageClass::Input, + t_in_uint, "instance_index"); + + bool is_point_size_declared = false; + bool is_clip_distances_declared = false; + for (const auto index : ir.GetOutputAttributes()) { + if (index == Attribute::Index::PointSize) { + is_point_size_declared = true; + } else if (index == Attribute::Index::ClipDistances0123 || + index == Attribute::Index::ClipDistances4567) { + is_clip_distances_declared = true; + } + } + + std::vector members; + members.push_back(t_float4); + if (is_point_size_declared) { + members.push_back(t_float); + } + if (is_clip_distances_declared) { + members.push_back(TypeArray(t_float, Constant(t_uint, 8))); + } + + const Id gl_per_vertex_struct = Name(TypeStruct(members), "PerVertex"); + Decorate(gl_per_vertex_struct, spv::Decoration::Block); + + u32 declaration_index = 0; + const auto MemberDecorateBuiltIn = [&](spv::BuiltIn builtin, std::string name, + bool condition) { + if (!condition) + return u32{}; + MemberName(gl_per_vertex_struct, declaration_index, name); + MemberDecorate(gl_per_vertex_struct, declaration_index, spv::Decoration::BuiltIn, + static_cast(builtin)); + return declaration_index++; + }; + + position_index = MemberDecorateBuiltIn(spv::BuiltIn::Position, "position", true); + point_size_index = + MemberDecorateBuiltIn(spv::BuiltIn::PointSize, "point_size", is_point_size_declared); + clip_distances_index = MemberDecorateBuiltIn(spv::BuiltIn::ClipDistance, "clip_distances", + is_clip_distances_declared); + + const Id type_pointer = TypePointer(spv::StorageClass::Output, gl_per_vertex_struct); + per_vertex = OpVariable(type_pointer, spv::StorageClass::Output); + AddGlobalVariable(Name(per_vertex, "per_vertex")); + interfaces.push_back(per_vertex); + } + + Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, + const std::string& name) { + const Id id = OpVariable(type, storage); + Decorate(id, spv::Decoration::BuiltIn, static_cast(builtin)); + AddGlobalVariable(Name(id, name)); + interfaces.push_back(id); + return id; + } + + bool IsRenderTargetUsed(u32 rt) const { + for (u32 component = 0; component < 4; ++component) { + if (header.ps.IsColorComponentOutputEnabled(rt, component)) { + return true; + } + } + return false; + } + const ShaderIR& ir; const ShaderStage stage; const Tegra::Shader::Header header; + + const Id t_void = Name(TypeVoid(), "void"); + + const Id t_bool = Name(TypeBool(), "bool"); + const Id t_bool2 = Name(TypeVector(t_bool, 2), "bool2"); + + const Id t_int = Name(TypeInt(32, true), "int"); + const Id t_int2 = Name(TypeVector(t_int, 2), "int2"); + const Id t_int3 = Name(TypeVector(t_int, 3), "int3"); + const Id t_int4 
= Name(TypeVector(t_int, 4), "int4"); + + const Id t_uint = Name(TypeInt(32, false), "uint"); + const Id t_uint2 = Name(TypeVector(t_uint, 2), "uint2"); + const Id t_uint3 = Name(TypeVector(t_uint, 3), "uint3"); + const Id t_uint4 = Name(TypeVector(t_uint, 4), "uint4"); + + const Id t_float = Name(TypeFloat(32), "float"); + const Id t_float2 = Name(TypeVector(t_float, 2), "float2"); + const Id t_float3 = Name(TypeVector(t_float, 3), "float3"); + const Id t_float4 = Name(TypeVector(t_float, 4), "float4"); + + const Id t_prv_bool = Name(TypePointer(spv::StorageClass::Private, t_bool), "prv_bool"); + const Id t_prv_float = Name(TypePointer(spv::StorageClass::Private, t_float), "prv_float"); + + const Id t_in_bool = Name(TypePointer(spv::StorageClass::Input, t_bool), "in_bool"); + const Id t_in_uint = Name(TypePointer(spv::StorageClass::Input, t_uint), "in_uint"); + const Id t_in_float = Name(TypePointer(spv::StorageClass::Input, t_float), "in_float"); + const Id t_in_float4 = Name(TypePointer(spv::StorageClass::Input, t_float4), "in_float4"); + + const Id t_out_float = Name(TypePointer(spv::StorageClass::Output, t_float), "out_float"); + const Id t_out_float4 = Name(TypePointer(spv::StorageClass::Output, t_float4), "out_float4"); + + const Id t_cbuf_float = TypePointer(spv::StorageClass::Uniform, t_float); + const Id t_cbuf_array = + Decorate(Name(TypeArray(t_float4, Constant(t_uint, MAX_CONSTBUFFER_ELEMENTS)), "CbufArray"), + spv::Decoration::ArrayStride, CBUF_STRIDE); + const Id t_cbuf_struct = MemberDecorate( + Decorate(TypeStruct(t_cbuf_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); + const Id t_cbuf_ubo = TypePointer(spv::StorageClass::Uniform, t_cbuf_struct); + + const Id t_gmem_float = TypePointer(spv::StorageClass::StorageBuffer, t_float); + const Id t_gmem_array = + Name(Decorate(TypeRuntimeArray(t_float), spv::Decoration::ArrayStride, 4u), "GmemArray"); + const Id t_gmem_struct = MemberDecorate( + Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); + const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct); + + const Id v_float_zero = Constant(t_float, 0.0f); + const Id v_true = ConstantTrue(t_bool); + const Id v_false = ConstantFalse(t_bool); + + Id per_vertex{}; + std::map registers; + std::map predicates; + Id local_memory{}; + std::array internal_flags{}; + std::map input_attributes; + std::map output_attributes; + std::map constant_buffers; + std::map global_buffers; + std::map sampler_images; + + Id instance_index{}; + Id vertex_index{}; + std::array frag_colors{}; + Id frag_depth{}; + Id frag_coord{}; + Id front_facing{}; + + u32 position_index{}; + u32 point_size_index{}; + u32 clip_distances_index{}; + + std::vector interfaces; + + u32 const_buffers_base_binding{}; + u32 global_buffers_base_binding{}; + u32 samplers_base_binding{}; + + Id execute_function{}; }; DecompilerResult Decompile(const VideoCommon::Shader::ShaderIR& ir, Maxwell::ShaderStage stage) { From ca51f9984076ec99ce463f9804ed4bcaa644e285 Mon Sep 17 00:00:00 2001 From: ReinUsesLisp Date: Thu, 14 Mar 2019 02:27:06 -0300 Subject: [PATCH 04/11] vk_shader_decompiler: Implement labels tree and flow --- .../renderer_vulkan/vk_shader_decompiler.cpp | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 563b3eb51..7af8e79df 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ 
b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -86,6 +86,7 @@ public: void Decompile() { AllocateBindings(); + AllocateLabels(); DeclareVertex(); DeclareGeometry(); @@ -100,7 +101,63 @@ public: DeclareGlobalBuffers(); DeclareSamplers(); + execute_function = + Emit(OpFunction(t_void, spv::FunctionControlMask::Inline, TypeFunction(t_void))); + Emit(OpLabel()); + + const u32 first_address = ir.GetBasicBlocks().begin()->first; + const Id loop_label = OpLabel("loop"); + const Id merge_label = OpLabel("merge"); + const Id dummy_label = OpLabel(); + const Id jump_label = OpLabel(); + continue_label = OpLabel("continue"); + + std::vector literals; + std::vector branch_labels; + for (const auto& pair : labels) { + const auto [literal, label] = pair; + literals.push_back(literal); + branch_labels.push_back(label); + } + + // TODO(Rodrigo): Figure out the actual depth of the flow stack, for now it seems unlikely + // that shaders will use 20 nested SSYs and PBKs. + constexpr u32 FLOW_STACK_SIZE = 20; + const Id flow_stack_type = TypeArray(t_uint, Constant(t_uint, FLOW_STACK_SIZE)); + jmp_to = Emit(OpVariable(TypePointer(spv::StorageClass::Function, t_uint), + spv::StorageClass::Function, Constant(t_uint, first_address))); + flow_stack = Emit(OpVariable(TypePointer(spv::StorageClass::Function, flow_stack_type), + spv::StorageClass::Function, ConstantNull(flow_stack_type))); + flow_stack_top = + Emit(OpVariable(t_func_uint, spv::StorageClass::Function, Constant(t_uint, 0))); + + Name(jmp_to, "jmp_to"); + Name(flow_stack, "flow_stack"); + Name(flow_stack_top, "flow_stack_top"); + + Emit(OpBranch(loop_label)); + Emit(loop_label); + Emit(OpLoopMerge(merge_label, continue_label, spv::LoopControlMask::Unroll)); + Emit(OpBranch(dummy_label)); + + Emit(dummy_label); + const Id default_branch = OpLabel(); + const Id jmp_to_load = Emit(OpLoad(t_uint, jmp_to)); + Emit(OpSelectionMerge(jump_label, spv::SelectionControlMask::MaskNone)); + Emit(OpSwitch(jmp_to_load, default_branch, literals, branch_labels)); + + Emit(default_branch); + Emit(OpReturn()); + UNIMPLEMENTED(); + + Emit(jump_label); + Emit(OpBranch(continue_label)); + Emit(continue_label); + Emit(OpBranch(loop_label)); + Emit(merge_label); + Emit(OpReturn()); + Emit(OpFunctionEnd()); } ShaderEntries GetShaderEntries() const { @@ -148,6 +205,13 @@ private: "Stage binding stride is too small"); } + void AllocateLabels() { + for (const auto& pair : ir.GetBasicBlocks()) { + const u32 address = pair.first; + labels.emplace(address, OpLabel(fmt::format("label_0x{:x}", address))); + } + } + void DeclareVertex() { if (stage != ShaderStage::Vertex) return; @@ -432,6 +496,8 @@ private: const Id t_prv_bool = Name(TypePointer(spv::StorageClass::Private, t_bool), "prv_bool"); const Id t_prv_float = Name(TypePointer(spv::StorageClass::Private, t_float), "prv_float"); + const Id t_func_uint = Name(TypePointer(spv::StorageClass::Function, t_uint), "func_uint"); + const Id t_in_bool = Name(TypePointer(spv::StorageClass::Input, t_bool), "in_bool"); const Id t_in_uint = Name(TypePointer(spv::StorageClass::Input, t_uint), "in_uint"); const Id t_in_float = Name(TypePointer(spv::StorageClass::Input, t_float), "in_float"); @@ -488,6 +554,11 @@ private: u32 samplers_base_binding{}; Id execute_function{}; + Id jmp_to{}; + Id flow_stack_top{}; + Id flow_stack{}; + Id continue_label{}; + std::map labels; }; DecompilerResult Decompile(const VideoCommon::Shader::ShaderIR& ir, Maxwell::ShaderStage stage) { From fec4eb9776bc063d109e5fd5f23906df45e715a1 Mon Sep 17 
00:00:00 2001 From: ReinUsesLisp Date: Thu, 14 Mar 2019 02:30:19 -0300 Subject: [PATCH 05/11] vk_shader_decompiler: Implement Visit --- .../renderer_vulkan/vk_shader_decompiler.cpp | 51 ++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 7af8e79df..5f174bb7f 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -149,7 +149,16 @@ public: Emit(default_branch); Emit(OpReturn()); - UNIMPLEMENTED(); + for (const auto& pair : ir.GetBasicBlocks()) { + const auto& [address, bb] = pair; + Emit(labels.at(address)); + + VisitBasicBlock(bb); + + const auto next_it = labels.lower_bound(address + 1); + const Id next_label = next_it != labels.end() ? next_it->second : default_branch; + Emit(OpBranch(next_label)); + } Emit(jump_label); Emit(OpBranch(continue_label)); @@ -451,6 +460,46 @@ private: interfaces.push_back(per_vertex); } + void VisitBasicBlock(const NodeBlock& bb) { + for (const Node node : bb) { + static_cast(Visit(node)); + } + } + + Id Visit(Node node) { + if (const auto operation = std::get_if(node)) { + UNIMPLEMENTED(); + + } else if (const auto gpr = std::get_if(node)) { + UNIMPLEMENTED(); + + } else if (const auto immediate = std::get_if(node)) { + UNIMPLEMENTED(); + + } else if (const auto predicate = std::get_if(node)) { + UNIMPLEMENTED(); + + } else if (const auto abuf = std::get_if(node)) { + UNIMPLEMENTED(); + + } else if (const auto cbuf = std::get_if(node)) { + UNIMPLEMENTED(); + + } else if (const auto gmem = std::get_if(node)) { + UNIMPLEMENTED(); + + } else if (const auto conditional = std::get_if(node)) { + UNIMPLEMENTED(); + + } else if (const auto comment = std::get_if(node)) { + Name(Emit(OpUndef(t_void)), comment->GetText()); + return {}; + } + + UNREACHABLE(); + return {}; + } + Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, const std::string& name) { const Id id = OpVariable(type, storage); From b758c861b06a37cdf823476898a289497d0ddf60 Mon Sep 17 00:00:00 2001 From: ReinUsesLisp Date: Thu, 14 Mar 2019 02:36:14 -0300 Subject: [PATCH 06/11] vk_shader_decompiler: Implement OperationCode decompilation interface --- .../renderer_vulkan/vk_shader_decompiler.cpp | 412 +++++++++++++++++- 1 file changed, 411 insertions(+), 1 deletion(-) diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 5f174bb7f..e4c3e3d9c 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -30,6 +30,7 @@ using namespace VideoCommon::Shader; using Maxwell = Tegra::Engines::Maxwell3D::Regs; using ShaderStage = Tegra::Engines::Maxwell3D::Regs::ShaderStage; +using Operation = const OperationNode&; // TODO(Rodrigo): Use rasterizer's value constexpr u32 MAX_CONSTBUFFER_ELEMENTS = 0x1000; @@ -73,6 +74,19 @@ constexpr u32 GetGenericAttributeLocation(Attribute::Index attribute) { return static_cast(attribute) - static_cast(Attribute::Index::Attribute_0); } +/// Returns true if an object has to be treated as precise +bool IsPrecise(Operation operand) { + const auto& meta = operand.GetMeta(); + + if (std::holds_alternative(meta)) { + return std::get(meta).precise; + } + if (std::holds_alternative(meta)) { + return std::get(meta).precise; + } + return false; +} + } // namespace class SPIRVDecompiler : public 
Sirit::Module { @@ -194,6 +208,10 @@ public: } private: + using OperationDecompilerFn = Id (SPIRVDecompiler::*)(Operation); + using OperationDecompilersArray = + std::array(OperationCode::Amount)>; + static constexpr auto INTERNAL_FLAGS_COUNT = static_cast(InternalFlag::Amount); static constexpr u32 CBUF_STRIDE = 16; @@ -468,7 +486,12 @@ private: Id Visit(Node node) { if (const auto operation = std::get_if(node)) { - UNIMPLEMENTED(); + const auto operation_index = static_cast(operation->GetCode()); + const auto decompiler = operation_decompilers[operation_index]; + if (decompiler == nullptr) { + UNREACHABLE_MSG("Operation decompiler {} not defined", operation_index); + } + return (this->*decompiler)(*operation); } else if (const auto gpr = std::get_if(node)) { UNIMPLEMENTED(); @@ -500,6 +523,184 @@ private: return {}; } + template + Id Unary(Operation operation) { + const Id type_def = GetTypeDefinition(result_type); + const Id op_a = VisitOperand(operation, 0); + + const Id value = BitcastFrom(Emit((this->*func)(type_def, op_a))); + if (IsPrecise(operation)) { + Decorate(value, spv::Decoration::NoContraction); + } + return value; + } + + template + Id Binary(Operation operation) { + const Id type_def = GetTypeDefinition(result_type); + const Id op_a = VisitOperand(operation, 0); + const Id op_b = VisitOperand(operation, 1); + + const Id value = BitcastFrom(Emit((this->*func)(type_def, op_a, op_b))); + if (IsPrecise(operation)) { + Decorate(value, spv::Decoration::NoContraction); + } + return value; + } + + template + Id Ternary(Operation operation) { + const Id type_def = GetTypeDefinition(result_type); + const Id op_a = VisitOperand(operation, 0); + const Id op_b = VisitOperand(operation, 1); + const Id op_c = VisitOperand(operation, 2); + + const Id value = BitcastFrom(Emit((this->*func)(type_def, op_a, op_b, op_c))); + if (IsPrecise(operation)) { + Decorate(value, spv::Decoration::NoContraction); + } + return value; + } + + template + Id Quaternary(Operation operation) { + const Id type_def = GetTypeDefinition(result_type); + const Id op_a = VisitOperand(operation, 0); + const Id op_b = VisitOperand(operation, 1); + const Id op_c = VisitOperand(operation, 2); + const Id op_d = VisitOperand(operation, 3); + + const Id value = + BitcastFrom(Emit((this->*func)(type_def, op_a, op_b, op_c, op_d))); + if (IsPrecise(operation)) { + Decorate(value, spv::Decoration::NoContraction); + } + return value; + } + + Id Assign(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id HNegate(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id HMergeF32(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id HMergeH0(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id HMergeH1(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id HPack2(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id LogicalAssign(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id LogicalPick2(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id LogicalAll2(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id LogicalAny2(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id Texture(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id TextureLod(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id TextureGather(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id TextureQueryDimensions(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id 
TextureQueryLod(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id TexelFetch(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id Branch(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id PushFlowStack(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id PopFlowStack(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id Exit(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id Discard(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id EmitVertex(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id EndPrimitive(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id YNegate(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, const std::string& name) { const Id id = OpVariable(type, storage); @@ -518,6 +719,215 @@ private: return false; } + template + Id VisitOperand(Operation operation, std::size_t operand_index) { + const Id value = Visit(operation[operand_index]); + + switch (type) { + case Type::Bool: + case Type::Bool2: + case Type::Float: + return value; + case Type::Int: + return Emit(OpBitcast(t_int, value)); + case Type::Uint: + return Emit(OpBitcast(t_uint, value)); + case Type::HalfFloat: + UNIMPLEMENTED(); + } + UNREACHABLE(); + return value; + } + + template + Id BitcastFrom(Id value) { + switch (type) { + case Type::Bool: + case Type::Bool2: + case Type::Float: + return value; + case Type::Int: + case Type::Uint: + return Emit(OpBitcast(t_float, value)); + case Type::HalfFloat: + UNIMPLEMENTED(); + } + UNREACHABLE(); + return value; + } + + template + Id BitcastTo(Id value) { + switch (type) { + case Type::Bool: + case Type::Bool2: + UNREACHABLE(); + case Type::Float: + return Emit(OpBitcast(t_float, value)); + case Type::Int: + return Emit(OpBitcast(t_int, value)); + case Type::Uint: + return Emit(OpBitcast(t_uint, value)); + case Type::HalfFloat: + UNIMPLEMENTED(); + } + UNREACHABLE(); + return value; + } + + Id GetTypeDefinition(Type type) { + switch (type) { + case Type::Bool: + return t_bool; + case Type::Bool2: + return t_bool2; + case Type::Float: + return t_float; + case Type::Int: + return t_int; + case Type::Uint: + return t_uint; + case Type::HalfFloat: + UNIMPLEMENTED(); + } + UNREACHABLE(); + return {}; + } + + static constexpr OperationDecompilersArray operation_decompilers = { + &SPIRVDecompiler::Assign, + + &SPIRVDecompiler::Ternary<&Module::OpSelect, Type::Float, Type::Bool, Type::Float, + Type::Float>, + + &SPIRVDecompiler::Binary<&Module::OpFAdd, Type::Float>, + &SPIRVDecompiler::Binary<&Module::OpFMul, Type::Float>, + &SPIRVDecompiler::Binary<&Module::OpFDiv, Type::Float>, + &SPIRVDecompiler::Ternary<&Module::OpFma, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpFNegate, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpFAbs, Type::Float>, + &SPIRVDecompiler::Ternary<&Module::OpFClamp, Type::Float>, + &SPIRVDecompiler::Binary<&Module::OpFMin, Type::Float>, + &SPIRVDecompiler::Binary<&Module::OpFMax, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpCos, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpSin, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpExp2, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpLog2, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpInverseSqrt, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpSqrt, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpRoundEven, Type::Float>, + 
&SPIRVDecompiler::Unary<&Module::OpFloor, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpCeil, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpTrunc, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpConvertSToF, Type::Float, Type::Int>, + &SPIRVDecompiler::Unary<&Module::OpConvertUToF, Type::Float, Type::Uint>, + + &SPIRVDecompiler::Binary<&Module::OpIAdd, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpIMul, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpSDiv, Type::Int>, + &SPIRVDecompiler::Unary<&Module::OpSNegate, Type::Int>, + &SPIRVDecompiler::Unary<&Module::OpSAbs, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpSMin, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpSMax, Type::Int>, + + &SPIRVDecompiler::Unary<&Module::OpConvertFToS, Type::Int, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpBitcast, Type::Int, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpShiftLeftLogical, Type::Int, Type::Int, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Int, Type::Int, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpShiftRightArithmetic, Type::Int, Type::Int, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpBitwiseAnd, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpBitwiseOr, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpBitwiseXor, Type::Int>, + &SPIRVDecompiler::Unary<&Module::OpNot, Type::Int>, + &SPIRVDecompiler::Quaternary<&Module::OpBitFieldInsert, Type::Int>, + &SPIRVDecompiler::Ternary<&Module::OpBitFieldSExtract, Type::Int>, + &SPIRVDecompiler::Unary<&Module::OpBitCount, Type::Int>, + + &SPIRVDecompiler::Binary<&Module::OpIAdd, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpIMul, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpUDiv, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpUMin, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpUMax, Type::Uint>, + &SPIRVDecompiler::Unary<&Module::OpConvertFToU, Type::Uint, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpBitcast, Type::Uint, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpShiftLeftLogical, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpShiftRightArithmetic, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpBitwiseAnd, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpBitwiseOr, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpBitwiseXor, Type::Uint>, + &SPIRVDecompiler::Unary<&Module::OpNot, Type::Uint>, + &SPIRVDecompiler::Quaternary<&Module::OpBitFieldInsert, Type::Uint>, + &SPIRVDecompiler::Ternary<&Module::OpBitFieldUExtract, Type::Uint>, + &SPIRVDecompiler::Unary<&Module::OpBitCount, Type::Uint>, + + &SPIRVDecompiler::Binary<&Module::OpFAdd, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFMul, Type::HalfFloat>, + &SPIRVDecompiler::Ternary<&Module::OpFma, Type::HalfFloat>, + &SPIRVDecompiler::Unary<&Module::OpFAbs, Type::HalfFloat>, + &SPIRVDecompiler::HNegate, + &SPIRVDecompiler::HMergeF32, + &SPIRVDecompiler::HMergeH0, + &SPIRVDecompiler::HMergeH1, + &SPIRVDecompiler::HPack2, + + &SPIRVDecompiler::LogicalAssign, + &SPIRVDecompiler::Binary<&Module::OpLogicalAnd, Type::Bool>, + &SPIRVDecompiler::Binary<&Module::OpLogicalOr, Type::Bool>, + &SPIRVDecompiler::Binary<&Module::OpLogicalNotEqual, Type::Bool>, + &SPIRVDecompiler::Unary<&Module::OpLogicalNot, Type::Bool>, + &SPIRVDecompiler::LogicalPick2, + &SPIRVDecompiler::LogicalAll2, + &SPIRVDecompiler::LogicalAny2, + + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::Float>, + 
&SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::Float>, + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool, Type::Float>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool, Type::Float>, + &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool, Type::Float>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool, Type::Float>, + &SPIRVDecompiler::Unary<&Module::OpIsNan, Type::Bool>, + + &SPIRVDecompiler::Binary<&Module::OpSLessThan, Type::Bool, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpIEqual, Type::Bool, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpSLessThanEqual, Type::Bool, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpSGreaterThan, Type::Bool, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Int>, + &SPIRVDecompiler::Binary<&Module::OpSGreaterThanEqual, Type::Bool, Type::Int>, + + &SPIRVDecompiler::Binary<&Module::OpULessThan, Type::Bool, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpIEqual, Type::Bool, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpULessThanEqual, Type::Bool, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpUGreaterThan, Type::Bool, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>, + + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool, Type::HalfFloat>, + + &SPIRVDecompiler::Texture, + &SPIRVDecompiler::TextureLod, + &SPIRVDecompiler::TextureGather, + &SPIRVDecompiler::TextureQueryDimensions, + &SPIRVDecompiler::TextureQueryLod, + &SPIRVDecompiler::TexelFetch, + + &SPIRVDecompiler::Branch, + &SPIRVDecompiler::PushFlowStack, + &SPIRVDecompiler::PopFlowStack, + &SPIRVDecompiler::Exit, + &SPIRVDecompiler::Discard, + + &SPIRVDecompiler::EmitVertex, + &SPIRVDecompiler::EndPrimitive, + + &SPIRVDecompiler::YNegate, + }; + const ShaderIR& ir; const ShaderStage stage; const Tegra::Shader::Header header; From d316d248ab2876653d53ad083aac255674f442c6 Mon Sep 17 00:00:00 2001 From: ReinUsesLisp Date: Thu, 14 Mar 2019 02:43:40 -0300 Subject: [PATCH 07/11] vk_shader_decompiler: Implement non-OperationCode visits --- .../renderer_vulkan/vk_shader_decompiler.cpp | 136 +++++++++++++++++- 1 file changed, 129 insertions(+), 7 deletions(-) diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index e4c3e3d9c..5060dbba9 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -494,25 +494,136 @@ private: return (this->*decompiler)(*operation); } else if (const auto gpr = std::get_if(node)) { - UNIMPLEMENTED(); + const u32 index = gpr->GetIndex(); + if (index == Register::ZeroIndex) { + return Constant(t_float, 0.0f); + } + return Emit(OpLoad(t_float, registers.at(index))); } else if (const auto immediate = std::get_if(node)) { - UNIMPLEMENTED(); + return BitcastTo(Constant(t_uint, immediate->GetValue())); } else if (const auto predicate = std::get_if(node)) { - 
UNIMPLEMENTED();
+            const auto value = [&]() -> Id {
+                switch (const auto index = predicate->GetIndex(); index) {
+                case Tegra::Shader::Pred::UnusedIndex:
+                    return v_true;
+                case Tegra::Shader::Pred::NeverExecute:
+                    return v_false;
+                default:
+                    return Emit(OpLoad(t_bool, predicates.at(index)));
+                }
+            }();
+            if (predicate->IsNegated()) {
+                return Emit(OpLogicalNot(t_bool, value));
+            }
+            return value;
 
         } else if (const auto abuf = std::get_if<AbufNode>(node)) {
-            UNIMPLEMENTED();
+            const auto attribute = abuf->GetIndex();
+            const auto element = abuf->GetElement();
+
+            switch (attribute) {
+            case Attribute::Index::Position:
+                if (stage != ShaderStage::Fragment) {
+                    UNIMPLEMENTED();
+                    break;
+                } else {
+                    if (element == 3) {
+                        return Constant(t_float, 1.0f);
+                    }
+                    return Emit(OpLoad(t_float, AccessElement(t_in_float, frag_coord, element)));
+                }
+            case Attribute::Index::TessCoordInstanceIDVertexID:
+                // TODO(Subv): Find out what the values are for the first two elements when inside a
+                // vertex shader, and what's the value of the fourth element when inside a Tess Eval
+                // shader.
+                ASSERT(stage == ShaderStage::Vertex);
+                switch (element) {
+                case 2:
+                    return BitcastFrom<Type::Uint>(Emit(OpLoad(t_uint, instance_index)));
+                case 3:
+                    return BitcastFrom<Type::Uint>(Emit(OpLoad(t_uint, vertex_index)));
+                }
+                UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element);
+                return Constant(t_float, 0);
+            case Attribute::Index::FrontFacing:
+                // TODO(Subv): Find out what the values are for the other elements.
+                ASSERT(stage == ShaderStage::Fragment);
+                if (element == 3) {
+                    const Id is_front_facing = Emit(OpLoad(t_bool, front_facing));
+                    const Id true_value =
+                        BitcastTo<Type::Float>(Constant(t_int, static_cast<s32>(-1)));
+                    const Id false_value = BitcastTo<Type::Float>(Constant(t_int, 0));
+                    return Emit(OpSelect(t_float, is_front_facing, true_value, false_value));
+                }
+                UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element);
+                return Constant(t_float, 0.0f);
+            default:
+                if (IsGenericAttribute(attribute)) {
+                    const Id pointer =
+                        AccessElement(t_in_float, input_attributes.at(attribute), element);
+                    return Emit(OpLoad(t_float, pointer));
+                }
+                break;
+            }
+            UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute));
 
         } else if (const auto cbuf = std::get_if<CbufNode>(node)) {
-            UNIMPLEMENTED();
+            const Node offset = cbuf->GetOffset();
+            const Id buffer_id = constant_buffers.at(cbuf->GetIndex());
+
+            Id buffer_index{};
+            Id buffer_element{};
+
+            if (const auto immediate = std::get_if<ImmediateNode>(offset)) {
+                // Direct access
+                const u32 offset_imm = immediate->GetValue();
+                ASSERT(offset_imm % 4 == 0);
+                buffer_index = Constant(t_uint, offset_imm / 16);
+                buffer_element = Constant(t_uint, (offset_imm / 4) % 4);
+
+            } else if (std::holds_alternative<OperationNode>(*offset)) {
+                // Indirect access
+                // TODO(Rodrigo): Use a uniform buffer stride of 4 and drop this slow math (which
+                // emits sub-optimal code on GLSL from my testing).
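// A worked example of the constant buffer addressing above and below (illustration
// only, not part of this patch): the UBO is declared as an array of vec4s with a
// 16-byte stride, so a byte offset is split into
//     buffer_index   = offset / 16        e.g. offset 36 -> vec4 index 2
//     buffer_element = (offset / 4) % 4   e.g. offset 36 -> component 1 (.y)
// The indirect path that follows performs the same split, but starting from a runtime
// 32-bit word index (offset / 4) that is first wrapped with OpUMod so it stays below
// MAX_CONSTBUFFER_ELEMENTS.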
+ const Id offset_id = BitcastTo(Visit(offset)); + const Id unsafe_offset = Emit(OpUDiv(t_uint, offset_id, Constant(t_uint, 4))); + const Id final_offset = Emit( + OpUMod(t_uint, unsafe_offset, Constant(t_uint, MAX_CONSTBUFFER_ELEMENTS - 1))); + buffer_index = Emit(OpUDiv(t_uint, final_offset, Constant(t_uint, 4))); + buffer_element = Emit(OpUMod(t_uint, final_offset, Constant(t_uint, 4))); + + } else { + UNREACHABLE_MSG("Unmanaged offset node type"); + } + + const Id pointer = Emit(OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0), + buffer_index, buffer_element)); + return Emit(OpLoad(t_float, pointer)); } else if (const auto gmem = std::get_if(node)) { - UNIMPLEMENTED(); + const Id gmem_buffer = global_buffers.at(gmem->GetDescriptor()); + const Id real = BitcastTo(Visit(gmem->GetRealAddress())); + const Id base = BitcastTo(Visit(gmem->GetBaseAddress())); + + Id offset = Emit(OpISub(t_uint, real, base)); + offset = Emit(OpUDiv(t_uint, offset, Constant(t_uint, 4u))); + return Emit(OpLoad(t_float, Emit(OpAccessChain(t_gmem_float, gmem_buffer, + Constant(t_uint, 0u), offset)))); } else if (const auto conditional = std::get_if(node)) { - UNIMPLEMENTED(); + // It's invalid to call conditional on nested nodes, use an operation instead + const Id true_label = OpLabel(); + const Id skip_label = OpLabel(); + Emit(OpBranchConditional(Visit(conditional->GetCondition()), true_label, skip_label)); + Emit(true_label); + + VisitBasicBlock(conditional->GetCode()); + + Emit(OpBranch(skip_label)); + Emit(skip_label); + return {}; } else if (const auto comment = std::get_if(node)) { Name(Emit(OpUndef(t_void)), comment->GetText()); @@ -719,6 +830,17 @@ private: return false; } + template + Id AccessElement(Id pointer_type, Id composite, Args... elements_) { + std::vector members; + auto elements = {elements_...}; + for (const auto element : elements) { + members.push_back(Constant(t_uint, element)); + } + + return Emit(OpAccessChain(pointer_type, composite, members)); + } + template Id VisitOperand(Operation operation, std::size_t operand_index) { const Id value = Visit(operation[operand_index]); From 676172e20d0a0c2f92ace94ca7cfb37a862fd8dc Mon Sep 17 00:00:00 2001 From: ReinUsesLisp Date: Thu, 14 Mar 2019 02:44:23 -0300 Subject: [PATCH 08/11] vk_shader_decompiler: Implement Assign and LogicalAssign --- .../renderer_vulkan/vk_shader_decompiler.cpp | 66 ++++++++++++++++++- 1 file changed, 64 insertions(+), 2 deletions(-) diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 5060dbba9..9f717b836 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -693,7 +693,49 @@ private: } Id Assign(Operation operation) { - UNIMPLEMENTED(); + const Node dest = operation[0]; + const Node src = operation[1]; + + Id target{}; + if (const auto gpr = std::get_if(dest)) { + if (gpr->GetIndex() == Register::ZeroIndex) { + // Writing to Register::ZeroIndex is a no op + return {}; + } + target = registers.at(gpr->GetIndex()); + + } else if (const auto abuf = std::get_if(dest)) { + target = [&]() -> Id { + switch (const auto attribute = abuf->GetIndex(); attribute) { + case Attribute::Index::Position: + return AccessElement(t_out_float, per_vertex, position_index, + abuf->GetElement()); + case Attribute::Index::PointSize: + return AccessElement(t_out_float, per_vertex, point_size_index); + case Attribute::Index::ClipDistances0123: + return AccessElement(t_out_float, 
per_vertex, clip_distances_index, + abuf->GetElement()); + case Attribute::Index::ClipDistances4567: + return AccessElement(t_out_float, per_vertex, clip_distances_index, + abuf->GetElement() + 4); + default: + if (IsGenericAttribute(attribute)) { + return AccessElement(t_out_float, output_attributes.at(attribute), + abuf->GetElement()); + } + UNIMPLEMENTED_MSG("Unhandled output attribute: {}", + static_cast(attribute)); + return {}; + } + }(); + + } else if (const auto lmem = std::get_if(dest)) { + Id address = BitcastTo(Visit(lmem->GetAddress())); + address = Emit(OpUDiv(t_uint, address, Constant(t_uint, 4))); + target = Emit(OpAccessChain(t_prv_float, local_memory, {address})); + } + + Emit(OpStore(target, Visit(src))); return {}; } @@ -723,7 +765,27 @@ private: } Id LogicalAssign(Operation operation) { - UNIMPLEMENTED(); + const Node dest = operation[0]; + const Node src = operation[1]; + + Id target{}; + if (const auto pred = std::get_if(dest)) { + ASSERT_MSG(!pred->IsNegated(), "Negating logical assignment"); + + const auto index = pred->GetIndex(); + switch (index) { + case Tegra::Shader::Pred::NeverExecute: + case Tegra::Shader::Pred::UnusedIndex: + // Writing to these predicates is a no-op + return {}; + } + target = predicates.at(index); + + } else if (const auto flag = std::get_if(dest)) { + target = internal_flags.at(static_cast(flag->GetFlag())); + } + + Emit(OpStore(target, Visit(src))); return {}; } From 4667ed8e22cff003cebfdd469b65b13b035cfe55 Mon Sep 17 00:00:00 2001 From: ReinUsesLisp Date: Thu, 14 Mar 2019 02:45:51 -0300 Subject: [PATCH 09/11] vk_shader_decompiler: Implement texture decompilation helper functions --- .../renderer_vulkan/vk_shader_decompiler.cpp | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 9f717b836..3f23ba749 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -804,6 +804,38 @@ private: return {}; } + Id GetTextureSampler(Operation operation) { + const auto meta = std::get_if(&operation.GetMeta()); + const auto entry = sampler_images.at(static_cast(meta->sampler.GetIndex())); + return Emit(OpLoad(entry.sampled_image_type, entry.sampler)); + } + + Id GetTextureImage(Operation operation) { + const auto meta = std::get_if(&operation.GetMeta()); + const auto entry = sampler_images.at(static_cast(meta->sampler.GetIndex())); + return Emit(OpImage(entry.image_type, GetTextureSampler(operation))); + } + + Id GetTextureCoordinates(Operation operation) { + const auto meta = std::get_if(&operation.GetMeta()); + std::vector coords; + for (std::size_t i = 0; i < operation.GetOperandsCount(); ++i) { + coords.push_back(Visit(operation[i])); + } + if (meta->sampler.IsArray()) { + const Id array_integer = BitcastTo(Visit(meta->array)); + coords.push_back(Emit(OpConvertSToF(t_float, array_integer))); + } + if (meta->sampler.IsShadow()) { + coords.push_back(Visit(meta->depth_compare)); + } + + const std::array t_float_lut = {nullptr, t_float2, t_float3, t_float4}; + return coords.size() == 1 + ? 
coords[0] + : Emit(OpCompositeConstruct(t_float_lut.at(coords.size() - 1), coords)); + } + Id Texture(Operation operation) { UNIMPLEMENTED(); return {}; From 58ad8dfac61c6532ebfb69179ed82f812a104a8a Mon Sep 17 00:00:00 2001 From: ReinUsesLisp Date: Thu, 14 Mar 2019 02:47:09 -0300 Subject: [PATCH 10/11] vk_shader_decompiler: Implement most common texture primitives --- .../renderer_vulkan/vk_shader_decompiler.cpp | 73 +++++++++++++++++-- 1 file changed, 65 insertions(+), 8 deletions(-) diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 3f23ba749..6c5c6500a 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -836,24 +836,81 @@ private: : Emit(OpCompositeConstruct(t_float_lut.at(coords.size() - 1), coords)); } + Id GetTextureElement(Operation operation, Id sample_value) { + const auto meta = std::get_if(&operation.GetMeta()); + ASSERT(meta); + return Emit(OpCompositeExtract(t_float, sample_value, meta->element)); + } + Id Texture(Operation operation) { - UNIMPLEMENTED(); - return {}; + const Id texture = Emit(OpImageSampleImplicitLod(t_float4, GetTextureSampler(operation), + GetTextureCoordinates(operation))); + return GetTextureElement(operation, texture); } Id TextureLod(Operation operation) { - UNIMPLEMENTED(); - return {}; + const auto meta = std::get_if(&operation.GetMeta()); + const Id texture = Emit(OpImageSampleExplicitLod( + t_float4, GetTextureSampler(operation), GetTextureCoordinates(operation), + spv::ImageOperandsMask::Lod, Visit(meta->lod))); + return GetTextureElement(operation, texture); } Id TextureGather(Operation operation) { - UNIMPLEMENTED(); - return {}; + const auto meta = std::get_if(&operation.GetMeta()); + const auto coords = GetTextureCoordinates(operation); + + Id texture; + if (meta->sampler.IsShadow()) { + texture = Emit(OpImageDrefGather(t_float4, GetTextureSampler(operation), coords, + Visit(meta->component))); + } else { + u32 component_value = 0; + if (meta->component) { + const auto component = std::get_if(meta->component); + ASSERT_MSG(component, "Component is not an immediate value"); + component_value = component->GetValue(); + } + texture = Emit(OpImageGather(t_float4, GetTextureSampler(operation), coords, + Constant(t_uint, component_value))); + } + + return GetTextureElement(operation, texture); } Id TextureQueryDimensions(Operation operation) { - UNIMPLEMENTED(); - return {}; + const auto meta = std::get_if(&operation.GetMeta()); + const auto image_id = GetTextureImage(operation); + AddCapability(spv::Capability::ImageQuery); + + if (meta->element == 3) { + return BitcastTo(Emit(OpImageQueryLevels(t_int, image_id))); + } + + const Id lod = VisitOperand(operation, 0); + const std::size_t coords_count = [&]() { + switch (const auto type = meta->sampler.GetType(); type) { + case Tegra::Shader::TextureType::Texture1D: + return 1; + case Tegra::Shader::TextureType::Texture2D: + case Tegra::Shader::TextureType::TextureCube: + return 2; + case Tegra::Shader::TextureType::Texture3D: + return 3; + default: + UNREACHABLE_MSG("Invalid texture type={}", static_cast(type)); + return 2; + } + }(); + + if (meta->element >= coords_count) { + return Constant(t_float, 0.0f); + } + + const std::array types = {t_int, t_int2, t_int3}; + const Id sizes = Emit(OpImageQuerySizeLod(types.at(coords_count - 1), image_id, lod)); + const Id size = Emit(OpCompositeExtract(t_int, sizes, meta->element)); + return 
BitcastTo(size); } Id TextureQueryLod(Operation operation) { From 75d23a36790a18e00e2d102ddd9ccbc4943d7a2f Mon Sep 17 00:00:00 2001 From: ReinUsesLisp Date: Thu, 14 Mar 2019 02:48:16 -0300 Subject: [PATCH 11/11] vk_shader_decompiler: Implement flow primitives --- .../renderer_vulkan/vk_shader_decompiler.cpp | 87 +++++++++++++++++-- 1 file changed, 82 insertions(+), 5 deletions(-) diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 6c5c6500a..e0a6f5e87 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -924,27 +924,93 @@ private: } Id Branch(Operation operation) { - UNIMPLEMENTED(); + const auto target = std::get_if(operation[0]); + UNIMPLEMENTED_IF(!target); + + Emit(OpStore(jmp_to, Constant(t_uint, target->GetValue()))); + BranchingOp([&]() { Emit(OpBranch(continue_label)); }); return {}; } Id PushFlowStack(Operation operation) { - UNIMPLEMENTED(); + const auto target = std::get_if(operation[0]); + ASSERT(target); + + const Id current = Emit(OpLoad(t_uint, flow_stack_top)); + const Id next = Emit(OpIAdd(t_uint, current, Constant(t_uint, 1))); + const Id access = Emit(OpAccessChain(t_func_uint, flow_stack, current)); + + Emit(OpStore(access, Constant(t_uint, target->GetValue()))); + Emit(OpStore(flow_stack_top, next)); return {}; } Id PopFlowStack(Operation operation) { - UNIMPLEMENTED(); + const Id current = Emit(OpLoad(t_uint, flow_stack_top)); + const Id previous = Emit(OpISub(t_uint, current, Constant(t_uint, 1))); + const Id access = Emit(OpAccessChain(t_func_uint, flow_stack, previous)); + const Id target = Emit(OpLoad(t_uint, access)); + + Emit(OpStore(flow_stack_top, previous)); + Emit(OpStore(jmp_to, target)); + BranchingOp([&]() { Emit(OpBranch(continue_label)); }); return {}; } Id Exit(Operation operation) { - UNIMPLEMENTED(); + switch (stage) { + case ShaderStage::Vertex: { + // TODO(Rodrigo): We should use VK_EXT_depth_range_unrestricted instead, but it doesn't + // seem to be working on Nvidia's drivers and Intel (mesa and blob) doesn't support it. + const Id position = AccessElement(t_float4, per_vertex, position_index); + Id depth = Emit(OpLoad(t_float, AccessElement(t_out_float, position, 2))); + depth = Emit(OpFAdd(t_float, depth, Constant(t_float, 1.0f))); + depth = Emit(OpFMul(t_float, depth, Constant(t_float, 0.5f))); + Emit(OpStore(AccessElement(t_out_float, position, 2), depth)); + break; + } + case ShaderStage::Fragment: { + const auto SafeGetRegister = [&](u32 reg) { + // TODO(Rodrigo): Replace with contains once C++20 releases + if (const auto it = registers.find(reg); it != registers.end()) { + return Emit(OpLoad(t_float, it->second)); + } + return Constant(t_float, 0.0f); + }; + + UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, + "Sample mask write is unimplemented"); + + // TODO(Rodrigo): Alpha testing + + // Write the color outputs using the data in the shader registers, disabled + // rendertargets/components are skipped in the register assignment. + u32 current_reg = 0; + for (u32 rt = 0; rt < Maxwell::NumRenderTargets; ++rt) { + // TODO(Subv): Figure out how dual-source blending is configured in the Switch. 
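// An assumed example of the register packing this loop implements (illustration only,
// not part of this patch): with RT0 writing .rgba and RT1 writing only .rg, the
// sequential assignment yields
//     RT0 = {gpr_0, gpr_1, gpr_2, gpr_3},  RT1.rg = {gpr_4, gpr_5},
// leaving current_reg == 6, so an enabled depth output would read
// SafeGetRegister(current_reg + 1) == gpr_7, matching the "two registers after the
// last color output" convention noted further down.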
+ for (u32 component = 0; component < 4; ++component) { + if (header.ps.IsColorComponentOutputEnabled(rt, component)) { + Emit(OpStore(AccessElement(t_out_float, frag_colors.at(rt), component), + SafeGetRegister(current_reg))); + ++current_reg; + } + } + } + if (header.ps.omap.depth) { + // The depth output is always 2 registers after the last color output, and + // current_reg already contains one past the last color register. + Emit(OpStore(frag_depth, SafeGetRegister(current_reg + 1))); + } + break; + } + } + + BranchingOp([&]() { Emit(OpReturn()); }); return {}; } Id Discard(Operation operation) { - UNIMPLEMENTED(); + BranchingOp([&]() { Emit(OpKill()); }); return {}; } @@ -1067,6 +1133,17 @@ private: return {}; } + void BranchingOp(std::function call) { + const Id true_label = OpLabel(); + const Id skip_label = OpLabel(); + Emit(OpSelectionMerge(skip_label, spv::SelectionControlMask::Flatten)); + Emit(OpBranchConditional(v_true, true_label, skip_label, 1, 0)); + Emit(true_label); + call(); + + Emit(skip_label); + } + static constexpr OperationDecompilersArray operation_decompilers = { &SPIRVDecompiler::Assign,