glsl: Cleanup and address feedback

ameerj 2021-06-03 20:57:52 -04:00
parent 5355568a2d
commit 34fdb6471d
10 changed files with 69 additions and 86 deletions

@@ -22,9 +22,9 @@ std::string_view InterpDecorator(Interpolation interp) {
case Interpolation::Smooth:
return "";
case Interpolation::Flat:
return "flat";
return "flat ";
case Interpolation::NoPerspective:
return "noperspective";
return "noperspective ";
}
throw InvalidArgument("Invalid interpolation {}", interp);
}
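
The interpolation decorators now carry their own trailing space, so a call site can splice them directly between the layout qualifier and the in keyword (the in_attr format string further down in this diff drops its explicit space to match). A minimal fragment-shader sketch of the declarations this produces; the attribute indices and the output name are illustrative rather than taken from the emitter:

#version 450
layout(location=0)flat in vec4 in_attr0;
layout(location=1)in vec4 in_attr1;
layout(location=0)out vec4 frag_color0;
void main(){
    frag_color0=in_attr0+in_attr1;
}
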
@@ -77,7 +77,6 @@ std::string_view SamplerType(TextureType type, bool is_depth) {
case TextureType::ColorArrayCube:
return "samplerCubeArrayShadow";
default:
fmt::print("Texture type: {}", type);
throw NotImplementedException("Texture type: {}", type);
}
}
@@ -191,29 +190,27 @@ void SetupOutPerVertex(EmitContext& ctx, std::string& header) {
if (!StoresPerVertexAttributes(ctx.stage)) {
return;
}
header += "out gl_PerVertex{";
header += "vec4 gl_Position;";
header += "out gl_PerVertex{vec4 gl_Position;";
if (ctx.info.stores_point_size) {
header += "float gl_PointSize;";
}
if (ctx.info.stores_clip_distance) {
header += "float gl_ClipDistance[];";
}
if (ctx.info.stores_viewport_index && ctx.supports_viewport_layer &&
if (ctx.info.stores_viewport_index && ctx.profile.support_gl_vertex_viewport_layer &&
ctx.stage != Stage::Geometry) {
header += "int gl_ViewportIndex;";
}
header += "};\n";
header += "};";
if (ctx.info.stores_viewport_index && ctx.stage == Stage::Geometry) {
header += "out int gl_ViewportIndex;";
}
}
} // namespace
} // Anonymous namespace
EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile& profile_,
const RuntimeInfo& runtime_info_)
: info{program.info}, profile{profile_}, runtime_info{runtime_info_} {
supports_viewport_layer = profile.support_gl_vertex_viewport_layer;
SetupExtensions(header);
stage = program.stage;
switch (program.stage) {
@@ -222,18 +219,18 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
stage_name = "vs";
break;
case Stage::TessellationControl:
stage_name = "tsc";
header += fmt::format("layout(vertices={})out;\n", program.invocations);
stage_name = "tcs";
header += fmt::format("layout(vertices={})out;", program.invocations);
break;
case Stage::TessellationEval:
stage_name = "tse";
header += fmt::format("layout({},{},{})in;\n", GetTessMode(runtime_info.tess_primitive),
stage_name = "tes";
header += fmt::format("layout({},{},{})in;", GetTessMode(runtime_info.tess_primitive),
GetTessSpacing(runtime_info.tess_spacing),
runtime_info.tess_clockwise ? "cw" : "ccw");
break;
case Stage::Geometry:
stage_name = "gs";
header += fmt::format("layout({})in;layout({},max_vertices={})out;\n",
header += fmt::format("layout({})in;layout({},max_vertices={})out;",
InputPrimitive(runtime_info.input_topology),
OutputPrimitive(program.output_topology), program.output_vertices);
break;
@@ -242,7 +239,7 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
break;
case Stage::Compute:
stage_name = "cs";
header += fmt::format("layout(local_size_x={},local_size_y={},local_size_z={}) in;\n",
header += fmt::format("layout(local_size_x={},local_size_y={},local_size_z={}) in;",
program.workgroup_size[0], program.workgroup_size[1],
program.workgroup_size[2]);
break;
@@ -251,7 +248,7 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
for (size_t index = 0; index < info.input_generics.size(); ++index) {
const auto& generic{info.input_generics[index]};
if (generic.used) {
header += fmt::format("layout(location={}){} in vec4 in_attr{}{};", index,
header += fmt::format("layout(location={}){}in vec4 in_attr{}{};", index,
InterpDecorator(generic.interpolation), index,
InputArrayDecorator(stage));
}
@@ -260,11 +257,8 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
if (!info.uses_patches[index]) {
continue;
}
if (stage == Stage::TessellationControl) {
header += fmt::format("layout(location={})patch out vec4 patch{};", index, index);
} else {
header += fmt::format("layout(location={})patch in vec4 patch{};", index, index);
}
const auto qualifier{stage == Stage::TessellationControl ? "out" : "in"};
header += fmt::format("layout(location={})patch {} vec4 patch{};", index, qualifier, index);
}
for (size_t index = 0; index < info.stores_frag_color.size(); ++index) {
if (!info.stores_frag_color[index]) {
@@ -278,18 +272,18 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
DefineGenericOutput(index, program.invocations);
}
}
header += "\n";
DefineConstantBuffers(bindings);
DefineStorageBuffers(bindings);
SetupImages(bindings);
SetupTextures(bindings);
DefineHelperFunctions();
}
void EmitContext::SetupExtensions(std::string&) {
// TODO: track this usage
header += "#extension GL_ARB_sparse_texture2 : enable\n";
header += "#extension GL_EXT_texture_shadow_lod : enable\n";
header += "#extension GL_EXT_shader_image_load_formatted : enable\n";
header += "#extension GL_ARB_sparse_texture2 : enable\n"
"#extension GL_EXT_texture_shadow_lod : enable\n"
"#extension GL_EXT_shader_image_load_formatted : enable\n";
if (info.uses_int64) {
header += "#extension GL_ARB_gpu_shader_int64 : enable\n";
}
@@ -312,13 +306,14 @@ void EmitContext::SetupExtensions(std::string&) {
}
if (info.uses_subgroup_invocation_id || info.uses_subgroup_mask || info.uses_subgroup_vote ||
info.uses_subgroup_shuffles || info.uses_fswzadd) {
header += "#extension GL_ARB_shader_ballot : enable\n";
header += "#extension GL_ARB_shader_group_vote : enable\n";
header += "#extension GL_ARB_shader_ballot : enable\n"
"#extension GL_ARB_shader_group_vote : enable\n";
if (!info.uses_int64) {
header += "#extension GL_ARB_gpu_shader_int64 : enable\n";
}
}
if (info.stores_viewport_index && supports_viewport_layer && stage != Stage::Geometry) {
if (info.stores_viewport_index && profile.support_gl_vertex_viewport_layer &&
stage != Stage::Geometry) {
header += "#extension GL_ARB_shader_viewport_layer_array : enable\n";
}
}
@@ -386,46 +381,45 @@ void EmitContext::DefineGenericOutput(size_t index, u32 invocations) {
std::fill_n(output_generics[index].begin() + element, num_components, element_info);
element += num_components;
}
header += "\n";
}
void EmitContext::DefineHelperFunctions() {
header += "\n#define ftoi floatBitsToInt\n#define ftou floatBitsToUint\n"
"#define itof intBitsToFloat\n#define utof uintBitsToFloat\n";
if (info.uses_global_increment || info.uses_shared_increment) {
header += "uint CasIncrement(uint op_a,uint op_b){return(op_a>=op_b)?0u:(op_a+1u);}\n";
header += "uint CasIncrement(uint op_a,uint op_b){return op_a>=op_b?0u:(op_a+1u);}";
}
if (info.uses_global_decrement || info.uses_shared_decrement) {
header += "uint CasDecrement(uint op_a,uint "
"op_b){return(op_a==0||op_a>op_b)?op_b:(op_a-1u);}\n";
"op_b){return op_a==0||op_a>op_b?op_b:(op_a-1u);}";
}
if (info.uses_atomic_f32_add) {
header += "uint CasFloatAdd(uint op_a,float op_b){return "
"ftou(utof(op_a)+op_b);}\n";
"ftou(utof(op_a)+op_b);}";
}
if (info.uses_atomic_f32x2_add) {
header += "uint CasFloatAdd32x2(uint op_a,vec2 op_b){return "
"packHalf2x16(unpackHalf2x16(op_a)+op_b);}\n";
"packHalf2x16(unpackHalf2x16(op_a)+op_b);}";
}
if (info.uses_atomic_f32x2_min) {
header += "uint CasFloatMin32x2(uint op_a,vec2 op_b){return "
"packHalf2x16(min(unpackHalf2x16(op_a),op_b));}\n";
"packHalf2x16(min(unpackHalf2x16(op_a),op_b));}";
}
if (info.uses_atomic_f32x2_max) {
header += "uint CasFloatMax32x2(uint op_a,vec2 op_b){return "
"packHalf2x16(max(unpackHalf2x16(op_a),op_b));}\n";
"packHalf2x16(max(unpackHalf2x16(op_a),op_b));}";
}
if (info.uses_atomic_f16x2_add) {
header += "uint CasFloatAdd16x2(uint op_a,f16vec2 op_b){return "
"packFloat2x16(unpackFloat2x16(op_a)+op_b);}\n";
"packFloat2x16(unpackFloat2x16(op_a)+op_b);}";
}
if (info.uses_atomic_f16x2_min) {
header += "uint CasFloatMin16x2(uint op_a,f16vec2 op_b){return "
"packFloat2x16(min(unpackFloat2x16(op_a),op_b));}\n";
"packFloat2x16(min(unpackFloat2x16(op_a),op_b));}";
}
if (info.uses_atomic_f16x2_max) {
header += "uint CasFloatMax16x2(uint op_a,f16vec2 op_b){return "
"packFloat2x16(max(unpackFloat2x16(op_a),op_b));}\n";
"packFloat2x16(max(unpackFloat2x16(op_a),op_b));}";
}
if (info.uses_atomic_s32_min) {
header += "uint CasMinS32(uint op_a,uint op_b){return uint(min(int(op_a),int(op_b)));}";
@@ -534,6 +528,9 @@ void EmitContext::SetupImages(Bindings& bindings) {
}
bindings.image += desc.count;
}
}
void EmitContext::SetupTextures(Bindings& bindings) {
texture_buffer_bindings.reserve(info.texture_buffer_descriptors.size());
for (const auto& desc : info.texture_buffer_descriptors) {
texture_buffer_bindings.push_back(bindings.texture);

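Taken together, the changes above build the shader header as one compact run of statements per stage instead of many small appends separated by stray newlines. A rough sketch of the per-vertex block SetupOutPerVertex now emits for a vertex shader that stores point size (clip distance and viewport index omitted); the body of main is illustrative:

#version 450
out gl_PerVertex{vec4 gl_Position;float gl_PointSize;};
void main(){
    gl_Position=vec4(0.0,0.0,0.0,1.0);
    gl_PointSize=1.0;
}
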
@@ -31,7 +31,7 @@ struct Program;
namespace Shader::Backend::GLSL {
struct GenericElementInfo {
std::string name{};
std::string name;
u32 first_element{};
u32 num_components{};
};
@@ -159,7 +159,6 @@ public:
bool uses_y_direction{};
bool uses_cc_carry{};
bool supports_viewport_layer{};
private:
void SetupExtensions(std::string& header);
@@ -169,6 +168,7 @@ private:
void DefineHelperFunctions();
std::string DefineGlobalMemoryFunctions();
void SetupImages(Bindings& bindings);
void SetupTextures(Bindings& bindings);
};
} // namespace Shader::Backend::GLSL

@@ -83,7 +83,6 @@ void Invoke(EmitContext& ctx, IR::Inst* inst) {
}
void EmitInst(EmitContext& ctx, IR::Inst* inst) {
// ctx.Add("/* $ {} $ */", inst->GetOpcode());
switch (inst->GetOpcode()) {
#define OPCODE(name, result_type, ...) \
case IR::Opcode::name: \
@@ -134,7 +133,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
}
break;
case IR::AbstractSyntaxNode::Type::If:
ctx.Add("if ({}){{", ctx.var_alloc.Consume(node.data.if_node.cond));
ctx.Add("if({}){{", ctx.var_alloc.Consume(node.data.if_node.cond));
break;
case IR::AbstractSyntaxNode::Type::EndIf:
ctx.Add("}}");
@@ -156,12 +155,10 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
ctx.Add("for(;;){{");
break;
case IR::AbstractSyntaxNode::Type::Repeat:
ctx.Add("if({}){{", ctx.var_alloc.Consume(node.data.repeat.cond));
ctx.Add("continue;\n}}else{{");
ctx.Add("break;\n}}\n}}");
ctx.Add("if({}){{continue;}}else{{break;}}}}",
ctx.var_alloc.Consume(node.data.repeat.cond));
break;
default:
fmt::print("{}", node.type);
throw NotImplementedException("AbstractSyntaxNode::Type {}", node.type);
break;
}
@@ -200,7 +197,7 @@ std::string EmitGLSL(const Profile& profile, const RuntimeInfo& runtime_info, IR
EmitContext ctx{program, bindings, profile, runtime_info};
Precolor(program);
EmitCode(ctx, program);
const std::string version{fmt::format("#version 460{}\n", GlslVersionSpecifier(ctx))};
const std::string version{fmt::format("#version 450{}\n", GlslVersionSpecifier(ctx))};
ctx.header.insert(0, version);
if (program.local_memory_size > 0) {
ctx.header += fmt::format("uint lmem[{}];", program.local_memory_size / 4);
@@ -225,10 +222,8 @@ std::string EmitGLSL(const Profile& profile, const RuntimeInfo& runtime_info, IR
if (program.info.uses_subgroup_shuffles) {
ctx.header += "bool shfl_in_bounds;";
}
ctx.header += "\n";
ctx.code.insert(0, ctx.header);
ctx.code += "}";
// fmt::print("\n{}\n", ctx.code);
ctx.code += '}';
return ctx.code;
}
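
In the Repeat case above, the loop tail is now emitted as a single if(cond){continue;}else{break;}} string, whose final brace closes the for(;;){ opened by the Loop node. A small compute-shader sketch of the emitted control-flow shape; the counter and condition names are illustrative:

#version 450
layout(local_size_x=1)in;
void main(){
    uint i=0u;
    for(;;){
        i=i+1u;
        bool loop_cond=i<8u;
        if(loop_cond){continue;}else{break;}}
}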

@@ -11,7 +11,7 @@
namespace Shader::Backend::GLSL {
namespace {
static constexpr std::string_view cas_loop{R"(for (;;){{
constexpr const char cas_loop[]{R"(for (;;){{
uint old_value={};
{}=atomicCompSwap({},old_value,{}({},{}));
if ({}==old_value){{break;}}
@@ -21,7 +21,7 @@ void SharedCasFunction(EmitContext& ctx, IR::Inst& inst, std::string_view offset
std::string_view value, std::string_view function) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
const std::string smem{fmt::format("smem[{}>>2]", offset)};
ctx.Add(cas_loop.data(), smem, ret, smem, function, smem, value, ret);
ctx.Add(cas_loop, smem, ret, smem, function, smem, value, ret);
}
void SsboCasFunction(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -29,7 +29,7 @@ void SsboCasFunction(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
const std::string ssbo{fmt::format("{}_ssbo{}[{}>>2]", ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset))};
ctx.Add(cas_loop.data(), ssbo, ret, ssbo, function, ssbo, value, ret);
ctx.Add(cas_loop, ssbo, ret, ssbo, function, ssbo, value, ret);
}
void SsboCasFunctionF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -38,10 +38,10 @@ void SsboCasFunctionF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& bindi
const std::string ssbo{fmt::format("{}_ssbo{}[{}>>2]", ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset))};
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
ctx.Add(cas_loop.data(), ssbo, ret, ssbo, function, ssbo, value, ret);
ctx.Add(cas_loop, ssbo, ret, ssbo, function, ssbo, value, ret);
ctx.AddF32("{}=utof({});", inst, ret);
}
} // namespace
} // Anonymous namespace
void EmitSharedAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {

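For reference, the cas_loop template expands into a plain atomicCompSwap retry loop around one of the helpers defined in DefineHelperFunctions. A standalone compute-shader sketch of the expansion for a shared-memory increment; the workgroup size, offsets, and variable names are illustrative:

#version 450
layout(local_size_x=16)in;
shared uint smem[16];
// Helper string emitted by DefineHelperFunctions:
uint CasIncrement(uint op_a,uint op_b){return op_a>=op_b?0u:(op_a+1u);}
void main(){
    smem[gl_LocalInvocationID.x]=0u;
    memoryBarrierShared();
    barrier();
    uint offset=4u*gl_LocalInvocationID.x; // byte offset, hence the >>2 below
    uint value=8u;
    uint ret;
    // Expansion of the cas_loop template (formatting mirrors the raw string):
    for (;;){
    uint old_value=smem[offset>>2];
    ret=atomicCompSwap(smem[offset>>2],old_value,CasIncrement(smem[offset>>2],value));
    if (ret==old_value){break;}
    }
}
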
@@ -10,7 +10,7 @@
namespace Shader::Backend::GLSL {
namespace {
static void Alias(IR::Inst& inst, const IR::Value& value) {
void Alias(IR::Inst& inst, const IR::Value& value) {
if (value.IsImmediate()) {
return;
}

@@ -10,13 +10,14 @@
namespace Shader::Backend::GLSL {
namespace {
static constexpr std::string_view SWIZZLE{"xyzw"};
constexpr std::string_view SWIZZLE{"xyzw"};
void CompositeInsert(EmitContext& ctx, std::string_view result, std::string_view composite,
std::string_view object, u32 index) {
ctx.Add("{}={};", result, composite);
ctx.Add("{}.{}={};", result, SWIZZLE[index], object);
}
} // namespace
} // Anonymous namespace
void EmitCompositeConstructU32x2(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2) {
ctx.AddU32x2("{}=uvec2({},{});", inst, e1, e2);

@@ -7,6 +7,7 @@
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
namespace Shader::Backend::GLSL {
namespace {
@@ -39,11 +40,10 @@ std::string OutputVertexIndex(EmitContext& ctx, std::string_view vertex) {
return "";
}
}
} // namespace
} // Anonymous namespace
void EmitGetCbufU8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] const IR::Value& binding,
[[maybe_unused]] const IR::Value& offset) {
void EmitGetCbufU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
if (offset.IsImmediate()) {
ctx.AddU32("{}=bitfieldExtract(ftou({}_cbuf{}[{}].{}),int({}),8);", inst, ctx.stage_name,
binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
@@ -55,9 +55,8 @@ void EmitGetCbufU8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst&
}
}
void EmitGetCbufS8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] const IR::Value& binding,
[[maybe_unused]] const IR::Value& offset) {
void EmitGetCbufS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
if (offset.IsImmediate()) {
ctx.AddU32("{}=bitfieldExtract(ftoi({}_cbuf{}[{}].{}),int({}),8);", inst, ctx.stage_name,
binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
@@ -69,9 +68,8 @@ void EmitGetCbufS8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst&
}
}
void EmitGetCbufU16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] const IR::Value& binding,
[[maybe_unused]] const IR::Value& offset) {
void EmitGetCbufU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
if (offset.IsImmediate()) {
ctx.AddU32("{}=bitfieldExtract(ftou({}_cbuf{}[{}].{}),int({}),16);", inst, ctx.stage_name,
binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
@@ -84,9 +82,8 @@ void EmitGetCbufU16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst
}
}
void EmitGetCbufS16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] const IR::Value& binding,
[[maybe_unused]] const IR::Value& offset) {
void EmitGetCbufS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
if (offset.IsImmediate()) {
ctx.AddU32("{}=bitfieldExtract(ftoi({}_cbuf{}[{}].{}),int({}),16);", inst, ctx.stage_name,
binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
@@ -196,7 +193,7 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
}
void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view value,
[[maybe_unused]] std::string_view vertex) {
std::string_view vertex) {
if (IR::IsGeneric(attr)) {
const u32 index{IR::GenericAttributeIndex(attr)};
const u32 element{IR::GenericAttributeElement(attr)};
@@ -223,7 +220,7 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
ctx.Add("gl_Position.{}={};", swizzle, value);
break;
case IR::Attribute::ViewportIndex:
if (ctx.stage != Stage::Geometry && !ctx.supports_viewport_layer) {
if (ctx.stage != Stage::Geometry && !ctx.profile.support_gl_vertex_viewport_layer) {
// LOG_WARNING(..., "Shader stores viewport index but device does not support viewport
// layer extension");
break;
@@ -247,8 +244,7 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
}
}
void EmitGetPatch([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
[[maybe_unused]] IR::Patch patch) {
void EmitGetPatch(EmitContext& ctx, IR::Inst& inst, IR::Patch patch) {
if (!IR::IsGeneric(patch)) {
throw NotImplementedException("Non-generic patch load");
}
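
The EmitGetCbuf* helpers above read 8- and 16-bit constant-buffer values by extracting a bit range from the 32-bit word with bitfieldExtract, via the ftou/ftoi defines from DefineHelperFunctions. A standalone sketch of the pattern for an immediate byte offset of 33; the uniform block declaration and names are illustrative, not the emitter's exact output:

#version 450
#define ftou floatBitsToUint
layout(std140,binding=0)uniform cbuf_block{vec4 cbuf0[4];};
layout(location=0)out vec4 frag_color0;
void main(){
    // Byte 33 sits in vec4 index 33/16=2, word .x (bytes 32..35), at bit offset (33%4)*8=8.
    uint byte_value=bitfieldExtract(ftou(cbuf0[2].x),int(8),8);
    frag_color0=vec4(float(byte_value)/255.0);
}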

@@ -25,7 +25,7 @@ void Compare(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string
bool Precise(IR::Inst& inst) {
return {inst.Flags<IR::FpControl>().no_contraction};
}
} // namespace
} // Anonymous namespace
void EmitFPAbs16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {

@@ -102,7 +102,7 @@ IR::Inst* PrepareSparse(IR::Inst& inst) {
}
return sparse_inst;
}
} // namespace
} // Anonymous namespace
void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] const IR::Value& index,

@@ -25,9 +25,7 @@ void EmitPhi(EmitContext& ctx, IR::Inst& phi) {
}
}
void EmitVoid(EmitContext& ctx) {
// NotImplemented();
}
void EmitVoid(EmitContext& ctx) {}
void EmitReference(EmitContext& ctx, const IR::Value& value) {
ctx.var_alloc.Consume(value);
@@ -94,13 +92,9 @@ void EmitDeviceMemoryBarrier(EmitContext& ctx) {
NotImplemented();
}
void EmitPrologue(EmitContext& ctx) {
// NotImplemented();
}
void EmitPrologue(EmitContext& ctx) {}
void EmitEpilogue(EmitContext& ctx) {
// NotImplemented();
}
void EmitEpilogue(EmitContext& ctx) {}
void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
ctx.Add("EmitStreamVertex(int({}));", ctx.var_alloc.Consume(stream));