Mirror of https://github.com/yuzu-mirror/yuzu.git
shader: Rework varyings and implement passthrough geometry shaders
Put all varyings into a single std::bitset with helpers to access it. Implement passthrough geometry shaders using the host driver's passthrough support.
This commit is contained in:
Parent: 4f052a1f39
Commit: 7dafa96ab5
29 changed files with 351 additions and 337 deletions
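Before the per-file hunks: the heart of the rework is the new Shader::VaryingState type (added below in src/shader_recompiler/varying_state.h), which folds the old per-purpose flags (loads_position, stores_generics, ...) into one 256-bit mask indexed by IR::Attribute. The following is a minimal, self-contained sketch of that idea; the enum values and the main() driver are illustrative only and not part of the diff.

```cpp
// Sketch of the VaryingState helper this commit introduces in
// src/shader_recompiler/varying_state.h. Attribute values here are
// illustrative; the real enum lives in frontend/ir/attribute.h.
#include <bitset>
#include <cstddef>
#include <cstdio>

enum class Attribute : std::size_t { Generic0X = 32, Generic0Y, Generic0Z, Generic0W };

struct VaryingState {
    std::bitset<256> mask{};

    void Set(Attribute attribute, bool state = true) {
        mask[static_cast<std::size_t>(attribute)] = state;
    }
    [[nodiscard]] bool Generic(std::size_t index, std::size_t component) const noexcept {
        return mask[static_cast<std::size_t>(Attribute::Generic0X) + index * 4 + component];
    }
    [[nodiscard]] bool Generic(std::size_t index) const noexcept {
        return Generic(index, 0) || Generic(index, 1) || Generic(index, 2) || Generic(index, 3);
    }
};

int main() {
    VaryingState stores;
    stores.Set(Attribute::Generic0Y); // the analysis pass marks a written component
    // Prints "1 0": generic vector 0 is written, generic vector 1 is not.
    std::printf("%d %d\n", static_cast<int>(stores.Generic(0)), static_cast<int>(stores.Generic(1)));
}
```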
|
@ -229,6 +229,7 @@ add_library(shader_recompiler STATIC
|
|||
program_header.h
|
||||
runtime_info.h
|
||||
shader_info.h
|
||||
varying_state.h
|
||||
)
|
||||
|
||||
target_link_libraries(shader_recompiler PUBLIC common fmt::fmt sirit)
|
||||
|
|
|
@ -83,14 +83,13 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
|
|||
break;
|
||||
}
|
||||
const std::string_view attr_stage{stage == Stage::Fragment ? "fragment" : "vertex"};
|
||||
for (size_t index = 0; index < info.input_generics.size(); ++index) {
|
||||
const auto& generic{info.input_generics[index]};
|
||||
if (generic.used) {
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
if (info.loads.Generic(index)) {
|
||||
Add("{}ATTRIB in_attr{}[]={{{}.attrib[{}..{}]}};",
|
||||
InterpDecorator(generic.interpolation), index, attr_stage, index, index);
|
||||
InterpDecorator(info.interpolation[index]), index, attr_stage, index, index);
|
||||
}
|
||||
}
|
||||
if (IsInputArray(stage) && info.loads_position) {
|
||||
if (IsInputArray(stage) && info.loads.AnyComponent(IR::Attribute::PositionX)) {
|
||||
Add("ATTRIB vertex_position=vertex.position;");
|
||||
}
|
||||
if (info.uses_invocation_id) {
|
||||
|
@ -102,7 +101,7 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
|
|||
if (info.stores_tess_level_inner) {
|
||||
Add("OUTPUT result_patch_tessinner[]={{result.patch.tessinner[0..1]}};");
|
||||
}
|
||||
if (info.stores_clip_distance) {
|
||||
if (info.stores.ClipDistances()) {
|
||||
Add("OUTPUT result_clip[]={{result.clip[0..7]}};");
|
||||
}
|
||||
for (size_t index = 0; index < info.uses_patches.size(); ++index) {
|
||||
|
@ -124,8 +123,8 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
|
|||
Add("OUTPUT frag_color{}=result.color[{}];", index, index);
|
||||
}
|
||||
}
|
||||
for (size_t index = 0; index < info.stores_generics.size(); ++index) {
|
||||
if (info.stores_generics[index]) {
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
if (info.stores.Generic(index)) {
|
||||
Add("OUTPUT out_attr{}[]={{result.attrib[{}..{}]}};", index, index, index);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -296,8 +296,10 @@ void SetupOptions(const IR::Program& program, const Profile& profile,
|
|||
if (info.uses_sparse_residency) {
|
||||
header += "OPTION EXT_sparse_texture2;";
|
||||
}
|
||||
if (((info.stores_viewport_index || info.stores_layer) && stage != Stage::Geometry) ||
|
||||
info.stores_viewport_mask) {
|
||||
const bool stores_viewport_layer{info.stores[IR::Attribute::ViewportIndex] ||
|
||||
info.stores[IR::Attribute::Layer]};
|
||||
if ((stage != Stage::Geometry && stores_viewport_layer) ||
|
||||
info.stores[IR::Attribute::ViewportMask]) {
|
||||
if (profile.support_viewport_index_layer_non_geometry) {
|
||||
header += "OPTION NV_viewport_array2;";
|
||||
}
|
||||
|
|
|
@ -261,7 +261,7 @@ void EmitGetAttributeIndexed(EmitContext& ctx, IR::Inst& inst, ScalarS32 offset,
|
|||
fmt::format("{}.z", value), fmt::format("{}.w", value)};
|
||||
read(compare_index, values);
|
||||
}};
|
||||
if (ctx.info.loads_position) {
|
||||
if (ctx.info.loads.AnyComponent(IR::Attribute::PositionX)) {
|
||||
const u32 index{static_cast<u32>(IR::Attribute::PositionX)};
|
||||
if (IsInputArray(ctx.stage)) {
|
||||
read_swizzled(index, fmt::format("vertex_position{}", VertexIndex(ctx, vertex)));
|
||||
|
@ -269,8 +269,8 @@ void EmitGetAttributeIndexed(EmitContext& ctx, IR::Inst& inst, ScalarS32 offset,
|
|||
read_swizzled(index, fmt::format("{}.position", ctx.attrib_name));
|
||||
}
|
||||
}
|
||||
for (u32 index = 0; index < ctx.info.input_generics.size(); ++index) {
|
||||
if (!ctx.info.input_generics[index].used) {
|
||||
for (u32 index = 0; index < static_cast<u32>(IR::NUM_GENERICS); ++index) {
|
||||
if (!ctx.info.loads.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
read_swizzled(index, fmt::format("in_attr{}{}[0]", index, VertexIndex(ctx, vertex)));
|
||||
|
|
|
@ -212,22 +212,22 @@ std::string_view OutputPrimitive(OutputTopology topology) {
|
|||
}
|
||||
|
||||
void SetupLegacyOutPerVertex(EmitContext& ctx, std::string& header) {
|
||||
if (!ctx.info.stores_legacy_varyings) {
|
||||
if (!ctx.info.stores.Legacy()) {
|
||||
return;
|
||||
}
|
||||
if (ctx.info.stores_fixed_fnc_textures) {
|
||||
if (ctx.info.stores.FixedFunctionTexture()) {
|
||||
header += "vec4 gl_TexCoord[8];";
|
||||
}
|
||||
if (ctx.info.stores_color_front_diffuse) {
|
||||
if (ctx.info.stores.AnyComponent(IR::Attribute::ColorFrontDiffuseR)) {
|
||||
header += "vec4 gl_FrontColor;";
|
||||
}
|
||||
if (ctx.info.stores_color_front_specular) {
|
||||
if (ctx.info.stores.AnyComponent(IR::Attribute::ColorFrontSpecularR)) {
|
||||
header += "vec4 gl_FrontSecondaryColor;";
|
||||
}
|
||||
if (ctx.info.stores_color_back_diffuse) {
|
||||
if (ctx.info.stores.AnyComponent(IR::Attribute::ColorBackDiffuseR)) {
|
||||
header += "vec4 gl_BackColor;";
|
||||
}
|
||||
if (ctx.info.stores_color_back_specular) {
|
||||
if (ctx.info.stores.AnyComponent(IR::Attribute::ColorBackSpecularR)) {
|
||||
header += "vec4 gl_BackSecondaryColor;";
|
||||
}
|
||||
}
|
||||
|
@ -237,32 +237,32 @@ void SetupOutPerVertex(EmitContext& ctx, std::string& header) {
|
|||
return;
|
||||
}
|
||||
header += "out gl_PerVertex{vec4 gl_Position;";
|
||||
if (ctx.info.stores_point_size) {
|
||||
if (ctx.info.stores[IR::Attribute::PointSize]) {
|
||||
header += "float gl_PointSize;";
|
||||
}
|
||||
if (ctx.info.stores_clip_distance) {
|
||||
if (ctx.info.stores.ClipDistances()) {
|
||||
header += "float gl_ClipDistance[];";
|
||||
}
|
||||
if (ctx.info.stores_viewport_index && ctx.profile.support_viewport_index_layer_non_geometry &&
|
||||
ctx.stage != Stage::Geometry) {
|
||||
if (ctx.info.stores[IR::Attribute::ViewportIndex] &&
|
||||
ctx.profile.support_viewport_index_layer_non_geometry && ctx.stage != Stage::Geometry) {
|
||||
header += "int gl_ViewportIndex;";
|
||||
}
|
||||
SetupLegacyOutPerVertex(ctx, header);
|
||||
header += "};";
|
||||
if (ctx.info.stores_viewport_index && ctx.stage == Stage::Geometry) {
|
||||
if (ctx.info.stores[IR::Attribute::ViewportIndex] && ctx.stage == Stage::Geometry) {
|
||||
header += "out int gl_ViewportIndex;";
|
||||
}
|
||||
}
|
||||
|
||||
void SetupLegacyInPerFragment(EmitContext& ctx, std::string& header) {
|
||||
if (!ctx.info.loads_legacy_varyings) {
|
||||
if (!ctx.info.loads.Legacy()) {
|
||||
return;
|
||||
}
|
||||
header += "in gl_PerFragment{";
|
||||
if (ctx.info.loads_fixed_fnc_textures) {
|
||||
if (ctx.info.loads.FixedFunctionTexture()) {
|
||||
header += "vec4 gl_TexCoord[8];";
|
||||
}
|
||||
if (ctx.info.loads_color_front_diffuse) {
|
||||
if (ctx.info.loads.AnyComponent(IR::Attribute::ColorFrontDiffuseR)) {
|
||||
header += "vec4 gl_Color;";
|
||||
}
|
||||
header += "};";
|
||||
|
@ -325,14 +325,13 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
|
|||
SetupOutPerVertex(*this, header);
|
||||
SetupLegacyInPerFragment(*this, header);
|
||||
|
||||
for (size_t index = 0; index < info.input_generics.size(); ++index) {
|
||||
const auto& generic{info.input_generics[index]};
|
||||
if (!generic.used || !runtime_info.previous_stage_stores_generic[index]) {
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
if (!info.loads.Generic(index) || !runtime_info.previous_stage_stores.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
header +=
|
||||
fmt::format("layout(location={}){}in vec4 in_attr{}{};", index,
|
||||
InterpDecorator(generic.interpolation), index, InputArrayDecorator(stage));
|
||||
header += fmt::format("layout(location={}){}in vec4 in_attr{}{};", index,
|
||||
InterpDecorator(info.interpolation[index]), index,
|
||||
InputArrayDecorator(stage));
|
||||
}
|
||||
for (size_t index = 0; index < info.uses_patches.size(); ++index) {
|
||||
if (!info.uses_patches[index]) {
|
||||
|
@ -349,11 +348,10 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
|
|||
header += fmt::format("layout(location={})out vec4 frag_color{};", index, index);
|
||||
}
|
||||
}
|
||||
for (size_t index = 0; index < info.stores_generics.size(); ++index) {
|
||||
if (!info.stores_generics[index]) {
|
||||
continue;
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
if (info.stores.Generic(index)) {
|
||||
DefineGenericOutput(index, program.invocations);
|
||||
}
|
||||
DefineGenericOutput(index, program.invocations);
|
||||
}
|
||||
DefineConstantBuffers(bindings);
|
||||
DefineStorageBuffers(bindings);
|
||||
|
@ -398,14 +396,14 @@ void EmitContext::SetupExtensions() {
|
|||
header += "#extension GL_NV_shader_thread_shuffle : enable\n";
|
||||
}
|
||||
}
|
||||
if ((info.stores_viewport_index || info.stores_layer) &&
|
||||
if ((info.stores[IR::Attribute::ViewportIndex] || info.stores[IR::Attribute::Layer]) &&
|
||||
profile.support_viewport_index_layer_non_geometry && stage != Stage::Geometry) {
|
||||
header += "#extension GL_ARB_shader_viewport_layer_array : enable\n";
|
||||
}
|
||||
if (info.uses_sparse_residency && profile.support_gl_sparse_textures) {
|
||||
header += "#extension GL_ARB_sparse_texture2 : enable\n";
|
||||
}
|
||||
if (info.stores_viewport_mask && profile.support_viewport_mask) {
|
||||
if (info.stores[IR::Attribute::ViewportMask] && profile.support_viewport_mask) {
|
||||
header += "#extension GL_NV_viewport_array2 : enable\n";
|
||||
}
|
||||
if (info.uses_typeless_image_reads) {
|
||||
|
@ -535,20 +533,20 @@ void EmitContext::DefineHelperFunctions() {
|
|||
fmt::format("float IndexedAttrLoad(int offset{}){{int base_index=offset>>2;uint "
|
||||
"masked_index=uint(base_index)&3u;switch(base_index>>2){{",
|
||||
vertex_arg)};
|
||||
if (info.loads_position) {
|
||||
if (info.loads.AnyComponent(IR::Attribute::PositionX)) {
|
||||
const auto position_idx{is_array ? "gl_in[vertex]." : ""};
|
||||
func += fmt::format("case {}:return {}{}[masked_index];",
|
||||
static_cast<u32>(IR::Attribute::PositionX) >> 2, position_idx,
|
||||
position_name);
|
||||
}
|
||||
const u32 base_attribute_value = static_cast<u32>(IR::Attribute::Generic0X) >> 2;
|
||||
for (u32 i = 0; i < info.input_generics.size(); ++i) {
|
||||
if (!info.input_generics[i].used) {
|
||||
for (u32 index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
if (!info.loads.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
const auto vertex_idx{is_array ? "[vertex]" : ""};
|
||||
func += fmt::format("case {}:return in_attr{}{}[masked_index];",
|
||||
base_attribute_value + i, i, vertex_idx);
|
||||
base_attribute_value + index, index, vertex_idx);
|
||||
}
|
||||
func += "default: return 0.0;}}";
|
||||
header += func;
|
||||
|
|
|
@ -171,7 +171,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
|
|||
}
|
||||
|
||||
std::string GlslVersionSpecifier(const EmitContext& ctx) {
|
||||
if (ctx.uses_y_direction || ctx.info.stores_legacy_varyings || ctx.info.loads_legacy_varyings) {
|
||||
if (ctx.uses_y_direction || ctx.info.stores.Legacy() || ctx.info.loads.Legacy()) {
|
||||
return " compatibility";
|
||||
}
|
||||
return "";
|
||||
|
|
|
@ -179,7 +179,7 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
|
|||
const char swizzle{"xyzw"[element]};
|
||||
if (IR::IsGeneric(attr)) {
|
||||
const u32 index{IR::GenericAttributeIndex(attr)};
|
||||
if (!ctx.runtime_info.previous_stage_stores_generic[index]) {
|
||||
if (!ctx.runtime_info.previous_stage_stores.Generic(index)) {
|
||||
ctx.AddF32("{}=0.f;", inst, attr);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -20,8 +20,8 @@ void InitializeOutputVaryings(EmitContext& ctx) {
|
|||
if (ctx.stage == Stage::VertexB || ctx.stage == Stage::Geometry) {
|
||||
ctx.Add("gl_Position=vec4(0,0,0,1);");
|
||||
}
|
||||
for (size_t index = 0; index < ctx.info.stores_generics.size(); ++index) {
|
||||
if (!ctx.info.stores_generics[index]) {
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
if (!ctx.info.stores.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
const auto& info_array{ctx.output_generics.at(index)};
|
||||
|
|
|
@ -557,7 +557,7 @@ void EmitContext::DefineCommonConstants() {
|
|||
}
|
||||
|
||||
void EmitContext::DefineInterfaces(const IR::Program& program) {
|
||||
DefineInputs(program.info);
|
||||
DefineInputs(program);
|
||||
DefineOutputs(program);
|
||||
}
|
||||
|
||||
|
@ -693,16 +693,16 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
|
|||
const Id compare_index{OpShiftRightArithmetic(U32[1], base_index, Const(2U))};
|
||||
std::vector<Sirit::Literal> literals;
|
||||
std::vector<Id> labels;
|
||||
if (info.loads_position) {
|
||||
if (info.loads.AnyComponent(IR::Attribute::PositionX)) {
|
||||
literals.push_back(static_cast<u32>(IR::Attribute::PositionX) >> 2);
|
||||
labels.push_back(OpLabel());
|
||||
}
|
||||
const u32 base_attribute_value = static_cast<u32>(IR::Attribute::Generic0X) >> 2;
|
||||
for (u32 i = 0; i < info.input_generics.size(); ++i) {
|
||||
if (!info.input_generics[i].used) {
|
||||
for (u32 index = 0; index < static_cast<u32>(IR::NUM_GENERICS); ++index) {
|
||||
if (!info.loads.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
literals.push_back(base_attribute_value + i);
|
||||
literals.push_back(base_attribute_value + index);
|
||||
labels.push_back(OpLabel());
|
||||
}
|
||||
OpSelectionMerge(end_block, spv::SelectionControlMask::MaskNone);
|
||||
|
@ -710,7 +710,7 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
|
|||
AddLabel(default_label);
|
||||
OpReturnValue(Const(0.0f));
|
||||
size_t label_index{0};
|
||||
if (info.loads_position) {
|
||||
if (info.loads.AnyComponent(IR::Attribute::PositionX)) {
|
||||
AddLabel(labels[label_index]);
|
||||
const Id pointer{is_array
|
||||
? OpAccessChain(input_f32, input_position, vertex, masked_index)
|
||||
|
@ -719,18 +719,18 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
|
|||
OpReturnValue(result);
|
||||
++label_index;
|
||||
}
|
||||
for (size_t i = 0; i < info.input_generics.size(); i++) {
|
||||
if (!info.input_generics[i].used) {
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
if (!info.loads.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
AddLabel(labels[label_index]);
|
||||
const auto type{AttrTypes(*this, static_cast<u32>(i))};
|
||||
const auto type{AttrTypes(*this, static_cast<u32>(index))};
|
||||
if (!type) {
|
||||
OpReturnValue(Const(0.0f));
|
||||
++label_index;
|
||||
continue;
|
||||
}
|
||||
const Id generic_id{input_generics.at(i)};
|
||||
const Id generic_id{input_generics.at(index)};
|
||||
const Id pointer{is_array
|
||||
? OpAccessChain(type->pointer, generic_id, vertex, masked_index)
|
||||
: OpAccessChain(type->pointer, generic_id, masked_index)};
|
||||
|
@ -758,19 +758,19 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
|
|||
const Id compare_index{OpShiftRightArithmetic(U32[1], base_index, Const(2U))};
|
||||
std::vector<Sirit::Literal> literals;
|
||||
std::vector<Id> labels;
|
||||
if (info.stores_position) {
|
||||
if (info.stores.AnyComponent(IR::Attribute::PositionX)) {
|
||||
literals.push_back(static_cast<u32>(IR::Attribute::PositionX) >> 2);
|
||||
labels.push_back(OpLabel());
|
||||
}
|
||||
const u32 base_attribute_value = static_cast<u32>(IR::Attribute::Generic0X) >> 2;
|
||||
for (size_t i = 0; i < info.stores_generics.size(); i++) {
|
||||
if (!info.stores_generics[i]) {
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
if (!info.stores.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
literals.push_back(base_attribute_value + static_cast<u32>(i));
|
||||
literals.push_back(base_attribute_value + static_cast<u32>(index));
|
||||
labels.push_back(OpLabel());
|
||||
}
|
||||
if (info.stores_clip_distance) {
|
||||
if (info.stores.ClipDistances()) {
|
||||
literals.push_back(static_cast<u32>(IR::Attribute::ClipDistance0) >> 2);
|
||||
labels.push_back(OpLabel());
|
||||
literals.push_back(static_cast<u32>(IR::Attribute::ClipDistance4) >> 2);
|
||||
|
@ -781,28 +781,28 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
|
|||
AddLabel(default_label);
|
||||
OpReturn();
|
||||
size_t label_index{0};
|
||||
if (info.stores_position) {
|
||||
if (info.stores.AnyComponent(IR::Attribute::PositionX)) {
|
||||
AddLabel(labels[label_index]);
|
||||
const Id pointer{OpAccessChain(output_f32, output_position, masked_index)};
|
||||
OpStore(pointer, store_value);
|
||||
OpReturn();
|
||||
++label_index;
|
||||
}
|
||||
for (size_t i = 0; i < info.stores_generics.size(); ++i) {
|
||||
if (!info.stores_generics[i]) {
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
if (!info.stores.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
if (output_generics[i][0].num_components != 4) {
|
||||
if (output_generics[index][0].num_components != 4) {
|
||||
throw NotImplementedException("Physical stores and transform feedbacks");
|
||||
}
|
||||
AddLabel(labels[label_index]);
|
||||
const Id generic_id{output_generics[i][0].id};
|
||||
const Id generic_id{output_generics[index][0].id};
|
||||
const Id pointer{OpAccessChain(output_f32, generic_id, masked_index)};
|
||||
OpStore(pointer, store_value);
|
||||
OpReturn();
|
||||
++label_index;
|
||||
}
|
||||
if (info.stores_clip_distance) {
|
||||
if (info.stores.ClipDistances()) {
|
||||
AddLabel(labels[label_index]);
|
||||
const Id pointer{OpAccessChain(output_f32, clip_distances, masked_index)};
|
||||
OpStore(pointer, store_value);
|
||||
|
@ -1146,7 +1146,10 @@ void EmitContext::DefineImages(const Info& info, u32& binding) {
|
|||
}
|
||||
}
|
||||
|
||||
void EmitContext::DefineInputs(const Info& info) {
|
||||
void EmitContext::DefineInputs(const IR::Program& program) {
|
||||
const Info& info{program.info};
|
||||
const VaryingState loads{info.loads.mask | info.passthrough.mask};
|
||||
|
||||
if (info.uses_workgroup_id) {
|
||||
workgroup_id = DefineInput(*this, U32[3], false, spv::BuiltIn::WorkgroupId);
|
||||
}
|
||||
|
@ -1183,15 +1186,20 @@ void EmitContext::DefineInputs(const Info& info) {
|
|||
fswzadd_lut_b =
|
||||
ConstantComposite(F32[4], f32_minus_one, f32_minus_one, f32_one, f32_minus_one);
|
||||
}
|
||||
if (info.loads_primitive_id) {
|
||||
if (loads[IR::Attribute::PrimitiveId]) {
|
||||
primitive_id = DefineInput(*this, U32[1], false, spv::BuiltIn::PrimitiveId);
|
||||
}
|
||||
if (info.loads_position) {
|
||||
if (loads.AnyComponent(IR::Attribute::PositionX)) {
|
||||
const bool is_fragment{stage != Stage::Fragment};
|
||||
const spv::BuiltIn built_in{is_fragment ? spv::BuiltIn::Position : spv::BuiltIn::FragCoord};
|
||||
input_position = DefineInput(*this, F32[4], true, built_in);
|
||||
if (profile.support_geometry_shader_passthrough) {
|
||||
if (info.passthrough.AnyComponent(IR::Attribute::PositionX)) {
|
||||
Decorate(input_position, spv::Decoration::PassthroughNV);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (info.loads_instance_id) {
|
||||
if (loads[IR::Attribute::InstanceId]) {
|
||||
if (profile.support_vertex_instance_id) {
|
||||
instance_id = DefineInput(*this, U32[1], true, spv::BuiltIn::InstanceId);
|
||||
} else {
|
||||
|
@ -1199,7 +1207,7 @@ void EmitContext::DefineInputs(const Info& info) {
|
|||
base_instance = DefineInput(*this, U32[1], true, spv::BuiltIn::BaseInstance);
|
||||
}
|
||||
}
|
||||
if (info.loads_vertex_id) {
|
||||
if (loads[IR::Attribute::VertexId]) {
|
||||
if (profile.support_vertex_instance_id) {
|
||||
vertex_id = DefineInput(*this, U32[1], true, spv::BuiltIn::VertexId);
|
||||
} else {
|
||||
|
@ -1207,24 +1215,24 @@ void EmitContext::DefineInputs(const Info& info) {
|
|||
base_vertex = DefineInput(*this, U32[1], true, spv::BuiltIn::BaseVertex);
|
||||
}
|
||||
}
|
||||
if (info.loads_front_face) {
|
||||
if (loads[IR::Attribute::FrontFace]) {
|
||||
front_face = DefineInput(*this, U1, true, spv::BuiltIn::FrontFacing);
|
||||
}
|
||||
if (info.loads_point_coord) {
|
||||
if (loads[IR::Attribute::PointSpriteS] || loads[IR::Attribute::PointSpriteT]) {
|
||||
point_coord = DefineInput(*this, F32[2], true, spv::BuiltIn::PointCoord);
|
||||
}
|
||||
if (info.loads_tess_coord) {
|
||||
if (loads[IR::Attribute::TessellationEvaluationPointU] ||
|
||||
loads[IR::Attribute::TessellationEvaluationPointV]) {
|
||||
tess_coord = DefineInput(*this, F32[3], false, spv::BuiltIn::TessCoord);
|
||||
}
|
||||
for (size_t index = 0; index < info.input_generics.size(); ++index) {
|
||||
if (!runtime_info.previous_stage_stores_generic[index]) {
|
||||
continue;
|
||||
}
|
||||
const InputVarying generic{info.input_generics[index]};
|
||||
if (!generic.used) {
|
||||
continue;
|
||||
}
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
const AttributeType input_type{runtime_info.generic_input_types[index]};
|
||||
if (!runtime_info.previous_stage_stores.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
if (!loads.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
if (input_type == AttributeType::Disabled) {
|
||||
continue;
|
||||
}
|
||||
|
@ -1234,10 +1242,13 @@ void EmitContext::DefineInputs(const Info& info) {
|
|||
Name(id, fmt::format("in_attr{}", index));
|
||||
input_generics[index] = id;
|
||||
|
||||
if (info.passthrough.Generic(index) && profile.support_geometry_shader_passthrough) {
|
||||
Decorate(id, spv::Decoration::PassthroughNV);
|
||||
}
|
||||
if (stage != Stage::Fragment) {
|
||||
continue;
|
||||
}
|
||||
switch (generic.interpolation) {
|
||||
switch (info.interpolation[index]) {
|
||||
case Interpolation::Smooth:
|
||||
// Default
|
||||
// Decorate(id, spv::Decoration::Smooth);
|
||||
|
@ -1266,42 +1277,42 @@ void EmitContext::DefineInputs(const Info& info) {
|
|||
void EmitContext::DefineOutputs(const IR::Program& program) {
|
||||
const Info& info{program.info};
|
||||
const std::optional<u32> invocations{program.invocations};
|
||||
if (info.stores_position || stage == Stage::VertexB) {
|
||||
if (info.stores.AnyComponent(IR::Attribute::PositionX) || stage == Stage::VertexB) {
|
||||
output_position = DefineOutput(*this, F32[4], invocations, spv::BuiltIn::Position);
|
||||
}
|
||||
if (info.stores_point_size || runtime_info.fixed_state_point_size) {
|
||||
if (info.stores[IR::Attribute::PointSize] || runtime_info.fixed_state_point_size) {
|
||||
if (stage == Stage::Fragment) {
|
||||
throw NotImplementedException("Storing PointSize in fragment stage");
|
||||
}
|
||||
output_point_size = DefineOutput(*this, F32[1], invocations, spv::BuiltIn::PointSize);
|
||||
}
|
||||
if (info.stores_clip_distance) {
|
||||
if (info.stores.ClipDistances()) {
|
||||
if (stage == Stage::Fragment) {
|
||||
throw NotImplementedException("Storing ClipDistance in fragment stage");
|
||||
}
|
||||
const Id type{TypeArray(F32[1], Const(8U))};
|
||||
clip_distances = DefineOutput(*this, type, invocations, spv::BuiltIn::ClipDistance);
|
||||
}
|
||||
if (info.stores_layer &&
|
||||
if (info.stores[IR::Attribute::Layer] &&
|
||||
(profile.support_viewport_index_layer_non_geometry || stage == Stage::Geometry)) {
|
||||
if (stage == Stage::Fragment) {
|
||||
throw NotImplementedException("Storing Layer in fragment stage");
|
||||
}
|
||||
layer = DefineOutput(*this, U32[1], invocations, spv::BuiltIn::Layer);
|
||||
}
|
||||
if (info.stores_viewport_index &&
|
||||
if (info.stores[IR::Attribute::ViewportIndex] &&
|
||||
(profile.support_viewport_index_layer_non_geometry || stage == Stage::Geometry)) {
|
||||
if (stage == Stage::Fragment) {
|
||||
throw NotImplementedException("Storing ViewportIndex in fragment stage");
|
||||
}
|
||||
viewport_index = DefineOutput(*this, U32[1], invocations, spv::BuiltIn::ViewportIndex);
|
||||
}
|
||||
if (info.stores_viewport_mask && profile.support_viewport_mask) {
|
||||
if (info.stores[IR::Attribute::ViewportMask] && profile.support_viewport_mask) {
|
||||
viewport_mask = DefineOutput(*this, TypeArray(U32[1], Const(1u)), std::nullopt,
|
||||
spv::BuiltIn::ViewportMaskNV);
|
||||
}
|
||||
for (size_t index = 0; index < info.stores_generics.size(); ++index) {
|
||||
if (info.stores_generics[index]) {
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
if (info.stores.Generic(index)) {
|
||||
DefineGenericOutput(*this, index, invocations);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -300,7 +300,7 @@ private:
|
|||
void DefineAttributeMemAccess(const Info& info);
|
||||
void DefineGlobalMemoryFunctions(const Info& info);
|
||||
|
||||
void DefineInputs(const Info& info);
|
||||
void DefineInputs(const IR::Program& program);
|
||||
void DefineOutputs(const IR::Program& program);
|
||||
};
|
||||
|
||||
|
|
|
@ -281,11 +281,19 @@ void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
|
|||
ctx.AddExecutionMode(main, spv::ExecutionMode::OutputTriangleStrip);
|
||||
break;
|
||||
}
|
||||
if (program.info.stores_point_size) {
|
||||
if (program.info.stores[IR::Attribute::PointSize]) {
|
||||
ctx.AddCapability(spv::Capability::GeometryPointSize);
|
||||
}
|
||||
ctx.AddExecutionMode(main, spv::ExecutionMode::OutputVertices, program.output_vertices);
|
||||
ctx.AddExecutionMode(main, spv::ExecutionMode::Invocations, program.invocations);
|
||||
if (program.is_geometry_passthrough) {
|
||||
if (ctx.profile.support_geometry_shader_passthrough) {
|
||||
ctx.AddExtension("SPV_NV_geometry_shader_passthrough");
|
||||
ctx.AddCapability(spv::Capability::GeometryShaderPassthroughNV);
|
||||
} else {
|
||||
LOG_WARNING(Shader_SPIRV, "Geometry shader passthrough used with no support");
|
||||
}
|
||||
}
|
||||
break;
|
||||
case Stage::Fragment:
|
||||
execution_model = spv::ExecutionModel::Fragment;
|
||||
|
@ -377,20 +385,21 @@ void SetupCapabilities(const Profile& profile, const Info& info, EmitContext& ct
|
|||
ctx.AddExtension("SPV_EXT_demote_to_helper_invocation");
|
||||
ctx.AddCapability(spv::Capability::DemoteToHelperInvocationEXT);
|
||||
}
|
||||
if (info.stores_viewport_index) {
|
||||
if (info.stores[IR::Attribute::ViewportIndex]) {
|
||||
ctx.AddCapability(spv::Capability::MultiViewport);
|
||||
}
|
||||
if (info.stores_viewport_mask && profile.support_viewport_mask) {
|
||||
if (info.stores[IR::Attribute::ViewportMask] && profile.support_viewport_mask) {
|
||||
ctx.AddExtension("SPV_NV_viewport_array2");
|
||||
ctx.AddCapability(spv::Capability::ShaderViewportMaskNV);
|
||||
}
|
||||
if (info.stores_layer || info.stores_viewport_index) {
|
||||
if (info.stores[IR::Attribute::Layer] || info.stores[IR::Attribute::ViewportIndex]) {
|
||||
if (profile.support_viewport_index_layer_non_geometry && ctx.stage != Stage::Geometry) {
|
||||
ctx.AddExtension("SPV_EXT_shader_viewport_index_layer");
|
||||
ctx.AddCapability(spv::Capability::ShaderViewportIndexLayerEXT);
|
||||
}
|
||||
}
|
||||
if (!profile.support_vertex_instance_id && (info.loads_instance_id || info.loads_vertex_id)) {
|
||||
if (!profile.support_vertex_instance_id &&
|
||||
(info.loads[IR::Attribute::InstanceId] || info.loads[IR::Attribute::VertexId])) {
|
||||
ctx.AddExtension("SPV_KHR_shader_draw_parameters");
|
||||
ctx.AddCapability(spv::Capability::DrawParameters);
|
||||
}
|
||||
|
|
|
@ -298,7 +298,7 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
|
|||
if (IR::IsGeneric(attr)) {
|
||||
const u32 index{IR::GenericAttributeIndex(attr)};
|
||||
const std::optional<AttrInfo> type{AttrTypes(ctx, index)};
|
||||
if (!type || !ctx.runtime_info.previous_stage_stores_generic[index]) {
|
||||
if (!type || !ctx.runtime_info.previous_stage_stores.Generic(index)) {
|
||||
// Attribute is disabled
|
||||
return ctx.Const(0.0f);
|
||||
}
|
||||
|
|
|
@ -31,6 +31,10 @@ public:
|
|||
return sph;
|
||||
}
|
||||
|
||||
[[nodiscard]] const std::array<u32, 8>& GpPassthroughMask() const noexcept {
|
||||
return gp_passthrough_mask;
|
||||
}
|
||||
|
||||
[[nodiscard]] Stage ShaderStage() const noexcept {
|
||||
return stage;
|
||||
}
|
||||
|
@ -41,6 +45,7 @@ public:
|
|||
|
||||
protected:
|
||||
ProgramHeader sph{};
|
||||
std::array<u32, 8> gp_passthrough_mask{};
|
||||
Stage stage{};
|
||||
u32 start_address{};
|
||||
};
|
||||
|
|
|
@ -222,6 +222,8 @@ enum class Attribute : u64 {
|
|||
FrontFace = 255,
|
||||
};
|
||||
|
||||
constexpr size_t NUM_GENERICS = 32;
|
||||
|
||||
[[nodiscard]] bool IsGeneric(Attribute attribute) noexcept;
|
||||
|
||||
[[nodiscard]] u32 GenericAttributeIndex(Attribute attribute);
|
||||
|
@ -230,6 +232,10 @@ enum class Attribute : u64 {
|
|||
|
||||
[[nodiscard]] std::string NameOf(Attribute attribute);
|
||||
|
||||
[[nodiscard]] constexpr IR::Attribute operator+(IR::Attribute attribute, size_t value) noexcept {
|
||||
return static_cast<IR::Attribute>(static_cast<size_t>(attribute) + value);
|
||||
}
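The operator+ overload above is what lets later hunks step through vector components with expressions such as Generic0X + index * 4 + component. A standalone sketch with illustrative enumerator values (the real values live in attribute.h); it only relies on the X/Y/Z/W components of a generic being consecutive enumerators, which the diff itself depends on.

```cpp
// Standalone sketch of the new IR::Attribute operator+. Enum values are
// illustrative, not the real encoding.
#include <cstddef>
#include <cstdio>

enum class Attribute : std::size_t { Generic0X = 32, Generic0Y, Generic0Z, Generic0W, Generic1X };

constexpr Attribute operator+(Attribute attribute, std::size_t value) noexcept {
    return static_cast<Attribute>(static_cast<std::size_t>(attribute) + value);
}

int main() {
    static_assert(Attribute::Generic0X + 3 == Attribute::Generic0W); // component W of generic 0
    static_assert(Attribute::Generic0X + 4 == Attribute::Generic1X); // first component of the next generic
    std::printf("ok\n");
}
```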
|
||||
|
||||
} // namespace Shader::IR
|
||||
|
||||
template <>
|
||||
|
|
|
@ -27,6 +27,7 @@ struct Program {
|
|||
u32 invocations{};
|
||||
u32 local_memory_size{};
|
||||
u32 shared_memory_size{};
|
||||
bool is_geometry_passthrough{};
|
||||
};
|
||||
|
||||
[[nodiscard]] std::string DumpProgram(const Program& program);
|
||||
|
|
|
@ -46,7 +46,7 @@ void CollectInterpolationInfo(Environment& env, IR::Program& program) {
|
|||
return;
|
||||
}
|
||||
const ProgramHeader& sph{env.SPH()};
|
||||
for (size_t index = 0; index < program.info.input_generics.size(); ++index) {
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
std::optional<PixelImap> imap;
|
||||
for (const PixelImap value : sph.ps.GenericInputMap(static_cast<u32>(index))) {
|
||||
if (value == PixelImap::Unused) {
|
||||
|
@ -60,7 +60,7 @@ void CollectInterpolationInfo(Environment& env, IR::Program& program) {
|
|||
if (!imap) {
|
||||
continue;
|
||||
}
|
||||
program.info.input_generics[index].interpolation = [&] {
|
||||
program.info.interpolation[index] = [&] {
|
||||
switch (*imap) {
|
||||
case PixelImap::Unused:
|
||||
case PixelImap::Perspective:
|
||||
|
@ -140,6 +140,11 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
|
|||
program.output_topology = sph.common3.output_topology;
|
||||
program.output_vertices = sph.common4.max_output_vertices;
|
||||
program.invocations = sph.common2.threads_per_input_primitive;
|
||||
program.is_geometry_passthrough = sph.common0.geometry_passthrough != 0;
|
||||
if (program.is_geometry_passthrough) {
|
||||
const auto mask{env.GpPassthroughMask()};
|
||||
program.info.passthrough.mask |= ~Common::BitCast<std::bitset<256>>(mask);
|
||||
}
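For context on the Common::BitCast call above: the eight 32-bit gp_passthrough_mask register words are reinterpreted as one 256-bit set, which the diff then inverts and ORs into info.passthrough.mask. Below is a hedged, standalone sketch of that conversion; std::memcpy stands in for yuzu's BitCast helper, and it assumes std::bitset<256> is a plain 32-byte object, which holds on the major standard libraries but is not guaranteed by the standard.

```cpp
// Illustrative conversion from the Maxwell gp_passthrough_mask register words
// to a 256-bit attribute mask. Not the actual yuzu helper.
#include <array>
#include <bitset>
#include <cstdint>
#include <cstring>

std::bitset<256> MaskToBitset(const std::array<std::uint32_t, 8>& words) {
    std::bitset<256> bits; // zero-initialized
    static_assert(sizeof(bits) >= sizeof(words), "assumes a compact bitset layout");
    std::memcpy(&bits, words.data(), sizeof(words)); // reinterpret the 32 register bytes as bits
    return bits;
}
```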
|
||||
break;
|
||||
}
|
||||
case Stage::Compute:
|
||||
|
@ -194,12 +199,9 @@ IR::Program MergeDualVertexPrograms(IR::Program& vertex_a, IR::Program& vertex_b
|
|||
result.stage = Stage::VertexB;
|
||||
result.info = vertex_a.info;
|
||||
result.local_memory_size = std::max(vertex_a.local_memory_size, vertex_b.local_memory_size);
|
||||
for (size_t index = 0; index < 32; ++index) {
|
||||
result.info.input_generics[index].used |= vertex_b.info.input_generics[index].used;
|
||||
if (vertex_b.info.stores_generics[index]) {
|
||||
result.info.stores_generics[index] = true;
|
||||
}
|
||||
}
|
||||
result.info.loads.mask |= vertex_b.info.loads.mask;
|
||||
result.info.stores.mask |= vertex_b.info.stores.mask;
|
||||
|
||||
Optimization::JoinTextureInfo(result.info, vertex_b.info);
|
||||
Optimization::JoinStorageInfo(result.info, vertex_b.info);
|
||||
Optimization::DeadCodeEliminationPass(result);
|
||||
|
|
|
@ -29,130 +29,6 @@ void AddConstantBufferDescriptor(Info& info, u32 index, u32 count) {
|
|||
});
|
||||
}
|
||||
|
||||
void GetAttribute(Info& info, IR::Attribute attr) {
|
||||
if (IR::IsGeneric(attr)) {
|
||||
info.input_generics.at(IR::GenericAttributeIndex(attr)).used = true;
|
||||
return;
|
||||
}
|
||||
if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture9Q) {
|
||||
info.loads_fixed_fnc_textures = true;
|
||||
info.loads_legacy_varyings = true;
|
||||
return;
|
||||
}
|
||||
switch (attr) {
|
||||
case IR::Attribute::PrimitiveId:
|
||||
info.loads_primitive_id = true;
|
||||
break;
|
||||
case IR::Attribute::PositionX:
|
||||
case IR::Attribute::PositionY:
|
||||
case IR::Attribute::PositionZ:
|
||||
case IR::Attribute::PositionW:
|
||||
info.loads_position = true;
|
||||
break;
|
||||
case IR::Attribute::ColorFrontDiffuseR:
|
||||
case IR::Attribute::ColorFrontDiffuseG:
|
||||
case IR::Attribute::ColorFrontDiffuseB:
|
||||
case IR::Attribute::ColorFrontDiffuseA:
|
||||
info.loads_color_front_diffuse = true;
|
||||
info.loads_legacy_varyings = true;
|
||||
break;
|
||||
case IR::Attribute::PointSpriteS:
|
||||
case IR::Attribute::PointSpriteT:
|
||||
info.loads_point_coord = true;
|
||||
break;
|
||||
case IR::Attribute::TessellationEvaluationPointU:
|
||||
case IR::Attribute::TessellationEvaluationPointV:
|
||||
info.loads_tess_coord = true;
|
||||
break;
|
||||
case IR::Attribute::InstanceId:
|
||||
info.loads_instance_id = true;
|
||||
break;
|
||||
case IR::Attribute::VertexId:
|
||||
info.loads_vertex_id = true;
|
||||
break;
|
||||
case IR::Attribute::FrontFace:
|
||||
info.loads_front_face = true;
|
||||
break;
|
||||
default:
|
||||
throw NotImplementedException("Get attribute {}", attr);
|
||||
}
|
||||
}
|
||||
|
||||
void SetAttribute(Info& info, IR::Attribute attr) {
|
||||
if (IR::IsGeneric(attr)) {
|
||||
info.stores_generics[IR::GenericAttributeIndex(attr)] = true;
|
||||
return;
|
||||
}
|
||||
if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture9Q) {
|
||||
info.stores_fixed_fnc_textures = true;
|
||||
info.stores_legacy_varyings = true;
|
||||
return;
|
||||
}
|
||||
switch (attr) {
|
||||
case IR::Attribute::Layer:
|
||||
info.stores_layer = true;
|
||||
break;
|
||||
case IR::Attribute::ViewportIndex:
|
||||
info.stores_viewport_index = true;
|
||||
break;
|
||||
case IR::Attribute::PointSize:
|
||||
info.stores_point_size = true;
|
||||
break;
|
||||
case IR::Attribute::PositionX:
|
||||
case IR::Attribute::PositionY:
|
||||
case IR::Attribute::PositionZ:
|
||||
case IR::Attribute::PositionW:
|
||||
info.stores_position = true;
|
||||
break;
|
||||
case IR::Attribute::ColorFrontDiffuseR:
|
||||
case IR::Attribute::ColorFrontDiffuseG:
|
||||
case IR::Attribute::ColorFrontDiffuseB:
|
||||
case IR::Attribute::ColorFrontDiffuseA:
|
||||
info.stores_color_front_diffuse = true;
|
||||
info.stores_legacy_varyings = true;
|
||||
break;
|
||||
case IR::Attribute::ColorFrontSpecularR:
|
||||
case IR::Attribute::ColorFrontSpecularG:
|
||||
case IR::Attribute::ColorFrontSpecularB:
|
||||
case IR::Attribute::ColorFrontSpecularA:
|
||||
info.stores_color_front_specular = true;
|
||||
info.stores_legacy_varyings = true;
|
||||
break;
|
||||
case IR::Attribute::ColorBackDiffuseR:
|
||||
case IR::Attribute::ColorBackDiffuseG:
|
||||
case IR::Attribute::ColorBackDiffuseB:
|
||||
case IR::Attribute::ColorBackDiffuseA:
|
||||
info.stores_color_back_diffuse = true;
|
||||
info.stores_legacy_varyings = true;
|
||||
break;
|
||||
case IR::Attribute::ColorBackSpecularR:
|
||||
case IR::Attribute::ColorBackSpecularG:
|
||||
case IR::Attribute::ColorBackSpecularB:
|
||||
case IR::Attribute::ColorBackSpecularA:
|
||||
info.stores_color_back_specular = true;
|
||||
info.stores_legacy_varyings = true;
|
||||
break;
|
||||
case IR::Attribute::ClipDistance0:
|
||||
case IR::Attribute::ClipDistance1:
|
||||
case IR::Attribute::ClipDistance2:
|
||||
case IR::Attribute::ClipDistance3:
|
||||
case IR::Attribute::ClipDistance4:
|
||||
case IR::Attribute::ClipDistance5:
|
||||
case IR::Attribute::ClipDistance6:
|
||||
case IR::Attribute::ClipDistance7:
|
||||
info.stores_clip_distance = true;
|
||||
break;
|
||||
case IR::Attribute::FogCoordinate:
|
||||
info.stores_fog_coordinate = true;
|
||||
break;
|
||||
case IR::Attribute::ViewportMask:
|
||||
info.stores_viewport_mask = true;
|
||||
break;
|
||||
default:
|
||||
throw NotImplementedException("Set attribute {}", attr);
|
||||
}
|
||||
}
|
||||
|
||||
void GetPatch(Info& info, IR::Patch patch) {
|
||||
if (!IR::IsGeneric(patch)) {
|
||||
throw NotImplementedException("Reading non-generic patch {}", patch);
|
||||
|
@ -511,10 +387,10 @@ void VisitUsages(Info& info, IR::Inst& inst) {
|
|||
info.uses_demote_to_helper_invocation = true;
|
||||
break;
|
||||
case IR::Opcode::GetAttribute:
|
||||
GetAttribute(info, inst.Arg(0).Attribute());
|
||||
info.loads.mask[static_cast<size_t>(inst.Arg(0).Attribute())] = true;
|
||||
break;
|
||||
case IR::Opcode::SetAttribute:
|
||||
SetAttribute(info, inst.Arg(0).Attribute());
|
||||
info.stores.mask[static_cast<size_t>(inst.Arg(0).Attribute())] = true;
|
||||
break;
|
||||
case IR::Opcode::GetPatch:
|
||||
GetPatch(info, inst.Arg(0).Patch());
|
||||
|
@ -943,26 +819,78 @@ void GatherInfoFromHeader(Environment& env, Info& info) {
|
|||
if (!info.loads_indexed_attributes) {
|
||||
return;
|
||||
}
|
||||
for (size_t i = 0; i < info.input_generics.size(); i++) {
|
||||
info.input_generics[i].used |= header.ps.IsGenericVectorActive(i);
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
const size_t offset{static_cast<size_t>(IR::Attribute::Generic0X) + index * 4};
|
||||
const auto vector{header.ps.imap_generic_vector[index]};
|
||||
info.loads.mask[offset + 0] = vector.x != PixelImap::Unused;
|
||||
info.loads.mask[offset + 1] = vector.y != PixelImap::Unused;
|
||||
info.loads.mask[offset + 2] = vector.z != PixelImap::Unused;
|
||||
info.loads.mask[offset + 3] = vector.w != PixelImap::Unused;
|
||||
}
|
||||
info.loads_position |= header.ps.imap_systemb.position != 0;
|
||||
return;
|
||||
}
|
||||
if (info.loads_indexed_attributes) {
|
||||
for (size_t i = 0; i < info.input_generics.size(); i++) {
|
||||
info.input_generics[i].used |= header.vtg.IsInputGenericVectorActive(i);
|
||||
}
|
||||
info.loads_position |= header.vtg.imap_systemb.position != 0;
|
||||
}
|
||||
if (info.stores_indexed_attributes) {
|
||||
for (size_t i = 0; i < info.stores_generics.size(); i++) {
|
||||
if (header.vtg.IsOutputGenericVectorActive(i)) {
|
||||
info.stores_generics[i] = true;
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
const IR::Attribute attribute{IR::Attribute::Generic0X + index * 4};
|
||||
const auto mask = header.vtg.InputGeneric(index);
|
||||
for (size_t i = 0; i < 4; ++i) {
|
||||
info.loads.Set(attribute + i, mask[i]);
|
||||
}
|
||||
}
|
||||
info.stores_clip_distance |= header.vtg.omap_systemc.clip_distances != 0;
|
||||
info.stores_position |= header.vtg.omap_systemb.position != 0;
|
||||
for (size_t index = 0; index < 8; ++index) {
|
||||
const u16 mask{header.vtg.clip_distances};
|
||||
info.loads.Set(IR::Attribute::ClipDistance0 + index, ((mask >> index) & 1) != 0);
|
||||
}
|
||||
info.loads.Set(IR::Attribute::PrimitiveId, header.vtg.imap_systemb.primitive_array_id != 0);
|
||||
info.loads.Set(IR::Attribute::Layer, header.vtg.imap_systemb.rt_array_index != 0);
|
||||
info.loads.Set(IR::Attribute::ViewportIndex, header.vtg.imap_systemb.viewport_index != 0);
|
||||
info.loads.Set(IR::Attribute::PointSize, header.vtg.imap_systemb.point_size != 0);
|
||||
info.loads.Set(IR::Attribute::PositionX, header.vtg.imap_systemb.position_x != 0);
|
||||
info.loads.Set(IR::Attribute::PositionY, header.vtg.imap_systemb.position_y != 0);
|
||||
info.loads.Set(IR::Attribute::PositionZ, header.vtg.imap_systemb.position_z != 0);
|
||||
info.loads.Set(IR::Attribute::PositionW, header.vtg.imap_systemb.position_w != 0);
|
||||
info.loads.Set(IR::Attribute::PointSpriteS, header.vtg.point_sprite_s != 0);
|
||||
info.loads.Set(IR::Attribute::PointSpriteT, header.vtg.point_sprite_t != 0);
|
||||
info.loads.Set(IR::Attribute::FogCoordinate, header.vtg.fog_coordinate != 0);
|
||||
info.loads.Set(IR::Attribute::TessellationEvaluationPointU,
|
||||
header.vtg.tessellation_eval_point_u != 0);
|
||||
info.loads.Set(IR::Attribute::TessellationEvaluationPointV,
|
||||
header.vtg.tessellation_eval_point_v != 0);
|
||||
info.loads.Set(IR::Attribute::InstanceId, header.vtg.instance_id != 0);
|
||||
info.loads.Set(IR::Attribute::VertexId, header.vtg.vertex_id != 0);
|
||||
// TODO: Legacy varyings
|
||||
}
|
||||
if (info.stores_indexed_attributes) {
|
||||
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
|
||||
const IR::Attribute attribute{IR::Attribute::Generic0X + index * 4};
|
||||
const auto mask{header.vtg.OutputGeneric(index)};
|
||||
for (size_t i = 0; i < 4; ++i) {
|
||||
info.stores.Set(attribute + i, mask[i]);
|
||||
}
|
||||
}
|
||||
for (size_t index = 0; index < 8; ++index) {
|
||||
const u16 mask{header.vtg.omap_systemc.clip_distances};
|
||||
info.stores.Set(IR::Attribute::ClipDistance0 + index, ((mask >> index) & 1) != 0);
|
||||
}
|
||||
info.stores.Set(IR::Attribute::PrimitiveId,
|
||||
header.vtg.omap_systemb.primitive_array_id != 0);
|
||||
info.stores.Set(IR::Attribute::Layer, header.vtg.omap_systemb.rt_array_index != 0);
|
||||
info.stores.Set(IR::Attribute::ViewportIndex, header.vtg.omap_systemb.viewport_index != 0);
|
||||
info.stores.Set(IR::Attribute::PointSize, header.vtg.omap_systemb.point_size != 0);
|
||||
info.stores.Set(IR::Attribute::PositionX, header.vtg.omap_systemb.position_x != 0);
|
||||
info.stores.Set(IR::Attribute::PositionY, header.vtg.omap_systemb.position_y != 0);
|
||||
info.stores.Set(IR::Attribute::PositionZ, header.vtg.omap_systemb.position_z != 0);
|
||||
info.stores.Set(IR::Attribute::PositionW, header.vtg.omap_systemb.position_w != 0);
|
||||
info.stores.Set(IR::Attribute::PointSpriteS, header.vtg.omap_systemc.point_sprite_s != 0);
|
||||
info.stores.Set(IR::Attribute::PointSpriteT, header.vtg.omap_systemc.point_sprite_t != 0);
|
||||
info.stores.Set(IR::Attribute::FogCoordinate, header.vtg.omap_systemc.fog_coordinate != 0);
|
||||
info.stores.Set(IR::Attribute::TessellationEvaluationPointU,
|
||||
header.vtg.omap_systemc.tessellation_eval_point_u != 0);
|
||||
info.stores.Set(IR::Attribute::TessellationEvaluationPointV,
|
||||
header.vtg.omap_systemc.tessellation_eval_point_v != 0);
|
||||
info.stores.Set(IR::Attribute::InstanceId, header.vtg.omap_systemc.instance_id != 0);
|
||||
info.stores.Set(IR::Attribute::VertexId, header.vtg.omap_systemc.vertex_id != 0);
|
||||
// TODO: Legacy varyings
|
||||
}
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
|
|
@ -34,6 +34,7 @@ struct Profile {
|
|||
bool support_demote_to_helper_invocation{};
|
||||
bool support_int64_atomics{};
|
||||
bool support_derivative_control{};
|
||||
bool support_geometry_shader_passthrough{};
|
||||
bool support_gl_nv_gpu_shader_5{};
|
||||
bool support_gl_amd_gpu_shader_half_float{};
|
||||
bool support_gl_texture_shadow_lod{};
|
||||
|
|
|
@ -37,7 +37,9 @@ struct ProgramHeader {
|
|||
BitField<15, 1, u32> kills_pixels;
|
||||
BitField<16, 1, u32> does_global_store;
|
||||
BitField<17, 4, u32> sass_version;
|
||||
BitField<21, 5, u32> reserved;
|
||||
BitField<21, 2, u32> reserved1;
|
||||
BitField<24, 1, u32> geometry_passthrough;
|
||||
BitField<25, 1, u32> reserved2;
|
||||
BitField<26, 1, u32> does_load_or_store;
|
||||
BitField<27, 1, u32> does_fp64;
|
||||
BitField<28, 4, u32> stream_out_mask;
|
||||
|
@ -79,24 +81,10 @@ struct ProgramHeader {
|
|||
BitField<5, 1, u8> position_y;
|
||||
BitField<6, 1, u8> position_z;
|
||||
BitField<7, 1, u8> position_w;
|
||||
BitField<0, 4, u8> first;
|
||||
BitField<4, 4, u8> position;
|
||||
u8 raw;
|
||||
} imap_systemb;
|
||||
|
||||
union {
|
||||
BitField<0, 1, u8> x;
|
||||
BitField<1, 1, u8> y;
|
||||
BitField<2, 1, u8> z;
|
||||
BitField<3, 1, u8> w;
|
||||
BitField<4, 1, u8> x2;
|
||||
BitField<5, 1, u8> y2;
|
||||
BitField<6, 1, u8> z2;
|
||||
BitField<7, 1, u8> w2;
|
||||
BitField<0, 4, u8> first;
|
||||
BitField<4, 4, u8> second;
|
||||
u8 raw;
|
||||
} imap_generic_vector[16];
|
||||
std::array<u8, 16> imap_generic_vector;
|
||||
|
||||
INSERT_PADDING_BYTES_NOINIT(2); // ImapColor
|
||||
union {
|
||||
|
@ -122,24 +110,10 @@ struct ProgramHeader {
|
|||
BitField<5, 1, u8> position_y;
|
||||
BitField<6, 1, u8> position_z;
|
||||
BitField<7, 1, u8> position_w;
|
||||
BitField<0, 4, u8> first;
|
||||
BitField<4, 4, u8> position;
|
||||
u8 raw;
|
||||
} omap_systemb;
|
||||
|
||||
union {
|
||||
BitField<0, 1, u8> x;
|
||||
BitField<1, 1, u8> y;
|
||||
BitField<2, 1, u8> z;
|
||||
BitField<3, 1, u8> w;
|
||||
BitField<4, 1, u8> x2;
|
||||
BitField<5, 1, u8> y2;
|
||||
BitField<6, 1, u8> z2;
|
||||
BitField<7, 1, u8> w2;
|
||||
BitField<0, 4, u8> first;
|
||||
BitField<4, 4, u8> second;
|
||||
u8 raw;
|
||||
} omap_generic_vector[16];
|
||||
std::array<u8, 16> omap_generic_vector;
|
||||
|
||||
INSERT_PADDING_BYTES_NOINIT(2); // OmapColor
|
||||
|
||||
|
@ -157,18 +131,24 @@ struct ProgramHeader {
|
|||
INSERT_PADDING_BYTES_NOINIT(5); // OmapFixedFncTexture[10]
|
||||
INSERT_PADDING_BYTES_NOINIT(1); // OmapReserved
|
||||
|
||||
[[nodiscard]] bool IsInputGenericVectorActive(size_t index) const {
|
||||
if ((index & 1) == 0) {
|
||||
return imap_generic_vector[index >> 1].first != 0;
|
||||
}
|
||||
return imap_generic_vector[index >> 1].second != 0;
|
||||
[[nodiscard]] std::array<bool, 4> InputGeneric(size_t index) const noexcept {
|
||||
const int data{imap_generic_vector[index >> 1] >> ((index % 2) * 4)};
|
||||
return {
|
||||
(data & 1) != 0,
|
||||
(data & 2) != 0,
|
||||
(data & 4) != 0,
|
||||
(data & 8) != 0,
|
||||
};
|
||||
}
|
||||
|
||||
[[nodiscard]] bool IsOutputGenericVectorActive(size_t index) const {
|
||||
if ((index & 1) == 0) {
|
||||
return omap_generic_vector[index >> 1].first != 0;
|
||||
}
|
||||
return omap_generic_vector[index >> 1].second != 0;
|
||||
[[nodiscard]] std::array<bool, 4> OutputGeneric(size_t index) const noexcept {
|
||||
const int data{omap_generic_vector[index >> 1] >> ((index % 2) * 4)};
|
||||
return {
|
||||
(data & 1) != 0,
|
||||
(data & 2) != 0,
|
||||
(data & 4) != 0,
|
||||
(data & 8) != 0,
|
||||
};
|
||||
}
|
||||
} vtg;
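The InputGeneric()/OutputGeneric() helpers above replace the old per-byte bitfield unions with a flat byte array in which each byte packs two generic vectors, one per nibble and one bit per component. A self-contained sketch of the same decode, with a made-up example value:

```cpp
// Mirrors the nibble decode of ProgramHeader::InputGeneric/OutputGeneric:
// 16 bytes cover 32 generic vectors, low nibble first, one bit per component.
#include <array>
#include <cstdint>
#include <cstdio>

std::array<bool, 4> DecodeGeneric(const std::array<std::uint8_t, 16>& map, std::size_t index) {
    const int data = map[index >> 1] >> ((index % 2) * 4);
    return {(data & 1) != 0, (data & 2) != 0, (data & 4) != 0, (data & 8) != 0};
}

int main() {
    std::array<std::uint8_t, 16> imap{};
    imap[0] = 0x2 | (0x8 << 4); // generic 0 uses .y, generic 1 uses .w (illustrative)
    const auto g0 = DecodeGeneric(imap, 0);
    const auto g1 = DecodeGeneric(imap, 1);
    std::printf("g0: %d%d%d%d  g1: %d%d%d%d\n", g0[0], g0[1], g0[2], g0[3],
                g1[0], g1[1], g1[2], g1[3]); // prints "g0: 0100  g1: 0001"
}
```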
|
||||
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <vector>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "shader_recompiler/varying_state.h"
|
||||
|
||||
namespace Shader {
|
||||
|
||||
|
@ -60,7 +61,7 @@ struct TransformFeedbackVarying {
|
|||
|
||||
struct RuntimeInfo {
|
||||
std::array<AttributeType, 32> generic_input_types{};
|
||||
std::bitset<32> previous_stage_stores_generic{};
|
||||
VaryingState previous_stage_stores;
|
||||
|
||||
bool convert_depth_mode{};
|
||||
bool force_early_z{};
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
|
||||
#include "common/common_types.h"
|
||||
#include "shader_recompiler/frontend/ir/type.h"
|
||||
#include "shader_recompiler/varying_state.h"
|
||||
|
||||
#include <boost/container/small_vector.hpp>
|
||||
#include <boost/container/static_vector.hpp>
|
||||
|
@ -44,11 +45,6 @@ enum class Interpolation {
|
|||
NoPerspective,
|
||||
};
|
||||
|
||||
struct InputVarying {
|
||||
Interpolation interpolation{Interpolation::Smooth};
|
||||
bool used{false};
|
||||
};
|
||||
|
||||
struct ConstantBufferDescriptor {
|
||||
u32 index;
|
||||
u32 count;
|
||||
|
@ -121,18 +117,10 @@ struct Info {
|
|||
bool uses_subgroup_shuffles{};
|
||||
std::array<bool, 30> uses_patches{};
|
||||
|
||||
std::array<InputVarying, 32> input_generics{};
|
||||
bool loads_primitive_id{};
|
||||
bool loads_position{};
|
||||
bool loads_color_front_diffuse{};
|
||||
bool loads_fixed_fnc_textures{};
|
||||
bool loads_point_coord{};
|
||||
bool loads_instance_id{};
|
||||
bool loads_vertex_id{};
|
||||
bool loads_front_face{};
|
||||
bool loads_legacy_varyings{};
|
||||
|
||||
bool loads_tess_coord{};
|
||||
std::array<Interpolation, 32> interpolation{};
|
||||
VaryingState loads;
|
||||
VaryingState stores;
|
||||
VaryingState passthrough;
|
||||
|
||||
bool loads_indexed_attributes{};
|
||||
|
||||
|
@ -140,21 +128,6 @@ struct Info {
|
|||
bool stores_sample_mask{};
|
||||
bool stores_frag_depth{};
|
||||
|
||||
std::bitset<32> stores_generics{};
|
||||
bool stores_layer{};
|
||||
bool stores_viewport_index{};
|
||||
bool stores_point_size{};
|
||||
bool stores_position{};
|
||||
bool stores_color_front_diffuse{};
|
||||
bool stores_color_front_specular{};
|
||||
bool stores_color_back_diffuse{};
|
||||
bool stores_color_back_specular{};
|
||||
bool stores_fixed_fnc_textures{};
|
||||
bool stores_clip_distance{};
|
||||
bool stores_fog_coordinate{};
|
||||
bool stores_viewport_mask{};
|
||||
bool stores_legacy_varyings{};
|
||||
|
||||
bool stores_tess_level_outer{};
|
||||
bool stores_tess_level_inner{};
|
||||
|
||||
|
|
src/shader_recompiler/varying_state.h (new file, 69 lines)
|
@ -0,0 +1,69 @@
|
|||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <bitset>
|
||||
#include <cstddef>
|
||||
|
||||
#include "shader_recompiler/frontend/ir/attribute.h"
|
||||
|
||||
namespace Shader {
|
||||
|
||||
struct VaryingState {
|
||||
std::bitset<256> mask{};
|
||||
|
||||
void Set(IR::Attribute attribute, bool state = true) {
|
||||
mask[static_cast<size_t>(attribute)] = state;
|
||||
}
|
||||
|
||||
[[nodiscard]] bool operator[](IR::Attribute attribute) const noexcept {
|
||||
return mask[static_cast<size_t>(attribute)];
|
||||
}
|
||||
|
||||
[[nodiscard]] bool AnyComponent(IR::Attribute base) const noexcept {
|
||||
return mask[static_cast<size_t>(base) + 0] || mask[static_cast<size_t>(base) + 1] ||
|
||||
mask[static_cast<size_t>(base) + 2] || mask[static_cast<size_t>(base) + 3];
|
||||
}
|
||||
|
||||
[[nodiscard]] bool AllComponents(IR::Attribute base) const noexcept {
|
||||
return mask[static_cast<size_t>(base) + 0] && mask[static_cast<size_t>(base) + 1] &&
|
||||
mask[static_cast<size_t>(base) + 2] && mask[static_cast<size_t>(base) + 3];
|
||||
}
|
||||
|
||||
[[nodiscard]] bool IsUniform(IR::Attribute base) const noexcept {
|
||||
return AnyComponent(base) == AllComponents(base);
|
||||
}
|
||||
|
||||
[[nodiscard]] bool Generic(size_t index, size_t component) const noexcept {
|
||||
return mask[static_cast<size_t>(IR::Attribute::Generic0X) + index * 4 + component];
|
||||
}
|
||||
|
||||
[[nodiscard]] bool Generic(size_t index) const noexcept {
|
||||
return Generic(index, 0) || Generic(index, 1) || Generic(index, 2) || Generic(index, 3);
|
||||
}
|
||||
|
||||
[[nodiscard]] bool ClipDistances() const noexcept {
|
||||
return AnyComponent(IR::Attribute::ClipDistance0) ||
|
||||
AnyComponent(IR::Attribute::ClipDistance4);
|
||||
}
|
||||
|
||||
[[nodiscard]] bool Legacy() const noexcept {
|
||||
return AnyComponent(IR::Attribute::ColorFrontDiffuseR) ||
|
||||
AnyComponent(IR::Attribute::ColorFrontSpecularR) ||
|
||||
AnyComponent(IR::Attribute::ColorBackDiffuseR) ||
|
||||
AnyComponent(IR::Attribute::ColorBackSpecularR) || FixedFunctionTexture();
|
||||
}
|
||||
|
||||
[[nodiscard]] bool FixedFunctionTexture() const noexcept {
|
||||
for (size_t index = 0; index < 10; ++index) {
|
||||
if (AnyComponent(IR::Attribute::FixedFncTexture0S + index * 4)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Shader
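A short, self-contained demo of the component-level queries defined in this new header; the stripped-down struct below copies their logic for a single hypothetical attribute vector, and shows that IsUniform reports true both when no component and when all four components are used.

```cpp
// Cut-down copy of the AnyComponent/AllComponents/IsUniform logic from
// varying_state.h, specialized to one illustrative base bit index.
#include <bitset>
#include <cstddef>
#include <cstdio>

constexpr std::size_t BASE = 36; // stand-in for static_cast<size_t>(some IR::Attribute ...X)

struct ComponentMask {
    std::bitset<256> mask{};
    bool Any() const { return mask[BASE] || mask[BASE + 1] || mask[BASE + 2] || mask[BASE + 3]; }
    bool All() const { return mask[BASE] && mask[BASE + 1] && mask[BASE + 2] && mask[BASE + 3]; }
    bool IsUniform() const { return Any() == All(); } // all four or none
};

int main() {
    ComponentMask s;
    std::printf("%d\n", s.IsUniform()); // 1: no component used
    s.mask[BASE + 2] = true;
    std::printf("%d\n", s.IsUniform()); // 0: only .z used
    s.mask[BASE] = s.mask[BASE + 1] = s.mask[BASE + 3] = true;
    std::printf("%d\n", s.IsUniform()); // 1: all four components used
}
```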
|
|
@ -961,7 +961,11 @@ public:
|
|||
|
||||
SamplerIndex sampler_index;
|
||||
|
||||
INSERT_PADDING_WORDS_NOINIT(0x25);
|
||||
INSERT_PADDING_WORDS_NOINIT(0x2);
|
||||
|
||||
std::array<u32, 8> gp_passthrough_mask;
|
||||
|
||||
INSERT_PADDING_WORDS_NOINIT(0x1B);
|
||||
|
||||
u32 depth_test_enable;
|
||||
|
||||
|
@ -1628,6 +1632,7 @@ ASSERT_REG_POSITION(zeta_width, 0x48a);
|
|||
ASSERT_REG_POSITION(zeta_height, 0x48b);
|
||||
ASSERT_REG_POSITION(zeta_depth, 0x48c);
|
||||
ASSERT_REG_POSITION(sampler_index, 0x48D);
|
||||
ASSERT_REG_POSITION(gp_passthrough_mask, 0x490);
|
||||
ASSERT_REG_POSITION(depth_test_enable, 0x4B3);
|
||||
ASSERT_REG_POSITION(independent_blend_enable, 0x4B9);
|
||||
ASSERT_REG_POSITION(depth_write_enabled, 0x4BA);
|
||||
|
|
|
@ -61,10 +61,10 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
|
|||
bool glasm_use_storage_buffers, bool use_assembly_shaders) {
|
||||
Shader::RuntimeInfo info;
|
||||
if (previous_program) {
|
||||
info.previous_stage_stores_generic = previous_program->info.stores_generics;
|
||||
info.previous_stage_stores = previous_program->info.stores;
|
||||
} else {
|
||||
// Mark all stores as available
|
||||
info.previous_stage_stores_generic.flip();
|
||||
// Mark all stores as available for vertex shaders
|
||||
info.previous_stage_stores.mask.set();
|
||||
}
|
||||
switch (program.stage) {
|
||||
case Shader::Stage::VertexB:
|
||||
|
@ -187,6 +187,7 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo
|
|||
.support_demote_to_helper_invocation = false,
|
||||
.support_int64_atomics = false,
|
||||
.support_derivative_control = device.HasDerivativeControl(),
|
||||
.support_geometry_shader_passthrough = false, // TODO
|
||||
.support_gl_nv_gpu_shader_5 = device.HasNvGpuShader5(),
|
||||
.support_gl_amd_gpu_shader_half_float = device.HasAmdShaderHalfFloat(),
|
||||
.support_gl_texture_shadow_lod = device.HasTextureShadowLod(),
|
||||
|
|
|
@ -487,10 +487,9 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
|
|||
static_vector<VkVertexInputBindingDivisorDescriptionEXT, 32> vertex_binding_divisors;
|
||||
static_vector<VkVertexInputAttributeDescription, 32> vertex_attributes;
|
||||
if (key.state.dynamic_vertex_input) {
|
||||
const auto& input_attributes = stage_infos[0].input_generics;
|
||||
for (size_t index = 0; index < key.state.attributes.size(); ++index) {
|
||||
const u32 type = key.state.DynamicAttributeType(index);
|
||||
if (!input_attributes[index].used || type == 0) {
|
||||
if (!stage_infos[0].loads.Generic(index) || type == 0) {
|
||||
continue;
|
||||
}
|
||||
vertex_attributes.push_back({
|
||||
|
@ -526,10 +525,9 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
|
|||
});
|
||||
}
|
||||
}
|
||||
const auto& input_attributes = stage_infos[0].input_generics;
|
||||
for (size_t index = 0; index < key.state.attributes.size(); ++index) {
|
||||
const auto& attribute = key.state.attributes[index];
|
||||
if (!attribute.enabled || !input_attributes[index].used) {
|
||||
if (!attribute.enabled || !stage_infos[0].loads.Generic(index)) {
|
||||
continue;
|
||||
}
|
||||
vertex_attributes.push_back({
|
||||
|
|
|
@ -123,18 +123,21 @@ Shader::AttributeType AttributeType(const FixedPipelineState& state, size_t inde
|
|||
return Shader::AttributeType::Disabled;
|
||||
}
|
||||
|
||||
Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineCacheKey& key,
|
||||
Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> programs,
|
||||
const GraphicsPipelineCacheKey& key,
|
||||
const Shader::IR::Program& program,
|
||||
const Shader::IR::Program* previous_program) {
|
||||
Shader::RuntimeInfo info;
|
||||
if (previous_program) {
|
||||
info.previous_stage_stores_generic = previous_program->info.stores_generics;
|
||||
info.previous_stage_stores = previous_program->info.stores;
|
||||
if (previous_program->is_geometry_passthrough) {
|
||||
info.previous_stage_stores.mask |= previous_program->info.passthrough.mask;
|
||||
}
|
||||
} else {
|
||||
// Mark all stores as available
|
||||
info.previous_stage_stores_generic.flip();
|
||||
info.previous_stage_stores.mask.set();
|
||||
}
|
||||
const Shader::Stage stage{program.stage};
|
||||
const bool has_geometry{key.unique_hashes[4] != 0};
|
||||
const bool has_geometry{key.unique_hashes[4] != 0 && !programs[4].is_geometry_passthrough};
|
||||
const bool gl_ndc{key.state.ndc_minus_one_to_one != 0};
|
||||
const float point_size{Common::BitCast<float>(key.state.point_size)};
|
||||
switch (stage) {
|
||||
|
@ -302,6 +305,7 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxw
|
|||
.support_demote_to_helper_invocation = true,
|
||||
.support_int64_atomics = device.IsExtShaderAtomicInt64Supported(),
|
||||
.support_derivative_control = true,
|
||||
.support_geometry_shader_passthrough = device.IsNvGeometryShaderPassthroughSupported(),
|
||||
|
||||
.warp_size_potentially_larger_than_guest = device.IsWarpSizePotentiallyBiggerThanGuest(),
|
||||
|
||||
|
@ -518,7 +522,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
|
|||
const size_t stage_index{index - 1};
|
||||
infos[stage_index] = &program.info;
|
||||
|
||||
const Shader::RuntimeInfo runtime_info{MakeRuntimeInfo(key, program, previous_stage)};
|
||||
const auto runtime_info{MakeRuntimeInfo(programs, key, program, previous_stage)};
|
||||
const std::vector<u32> code{EmitSPIRV(profile, runtime_info, program, binding)};
|
||||
device.SaveShader(code);
|
||||
modules[stage_index] = BuildShader(device, code);
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
namespace VideoCommon {
|
||||
|
||||
constexpr std::array<char, 8> MAGIC_NUMBER{'y', 'u', 'z', 'u', 'c', 'a', 'c', 'h'};
|
||||
constexpr u32 CACHE_VERSION = 4;
|
||||
constexpr u32 CACHE_VERSION = 5;
|
||||
|
||||
constexpr size_t INST_SIZE = sizeof(u64);
|
||||
|
||||
|
@ -155,6 +155,10 @@ void GenericEnvironment::Serialize(std::ofstream& file) const {
|
|||
.write(reinterpret_cast<const char*>(&shared_memory_size), sizeof(shared_memory_size));
|
||||
} else {
|
||||
file.write(reinterpret_cast<const char*>(&sph), sizeof(sph));
|
||||
if (stage == Shader::Stage::Geometry) {
|
||||
file.write(reinterpret_cast<const char*>(&gp_passthrough_mask),
|
||||
sizeof(gp_passthrough_mask));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -202,6 +206,7 @@ GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
|
|||
u32 start_address_)
|
||||
: GenericEnvironment{gpu_memory_, program_base_, start_address_}, maxwell3d{&maxwell3d_} {
|
||||
gpu_memory->ReadBlock(program_base + start_address, &sph, sizeof(sph));
|
||||
gp_passthrough_mask = maxwell3d->regs.gp_passthrough_mask;
|
||||
switch (program) {
|
||||
case Maxwell::ShaderProgram::VertexA:
|
||||
stage = Shader::Stage::VertexA;
|
||||
|
@ -319,6 +324,9 @@ void FileEnvironment::Deserialize(std::ifstream& file) {
|
|||
.read(reinterpret_cast<char*>(&shared_memory_size), sizeof(shared_memory_size));
|
||||
} else {
|
||||
file.read(reinterpret_cast<char*>(&sph), sizeof(sph));
|
||||
if (stage == Shader::Stage::Geometry) {
|
||||
file.read(reinterpret_cast<char*>(&gp_passthrough_mask), sizeof(gp_passthrough_mask));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -350,6 +350,10 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
|
|||
LOG_INFO(Render_Vulkan, "Device doesn't support viewport masks");
|
||||
}
|
||||
|
||||
if (!nv_geometry_shader_passthrough) {
|
||||
LOG_INFO(Render_Vulkan, "Device doesn't support passthrough geometry shaders");
|
||||
}
|
||||
|
||||
VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout;
|
||||
if (khr_uniform_buffer_standard_layout) {
|
||||
std430_layout = {
|
||||
|
@ -768,6 +772,8 @@ std::vector<const char*> Device::LoadExtensions(bool requires_surface) {
|
|||
};
|
||||
test(nv_viewport_swizzle, VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME, true);
|
||||
test(nv_viewport_array2, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME, true);
|
||||
test(nv_geometry_shader_passthrough, VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME,
|
||||
true);
|
||||
test(khr_uniform_buffer_standard_layout,
|
||||
VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true);
|
||||
test(khr_spirv_1_4, VK_KHR_SPIRV_1_4_EXTENSION_NAME, true);
|
||||
|
|
|
@ -194,6 +194,11 @@ public:
|
|||
return nv_viewport_array2;
|
||||
}
|
||||
|
||||
/// Returns true if the device supports VK_NV_geometry_shader_passthrough.
|
||||
bool IsNvGeometryShaderPassthroughSupported() const {
|
||||
return nv_geometry_shader_passthrough;
|
||||
}
|
||||
|
||||
/// Returns true if the device supports VK_KHR_uniform_buffer_standard_layout.
|
||||
bool IsKhrUniformBufferStandardLayoutSupported() const {
|
||||
return khr_uniform_buffer_standard_layout;
|
||||
|
@ -363,6 +368,7 @@ private:
|
|||
bool is_blit_depth_stencil_supported{}; ///< Support for blitting from and to depth stencil.
|
||||
bool nv_viewport_swizzle{}; ///< Support for VK_NV_viewport_swizzle.
|
||||
bool nv_viewport_array2{}; ///< Support for VK_NV_viewport_array2.
|
||||
bool nv_geometry_shader_passthrough{}; ///< Support for VK_NV_geometry_shader_passthrough.
|
||||
bool khr_uniform_buffer_standard_layout{}; ///< Support for scalar uniform buffer layouts.
|
||||
bool khr_spirv_1_4{}; ///< Support for VK_KHR_spirv_1_4.
|
||||
bool khr_workgroup_memory_explicit_layout{}; ///< Support for explicit workgroup layouts.
|
||||
|
|