@@ -5102,8 +5102,27 @@ string CompilerGLSL::to_extract_constant_composite_expression(uint32_t result_ty
 	return constant_expression(tmp);
 }
 
-string CompilerGLSL::to_rerolled_array_expression(const string &base_expr, const SPIRType &type)
+string CompilerGLSL::to_rerolled_array_expression(const SPIRType &parent_type,
+                                                  const string &base_expr, const SPIRType &type)
 {
+	bool remapped_boolean = parent_type.basetype == SPIRType::Struct &&
+	                        type.basetype == SPIRType::Boolean &&
+	                        backend.boolean_in_struct_remapped_type != SPIRType::Boolean;
+
+	SPIRType tmp_type;
+	if (remapped_boolean)
+	{
+		tmp_type = get<SPIRType>(type.parent_type);
+		tmp_type.basetype = backend.boolean_in_struct_remapped_type;
+	}
+	else if (type.basetype == SPIRType::Boolean && backend.boolean_in_struct_remapped_type != SPIRType::Boolean)
+	{
+		// It's possible that we have an r-value expression that was OpLoaded from a struct.
+		// We have to reroll this and explicitly cast the input to bool, because the r-value is short.
+		tmp_type = get<SPIRType>(type.parent_type);
+		remapped_boolean = true;
+	}
+
 	uint32_t size = to_array_size_literal(type);
 	auto &parent = get<SPIRType>(type.parent_type);
 	string expr = "{ ";
@@ -5111,10 +5130,14 @@ string CompilerGLSL::to_rerolled_array_expression(const string &base_expr, const
 	for (uint32_t i = 0; i < size; i++)
 	{
 		auto subexpr = join(base_expr, "[", convert_to_string(i), "]");
-		if (parent.array.empty())
+		if (!type_is_top_level_array(parent))
+		{
+			if (remapped_boolean)
+				subexpr = join(type_to_glsl(tmp_type), "(", subexpr, ")");
 			expr += subexpr;
+		}
 		else
-			expr += to_rerolled_array_expression(subexpr, parent);
+			expr += to_rerolled_array_expression(parent_type, subexpr, parent);
 
 		if (i + 1 < size)
 			expr += ", ";
@@ -5124,13 +5147,26 @@ string CompilerGLSL::to_rerolled_array_expression(const string &base_expr, const
 	return expr;
 }
 
-string CompilerGLSL::to_composite_constructor_expression(uint32_t id, bool block_like_type)
+string CompilerGLSL::to_composite_constructor_expression(const SPIRType &parent_type, uint32_t id, bool block_like_type)
 {
 	auto &type = expression_type(id);
 
-	bool reroll_array = !type.array.empty() &&
-	                    (!backend.array_is_value_type ||
-	                     (block_like_type && !backend.array_is_value_type_in_buffer_blocks));
+	bool reroll_array = false;
+	bool remapped_boolean = parent_type.basetype == SPIRType::Struct &&
+	                        type.basetype == SPIRType::Boolean &&
+	                        backend.boolean_in_struct_remapped_type != SPIRType::Boolean;
+
+	if (type_is_top_level_array(type))
+	{
+		reroll_array = !backend.array_is_value_type ||
+		               (block_like_type && !backend.array_is_value_type_in_buffer_blocks);
+
+		if (remapped_boolean)
+		{
+			// Forced to reroll if we have to change bool[] to short[].
+			reroll_array = true;
+		}
+	}
 
 	if (reroll_array)
 	{
@@ -5144,10 +5180,20 @@ string CompilerGLSL::to_composite_constructor_expression(uint32_t id, bool block
 
 		// We're only triggering one read of the array expression, but this is fine since arrays have to be declared
 		// as temporaries anyways.
-		return to_rerolled_array_expression(to_enclosed_expression(id), type);
+		return to_rerolled_array_expression(parent_type, to_enclosed_expression(id), type);
 	}
 	else
-		return to_unpacked_expression(id);
+	{
+		auto expr = to_unpacked_expression(id);
+		if (remapped_boolean)
+		{
+			auto tmp_type = type;
+			tmp_type.basetype = backend.boolean_in_struct_remapped_type;
+			expr = join(type_to_glsl(tmp_type), "(", expr, ")");
+		}
+
+		return expr;
+	}
 }
 
 string CompilerGLSL::to_non_uniform_aware_expression(uint32_t id)
@@ -5657,11 +5703,13 @@ string CompilerGLSL::constant_op_expression(const SPIRConstantOp &cop)
 	}
 }
 
-string CompilerGLSL::constant_expression(const SPIRConstant &c, bool inside_block_like_struct_scope)
+string CompilerGLSL::constant_expression(const SPIRConstant &c,
+                                         bool inside_block_like_struct_scope,
+                                         bool inside_struct_scope)
 {
 	auto &type = get<SPIRType>(c.constant_type);
 
-	if (type.pointer)
+	if (type_is_top_level_pointer(type))
 	{
 		return backend.null_pointer_literal;
 	}
@@ -5676,19 +5724,32 @@ string CompilerGLSL::constant_expression(const SPIRConstant &c, bool inside_bloc
 	// with Offset = 0, using no ArrayStride on the enclosed array type.
 	// A particular CTS test hits this scenario.
 	bool array_type_decays = inside_block_like_struct_scope &&
-	                         !type.array.empty() && !backend.array_is_value_type_in_buffer_blocks;
+	                         type_is_top_level_array(type) &&
+	                         !backend.array_is_value_type_in_buffer_blocks;
 
 	// Allow Metal to use the array<T> template to make arrays a value type
 	bool needs_trailing_tracket = false;
 	if (backend.use_initializer_list && backend.use_typed_initializer_list && type.basetype == SPIRType::Struct &&
-	    type.array.empty())
+	    !type_is_top_level_array(type))
 	{
 		res = type_to_glsl_constructor(type) + "{ ";
 	}
 	else if (backend.use_initializer_list && backend.use_typed_initializer_list && backend.array_is_value_type &&
-	         !type.array.empty() && !array_type_decays)
+	         type_is_top_level_array(type) && !array_type_decays)
 	{
-		res = type_to_glsl_constructor(type) + "({ ";
+		const auto *p_type = &type;
+		SPIRType tmp_type;
+
+		if (inside_struct_scope &&
+		    backend.boolean_in_struct_remapped_type != SPIRType::Boolean &&
+		    type.basetype == SPIRType::Boolean)
+		{
+			tmp_type = type;
+			tmp_type.basetype = backend.boolean_in_struct_remapped_type;
+			p_type = &tmp_type;
+		}
+
+		res = type_to_glsl_constructor(*p_type) + "({ ";
 		needs_trailing_tracket = true;
 	}
 	else if (backend.use_initializer_list)
@@ -5718,7 +5779,7 @@ string CompilerGLSL::constant_expression(const SPIRConstant &c, bool inside_bloc
 			res += to_name(elem);
 		else
 		{
-			if (type.array.empty() && type.basetype == SPIRType::Struct)
+			if (!type_is_top_level_array(type) && type.basetype == SPIRType::Struct)
 			{
 				// When we get down to emitting struct members, override the block-like information.
 				// For constants, we can freely mix and match block-like state.
@@ -5726,7 +5787,10 @@ string CompilerGLSL::constant_expression(const SPIRConstant &c, bool inside_bloc
 				    has_member_decoration(type.self, subconstant_index, DecorationOffset);
 			}
 
-			res += constant_expression(subc, inside_block_like_struct_scope);
+			if (type.basetype == SPIRType::Struct)
+				inside_struct_scope = true;
+
+			res += constant_expression(subc, inside_block_like_struct_scope, inside_struct_scope);
 		}
 	}
 
@@ -5748,19 +5812,30 @@ string CompilerGLSL::constant_expression(const SPIRConstant &c, bool inside_bloc
 		if (backend.supports_empty_struct)
 			return "{ }";
 		else if (backend.use_typed_initializer_list)
-			return join(type_to_glsl(get<SPIRType>(c.constant_type)), "{ 0 }");
+			return join(type_to_glsl(type), "{ 0 }");
 		else if (backend.use_initializer_list)
 			return "{ 0 }";
 		else
-			return join(type_to_glsl(get<SPIRType>(c.constant_type)), "(0)");
+			return join(type_to_glsl(type), "(0)");
 	}
 	else if (c.columns() == 1)
 	{
-		return constant_expression_vector(c, 0);
+		auto res = constant_expression_vector(c, 0);
+
+		if (inside_struct_scope &&
+		    backend.boolean_in_struct_remapped_type != SPIRType::Boolean &&
+		    type.basetype == SPIRType::Boolean)
+		{
+			SPIRType tmp_type = type;
+			tmp_type.basetype = backend.boolean_in_struct_remapped_type;
+			res = join(type_to_glsl(tmp_type), "(", res, ")");
+		}
+
+		return res;
 	}
 	else
 	{
-		string res = type_to_glsl(get<SPIRType>(c.constant_type)) + "(";
+		string res = type_to_glsl(type) + "(";
 		for (uint32_t col = 0; col < c.columns(); col++)
 		{
 			if (c.specialization_constant_id(col) != 0)
@@ -5772,6 +5847,16 @@ string CompilerGLSL::constant_expression(const SPIRConstant &c, bool inside_bloc
 				res += ", ";
 		}
 		res += ")";
+
+		if (inside_struct_scope &&
+		    backend.boolean_in_struct_remapped_type != SPIRType::Boolean &&
+		    type.basetype == SPIRType::Boolean)
+		{
+			SPIRType tmp_type = type;
+			tmp_type.basetype = backend.boolean_in_struct_remapped_type;
+			res = join(type_to_glsl(tmp_type), "(", res, ")");
+		}
+
 		return res;
 	}
 }
@@ -11107,7 +11192,7 @@ string CompilerGLSL::build_composite_combiner(uint32_t return_type, const uint32
 
 			bool uses_buffer_offset =
 			    type.basetype == SPIRType::Struct && has_member_decoration(type.self, i, DecorationOffset);
-			subop = to_composite_constructor_expression(elems[i], uses_buffer_offset);
+			subop = to_composite_constructor_expression(type, elems[i], uses_buffer_offset);
 		}
 
 		base = e ? e->base_expression : ID(0);
@@ -11197,8 +11282,15 @@ void CompilerGLSL::emit_block_instructions(SPIRBlock &block)
 	if (backend.requires_relaxed_precision_analysis)
 	{
 		// If PHI variables are consumed in unexpected precision contexts, copy them here.
-		for (auto &phi : block.phi_variables)
+		for (size_t i = 0, n = block.phi_variables.size(); i < n; i++)
 		{
+			auto &phi = block.phi_variables[i];
+
+			// Ensure we only copy once. We know a-priori that this array will lay out
+			// the same function variables together.
+			if (i && block.phi_variables[i - 1].function_variable == phi.function_variable)
+				continue;
+
 			auto itr = temporary_to_mirror_precision_alias.find(phi.function_variable);
 			if (itr != temporary_to_mirror_precision_alias.end())
 			{
@@ -11737,7 +11829,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
 			// it is an array, and our backend does not support arrays as value types.
 			// Emit the temporary, and copy it explicitly.
 			e = &emit_uninitialized_temporary_expression(result_type, id);
-			emit_array_copy(to_expression(id), id, ptr, StorageClassFunction, get_expression_effective_storage_class(ptr));
+			emit_array_copy(nullptr, id, ptr, StorageClassFunction, get_expression_effective_storage_class(ptr));
 		}
 		else
 			e = &emit_op(result_type, id, expr, forward, !usage_tracking);
@@ -15565,6 +15657,11 @@ void CompilerGLSL::require_extension(const std::string &ext)
 		forced_extensions.push_back(ext);
 }
 
+const SmallVector<std::string> &CompilerGLSL::get_required_extensions() const
+{
+	return forced_extensions;
+}
+
 void CompilerGLSL::require_extension_internal(const string &ext)
 {
 	if (backend.supports_extensions && !has_extension(ext))
@@ -16329,6 +16426,17 @@ bool CompilerGLSL::for_loop_initializers_are_same_type(const SPIRBlock &block)
 	return true;
 }
 
+void CompilerGLSL::emit_block_instructions_with_masked_debug(SPIRBlock &block)
+{
+	// Have to block debug instructions such as OpLine here, since it will be treated as a statement otherwise,
+	// which breaks loop optimizations.
+	// Any line directive would be declared outside the loop body, which would just be confusing either way.
+	bool old_block_debug_directives = block_debug_directives;
+	block_debug_directives = true;
+	emit_block_instructions(block);
+	block_debug_directives = old_block_debug_directives;
+}
+
 bool CompilerGLSL::attempt_emit_loop_header(SPIRBlock &block, SPIRBlock::Method method)
 {
 	SPIRBlock::ContinueBlockType continue_type = continue_block_type(get<SPIRBlock>(block.continue_block));
@@ -16339,7 +16447,7 @@ bool CompilerGLSL::attempt_emit_loop_header(SPIRBlock &block, SPIRBlock::Method
 		// If we're trying to create a true for loop,
 		// we need to make sure that all opcodes before branch statement do not actually emit any code.
 		// We can then take the condition expression and create a for (; cond ; ) { body; } structure instead.
-		emit_block_instructions(block);
+		emit_block_instructions_with_masked_debug(block);
 
 		bool condition_is_temporary = forced_temporaries.find(block.condition) == end(forced_temporaries);
 
@@ -16419,7 +16527,7 @@ bool CompilerGLSL::attempt_emit_loop_header(SPIRBlock &block, SPIRBlock::Method
 		// If we're trying to create a true for loop,
 		// we need to make sure that all opcodes before branch statement do not actually emit any code.
 		// We can then take the condition expression and create a for (; cond ; ) { body; } structure instead.
-		emit_block_instructions(child);
+		emit_block_instructions_with_masked_debug(child);
 
 		bool condition_is_temporary = forced_temporaries.find(child.condition) == end(forced_temporaries);
 
@@ -16557,6 +16665,24 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
 	if (block.merge == SPIRBlock::MergeLoop)
 		add_loop_level();
 
+	// If we're emitting PHI variables with precision aliases, we have to emit them as hoisted temporaries.
+	for (auto var_id : block.dominated_variables)
+	{
+		auto &var = get<SPIRVariable>(var_id);
+		if (var.phi_variable)
+		{
+			auto mirrored_precision_itr = temporary_to_mirror_precision_alias.find(var_id);
+			if (mirrored_precision_itr != temporary_to_mirror_precision_alias.end() &&
+			    find_if(block.declare_temporary.begin(), block.declare_temporary.end(),
+			            [mirrored_precision_itr](const std::pair<TypeID, VariableID> &p) {
+				            return p.second == mirrored_precision_itr->second;
+			            }) == block.declare_temporary.end())
+			{
+				block.declare_temporary.push_back({ var.basetype, mirrored_precision_itr->second });
+			}
+		}
+	}
+
 	emit_hoisted_temporaries(block.declare_temporary);
 
 	SPIRBlock::ContinueBlockType continue_type = SPIRBlock::ContinueNone;
@@ -17338,9 +17464,16 @@ uint32_t CompilerGLSL::mask_relevant_memory_semantics(uint32_t semantics)
 	                     MemorySemanticsCrossWorkgroupMemoryMask | MemorySemanticsSubgroupMemoryMask);
 }
 
-void CompilerGLSL::emit_array_copy(const string &lhs, uint32_t, uint32_t rhs_id, StorageClass, StorageClass)
+bool CompilerGLSL::emit_array_copy(const char *expr, uint32_t lhs_id, uint32_t rhs_id, StorageClass, StorageClass)
 {
+	string lhs;
+	if (expr)
+		lhs = expr;
+	else
+		lhs = to_expression(lhs_id);
+
 	statement(lhs, " = ", to_expression(rhs_id), ";");
+	return true;
 }
 
 bool CompilerGLSL::unroll_array_to_complex_store(uint32_t target_id, uint32_t source_id)
@@ -17773,6 +17906,11 @@ void CompilerGLSL::emit_line_directive(uint32_t file_id, uint32_t line_literal)
 	if (redirect_statement)
 		return;
 
+	// If we're emitting code in a sensitive context such as condition blocks in for loops, don't emit
+	// any line directives, because it's not possible.
+	if (block_debug_directives)
+		return;
+
 	if (options.emit_line_directives)
 	{
 		require_extension_internal("GL_GOOGLE_cpp_style_line_directive");