@@ -6755,7 +6755,7 @@ void CompilerGLSL::emit_subgroup_op(const Instruction &i)
 	uint32_t result_type = ops[0];
 	uint32_t id = ops[1];

-	auto scope = static_cast<Scope>(get<SPIRConstant>(ops[2]).scalar());
+	auto scope = static_cast<Scope>(evaluate_constant_u32(ops[2]));
 	if (scope != ScopeSubgroup)
 		SPIRV_CROSS_THROW("Only subgroup scope is supported.");
@@ -6889,7 +6889,7 @@ case OpGroupNonUniform##op: \

 	case OpGroupNonUniformQuadSwap:
 	{
-		uint32_t direction = get<SPIRConstant>(ops[4]).scalar();
+		uint32_t direction = evaluate_constant_u32(ops[4]);
 		if (direction == 0)
 			emit_unary_func_op(result_type, id, ops[3], "subgroupQuadSwapHorizontal");
 		else if (direction == 1)
@@ -7635,7 +7635,7 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
 		else if (type->basetype == SPIRType::Struct)
 		{
 			if (!is_literal)
-				index = get<SPIRConstant>(index).scalar();
+				index = evaluate_constant_u32(index);

 			if (index >= type->member_types.size())
 				SPIRV_CROSS_THROW("Member index is out of bounds!");
@@ -8156,7 +8156,7 @@ std::pair<std::string, uint32_t> CompilerGLSL::flattened_access_chain_offset(
 		// We also check if this member is a builtin, since we then replace the entire expression with the builtin one.
 		else if (type->basetype == SPIRType::Struct)
 		{
-			index = get<SPIRConstant>(index).scalar();
+			index = evaluate_constant_u32(index);

 			if (index >= type->member_types.size())
 				SPIRV_CROSS_THROW("Member index is out of bounds!");
@@ -8184,7 +8184,7 @@ std::pair<std::string, uint32_t> CompilerGLSL::flattened_access_chain_offset(
 			auto *constant = maybe_get<SPIRConstant>(index);
 			if (constant)
 			{
-				index = get<SPIRConstant>(index).scalar();
+				index = evaluate_constant_u32(index);
 				offset += index * (row_major_matrix_needs_conversion ? (type->width / 8) : matrix_stride);
 			}
 			else
@@ -8213,7 +8213,7 @@ std::pair<std::string, uint32_t> CompilerGLSL::flattened_access_chain_offset(
 			auto *constant = maybe_get<SPIRConstant>(index);
 			if (constant)
 			{
-				index = get<SPIRConstant>(index).scalar();
+				index = evaluate_constant_u32(index);
 				offset += index * (row_major_matrix_needs_conversion ? matrix_stride : (type->width / 8));
 			}
 			else
@@ -10805,14 +10805,14 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)

 		if (opcode == OpMemoryBarrier)
 		{
-			memory = get<SPIRConstant>(ops[0]).scalar();
-			semantics = get<SPIRConstant>(ops[1]).scalar();
+			memory = evaluate_constant_u32(ops[0]);
+			semantics = evaluate_constant_u32(ops[1]);
 		}
 		else
 		{
-			execution_scope = get<SPIRConstant>(ops[0]).scalar();
-			memory = get<SPIRConstant>(ops[1]).scalar();
-			semantics = get<SPIRConstant>(ops[2]).scalar();
+			execution_scope = evaluate_constant_u32(ops[0]);
+			memory = evaluate_constant_u32(ops[1]);
+			semantics = evaluate_constant_u32(ops[2]);
 		}

 		if (execution_scope == ScopeSubgroup || memory == ScopeSubgroup)
@@ -10841,8 +10841,8 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
 		if (next && next->op == OpControlBarrier)
 		{
 			auto *next_ops = stream(*next);
-			uint32_t next_memory = get<SPIRConstant>(next_ops[1]).scalar();
-			uint32_t next_semantics = get<SPIRConstant>(next_ops[2]).scalar();
+			uint32_t next_memory = evaluate_constant_u32(next_ops[1]);
+			uint32_t next_semantics = evaluate_constant_u32(next_ops[2]);
 			next_semantics = mask_relevant_memory_semantics(next_semantics);

 			bool memory_scope_covered = false;
@@ -11795,15 +11795,7 @@ uint32_t CompilerGLSL::to_array_size_literal(const SPIRType &type, uint32_t inde
 	else
 	{
 		// Use the default spec constant value.
 		// This is the best we can do.
-		uint32_t array_size_id = type.array[index];
-
-		// Explicitly check for this case. The error message you would get (bad cast) makes no sense otherwise.
-		if (ir.ids[array_size_id].get_type() == TypeConstantOp)
-			SPIRV_CROSS_THROW("An array size was found to be an OpSpecConstantOp. This is not supported since "
-			                  "SPIRV-Cross cannot deduce the actual size here.");
-
-		uint32_t array_size = get<SPIRConstant>(array_size_id).scalar();
-		return array_size;
+		return evaluate_constant_u32(type.array[index]);
 	}
 }
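Note: every hunk above makes the same substitution — a direct get<SPIRConstant>(ops[n]).scalar() lookup becomes evaluate_constant_u32(ops[n]), so call sites no longer hard-cast the ID to a plain SPIRConstant. The sketch below is a standalone analogue, not SPIRV-Cross code (LiteralConstant, ConstantBinaryOp, and ConstantLike are made-up stand-ins); it only illustrates, under that assumption, why routing every scalar lookup through one folding helper helps: the helper can also resolve a constant expression (the OpSpecConstantOp case the old to_array_size_literal path had to reject), not just a literal.

// Standalone analogue, not SPIRV-Cross code: a single "fold to u32" helper that
// accepts either a literal constant or a simple constant expression, mirroring
// the role evaluate_constant_u32() plays at the call sites patched above.
#include <cstdint>
#include <stdexcept>
#include <variant>

struct LiteralConstant { uint32_t value; };              // stands in for SPIRConstant
struct ConstantBinaryOp { char op; uint32_t lhs, rhs; }; // stands in for an OpSpecConstantOp

using ConstantLike = std::variant<LiteralConstant, ConstantBinaryOp>;

uint32_t evaluate_constant_u32(const ConstantLike &c)
{
	// Plain literal: just return its scalar value.
	if (auto *lit = std::get_if<LiteralConstant>(&c))
		return lit->value;

	// Constant expression: fold it down to a value instead of rejecting it.
	const auto &expr = std::get<ConstantBinaryOp>(c);
	switch (expr.op)
	{
	case '+': return expr.lhs + expr.rhs;
	case '*': return expr.lhs * expr.rhs;
	default: throw std::runtime_error("Unsupported constant op.");
	}
}

With a helper of this shape, a call site such as static_cast<Scope>(evaluate_constant_u32(ops[2])) works whether the ID names a literal or a foldable expression, which is what lets the OpSpecConstantOp throw in to_array_size_literal disappear.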
@@ -12740,64 +12732,37 @@ void CompilerGLSL::branch(BlockID from, uint32_t cond, BlockID true_block, Block
 {
 	auto &from_block = get<SPIRBlock>(from);
 	BlockID merge_block = from_block.merge == SPIRBlock::MergeSelection ? from_block.next_block : BlockID(0);

-	// If we branch directly to a selection merge target, we don't need a code path.
-	// This covers both merge out of if () / else () as well as a break for switch blocks.
-	bool true_sub = !is_conditional(true_block);
-	bool false_sub = !is_conditional(false_block);
+	// If we branch directly to our selection merge target, we don't need a code path.
+	bool true_block_needs_code = true_block != merge_block || flush_phi_required(from, true_block);
+	bool false_block_needs_code = false_block != merge_block || flush_phi_required(from, false_block);

-	bool true_block_is_selection_merge = true_block == merge_block;
-	bool false_block_is_selection_merge = false_block == merge_block;
+	if (!true_block_needs_code && !false_block_needs_code)
+		return;

-	if (true_sub)
+	emit_block_hints(get<SPIRBlock>(from));
+
+	if (true_block_needs_code)
 	{
-		emit_block_hints(get<SPIRBlock>(from));
 		statement("if (", to_expression(cond), ")");
 		begin_scope();
 		branch(from, true_block);
 		end_scope();

-		// If we merge to continue, we handle that explicitly in emit_block_chain(),
-		// so there is no need to branch to it directly here.
-		// break; is required to handle ladder fallthrough cases, so keep that in for now, even
-		// if we could potentially handle it in emit_block_chain().
-		if (false_sub || (!false_block_is_selection_merge && is_continue(false_block)) || is_break(false_block))
+		if (false_block_needs_code)
 		{
 			statement("else");
 			begin_scope();
 			branch(from, false_block);
 			end_scope();
 		}
-		else if (flush_phi_required(from, false_block))
-		{
-			statement("else");
-			begin_scope();
-			flush_phi(from, false_block);
-			end_scope();
-		}
 	}
-	else if (false_sub)
+	else if (false_block_needs_code)
 	{
 		// Only need false path, use negative conditional.
-		emit_block_hints(get<SPIRBlock>(from));
 		statement("if (!", to_enclosed_expression(cond), ")");
 		begin_scope();
 		branch(from, false_block);
 		end_scope();
-
-		if ((!true_block_is_selection_merge && is_continue(true_block)) || is_break(true_block))
-		{
-			statement("else");
-			begin_scope();
-			branch(from, true_block);
-			end_scope();
-		}
-		else if (flush_phi_required(from, true_block))
-		{
-			statement("else");
-			begin_scope();
-			flush_phi(from, true_block);
-			end_scope();
-		}
 	}
 }
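Note: the last hunk collapses branch()'s special cases (continue, break, and phi-flush targets) into one predicate per arm — an arm needs an emitted code path only if it does not jump straight to the selection merge block, or if it still has PHI copies to flush on that edge. When neither arm needs code, the rewritten function returns before emitting anything, including the block hints. The sketch below is a standalone illustration of that predicate with made-up types (Edge, arm_needs_code, conditional_needed are hypothetical), not the real SPIRBlock bookkeeping.

// Standalone sketch with made-up types; only the predicate mirrors the patch.
#include <cstdint>

struct Edge
{
	uint32_t target;       // block this arm branches to
	bool has_phi_to_flush; // stands in for flush_phi_required(from, target)
};

// An arm needs its own code path unless it goes straight to the selection
// merge block and there is nothing to flush on the way.
bool arm_needs_code(const Edge &arm, uint32_t merge_block)
{
	return arm.target != merge_block || arm.has_phi_to_flush;
}

// If neither arm needs code, the conditional can be skipped entirely --
// the early return added at the top of the rewritten branch().
bool conditional_needed(const Edge &true_arm, const Edge &false_arm, uint32_t merge_block)
{
	return arm_needs_code(true_arm, merge_block) || arm_needs_code(false_arm, merge_block);
}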