@@ -1,5 +1,5 @@
 /*
- * Copyright 2016-2019 Robert Konrad
+ * Copyright 2016-2020 Robert Konrad
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -210,6 +210,8 @@ string CompilerHLSL::image_type_hlsl_modern(const SPIRType &type, uint32_t id)
 	bool typed_load = false;
 	uint32_t components = 4;
 
+	bool force_image_srv = hlsl_options.nonwritable_uav_texture_as_srv && has_decoration(id, DecorationNonWritable);
+
 	switch (type.image.dim)
 	{
 	case Dim1D:
@@ -239,7 +241,14 @@ string CompilerHLSL::image_type_hlsl_modern(const SPIRType &type, uint32_t id)
 			if (interlocked_resources.count(id))
 				return join("RasterizerOrderedBuffer<", image_format_to_type(type.image.format, imagetype.basetype),
 				            ">");
-			return join("RWBuffer<", image_format_to_type(type.image.format, imagetype.basetype), ">");
+
+			typed_load = !force_image_srv && type.image.sampled == 2;
+
+			const char *rw = force_image_srv ? "" : "RW";
+			return join(rw, "Buffer<",
+			            typed_load ? image_format_to_type(type.image.format, imagetype.basetype) :
+			                         join(type_to_glsl(imagetype), components),
+			            ">");
 		}
 		else
 			SPIRV_CROSS_THROW("Sampler buffers must be either sampled or unsampled. Cannot deduce in runtime.");
@@ -252,9 +261,14 @@ string CompilerHLSL::image_type_hlsl_modern(const SPIRType &type, uint32_t id)
 	}
 	const char *arrayed = type.image.arrayed ? "Array" : "";
 	const char *ms = type.image.ms ? "MS" : "";
-	const char *rw = typed_load ? "RW" : "";
+	const char *rw = typed_load && !force_image_srv ? "RW" : "";
+
+	if (force_image_srv)
+		typed_load = false;
+
 	if (typed_load && interlocked_resources.count(id))
 		rw = "RasterizerOrdered";
+
 	return join(rw, "Texture", dim, ms, arrayed, "<",
 	            typed_load ? image_format_to_type(type.image.format, imagetype.basetype) :
 	                         join(type_to_glsl(imagetype), components),
@@ -387,8 +401,12 @@ string CompilerHLSL::type_to_glsl(const SPIRType &type, uint32_t id)
 	case SPIRType::Double:
 		return "double";
 	case SPIRType::Int64:
+		if (hlsl_options.shader_model < 60)
+			SPIRV_CROSS_THROW("64-bit integers only supported in SM 6.0.");
 		return "int64_t";
 	case SPIRType::UInt64:
+		if (hlsl_options.shader_model < 60)
+			SPIRV_CROSS_THROW("64-bit integers only supported in SM 6.0.");
 		return "uint64_t";
 	default:
 		return "???";
@@ -1113,16 +1131,25 @@ void CompilerHLSL::replace_illegal_names()
 		"line", "linear", "matrix", "point", "row_major", "sampler",
 	};
 
-	ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
-		if (!is_hidden_variable(var))
-		{
-			auto &m = ir.meta[var.self].decoration;
-			if (keywords.find(m.alias) != end(keywords))
-				m.alias = join("_", m.alias);
-		}
+	CompilerGLSL::replace_illegal_names(keywords);
+	CompilerGLSL::replace_illegal_names();
+}
+
+void CompilerHLSL::declare_undefined_values()
+{
+	bool emitted = false;
+	ir.for_each_typed_id<SPIRUndef>([&](uint32_t, const SPIRUndef &undef) {
+		string initializer;
+		if (options.force_zero_initialized_variables && type_can_zero_initialize(this->get<SPIRType>(undef.basetype)))
+			initializer = join(" = ", to_zero_initialized_expression(undef.basetype));
+
+		statement("static ", variable_decl(this->get<SPIRType>(undef.basetype), to_name(undef.self), undef.self),
+		          initializer, ";");
+		emitted = true;
 	});
 
-	CompilerGLSL::replace_illegal_names();
+	if (emitted)
+		statement("");
 }
 
 void CompilerHLSL::emit_resources()
@@ -1358,7 +1385,15 @@ void CompilerHLSL::emit_resources()
 					storage = "static";
 					break;
 				}
-				statement(storage, " ", variable_decl(var), ";");
+
+				string initializer;
+				if (options.force_zero_initialized_variables && var.storage == StorageClassPrivate &&
+				    !var.initializer && !var.static_expression && type_can_zero_initialize(get_variable_data_type(var)))
+				{
+					initializer = join(" = ", to_zero_initialized_expression(get_variable_data_type_id(var)));
+				}
+				statement(storage, " ", variable_decl(var), initializer, ";");
+
 				emitted = true;
 			}
 		}
@@ -1468,6 +1503,24 @@ void CompilerHLSL::emit_resources()
 		statement("");
 	}
 
+	if (requires_uint2_packing)
+	{
+		statement("uint64_t SPIRV_Cross_packUint2x32(uint2 value)");
+		begin_scope();
+		statement("return (uint64_t(value.y) << 32) | uint64_t(value.x);");
+		end_scope();
+		statement("");
+
+		statement("uint2 SPIRV_Cross_unpackUint2x32(uint64_t value)");
+		begin_scope();
+		statement("uint2 Unpacked;");
+		statement("Unpacked.x = uint(value & 0xffffffff);");
+		statement("Unpacked.y = uint(value >> 32);");
+		statement("return Unpacked;");
+		end_scope();
+		statement("");
+	}
+
 	if (requires_explicit_fp16_packing)
 	{
 		// HLSL does not pack into a single word sadly :(
@@ -1853,8 +1906,8 @@ void CompilerHLSL::emit_buffer_block(const SPIRVariable &var)
 	if (is_uav)
 	{
 		Bitset flags = ir.get_buffer_block_flags(var);
-		bool is_readonly = flags.get(DecorationNonWritable);
-		bool is_coherent = flags.get(DecorationCoherent);
+		bool is_readonly = flags.get(DecorationNonWritable) && !hlsl_options.force_storage_buffer_as_uav;
+		bool is_coherent = flags.get(DecorationCoherent) && !is_readonly;
 		bool is_interlocked = interlocked_resources.count(var.self) > 0;
 		const char *type_name = "ByteAddressBuffer ";
 		if (!is_readonly)
@@ -2934,16 +2987,15 @@ void CompilerHLSL::emit_texture_op(const Instruction &i)
 
 string CompilerHLSL::to_resource_binding(const SPIRVariable &var)
 {
-	// TODO: Basic implementation, might need special consideration for RW/RO structured buffers,
-	// RW/RO images, and so on.
+	const auto &type = get<SPIRType>(var.basetype);
 
-	if (!has_decoration(var.self, DecorationBinding))
+	// We can remap push constant blocks, even if they don't have any binding decoration.
+	if (type.storage != StorageClassPushConstant && !has_decoration(var.self, DecorationBinding))
 		return "";
 
-	const auto &type = get<SPIRType>(var.basetype);
 	char space = '\0';
 
-	HLSLBindingFlags resource_flags = 0;
+	HLSLBindingFlagBits resource_flags = HLSL_BINDING_AUTO_NONE_BIT;
 
 	switch (type.basetype)
 	{
@@ -2955,8 +3007,16 @@ string CompilerHLSL::to_resource_binding(const SPIRVariable &var)
 	case SPIRType::Image:
 		if (type.image.sampled == 2 && type.image.dim != DimSubpassData)
 		{
-			space = 'u'; // UAV
-			resource_flags = HLSL_BINDING_AUTO_UAV_BIT;
+			if (has_decoration(var.self, DecorationNonWritable) && hlsl_options.nonwritable_uav_texture_as_srv)
+			{
+				space = 't'; // SRV
+				resource_flags = HLSL_BINDING_AUTO_SRV_BIT;
+			}
+			else
+			{
+				space = 'u'; // UAV
+				resource_flags = HLSL_BINDING_AUTO_UAV_BIT;
+			}
 		}
 		else
 		{
@@ -2978,7 +3038,7 @@ string CompilerHLSL::to_resource_binding(const SPIRVariable &var)
 		if (has_decoration(type.self, DecorationBufferBlock))
 		{
 			Bitset flags = ir.get_buffer_block_flags(var);
-			bool is_readonly = flags.get(DecorationNonWritable);
+			bool is_readonly = flags.get(DecorationNonWritable) && !hlsl_options.force_storage_buffer_as_uav;
 			space = is_readonly ? 't' : 'u'; // UAV
 			resource_flags = is_readonly ? HLSL_BINDING_AUTO_SRV_BIT : HLSL_BINDING_AUTO_UAV_BIT;
 		}
@@ -2997,7 +3057,7 @@ string CompilerHLSL::to_resource_binding(const SPIRVariable &var)
 	{
 		// UAV or SRV depending on readonly flag.
 		Bitset flags = ir.get_buffer_block_flags(var);
-		bool is_readonly = flags.get(DecorationNonWritable);
+		bool is_readonly = flags.get(DecorationNonWritable) && !hlsl_options.force_storage_buffer_as_uav;
 		space = is_readonly ? 't' : 'u';
 		resource_flags = is_readonly ? HLSL_BINDING_AUTO_SRV_BIT : HLSL_BINDING_AUTO_UAV_BIT;
 	}
@@ -3011,8 +3071,16 @@ string CompilerHLSL::to_resource_binding(const SPIRVariable &var)
 	if (!space)
 		return "";
 
-	return to_resource_register(resource_flags, space, get_decoration(var.self, DecorationBinding),
-	                            get_decoration(var.self, DecorationDescriptorSet));
+	uint32_t desc_set =
+	    resource_flags == HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT ? ResourceBindingPushConstantDescriptorSet : 0u;
+	uint32_t binding = resource_flags == HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT ? ResourceBindingPushConstantBinding : 0u;
+
+	if (has_decoration(var.self, DecorationBinding))
+		binding = get_decoration(var.self, DecorationBinding);
+	if (has_decoration(var.self, DecorationDescriptorSet))
+		desc_set = get_decoration(var.self, DecorationDescriptorSet);
+
+	return to_resource_register(resource_flags, space, binding, desc_set);
 }
 
 string CompilerHLSL::to_resource_binding_sampler(const SPIRVariable &var)
@@ -3025,10 +3093,54 @@ string CompilerHLSL::to_resource_binding_sampler(const SPIRVariable &var)
 	                            get_decoration(var.self, DecorationDescriptorSet));
 }
 
-string CompilerHLSL::to_resource_register(uint32_t flags, char space, uint32_t binding, uint32_t space_set)
+void CompilerHLSL::remap_hlsl_resource_binding(HLSLBindingFlagBits type, uint32_t &desc_set, uint32_t &binding)
 {
-	if ((flags & resource_binding_flags) == 0)
+	auto itr = resource_bindings.find({ get_execution_model(), desc_set, binding });
+	if (itr != end(resource_bindings))
 	{
+		auto &remap = itr->second;
+		remap.second = true;
+
+		switch (type)
+		{
+		case HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT:
+		case HLSL_BINDING_AUTO_CBV_BIT:
+			desc_set = remap.first.cbv.register_space;
+			binding = remap.first.cbv.register_binding;
+			break;
+
+		case HLSL_BINDING_AUTO_SRV_BIT:
+			desc_set = remap.first.srv.register_space;
+			binding = remap.first.srv.register_binding;
+			break;
+
+		case HLSL_BINDING_AUTO_SAMPLER_BIT:
+			desc_set = remap.first.sampler.register_space;
+			binding = remap.first.sampler.register_binding;
+			break;
+
+		case HLSL_BINDING_AUTO_UAV_BIT:
+			desc_set = remap.first.uav.register_space;
+			binding = remap.first.uav.register_binding;
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+
+string CompilerHLSL::to_resource_register(HLSLBindingFlagBits flag, char space, uint32_t binding, uint32_t space_set)
+{
+	if ((flag & resource_binding_flags) == 0)
+	{
+		remap_hlsl_resource_binding(flag, space_set, binding);
+
+		// The push constant block did not have a binding, and there were no remap for it,
+		// so, declare without register binding.
+		if (flag == HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT && space_set == ResourceBindingPushConstantDescriptorSet)
+			return "";
+
 		if (hlsl_options.shader_model >= 51)
 			return join(" : register(", space, binding, ", space", space_set, ")");
 		else
@@ -3104,6 +3216,11 @@ void CompilerHLSL::emit_uniform(const SPIRVariable &var)
 		emit_legacy_uniform(var);
 }
 
+bool CompilerHLSL::emit_complex_bitcast(uint32_t, uint32_t, uint32_t)
+{
+	return false;
+}
+
 string CompilerHLSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type)
 {
 	if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Int)
@@ -3955,6 +4072,9 @@ void CompilerHLSL::emit_access_chain(const Instruction &instruction)
 			inherit_expression_dependencies(ops[1], ops[i]);
 			add_implied_read_expression(e, ops[i]);
 		}
+
+		if (has_decoration(ops[1], DecorationNonUniformEXT))
+			propagate_nonuniform_qualifier(ops[1]);
 	}
 	else
 	{
@@ -4079,6 +4199,11 @@ void CompilerHLSL::emit_subgroup_op(const Instruction &i)
 		return join(expr, " * ", to_expression(ops[4]));
 	};
 
+	// If we need to do implicit bitcasts, make sure we do it with the correct type.
+	uint32_t integer_width = get_integer_width_for_instruction(i);
+	auto int_type = to_signed_basetype(integer_width);
+	auto uint_type = to_unsigned_basetype(integer_width);
+
 #define make_inclusive_BitAnd(expr) ""
 #define make_inclusive_BitOr(expr) ""
 #define make_inclusive_BitXor(expr) ""
@@ -4187,20 +4312,34 @@ case OpGroupNonUniform##op: \
 			SPIRV_CROSS_THROW("Invalid group operation."); \
 		break; \
 	}
+
+#define HLSL_GROUP_OP_CAST(op, hlsl_op, type) \
+case OpGroupNonUniform##op: \
+	{ \
+		auto operation = static_cast<GroupOperation>(ops[3]); \
+		if (operation == GroupOperationReduce) \
+			emit_unary_func_op_cast(result_type, id, ops[4], "WaveActive" #hlsl_op, type, type); \
+		else \
+			SPIRV_CROSS_THROW("Invalid group operation."); \
+		break; \
+	}
+
 	HLSL_GROUP_OP(FAdd, Sum, true)
 	HLSL_GROUP_OP(FMul, Product, true)
 	HLSL_GROUP_OP(FMin, Min, false)
 	HLSL_GROUP_OP(FMax, Max, false)
 	HLSL_GROUP_OP(IAdd, Sum, true)
 	HLSL_GROUP_OP(IMul, Product, true)
-	HLSL_GROUP_OP(SMin, Min, false)
-	HLSL_GROUP_OP(SMax, Max, false)
-	HLSL_GROUP_OP(UMin, Min, false)
-	HLSL_GROUP_OP(UMax, Max, false)
+	HLSL_GROUP_OP_CAST(SMin, Min, int_type)
+	HLSL_GROUP_OP_CAST(SMax, Max, int_type)
+	HLSL_GROUP_OP_CAST(UMin, Min, uint_type)
+	HLSL_GROUP_OP_CAST(UMax, Max, uint_type)
 	HLSL_GROUP_OP(BitwiseAnd, BitAnd, false)
 	HLSL_GROUP_OP(BitwiseOr, BitOr, false)
 	HLSL_GROUP_OP(BitwiseXor, BitXor, false)
+
 #undef HLSL_GROUP_OP
+#undef HLSL_GROUP_OP_CAST
 // clang-format on
 
 	case OpGroupNonUniformQuadSwap:
@@ -4260,6 +4399,27 @@ void CompilerHLSL::emit_instruction(const Instruction &instruction)
 		emit_access_chain(instruction);
 		break;
 	}
+
+	case OpBitcast:
+	{
+		auto bitcast_type = get_bitcast_type(ops[0], ops[2]);
+		if (bitcast_type == CompilerHLSL::TypeNormal)
+			CompilerGLSL::emit_instruction(instruction);
+		else
+		{
+			if (!requires_uint2_packing)
+			{
+				requires_uint2_packing = true;
+				force_recompile();
+			}
+
+			if (bitcast_type == CompilerHLSL::TypePackUint2x32)
+				emit_unary_func_op(ops[0], ops[1], ops[2], "SPIRV_Cross_packUint2x32");
+			else
+				emit_unary_func_op(ops[0], ops[1], ops[2], "SPIRV_Cross_unpackUint2x32");
+		}
+
+		break;
+	}
 
 	case OpStore:
 	{
@@ -4723,7 +4883,12 @@ void CompilerHLSL::emit_instruction(const Instruction &instruction)
 			imgexpr = join(to_expression(ops[2]), "[", to_expression(ops[3]), "]");
 			// The underlying image type in HLSL depends on the image format, unlike GLSL, where all images are "vec4",
 			// except that the underlying type changes how the data is interpreted.
-			if (var && !subpass_data)
+
+			bool force_srv =
+			    hlsl_options.nonwritable_uav_texture_as_srv && var && has_decoration(var->self, DecorationNonWritable);
+			pure = force_srv;
+
+			if (var && !subpass_data && !force_srv)
 				imgexpr = remap_swizzle(get<SPIRType>(result_type),
 				                        image_format_to_components(get<SPIRType>(var->basetype).image.format), imgexpr);
 		}
@@ -4774,8 +4939,13 @@ void CompilerHLSL::emit_instruction(const Instruction &instruction)
 	{
 		uint32_t result_type = ops[0];
 		uint32_t id = ops[1];
-		auto &e =
-		    set<SPIRExpression>(id, join(to_expression(ops[2]), "[", to_expression(ops[3]), "]"), result_type, true);
+
+		auto expr = to_expression(ops[2]);
+		if (has_decoration(id, DecorationNonUniformEXT) || has_decoration(ops[2], DecorationNonUniformEXT))
+			convert_non_uniform_expression(expression_type(ops[2]), expr);
+		expr += join("[", to_expression(ops[3]), "]");
+
+		auto &e = set<SPIRExpression>(id, expr, result_type, true);
 
 		// When using the pointer, we need to know which variable it is actually loaded from.
 		auto *var = maybe_get_backing_variable(ops[2]);
@@ -5215,3 +5385,31 @@ string CompilerHLSL::get_unique_identifier()
 {
 	return join("_", unique_identifier_count++, "ident");
 }
+
+void CompilerHLSL::add_hlsl_resource_binding(const HLSLResourceBinding &binding)
+{
+	StageSetBinding tuple = { binding.stage, binding.desc_set, binding.binding };
+	resource_bindings[tuple] = { binding, false };
+}
+
+bool CompilerHLSL::is_hlsl_resource_binding_used(ExecutionModel model, uint32_t desc_set, uint32_t binding) const
+{
+	StageSetBinding tuple = { model, desc_set, binding };
+	auto itr = resource_bindings.find(tuple);
+	return itr != end(resource_bindings) && itr->second.second;
+}
+
+CompilerHLSL::BitcastType CompilerHLSL::get_bitcast_type(uint32_t result_type, uint32_t op0)
+{
+	auto &rslt_type = get<SPIRType>(result_type);
+	auto &expr_type = expression_type(op0);
+
+	if (rslt_type.basetype == SPIRType::BaseType::UInt64 && expr_type.basetype == SPIRType::BaseType::UInt &&
+	    expr_type.vecsize == 2)
+		return BitcastType::TypePackUint2x32;
+	else if (rslt_type.basetype == SPIRType::BaseType::UInt && rslt_type.vecsize == 2 &&
+	         expr_type.basetype == SPIRType::BaseType::UInt64)
+		return BitcastType::TypeUnpackUint64;
+
+	return BitcastType::TypeNormal;
+}
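
Usage note (illustrative sketch, not part of the patch): the code below shows how a client might drive the binding-remap API added by the final hunks. It assumes the matching spirv_hlsl.hpp declarations that accompany this change -- HLSLResourceBinding with stage/desc_set/binding plus per-class register_space/register_binding members, set_hlsl_options(), and the ResourceBindingPushConstant* constants -- exactly as those names are referenced in the hunks above; treat anything else (function and variable names in the sketch) as hypothetical.

    // Illustrative only: remap the push constant block to cbuffer register b3, space0.
    #include "spirv_hlsl.hpp"

    #include <string>
    #include <vector>

    std::string compile_with_push_constant_remap(std::vector<uint32_t> spirv_binary)
    {
    	spirv_cross::CompilerHLSL hlsl(std::move(spirv_binary));

    	spirv_cross::CompilerHLSL::Options opts;
    	opts.shader_model = 51; // register(..., spaceN) syntax needs SM 5.1+.
    	hlsl.set_hlsl_options(opts);

    	// Push constants have no DescriptorSet/Binding decoration, so they are
    	// addressed through the dedicated ResourceBindingPushConstant* constants.
    	spirv_cross::HLSLResourceBinding push;
    	push.stage = spv::ExecutionModelFragment;
    	push.desc_set = spirv_cross::ResourceBindingPushConstantDescriptorSet;
    	push.binding = spirv_cross::ResourceBindingPushConstantBinding;
    	push.cbv.register_space = 0;
    	push.cbv.register_binding = 3;
    	hlsl.add_hlsl_resource_binding(push);

    	std::string source = hlsl.compile();

    	// Only meaningful after compile(): reports whether the remap entry was consumed.
    	if (!hlsl.is_hlsl_resource_binding_used(spv::ExecutionModelFragment,
    	                                        spirv_cross::ResourceBindingPushConstantDescriptorSet,
    	                                        spirv_cross::ResourceBindingPushConstantBinding))
    	{
    		// No push constant block was present in the module, so the remap went unused.
    	}

    	return source;
    }

As the diff shows, remap_hlsl_resource_binding() marks a matched entry as used, which is what is_hlsl_resource_binding_used() reports after compilation; without a remap and without an explicit binding, a push constant block is simply declared without a register annotation.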