|
|
@@ -129,6 +129,10 @@ protected:
|
|
|
spv::Decoration TranslateInterpolationDecoration(const glslang::TQualifier& qualifier);
|
|
|
spv::Decoration TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier);
|
|
|
spv::Decoration TranslateNonUniformDecoration(const glslang::TQualifier& qualifier);
|
|
|
+ spv::Builder::AccessChain::CoherentFlags TranslateCoherent(const glslang::TType& type);
|
|
|
+ spv::MemoryAccessMask TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
|
|
|
+ spv::ImageOperandsMask TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
|
|
|
+ spv::Scope TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
|
|
|
spv::BuiltIn TranslateBuiltInDecoration(glslang::TBuiltInVariable, bool memberDeclaration);
|
|
|
spv::ImageFormat TranslateImageFormat(const glslang::TType& type);
|
|
|
spv::SelectionControlMask TranslateSelectionControl(const glslang::TIntermSelection&) const;
|
|
|
@@ -327,13 +331,15 @@ spv::Decoration TranslateBlockDecoration(const glslang::TType& type, bool useSto
|
|
|
}
|
|
|
|
|
|
// Translate glslang type to SPIR-V memory decorations.
|
|
|
-void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector<spv::Decoration>& memory)
|
|
|
+void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector<spv::Decoration>& memory, bool useVulkanMemoryModel)
|
|
|
{
|
|
|
- if (qualifier.coherent)
|
|
|
- memory.push_back(spv::DecorationCoherent);
|
|
|
- if (qualifier.volatil) {
|
|
|
- memory.push_back(spv::DecorationVolatile);
|
|
|
- memory.push_back(spv::DecorationCoherent);
|
|
|
+ if (!useVulkanMemoryModel) {
|
|
|
+ if (qualifier.coherent)
|
|
|
+ memory.push_back(spv::DecorationCoherent);
|
|
|
+ if (qualifier.volatil) {
|
|
|
+ memory.push_back(spv::DecorationVolatile);
|
|
|
+ memory.push_back(spv::DecorationCoherent);
|
|
|
+ }
|
|
|
}
|
|
|
if (qualifier.restrict)
|
|
|
memory.push_back(spv::DecorationRestrict);
|
|
|
@@ -450,6 +456,105 @@ spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration(const glsl
|
|
|
return spv::DecorationMax;
|
|
|
}
|
|
|
|
|
|
+spv::MemoryAccessMask TGlslangToSpvTraverser::TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
|
|
|
+{
|
|
|
+ if (!glslangIntermediate->usingVulkanMemoryModel() || coherentFlags.isImage) {
|
|
|
+ return spv::MemoryAccessMaskNone;
|
|
|
+ }
|
|
|
+ spv::MemoryAccessMask mask = spv::MemoryAccessMaskNone;
|
|
|
+ if (coherentFlags.volatil ||
|
|
|
+ coherentFlags.coherent ||
|
|
|
+ coherentFlags.devicecoherent ||
|
|
|
+ coherentFlags.queuefamilycoherent ||
|
|
|
+ coherentFlags.workgroupcoherent ||
|
|
|
+ coherentFlags.subgroupcoherent) {
|
|
|
+ mask = mask | spv::MemoryAccessMakePointerAvailableKHRMask |
|
|
|
+ spv::MemoryAccessMakePointerVisibleKHRMask;
|
|
|
+ }
|
|
|
+ if (coherentFlags.nonprivate) {
|
|
|
+ mask = mask | spv::MemoryAccessNonPrivatePointerKHRMask;
|
|
|
+ }
|
|
|
+ if (coherentFlags.volatil) {
|
|
|
+ mask = mask | spv::MemoryAccessVolatileMask;
|
|
|
+ }
|
|
|
+ if (mask != spv::MemoryAccessMaskNone) {
|
|
|
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
|
|
|
+ }
|
|
|
+ return mask;
|
|
|
+}
|
|
|
+
|
|
|
+spv::ImageOperandsMask TGlslangToSpvTraverser::TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
|
|
|
+{
|
|
|
+ if (!glslangIntermediate->usingVulkanMemoryModel()) {
|
|
|
+ return spv::ImageOperandsMaskNone;
|
|
|
+ }
|
|
|
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
|
|
|
+ if (coherentFlags.volatil ||
|
|
|
+ coherentFlags.coherent ||
|
|
|
+ coherentFlags.devicecoherent ||
|
|
|
+ coherentFlags.queuefamilycoherent ||
|
|
|
+ coherentFlags.workgroupcoherent ||
|
|
|
+ coherentFlags.subgroupcoherent) {
|
|
|
+ mask = mask | spv::ImageOperandsMakeTexelAvailableKHRMask |
|
|
|
+ spv::ImageOperandsMakeTexelVisibleKHRMask;
|
|
|
+ }
|
|
|
+ if (coherentFlags.nonprivate) {
|
|
|
+ mask = mask | spv::ImageOperandsNonPrivateTexelKHRMask;
|
|
|
+ }
|
|
|
+ if (coherentFlags.volatil) {
|
|
|
+ mask = mask | spv::ImageOperandsVolatileTexelKHRMask;
|
|
|
+ }
|
|
|
+ if (mask != spv::ImageOperandsMaskNone) {
|
|
|
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
|
|
|
+ }
|
|
|
+ return mask;
|
|
|
+}
|
|
|
+
|
|
|
+spv::Builder::AccessChain::CoherentFlags TGlslangToSpvTraverser::TranslateCoherent(const glslang::TType& type)
|
|
|
+{
|
|
|
+ spv::Builder::AccessChain::CoherentFlags flags;
|
|
|
+ flags.coherent = type.getQualifier().coherent;
|
|
|
+ flags.devicecoherent = type.getQualifier().devicecoherent;
|
|
|
+ flags.queuefamilycoherent = type.getQualifier().queuefamilycoherent;
|
|
|
+ // shared variables are implicitly workgroupcoherent in GLSL.
|
|
|
+ flags.workgroupcoherent = type.getQualifier().workgroupcoherent ||
|
|
|
+ type.getQualifier().storage == glslang::EvqShared;
|
|
|
+ flags.subgroupcoherent = type.getQualifier().subgroupcoherent;
|
|
|
+ // *coherent variables are implicitly nonprivate in GLSL
|
|
|
+ flags.nonprivate = type.getQualifier().nonprivate ||
|
|
|
+ type.getQualifier().subgroupcoherent ||
|
|
|
+ type.getQualifier().workgroupcoherent ||
|
|
|
+ type.getQualifier().queuefamilycoherent ||
|
|
|
+ type.getQualifier().devicecoherent ||
|
|
|
+ type.getQualifier().coherent;
|
|
|
+ flags.volatil = type.getQualifier().volatil;
|
|
|
+ flags.isImage = type.getBasicType() == glslang::EbtSampler;
|
|
|
+ return flags;
|
|
|
+}
|
|
|
+
|
|
|
+spv::Scope TGlslangToSpvTraverser::TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
|
|
|
+{
|
|
|
+ spv::Scope scope;
|
|
|
+ if (coherentFlags.coherent) {
|
|
|
+ // coherent defaults to Device scope in the old model, QueueFamilyKHR scope in the new model
|
|
|
+ scope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
|
|
|
+ } else if (coherentFlags.devicecoherent) {
|
|
|
+ scope = spv::ScopeDevice;
|
|
|
+ } else if (coherentFlags.queuefamilycoherent) {
|
|
|
+ scope = spv::ScopeQueueFamilyKHR;
|
|
|
+ } else if (coherentFlags.workgroupcoherent) {
|
|
|
+ scope = spv::ScopeWorkgroup;
|
|
|
+ } else if (coherentFlags.subgroupcoherent) {
|
|
|
+ scope = spv::ScopeSubgroup;
|
|
|
+ } else {
|
|
|
+ scope = spv::ScopeMax;
|
|
|
+ }
|
|
|
+ if (glslangIntermediate->usingVulkanMemoryModel() && scope == spv::ScopeDevice) {
|
|
|
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
|
|
|
+ }
|
|
|
+ return scope;
|
|
|
+}
|
|
|
+
|
|
|
// Translate a glslang built-in variable to a SPIR-V built in decoration. Also generate
|
|
|
// associated capabilities when required. For some built-in variables, a capability
|
|
|
// is generated only when using the variable in an executable instruction, but not when
|
|
|
@@ -979,6 +1084,16 @@ void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& pa
|
|
|
child.sample = true;
|
|
|
if (parent.coherent)
|
|
|
child.coherent = true;
|
|
|
+ if (parent.devicecoherent)
|
|
|
+ child.devicecoherent = true;
|
|
|
+ if (parent.queuefamilycoherent)
|
|
|
+ child.queuefamilycoherent = true;
|
|
|
+ if (parent.workgroupcoherent)
|
|
|
+ child.workgroupcoherent = true;
|
|
|
+ if (parent.subgroupcoherent)
|
|
|
+ child.subgroupcoherent = true;
|
|
|
+ if (parent.nonprivate)
|
|
|
+ child.nonprivate = true;
|
|
|
if (parent.volatil)
|
|
|
child.volatil = true;
|
|
|
if (parent.restrict)
|
|
|
@@ -1045,7 +1160,12 @@ TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion, const gl
|
|
|
builder.setSourceText(text);
|
|
|
}
|
|
|
stdBuiltins = builder.import("GLSL.std.450");
|
|
|
- builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelGLSL450);
|
|
|
+ if (glslangIntermediate->usingVulkanMemoryModel()) {
|
|
|
+ builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelVulkanKHR);
|
|
|
+ builder.addExtension(spv::E_SPV_KHR_vulkan_memory_model);
|
|
|
+ } else {
|
|
|
+ builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelGLSL450);
|
|
|
+ }
|
|
|
shaderEntry = builder.makeEntryPoint(glslangIntermediate->getEntryPointName().c_str());
|
|
|
entryPoint = builder.addEntryPoint(executionModel, shaderEntry, glslangIntermediate->getEntryPointName().c_str());
|
|
|
|
|
|
@@ -1351,7 +1471,7 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
|
|
|
|
|
|
// store the result
|
|
|
builder.setAccessChain(lValue);
|
|
|
- multiTypeStore(node->getType(), rValue);
|
|
|
+ multiTypeStore(node->getLeft()->getType(), rValue);
|
|
|
|
|
|
// assignments are expressions having an rValue after they are evaluated...
|
|
|
builder.clearAccessChain();
|
|
|
@@ -1388,7 +1508,7 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
|
|
|
}
|
|
|
|
|
|
// normal case for indexing array or structure or block
|
|
|
- builder.accessChainPush(builder.makeIntConstant(spvIndex));
|
|
|
+ builder.accessChainPush(builder.makeIntConstant(spvIndex), TranslateCoherent(node->getLeft()->getType()));
|
|
|
|
|
|
// Add capabilities here for accessing PointSize and clip/cull distance.
|
|
|
// We have deferred generation of associated capabilities until now.
|
|
|
@@ -1424,7 +1544,7 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
|
|
|
if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector())
|
|
|
builder.accessChainPushComponent(index, convertGlslangToSpvType(node->getLeft()->getType()));
|
|
|
else
|
|
|
- builder.accessChainPush(index);
|
|
|
+ builder.accessChainPush(index, TranslateCoherent(node->getLeft()->getType()));
|
|
|
}
|
|
|
return false;
|
|
|
case glslang::EOpVectorSwizzle:
|
|
|
@@ -1658,11 +1778,11 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
|
|
|
builder.setAccessChainRValue(result);
|
|
|
|
|
|
return false;
|
|
|
+ } else if (node->getOp() == glslang::EOpImageStore ||
|
|
|
#ifdef AMD_EXTENSIONS
|
|
|
- } else if (node->getOp() == glslang::EOpImageStore || node->getOp() == glslang::EOpImageStoreLod) {
|
|
|
-#else
|
|
|
- } else if (node->getOp() == glslang::EOpImageStore) {
|
|
|
+ node->getOp() == glslang::EOpImageStoreLod ||
|
|
|
#endif
|
|
|
+ node->getOp() == glslang::EOpImageAtomicStore) {
|
|
|
// "imageStore" is a special case, which has no result
|
|
|
return false;
|
|
|
}
|
|
|
@@ -1952,6 +2072,10 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
|
|
|
// These all have 0 operands and will naturally finish up in the code below for 0 operands
|
|
|
break;
|
|
|
|
|
|
+ case glslang::EOpAtomicStore:
|
|
|
+ noReturnValue = true;
|
|
|
+ // fallthrough
|
|
|
+ case glslang::EOpAtomicLoad:
|
|
|
case glslang::EOpAtomicAdd:
|
|
|
case glslang::EOpAtomicMin:
|
|
|
case glslang::EOpAtomicMax:
|
|
|
@@ -2050,6 +2174,8 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
|
|
|
case glslang::EOpAtomicXor:
|
|
|
case glslang::EOpAtomicExchange:
|
|
|
case glslang::EOpAtomicCompSwap:
|
|
|
+ case glslang::EOpAtomicLoad:
|
|
|
+ case glslang::EOpAtomicStore:
|
|
|
case glslang::EOpAtomicCounterAdd:
|
|
|
case glslang::EOpAtomicCounterSubtract:
|
|
|
case glslang::EOpAtomicCounterMin:
|
|
|
@@ -2876,7 +3002,7 @@ void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type,
|
|
|
qualifier.storage == glslang::EvqBuffer) {
|
|
|
// Add memory decorations only to top-level members of shader storage block
|
|
|
std::vector<spv::Decoration> memory;
|
|
|
- TranslateMemoryDecoration(memberQualifier, memory);
|
|
|
+ TranslateMemoryDecoration(memberQualifier, memory, glslangIntermediate->usingVulkanMemoryModel());
|
|
|
for (unsigned int i = 0; i < memory.size(); ++i)
|
|
|
builder.addMemberDecoration(spvType, member, memory[i]);
|
|
|
}
|
|
|
@@ -2987,8 +3113,15 @@ spv::Id TGlslangToSpvTraverser::makeArraySizeId(const glslang::TArraySizes& arra
|
|
|
spv::Id TGlslangToSpvTraverser::accessChainLoad(const glslang::TType& type)
|
|
|
{
|
|
|
spv::Id nominalTypeId = builder.accessChainGetInferredType();
|
|
|
+
|
|
|
+ spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
|
|
|
+ coherentFlags |= TranslateCoherent(type);
|
|
|
+
|
|
|
spv::Id loadedId = builder.accessChainLoad(TranslatePrecisionDecoration(type),
|
|
|
- TranslateNonUniformDecoration(type.getQualifier()), nominalTypeId);
|
|
|
+ TranslateNonUniformDecoration(type.getQualifier()),
|
|
|
+ nominalTypeId,
|
|
|
+ spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerAvailableKHRMask),
|
|
|
+ TranslateMemoryScope(coherentFlags));
|
|
|
|
|
|
// Need to convert to abstract types when necessary
|
|
|
if (type.getBasicType() == glslang::EbtBool) {
|
|
|
@@ -3044,7 +3177,12 @@ void TGlslangToSpvTraverser::accessChainStore(const glslang::TType& type, spv::I
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- builder.accessChainStore(rvalue);
|
|
|
+ spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
|
|
|
+ coherentFlags |= TranslateCoherent(type);
|
|
|
+
|
|
|
+ builder.accessChainStore(rvalue,
|
|
|
+ spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerVisibleKHRMask),
|
|
|
+ TranslateMemoryScope(coherentFlags));
|
|
|
}
|
|
|
|
|
|
// For storing when types match at the glslang level, but not might match at the
|
|
|
@@ -3090,7 +3228,7 @@ void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id
|
|
|
// set up the target storage
|
|
|
builder.clearAccessChain();
|
|
|
builder.setAccessChainLValue(lValue);
|
|
|
- builder.accessChainPush(builder.makeIntConstant(index));
|
|
|
+ builder.accessChainPush(builder.makeIntConstant(index), TranslateCoherent(type));
|
|
|
|
|
|
// store the member
|
|
|
multiTypeStore(glslangElementType, elementRValue);
|
|
|
@@ -3110,7 +3248,7 @@ void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id
|
|
|
// set up the target storage
|
|
|
builder.clearAccessChain();
|
|
|
builder.setAccessChainLValue(lValue);
|
|
|
- builder.accessChainPush(builder.makeIntConstant(m));
|
|
|
+ builder.accessChainPush(builder.makeIntConstant(m), TranslateCoherent(type));
|
|
|
|
|
|
// store the member
|
|
|
multiTypeStore(glslangMemberType, memberRValue);
|
|
|
@@ -3287,11 +3425,11 @@ bool TGlslangToSpvTraverser::originalParam(glslang::TStorageQualifier qualifier,
|
|
|
// Make all the functions, skeletally, without actually visiting their bodies.
|
|
|
void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslFunctions)
|
|
|
{
|
|
|
- const auto getParamDecorations = [](std::vector<spv::Decoration>& decorations, const glslang::TType& type) {
|
|
|
+ const auto getParamDecorations = [](std::vector<spv::Decoration>& decorations, const glslang::TType& type, bool useVulkanMemoryModel) {
|
|
|
spv::Decoration paramPrecision = TranslatePrecisionDecoration(type);
|
|
|
if (paramPrecision != spv::NoPrecision)
|
|
|
decorations.push_back(paramPrecision);
|
|
|
- TranslateMemoryDecoration(type.getQualifier(), decorations);
|
|
|
+ TranslateMemoryDecoration(type.getQualifier(), decorations, useVulkanMemoryModel);
|
|
|
};
|
|
|
|
|
|
for (int f = 0; f < (int)glslFunctions.size(); ++f) {
|
|
|
@@ -3330,7 +3468,7 @@ void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslF
|
|
|
typeId = builder.makePointer(spv::StorageClassFunction, typeId);
|
|
|
else
|
|
|
rValueParameters.insert(parameters[p]->getAsSymbolNode()->getId());
|
|
|
- getParamDecorations(paramDecorations[p], paramType);
|
|
|
+ getParamDecorations(paramDecorations[p], paramType, glslangIntermediate->usingVulkanMemoryModel());
|
|
|
paramTypes.push_back(typeId);
|
|
|
}
|
|
|
|
|
|
@@ -3420,6 +3558,8 @@ void TGlslangToSpvTraverser::translateArguments(const glslang::TIntermAggregate&
|
|
|
case glslang::EOpImageAtomicXor:
|
|
|
case glslang::EOpImageAtomicExchange:
|
|
|
case glslang::EOpImageAtomicCompSwap:
|
|
|
+ case glslang::EOpImageAtomicLoad:
|
|
|
+ case glslang::EOpImageAtomicStore:
|
|
|
if (i == 0)
|
|
|
lvalue = true;
|
|
|
break;
|
|
|
@@ -3547,8 +3687,10 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
|
|
builder.setLine(node->getLoc().line);
|
|
|
|
|
|
// Process a GLSL texturing op (will be SPV image)
|
|
|
- const glslang::TSampler sampler = node->getAsAggregate() ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType().getSampler()
|
|
|
- : node->getAsUnaryNode()->getOperand()->getAsTyped()->getType().getSampler();
|
|
|
+
|
|
|
+ const glslang::TType &imageType = node->getAsAggregate() ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType()
|
|
|
+ : node->getAsUnaryNode()->getOperand()->getAsTyped()->getType();
|
|
|
+ const glslang::TSampler sampler = imageType.getSampler();
|
|
|
#ifdef AMD_EXTENSIONS
|
|
|
bool f16ShadowCompare = (sampler.shadow && node->getAsAggregate())
|
|
|
? node->getAsAggregate()->getSequence()[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16
|
|
|
@@ -3651,22 +3793,38 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
|
|
#else
|
|
|
if (node->getOp() == glslang::EOpImageLoad) {
|
|
|
#endif
|
|
|
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
|
|
|
if (sampler.ms) {
|
|
|
- spv::IdImmediate imageOperands = { false, spv::ImageOperandsSampleMask };
|
|
|
- operands.push_back(imageOperands);
|
|
|
- spv::IdImmediate imageOperand = { true, *opIt };
|
|
|
- operands.push_back(imageOperand);
|
|
|
+ mask = mask | spv::ImageOperandsSampleMask;
|
|
|
+ }
|
|
|
#ifdef AMD_EXTENSIONS
|
|
|
- } else if (cracked.lod) {
|
|
|
+ if (cracked.lod) {
|
|
|
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
|
|
|
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
|
|
|
-
|
|
|
- spv::IdImmediate imageOperands = { false, spv::ImageOperandsLodMask };
|
|
|
+ mask = mask | spv::ImageOperandsLodMask;
|
|
|
+ }
|
|
|
+#endif
|
|
|
+ mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
|
|
|
+ mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
|
|
|
+ if (mask) {
|
|
|
+ spv::IdImmediate imageOperands = { false, (unsigned int)mask };
|
|
|
operands.push_back(imageOperands);
|
|
|
- spv::IdImmediate imageOperand = { true, *opIt };
|
|
|
+ }
|
|
|
+ if (mask & spv::ImageOperandsSampleMask) {
|
|
|
+ spv::IdImmediate imageOperand = { true, *opIt++ };
|
|
|
+ operands.push_back(imageOperand);
|
|
|
+ }
|
|
|
+#ifdef AMD_EXTENSIONS
|
|
|
+ if (mask & spv::ImageOperandsLodMask) {
|
|
|
+ spv::IdImmediate imageOperand = { true, *opIt++ };
|
|
|
operands.push_back(imageOperand);
|
|
|
+ }
|
|
|
#endif
|
|
|
+ if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
|
|
|
+ spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
|
|
|
+ operands.push_back(imageOperand);
|
|
|
}
|
|
|
+
|
|
|
if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
|
|
|
builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
|
|
|
|
|
|
@@ -3683,29 +3841,52 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
|
|
#else
|
|
|
} else if (node->getOp() == glslang::EOpImageStore) {
|
|
|
#endif
|
|
|
+
|
|
|
+ // Push the texel value before the operands
|
|
|
+#ifdef AMD_EXTENSIONS
|
|
|
+ if (sampler.ms || cracked.lod) {
|
|
|
+#else
|
|
|
if (sampler.ms) {
|
|
|
+#endif
|
|
|
spv::IdImmediate texel = { true, *(opIt + 1) };
|
|
|
operands.push_back(texel);
|
|
|
- spv::IdImmediate imageOperands = { false, spv::ImageOperandsSampleMask };
|
|
|
- operands.push_back(imageOperands);
|
|
|
- spv::IdImmediate imageOperand = { true, *opIt };
|
|
|
- operands.push_back(imageOperand);
|
|
|
+ } else {
|
|
|
+ spv::IdImmediate texel = { true, *opIt };
|
|
|
+ operands.push_back(texel);
|
|
|
+ }
|
|
|
+
|
|
|
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
|
|
|
+ if (sampler.ms) {
|
|
|
+ mask = mask | spv::ImageOperandsSampleMask;
|
|
|
+ }
|
|
|
#ifdef AMD_EXTENSIONS
|
|
|
- } else if (cracked.lod) {
|
|
|
+ if (cracked.lod) {
|
|
|
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
|
|
|
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
|
|
|
-
|
|
|
- spv::IdImmediate texel = { true, *(opIt + 1) };
|
|
|
- operands.push_back(texel);
|
|
|
- spv::IdImmediate imageOperands = { false, spv::ImageOperandsLodMask };
|
|
|
+ mask = mask | spv::ImageOperandsLodMask;
|
|
|
+ }
|
|
|
+#endif
|
|
|
+ mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
|
|
|
+ mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelVisibleKHRMask);
|
|
|
+ if (mask) {
|
|
|
+ spv::IdImmediate imageOperands = { false, (unsigned int)mask };
|
|
|
operands.push_back(imageOperands);
|
|
|
- spv::IdImmediate imageOperand = { true, *opIt };
|
|
|
+ }
|
|
|
+ if (mask & spv::ImageOperandsSampleMask) {
|
|
|
+ spv::IdImmediate imageOperand = { true, *opIt++ };
|
|
|
+ operands.push_back(imageOperand);
|
|
|
+ }
|
|
|
+#ifdef AMD_EXTENSIONS
|
|
|
+ if (mask & spv::ImageOperandsLodMask) {
|
|
|
+ spv::IdImmediate imageOperand = { true, *opIt++ };
|
|
|
operands.push_back(imageOperand);
|
|
|
+ }
|
|
|
#endif
|
|
|
- } else {
|
|
|
- spv::IdImmediate texel = { true, *opIt };
|
|
|
- operands.push_back(texel);
|
|
|
+ if (mask & spv::ImageOperandsMakeTexelAvailableKHRMask) {
|
|
|
+ spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
|
|
|
+ operands.push_back(imageOperand);
|
|
|
}
|
|
|
+
|
|
|
builder.createNoResultOp(spv::OpImageWrite, operands);
|
|
|
if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
|
|
|
builder.addCapability(spv::CapabilityStorageImageWriteWithoutFormat);
|
|
|
@@ -3719,21 +3900,37 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
|
|
if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
|
|
|
builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
|
|
|
|
|
|
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
|
|
|
if (sampler.ms) {
|
|
|
- spv::IdImmediate imageOperands = { false, spv::ImageOperandsSampleMask };
|
|
|
- operands.push_back(imageOperands);
|
|
|
- spv::IdImmediate imageOperand = { true, *opIt++ };
|
|
|
- operands.push_back(imageOperand);
|
|
|
+ mask = mask | spv::ImageOperandsSampleMask;
|
|
|
+ }
|
|
|
#ifdef AMD_EXTENSIONS
|
|
|
- } else if (cracked.lod) {
|
|
|
+ if (cracked.lod) {
|
|
|
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
|
|
|
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
|
|
|
|
|
|
- spv::IdImmediate imageOperands = { false, spv::ImageOperandsLodMask };
|
|
|
+ mask = mask | spv::ImageOperandsLodMask;
|
|
|
+ }
|
|
|
+#endif
|
|
|
+ mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
|
|
|
+ mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
|
|
|
+ if (mask) {
|
|
|
+ spv::IdImmediate imageOperands = { false, (unsigned int)mask };
|
|
|
operands.push_back(imageOperands);
|
|
|
+ }
|
|
|
+ if (mask & spv::ImageOperandsSampleMask) {
|
|
|
spv::IdImmediate imageOperand = { true, *opIt++ };
|
|
|
operands.push_back(imageOperand);
|
|
|
+ }
|
|
|
+#ifdef AMD_EXTENSIONS
|
|
|
+ if (mask & spv::ImageOperandsLodMask) {
|
|
|
+ spv::IdImmediate imageOperand = { true, *opIt++ };
|
|
|
+ operands.push_back(imageOperand);
|
|
|
+ }
|
|
|
#endif
|
|
|
+ if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
|
|
|
+ spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
|
|
|
+ operands.push_back(imageOperand);
|
|
|
}
|
|
|
|
|
|
// Create the return type that was a special structure
|
|
|
@@ -3756,7 +3953,14 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
|
|
spv::IdImmediate sample = { true, sampler.ms ? *(opIt++) : builder.makeUintConstant(0) };
|
|
|
operands.push_back(sample);
|
|
|
|
|
|
- spv::Id resultTypeId = builder.makePointer(spv::StorageClassImage, resultType());
|
|
|
+ spv::Id resultTypeId;
|
|
|
+ // imageAtomicStore has a void return type so base the pointer type on
|
|
|
+ // the type of the value operand.
|
|
|
+ if (node->getOp() == glslang::EOpImageAtomicStore) {
|
|
|
+ resultTypeId = builder.makePointer(spv::StorageClassImage, builder.getTypeId(operands[2].word));
|
|
|
+ } else {
|
|
|
+ resultTypeId = builder.makePointer(spv::StorageClassImage, resultType());
|
|
|
+ }
|
|
|
spv::Id pointer = builder.createOp(spv::OpImageTexelPointer, resultTypeId, operands);
|
|
|
|
|
|
std::vector<spv::Id> operands;
|
|
|
@@ -3972,6 +4176,16 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+ // nonprivate
|
|
|
+ if (imageType.getQualifier().nonprivate) {
|
|
|
+ params.nonprivate = true;
|
|
|
+ }
|
|
|
+
|
|
|
+ // volatile
|
|
|
+ if (imageType.getQualifier().volatil) {
|
|
|
+ params.volatil = true;
|
|
|
+ }
|
|
|
+
|
|
|
std::vector<spv::Id> result( 1,
|
|
|
builder.createTextureCall(precision, resultType(), sparse, cracked.fetch, cracked.proj, cracked.gather, noImplicitLod, params)
|
|
|
);
|
|
|
@@ -5340,8 +5554,14 @@ spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv
|
|
|
opCode = spv::OpAtomicIDecrement;
|
|
|
break;
|
|
|
case glslang::EOpAtomicCounter:
|
|
|
+ case glslang::EOpImageAtomicLoad:
|
|
|
+ case glslang::EOpAtomicLoad:
|
|
|
opCode = spv::OpAtomicLoad;
|
|
|
break;
|
|
|
+ case glslang::EOpAtomicStore:
|
|
|
+ case glslang::EOpImageAtomicStore:
|
|
|
+ opCode = spv::OpAtomicStore;
|
|
|
+ break;
|
|
|
default:
|
|
|
assert(0);
|
|
|
break;
|
|
|
@@ -5352,36 +5572,82 @@ spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv
|
|
|
|
|
|
// Sort out the operands
|
|
|
// - mapping from glslang -> SPV
|
|
|
- // - there are extra SPV operands with no glslang source
|
|
|
+ // - there are extra SPV operands that are optional in glslang
|
|
|
// - compare-exchange swaps the value and comparator
|
|
|
// - compare-exchange has an extra memory semantics
|
|
|
// - EOpAtomicCounterDecrement needs a post decrement
|
|
|
+ spv::Id pointerId = 0, compareId = 0, valueId = 0;
|
|
|
+ // scope defaults to Device in the old model, QueueFamilyKHR in the new model
|
|
|
+ spv::Id scopeId;
|
|
|
+ if (glslangIntermediate->usingVulkanMemoryModel()) {
|
|
|
+ scopeId = builder.makeUintConstant(spv::ScopeQueueFamilyKHR);
|
|
|
+ } else {
|
|
|
+ scopeId = builder.makeUintConstant(spv::ScopeDevice);
|
|
|
+ }
|
|
|
+ // semantics default to relaxed
|
|
|
+ spv::Id semanticsId = builder.makeUintConstant(spv::MemorySemanticsMaskNone);
|
|
|
+ spv::Id semanticsId2 = semanticsId;
|
|
|
+
|
|
|
+ pointerId = operands[0];
|
|
|
+ if (opCode == spv::OpAtomicIIncrement || opCode == spv::OpAtomicIDecrement) {
|
|
|
+ // no additional operands
|
|
|
+ } else if (opCode == spv::OpAtomicCompareExchange) {
|
|
|
+ compareId = operands[1];
|
|
|
+ valueId = operands[2];
|
|
|
+ if (operands.size() > 3) {
|
|
|
+ scopeId = operands[3];
|
|
|
+ semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[4]) | builder.getConstantScalar(operands[5]));
|
|
|
+ semanticsId2 = builder.makeUintConstant(builder.getConstantScalar(operands[6]) | builder.getConstantScalar(operands[7]));
|
|
|
+ }
|
|
|
+ } else if (opCode == spv::OpAtomicLoad) {
|
|
|
+ if (operands.size() > 1) {
|
|
|
+ scopeId = operands[1];
|
|
|
+ semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]));
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ // atomic store or RMW
|
|
|
+ valueId = operands[1];
|
|
|
+ if (operands.size() > 2) {
|
|
|
+ scopeId = operands[2];
|
|
|
+ semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[3]) | builder.getConstantScalar(operands[4]));
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ // Check for capabilities
|
|
|
+ unsigned semanticsImmediate = builder.getConstantScalar(semanticsId) | builder.getConstantScalar(semanticsId2);
|
|
|
+ if (semanticsImmediate & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
|
|
|
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (glslangIntermediate->usingVulkanMemoryModel() && builder.getConstantScalar(scopeId) == spv::ScopeDevice) {
|
|
|
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
|
|
|
+ }
|
|
|
+
|
|
|
std::vector<spv::Id> spvAtomicOperands; // hold the spv operands
|
|
|
- auto opIt = operands.begin(); // walk the glslang operands
|
|
|
- spvAtomicOperands.push_back(*(opIt++));
|
|
|
- spvAtomicOperands.push_back(builder.makeUintConstant(spv::ScopeDevice)); // TBD: what is the correct scope?
|
|
|
- spvAtomicOperands.push_back(builder.makeUintConstant(spv::MemorySemanticsMaskNone)); // TBD: what are the correct memory semantics?
|
|
|
+ spvAtomicOperands.push_back(pointerId);
|
|
|
+ spvAtomicOperands.push_back(scopeId);
|
|
|
+ spvAtomicOperands.push_back(semanticsId);
|
|
|
if (opCode == spv::OpAtomicCompareExchange) {
|
|
|
- // There are 2 memory semantics for compare-exchange. And the operand order of "comparator" and "new value" in GLSL
|
|
|
- // differs from that in SPIR-V. Hence, special processing is required.
|
|
|
- spvAtomicOperands.push_back(builder.makeUintConstant(spv::MemorySemanticsMaskNone));
|
|
|
- spvAtomicOperands.push_back(*(opIt + 1));
|
|
|
- spvAtomicOperands.push_back(*opIt);
|
|
|
- opIt += 2;
|
|
|
+ spvAtomicOperands.push_back(semanticsId2);
|
|
|
+ spvAtomicOperands.push_back(valueId);
|
|
|
+ spvAtomicOperands.push_back(compareId);
|
|
|
+ } else if (opCode != spv::OpAtomicLoad && opCode != spv::OpAtomicIIncrement && opCode != spv::OpAtomicIDecrement) {
|
|
|
+ spvAtomicOperands.push_back(valueId);
|
|
|
}
|
|
|
|
|
|
- // Add the rest of the operands, skipping any that were dealt with above.
|
|
|
- for (; opIt != operands.end(); ++opIt)
|
|
|
- spvAtomicOperands.push_back(*opIt);
|
|
|
-
|
|
|
- spv::Id resultId = builder.createOp(opCode, typeId, spvAtomicOperands);
|
|
|
+ if (opCode == spv::OpAtomicStore) {
|
|
|
+ builder.createNoResultOp(opCode, spvAtomicOperands);
|
|
|
+ return 0;
|
|
|
+ } else {
|
|
|
+ spv::Id resultId = builder.createOp(opCode, typeId, spvAtomicOperands);
|
|
|
|
|
|
- // GLSL and HLSL atomic-counter decrement return post-decrement value,
|
|
|
- // while SPIR-V returns pre-decrement value. Translate between these semantics.
|
|
|
- if (op == glslang::EOpAtomicCounterDecrement)
|
|
|
- resultId = builder.createBinOp(spv::OpISub, typeId, resultId, builder.makeIntConstant(1));
|
|
|
+ // GLSL and HLSL atomic-counter decrement return post-decrement value,
|
|
|
+ // while SPIR-V returns pre-decrement value. Translate between these semantics.
|
|
|
+ if (op == glslang::EOpAtomicCounterDecrement)
|
|
|
+ resultId = builder.createBinOp(spv::OpISub, typeId, resultId, builder.makeIntConstant(1));
|
|
|
|
|
|
- return resultId;
|
|
|
+ return resultId;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
// Create group invocation operations.
|
|
|
@@ -6282,7 +6548,41 @@ spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::
|
|
|
libCall = spv::InterpolateAtVertexAMD;
|
|
|
break;
|
|
|
#endif
|
|
|
-
|
|
|
+ case glslang::EOpBarrier:
|
|
|
+ {
|
|
|
+ // This is for the extended controlBarrier function, with four operands.
|
|
|
+ // The unextended barrier() goes through createNoArgOperation.
|
|
|
+ assert(operands.size() == 4);
|
|
|
+ unsigned int executionScope = builder.getConstantScalar(operands[0]);
|
|
|
+ unsigned int memoryScope = builder.getConstantScalar(operands[1]);
|
|
|
+ unsigned int semantics = builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]);
|
|
|
+ builder.createControlBarrier((spv::Scope)executionScope, (spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
|
|
|
+ if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
|
|
|
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
|
|
|
+ }
|
|
|
+ if (glslangIntermediate->usingVulkanMemoryModel() && (executionScope == spv::ScopeDevice || memoryScope == spv::ScopeDevice)) {
|
|
|
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
|
|
|
+ }
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ case glslang::EOpMemoryBarrier:
|
|
|
+ {
|
|
|
+ // This is for the extended memoryBarrier function, with three operands.
|
|
|
+ // The unextended memoryBarrier() goes through createNoArgOperation.
|
|
|
+ assert(operands.size() == 3);
|
|
|
+ unsigned int memoryScope = builder.getConstantScalar(operands[0]);
|
|
|
+ unsigned int semantics = builder.getConstantScalar(operands[1]) | builder.getConstantScalar(operands[2]);
|
|
|
+ builder.createMemoryBarrier((spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
|
|
|
+ if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
|
|
|
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
|
|
|
+ }
|
|
|
+ if (glslangIntermediate->usingVulkanMemoryModel() && memoryScope == spv::ScopeDevice) {
|
|
|
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
|
|
|
+ }
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ break;
|
|
|
default:
|
|
|
return 0;
|
|
|
}
|
|
|
@@ -6351,7 +6651,8 @@ spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::
|
|
|
// Intrinsics with no arguments (or no return value, and no precision).
|
|
|
spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId)
|
|
|
{
|
|
|
- // TODO: get the barrier operands correct
|
|
|
+ // GLSL memory barriers use queuefamily scope in new model, device scope in old model
|
|
|
+ spv::Scope memoryBarrierScope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
|
|
|
|
|
|
switch (op) {
|
|
|
case glslang::EOpEmitVertex:
|
|
|
@@ -6362,11 +6663,14 @@ spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv:
|
|
|
return 0;
|
|
|
case glslang::EOpBarrier:
|
|
|
if (glslangIntermediate->getStage() == EShLangTessControl) {
|
|
|
- builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeInvocation, spv::MemorySemanticsMaskNone);
|
|
|
- // TODO: prefer the following, when available:
|
|
|
- // builder.createControlBarrier(spv::ScopePatch, spv::ScopePatch,
|
|
|
- // spv::MemorySemanticsPatchMask |
|
|
|
- // spv::MemorySemanticsAcquireReleaseMask);
|
|
|
+ if (glslangIntermediate->usingVulkanMemoryModel()) {
|
|
|
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
|
|
|
+ spv::MemorySemanticsOutputMemoryKHRMask |
|
|
|
+ spv::MemorySemanticsAcquireReleaseMask);
|
|
|
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
|
|
|
+ } else {
|
|
|
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeInvocation, spv::MemorySemanticsMaskNone);
|
|
|
+ }
|
|
|
} else {
|
|
|
builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
|
|
|
spv::MemorySemanticsWorkgroupMemoryMask |
|
|
|
@@ -6374,24 +6678,24 @@ spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv:
|
|
|
}
|
|
|
return 0;
|
|
|
case glslang::EOpMemoryBarrier:
|
|
|
- builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsAllMemory |
|
|
|
- spv::MemorySemanticsAcquireReleaseMask);
|
|
|
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAllMemory |
|
|
|
+ spv::MemorySemanticsAcquireReleaseMask);
|
|
|
return 0;
|
|
|
case glslang::EOpMemoryBarrierAtomicCounter:
|
|
|
- builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsAtomicCounterMemoryMask |
|
|
|
- spv::MemorySemanticsAcquireReleaseMask);
|
|
|
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAtomicCounterMemoryMask |
|
|
|
+ spv::MemorySemanticsAcquireReleaseMask);
|
|
|
return 0;
|
|
|
case glslang::EOpMemoryBarrierBuffer:
|
|
|
- builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask |
|
|
|
- spv::MemorySemanticsAcquireReleaseMask);
|
|
|
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsUniformMemoryMask |
|
|
|
+ spv::MemorySemanticsAcquireReleaseMask);
|
|
|
return 0;
|
|
|
case glslang::EOpMemoryBarrierImage:
|
|
|
- builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsImageMemoryMask |
|
|
|
- spv::MemorySemanticsAcquireReleaseMask);
|
|
|
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsImageMemoryMask |
|
|
|
+ spv::MemorySemanticsAcquireReleaseMask);
|
|
|
return 0;
|
|
|
case glslang::EOpMemoryBarrierShared:
|
|
|
- builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsWorkgroupMemoryMask |
|
|
|
- spv::MemorySemanticsAcquireReleaseMask);
|
|
|
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsWorkgroupMemoryMask |
|
|
|
+ spv::MemorySemanticsAcquireReleaseMask);
|
|
|
return 0;
|
|
|
case glslang::EOpGroupMemoryBarrier:
|
|
|
builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsAllMemory |
|
|
|
@@ -6520,7 +6824,7 @@ spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol
|
|
|
|
|
|
if (symbol->getType().isImage()) {
|
|
|
std::vector<spv::Decoration> memory;
|
|
|
- TranslateMemoryDecoration(symbol->getType().getQualifier(), memory);
|
|
|
+ TranslateMemoryDecoration(symbol->getType().getQualifier(), memory, glslangIntermediate->usingVulkanMemoryModel());
|
|
|
for (unsigned int i = 0; i < memory.size(); ++i)
|
|
|
builder.addDecoration(id, memory[i]);
|
|
|
}
|