@@ -3,6 +3,7 @@
 // Copyright (C) 2012-2015 LunarG, Inc.
 // Copyright (C) 2015-2018 Google, Inc.
 // Copyright (C) 2017, 2019 ARM Limited.
+// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
 //
 // All rights reserved.
 //
@@ -245,7 +246,9 @@ void TParseContext::handlePragma(const TSourceLoc& loc, const TVector<TString>&
         else if (tokens[2].compare("off") == 0)
             contextPragma.optimize = false;
         else {
-            error(loc, "\"on\" or \"off\" expected after '(' for 'optimize' pragma", "#pragma", "");
+            if (relaxedErrors())
+                // If an implementation does not recognize the tokens following #pragma, then it will ignore that pragma.
+                warn(loc, "\"on\" or \"off\" expected after '(' for 'optimize' pragma", "#pragma", "");
             return;
         }

@@ -269,7 +272,9 @@ void TParseContext::handlePragma(const TSourceLoc& loc, const TVector<TString>&
         else if (tokens[2].compare("off") == 0)
             contextPragma.debug = false;
         else {
-            error(loc, "\"on\" or \"off\" expected after '(' for 'debug' pragma", "#pragma", "");
+            if (relaxedErrors())
+                // If an implementation does not recognize the tokens following #pragma, then it will ignore that pragma.
+                warn(loc, "\"on\" or \"off\" expected after '(' for 'debug' pragma", "#pragma", "");
             return;
         }

@@ -427,8 +432,18 @@ TIntermTyped* TParseContext::handleBracketDereference(const TSourceLoc& loc, TIn
 #ifndef GLSLANG_WEB
     if (base->isReference() && ! base->isArray()) {
         requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "buffer reference indexing");
-        result = intermediate.addBinaryMath(EOpAdd, base, index, loc);
-        result->setType(base->getType());
+        if (base->getType().getReferentType()->containsUnsizedArray()) {
+            error(loc, "cannot index reference to buffer containing an unsized array", "", "");
+            result = nullptr;
+        } else {
+            result = intermediate.addBinaryMath(EOpAdd, base, index, loc);
+            if (result != nullptr)
+                result->setType(base->getType());
+        }
+        if (result == nullptr) {
+            error(loc, "cannot index buffer reference", "", "");
+            result = intermediate.addConstantUnion(0.0, EbtFloat, loc);
+        }
         return result;
     }
     if (base->getAsSymbolNode() && isIoResizeArray(base->getType()))
@@ -740,8 +755,11 @@ TIntermTyped* TParseContext::handleBinaryMath(const TSourceLoc& loc, const char*
     }

     TIntermTyped* result = nullptr;
-    if (allowed)
+    if (allowed) {
+        if ((left->isReference() || right->isReference()))
+            requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "buffer reference math");
         result = intermediate.addBinaryMath(op, left, right, loc);
+    }

     if (result == nullptr)
         binaryOpError(loc, str, left->getCompleteString(), right->getCompleteString());
@@ -821,50 +839,7 @@ TIntermTyped* TParseContext::handleDotDereference(const TSourceLoc& loc, TInterm
     TIntermTyped* result = base;
     if ((base->isVector() || base->isScalar()) &&
         (base->isFloatingDomain() || base->isIntegerDomain() || base->getBasicType() == EbtBool)) {
-        if (base->isScalar()) {
-            const char* dotFeature = "scalar swizzle";
-            requireProfile(loc, ~EEsProfile, dotFeature);
-            profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, dotFeature);
-        }
-
-        TSwizzleSelectors<TVectorSelector> selectors;
-        parseSwizzleSelector(loc, field, base->getVectorSize(), selectors);
-
-        if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitFloat())
-            requireFloat16Arithmetic(loc, ".", "can't swizzle types containing float16");
-        if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitInt())
-            requireInt16Arithmetic(loc, ".", "can't swizzle types containing (u)int16");
-        if (base->isVector() && selectors.size() != 1 && base->getType().contains8BitInt())
-            requireInt8Arithmetic(loc, ".", "can't swizzle types containing (u)int8");
-
-        if (base->isScalar()) {
-            if (selectors.size() == 1)
-                return result;
-            else {
-                TType type(base->getBasicType(), EvqTemporary, selectors.size());
-                // Swizzle operations propagate specialization-constantness
-                if (base->getQualifier().isSpecConstant())
-                    type.getQualifier().makeSpecConstant();
-                return addConstructor(loc, base, type);
-            }
-        }
-
-        if (base->getType().getQualifier().isFrontEndConstant())
-            result = intermediate.foldSwizzle(base, selectors, loc);
-        else {
-            if (selectors.size() == 1) {
-                TIntermTyped* index = intermediate.addConstantUnion(selectors[0], loc);
-                result = intermediate.addIndex(EOpIndexDirect, base, index, loc);
-                result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision));
-            } else {
-                TIntermTyped* index = intermediate.addSwizzle(selectors, loc);
-                result = intermediate.addIndex(EOpVectorSwizzle, base, index, loc);
-                result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision, selectors.size()));
-            }
-            // Swizzle operations propagate specialization-constantness
-            if (base->getType().getQualifier().isSpecConstant())
-                result->getWritableType().getQualifier().makeSpecConstant();
-        }
+        result = handleDotSwizzle(loc, base, field);
     } else if (base->isStruct() || base->isReference()) {
         const TTypeList* fields = base->isReference() ?
                                       base->getType().getReferentType()->getStruct() :
@@ -905,6 +880,60 @@ TIntermTyped* TParseContext::handleDotDereference(const TSourceLoc& loc, TInterm
     return result;
 }

+//
+// Handle seeing a base.swizzle, a subset of base.identifier in the grammar.
+//
+TIntermTyped* TParseContext::handleDotSwizzle(const TSourceLoc& loc, TIntermTyped* base, const TString& field)
+{
+    TIntermTyped* result = base;
+    if (base->isScalar()) {
+        const char* dotFeature = "scalar swizzle";
+        requireProfile(loc, ~EEsProfile, dotFeature);
+        profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, dotFeature);
+    }
+
+    TSwizzleSelectors<TVectorSelector> selectors;
+    parseSwizzleSelector(loc, field, base->getVectorSize(), selectors);
+
+    if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitFloat())
+        requireFloat16Arithmetic(loc, ".", "can't swizzle types containing float16");
+    if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitInt())
+        requireInt16Arithmetic(loc, ".", "can't swizzle types containing (u)int16");
+    if (base->isVector() && selectors.size() != 1 && base->getType().contains8BitInt())
+        requireInt8Arithmetic(loc, ".", "can't swizzle types containing (u)int8");
+
+    if (base->isScalar()) {
+        if (selectors.size() == 1)
+            return result;
+        else {
+            TType type(base->getBasicType(), EvqTemporary, selectors.size());
+            // Swizzle operations propagate specialization-constantness
+            if (base->getQualifier().isSpecConstant())
+                type.getQualifier().makeSpecConstant();
+            return addConstructor(loc, base, type);
+        }
+    }
+
+    if (base->getType().getQualifier().isFrontEndConstant())
+        result = intermediate.foldSwizzle(base, selectors, loc);
+    else {
+        if (selectors.size() == 1) {
+            TIntermTyped* index = intermediate.addConstantUnion(selectors[0], loc);
+            result = intermediate.addIndex(EOpIndexDirect, base, index, loc);
+            result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision));
+        } else {
+            TIntermTyped* index = intermediate.addSwizzle(selectors, loc);
+            result = intermediate.addIndex(EOpVectorSwizzle, base, index, loc);
+            result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision, selectors.size()));
+        }
+        // Swizzle operations propagate specialization-constantness
+        if (base->getType().getQualifier().isSpecConstant())
+            result->getWritableType().getQualifier().makeSpecConstant();
+    }
+
+    return result;
+}
+
 void TParseContext::blockMemberExtensionCheck(const TSourceLoc& loc, const TIntermTyped* base, int member, const TString& memberName)
 {
     // a block that needs extension checking is either 'base', or if arrayed,
@@ -1273,7 +1302,7 @@ TIntermTyped* TParseContext::handleBuiltInFunctionCall(TSourceLoc loc, TIntermNo
     TIntermTyped *result = intermediate.addBuiltInFunctionCall(loc, function.getBuiltInOp(),
                                                                function.getParamCount() == 1,
                                                                arguments, function.getType());
-    if (obeyPrecisionQualifiers())
+    if (result != nullptr && obeyPrecisionQualifiers())
         computeBuiltinPrecisions(*result, function);

     if (result == nullptr) {
@@ -1353,6 +1382,9 @@ void TParseContext::computeBuiltinPrecisions(TIntermTyped& node, const TFunction
     case EOpInterpolateAtSample:
         numArgs = 1;
         break;
+    case EOpDebugPrintf:
+        numArgs = 0;
+        break;
     default:
         break;
     }
@@ -1390,23 +1422,28 @@ TIntermNode* TParseContext::handleReturnValue(const TSourceLoc& loc, TIntermType
 #endif

     functionReturnsValue = true;
+    TIntermBranch* branch = nullptr;
     if (currentFunctionType->getBasicType() == EbtVoid) {
         error(loc, "void function cannot return a value", "return", "");
-        return intermediate.addBranch(EOpReturn, loc);
+        branch = intermediate.addBranch(EOpReturn, loc);
     } else if (*currentFunctionType != value->getType()) {
         TIntermTyped* converted = intermediate.addConversion(EOpReturn, *currentFunctionType, value);
         if (converted) {
             if (*currentFunctionType != converted->getType())
                 error(loc, "cannot convert return value to function return type", "return", "");
             if (version < 420)
-                warn(loc, "type conversion on return values was not explicitly allowed until version 420", "return", "");
-            return intermediate.addBranch(EOpReturn, converted, loc);
+                warn(loc, "type conversion on return values was not explicitly allowed until version 420",
+                     "return", "");
+            branch = intermediate.addBranch(EOpReturn, converted, loc);
         } else {
             error(loc, "type does not match, or is not convertible to, the function's return type", "return", "");
-            return intermediate.addBranch(EOpReturn, value, loc);
+            branch = intermediate.addBranch(EOpReturn, value, loc);
         }
     } else
-        return intermediate.addBranch(EOpReturn, value, loc);
+        branch = intermediate.addBranch(EOpReturn, value, loc);
+
+    branch->updatePrecision(currentFunctionType->getQualifier().precision);
+    return branch;
 }

 // See if the operation is being done in an illegal location.
@@ -1650,6 +1687,14 @@ TIntermTyped* TParseContext::addOutputArgumentConversions(const TFunction& funct
 #endif
 }

+TIntermTyped* TParseContext::addAssign(const TSourceLoc& loc, TOperator op, TIntermTyped* left, TIntermTyped* right)
+{
+    if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference())
+        requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "+= and -= on a buffer reference");
+
+    return intermediate.addAssign(op, left, right, loc);
+}
+
 void TParseContext::memorySemanticsCheck(const TSourceLoc& loc, const TFunction& fnCandidate, const TIntermOperator& callNode)
 {
     const TIntermSequence* argp = &callNode.getAsAggregate()->getSequence();
@@ -1672,6 +1717,9 @@ void TParseContext::memorySemanticsCheck(const TSourceLoc& loc, const TFunction&
     unsigned int semantics = 0, storageClassSemantics = 0;
     unsigned int semantics2 = 0, storageClassSemantics2 = 0;

+    const TIntermTyped* arg0 = (*argp)[0]->getAsTyped();
+    const bool isMS = arg0->getBasicType() == EbtSampler && arg0->getType().getSampler().isMultiSample();
+
     // Grab the semantics and storage class semantics from the operands, based on opcode
     switch (callNode.getOp()) {
     case EOpAtomicAdd:
@@ -1704,18 +1752,18 @@ void TParseContext::memorySemanticsCheck(const TSourceLoc& loc, const TFunction&
     case EOpImageAtomicXor:
     case EOpImageAtomicExchange:
     case EOpImageAtomicStore:
-        storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
-        semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
+        storageClassSemantics = (*argp)[isMS ? 5 : 4]->getAsConstantUnion()->getConstArray()[0].getIConst();
+        semantics = (*argp)[isMS ? 6 : 5]->getAsConstantUnion()->getConstArray()[0].getIConst();
         break;
     case EOpImageAtomicLoad:
-        storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
-        semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
+        storageClassSemantics = (*argp)[isMS ? 4 : 3]->getAsConstantUnion()->getConstArray()[0].getIConst();
+        semantics = (*argp)[isMS ? 5 : 4]->getAsConstantUnion()->getConstArray()[0].getIConst();
         break;
     case EOpImageAtomicCompSwap:
-        storageClassSemantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
-        semantics = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst();
-        storageClassSemantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst();
-        semantics2 = (*argp)[8]->getAsConstantUnion()->getConstArray()[0].getIConst();
+        storageClassSemantics = (*argp)[isMS ? 6 : 5]->getAsConstantUnion()->getConstArray()[0].getIConst();
+        semantics = (*argp)[isMS ? 7 : 6]->getAsConstantUnion()->getConstArray()[0].getIConst();
+        storageClassSemantics2 = (*argp)[isMS ? 8 : 7]->getAsConstantUnion()->getConstArray()[0].getIConst();
+        semantics2 = (*argp)[isMS ? 9 : 8]->getAsConstantUnion()->getConstArray()[0].getIConst();
         break;

     case EOpBarrier:
@@ -2006,18 +2054,20 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
         if (arg > 0) {

#ifndef GLSLANG_WEB
-            bool f16ShadowCompare = (*argp)[1]->getAsTyped()->getBasicType() == EbtFloat16 && arg0->getType().getSampler().shadow;
+            bool f16ShadowCompare = (*argp)[1]->getAsTyped()->getBasicType() == EbtFloat16 &&
+                                    arg0->getType().getSampler().shadow;
             if (f16ShadowCompare)
                 ++arg;
#endif
-            if (! (*argp)[arg]->getAsConstantUnion())
+            if (! (*argp)[arg]->getAsTyped()->getQualifier().isConstant())
                 error(loc, "argument must be compile-time constant", "texel offset", "");
-            else {
+            else if ((*argp)[arg]->getAsConstantUnion()) {
                 const TType& type = (*argp)[arg]->getAsTyped()->getType();
                 for (int c = 0; c < type.getVectorSize(); ++c) {
                     int offset = (*argp)[arg]->getAsConstantUnion()->getConstArray()[c].getIConst();
                     if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset)
-                        error(loc, "value is out of range:", "texel offset", "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]");
+                        error(loc, "value is out of range:", "texel offset",
+                              "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]");
                 }
             }
         }
@@ -2026,15 +2076,32 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
     }

#ifndef GLSLANG_WEB
-    case EOpTraceNV:
+    case EOpTrace:
         if (!(*argp)[10]->getAsConstantUnion())
             error(loc, "argument must be compile-time constant", "payload number", "");
         break;
-    case EOpExecuteCallableNV:
+    case EOpExecuteCallable:
         if (!(*argp)[1]->getAsConstantUnion())
             error(loc, "argument must be compile-time constant", "callable data number", "");
         break;

+    case EOpRayQueryGetIntersectionType:
+    case EOpRayQueryGetIntersectionT:
+    case EOpRayQueryGetIntersectionInstanceCustomIndex:
+    case EOpRayQueryGetIntersectionInstanceId:
+    case EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset:
+    case EOpRayQueryGetIntersectionGeometryIndex:
+    case EOpRayQueryGetIntersectionPrimitiveIndex:
+    case EOpRayQueryGetIntersectionBarycentrics:
+    case EOpRayQueryGetIntersectionFrontFace:
+    case EOpRayQueryGetIntersectionObjectRayDirection:
+    case EOpRayQueryGetIntersectionObjectRayOrigin:
+    case EOpRayQueryGetIntersectionObjectToWorld:
+    case EOpRayQueryGetIntersectionWorldToObject:
+        if (!(*argp)[1]->getAsConstantUnion())
+            error(loc, "argument must be compile-time constant", "committed", "");
+        break;
+
     case EOpTextureQuerySamples:
     case EOpImageQuerySamples:
         // GL_ARB_shader_texture_image_samples
@@ -2058,7 +2125,14 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
         if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui)
             error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), "");
     } else {
-        if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)
+        bool isImageAtomicOnFloatAllowed = ((fnCandidate.getName().compare(0, 14, "imageAtomicAdd") == 0) ||
+                                            (fnCandidate.getName().compare(0, 15, "imageAtomicLoad") == 0) ||
+                                            (fnCandidate.getName().compare(0, 16, "imageAtomicStore") == 0) ||
+                                            (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") == 0));
+        if (imageType.getSampler().type == EbtFloat && isImageAtomicOnFloatAllowed &&
+            (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)) // imageAtomicExchange doesn't require GL_EXT_shader_atomic_float
+            requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float, fnCandidate.getName().c_str());
+        if (!isImageAtomicOnFloatAllowed)
             error(loc, "only supported on integer images", fnCandidate.getName().c_str(), "");
         else if (imageType.getQualifier().getFormat() != ElfR32f && isEsProfile())
             error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), "");
@@ -2087,10 +2161,18 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
         if (argp->size() > 3) {
             requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
             memorySemanticsCheck(loc, fnCandidate, callNode);
+            if ((callNode.getOp() == EOpAtomicAdd || callNode.getOp() == EOpAtomicExchange ||
+                callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpAtomicStore) &&
+                (arg0->getType().isFloatingDomain())) {
+                requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float, fnCandidate.getName().c_str());
+            }
         } else if (arg0->getType().getBasicType() == EbtInt64 || arg0->getType().getBasicType() == EbtUint64) {
             const char* const extensions[2] = { E_GL_NV_shader_atomic_int64,
                                                 E_GL_EXT_shader_atomic_int64 };
             requireExtensions(loc, 2, extensions, fnCandidate.getName().c_str());
+        } else if ((callNode.getOp() == EOpAtomicAdd || callNode.getOp() == EOpAtomicExchange) &&
+                   (arg0->getType().isFloatingDomain())) {
+            requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float, fnCandidate.getName().c_str());
         }
         break;
     }
@@ -2172,6 +2254,28 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
             memorySemanticsCheck(loc, fnCandidate, callNode);
         }
         break;
+
+    case EOpMix:
+        if (profile == EEsProfile && version < 310) {
+            // Look for specific signatures
+            if ((*argp)[0]->getAsTyped()->getBasicType() != EbtFloat &&
+                (*argp)[1]->getAsTyped()->getBasicType() != EbtFloat &&
+                (*argp)[2]->getAsTyped()->getBasicType() == EbtBool) {
+                requireExtensions(loc, 1, &E_GL_EXT_shader_integer_mix, "specific signature of builtin mix");
+            }
+        }
+
+        if (profile != EEsProfile && version < 450) {
+            if ((*argp)[0]->getAsTyped()->getBasicType() != EbtFloat &&
+                (*argp)[0]->getAsTyped()->getBasicType() != EbtDouble &&
+                (*argp)[1]->getAsTyped()->getBasicType() != EbtFloat &&
+                (*argp)[1]->getAsTyped()->getBasicType() != EbtDouble &&
+                (*argp)[2]->getAsTyped()->getBasicType() == EbtBool) {
+                requireExtensions(loc, 1, &E_GL_EXT_shader_integer_mix, fnCandidate.getName().c_str());
+            }
+        }
+
+        break;
#endif

     default:
@@ -2698,7 +2802,10 @@ void TParseContext::reservedPpErrorCheck(const TSourceLoc& loc, const char* iden
     if (strncmp(identifier, "GL_", 3) == 0)
         ppError(loc, "names beginning with \"GL_\" can't be (un)defined:", op, identifier);
     else if (strncmp(identifier, "defined", 8) == 0)
-        ppError(loc, "\"defined\" can't be (un)defined:", op, identifier);
+        if (relaxedErrors())
+            ppWarn(loc, "\"defined\" is (un)defined:", op, identifier);
+        else
+            ppError(loc, "\"defined\" can't be (un)defined:", op, identifier);
     else if (strstr(identifier, "__") != 0) {
         if (isEsProfile() && version >= 300 &&
             (strcmp(identifier, "__LINE__") == 0 ||
@@ -2706,7 +2813,7 @@ void TParseContext::reservedPpErrorCheck(const TSourceLoc& loc, const char* iden
              strcmp(identifier, "__VERSION__") == 0))
             ppError(loc, "predefined names can't be (un)defined:", op, identifier);
         else {
-            if (isEsProfile() && version < 300)
+            if (isEsProfile() && version < 300 && !relaxedErrors())
                 ppError(loc, "names containing consecutive underscores are reserved, and an error if version < 300:", op, identifier);
             else
                 ppWarn(loc, "names containing consecutive underscores are reserved:", op, identifier);
@@ -3083,7 +3190,7 @@ bool TParseContext::constructorError(const TSourceLoc& loc, TIntermNode* node, T
         error(loc, "constructor argument does not have a type", "constructor", "");
         return true;
     }
-    if (op != EOpConstructStruct && typed->getBasicType() == EbtSampler) {
+    if (op != EOpConstructStruct && op != EOpConstructNonuniform && typed->getBasicType() == EbtSampler) {
         error(loc, "cannot convert a sampler", "constructor", "");
         return true;
     }
@@ -3128,7 +3235,7 @@ bool TParseContext::constructorTextureSamplerError(const TSourceLoc& loc, const
     if (function[0].type->getBasicType() != EbtSampler ||
         ! function[0].type->getSampler().isTexture() ||
         function[0].type->isArray()) {
-        error(loc, "sampler-constructor first argument must be a scalar textureXXX type", token, "");
+        error(loc, "sampler-constructor first argument must be a scalar *texture* type", token, "");
         return true;
     }
     // simulate the first argument's impact on the result type, so it can be compared with the encapsulated operator!=()
@@ -3136,7 +3243,8 @@ bool TParseContext::constructorTextureSamplerError(const TSourceLoc& loc, const
     texture.setCombined(false);
     texture.shadow = false;
     if (texture != function[0].type->getSampler()) {
-        error(loc, "sampler-constructor first argument must match type and dimensionality of constructor type", token, "");
+        error(loc, "sampler-constructor first argument must be a *texture* type"
+                   " matching the dimensionality and sampled type of the constructor", token, "");
         return true;
     }

@@ -3146,7 +3254,7 @@ bool TParseContext::constructorTextureSamplerError(const TSourceLoc& loc, const
     if (  function[1].type->getBasicType() != EbtSampler ||
         ! function[1].type->getSampler().isPureSampler() ||
           function[1].type->isArray()) {
-        error(loc, "sampler-constructor second argument must be a scalar type 'sampler'", token, "");
+        error(loc, "sampler-constructor second argument must be a scalar sampler or samplerShadow", token, "");
         return true;
     }

@@ -3222,14 +3330,14 @@ void TParseContext::atomicUintCheck(const TSourceLoc& loc, const TType& type, co
         error(loc, "atomic_uints can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str());
 }

-void TParseContext::accStructNVCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
+void TParseContext::accStructCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
 {
     if (type.getQualifier().storage == EvqUniform)
         return;

-    if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAccStructNV))
+    if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAccStruct))
         error(loc, "non-uniform struct contains an accelerationStructureNV:", type.getBasicTypeString().c_str(), identifier.c_str());
-    else if (type.getBasicType() == EbtAccStructNV && type.getQualifier().storage != EvqUniform)
+    else if (type.getBasicType() == EbtAccStruct && type.getQualifier().storage != EvqUniform)
         error(loc, "accelerationStructureNV can only be used in uniform variables or function parameters:",
             type.getBasicTypeString().c_str(), identifier.c_str());

@@ -3327,6 +3435,11 @@ void TParseContext::globalQualifierTypeCheck(const TSourceLoc& loc, const TQuali
         !qualifier.hasBufferReference())
         error(loc, "buffers can be declared only as blocks", "buffer", "");

+    if (qualifier.storage != EvqVaryingIn && publicType.basicType == EbtDouble &&
+        extensionTurnedOn(E_GL_ARB_vertex_attrib_64bit) && language == EShLangVertex &&
+        version < 400) {
+        profileRequires(loc, ECoreProfile | ECompatibilityProfile, 410, E_GL_ARB_gpu_shader_fp64, "vertex-shader `double` type");
+    }
     if (qualifier.storage != EvqVaryingIn && qualifier.storage != EvqVaryingOut)
         return;

@@ -3377,7 +3490,7 @@ void TParseContext::globalQualifierTypeCheck(const TSourceLoc& loc, const TQuali
             profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays");
         }
         if (publicType.basicType == EbtDouble)
-            profileRequires(loc, ~EEsProfile, 410, nullptr, "vertex-shader `double` type input");
+            profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_vertex_attrib_64bit, "vertex-shader `double` type input");
         if (qualifier.isAuxiliary() || qualifier.isInterpolation() || qualifier.isMemory() || qualifier.invariant)
             error(loc, "vertex input cannot be further qualified", "", "");
         break;
@@ -3513,12 +3626,14 @@ void TParseContext::mergeQualifiers(const TSourceLoc& loc, TQualifier& dst, cons
         dst.precision = src.precision;

#ifndef GLSLANG_WEB
-    if (!force && ((src.coherent && (dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
-                   (src.devicecoherent && (dst.coherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
-                   (src.queuefamilycoherent && (dst.coherent || dst.devicecoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
-                   (src.workgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.subgroupcoherent)) ||
-                   (src.subgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent)))) {
-        error(loc, "only one coherent/devicecoherent/queuefamilycoherent/workgroupcoherent/subgroupcoherent qualifier allowed", GetPrecisionQualifierString(src.precision), "");
+    if (!force && ((src.coherent && (dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) ||
+                   (src.devicecoherent && (dst.coherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) ||
+                   (src.queuefamilycoherent && (dst.coherent || dst.devicecoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) ||
+                   (src.workgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.subgroupcoherent || dst.shadercallcoherent)) ||
+                   (src.subgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.shadercallcoherent)) ||
+                   (src.shadercallcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)))) {
+        error(loc, "only one coherent/devicecoherent/queuefamilycoherent/workgroupcoherent/subgroupcoherent/shadercallcoherent qualifier allowed",
+              GetPrecisionQualifierString(src.precision), "");
     }
#endif
     // Layout qualifiers
@@ -3546,6 +3661,7 @@ void TParseContext::mergeQualifiers(const TSourceLoc& loc, TQualifier& dst, cons
     MERGE_SINGLETON(queuefamilycoherent);
     MERGE_SINGLETON(workgroupcoherent);
     MERGE_SINGLETON(subgroupcoherent);
+    MERGE_SINGLETON(shadercallcoherent);
     MERGE_SINGLETON(nonprivate);
     MERGE_SINGLETON(volatil);
     MERGE_SINGLETON(restrict);
@@ -3983,7 +4099,7 @@ void TParseContext::checkRuntimeSizable(const TSourceLoc& loc, const TIntermType
     }

     // check for additional things allowed by GL_EXT_nonuniform_qualifier
-    if (base.getBasicType() == EbtSampler || base.getBasicType() == EbtAccStructNV ||
+    if (base.getBasicType() == EbtSampler || base.getBasicType() == EbtAccStruct || base.getBasicType() == EbtRayQuery ||
         (base.getBasicType() == EbtBlock && base.getType().getQualifier().isUniformOrBuffer()))
         requireExtensions(loc, 1, &E_GL_EXT_nonuniform_qualifier, "variable index");
     else
@@ -4487,6 +4603,7 @@ void TParseContext::paramCheckFix(const TSourceLoc& loc, const TQualifier& quali
     type.getQualifier().queuefamilycoherent = qualifier.queuefamilycoherent;
     type.getQualifier().workgroupcoherent = qualifier.workgroupcoherent;
     type.getQualifier().subgroupcoherent = qualifier.subgroupcoherent;
+    type.getQualifier().shadercallcoherent = qualifier.shadercallcoherent;
     type.getQualifier().nonprivate = qualifier.nonprivate;
     type.getQualifier().readonly = qualifier.readonly;
     type.getQualifier().writeonly = qualifier.writeonly;
@@ -4868,7 +4985,7 @@ void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publi
#ifndef GLSLANG_WEB
     if (id == TQualifier::getLayoutPackingString(ElpStd430)) {
         requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "std430");
-        profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, "std430");
+        profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_shader_storage_buffer_object, "std430");
         profileRequires(loc, EEsProfile, 310, nullptr, "std430");
         publicType.qualifier.layoutPacking = ElpStd430;
         return;
@@ -5067,13 +5184,19 @@ void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publi
                 return;
             }
         } else {
-            if (language == EShLangRayGenNV || language == EShLangIntersectNV ||
-                language == EShLangAnyHitNV || language == EShLangClosestHitNV ||
-                language == EShLangMissNV || language == EShLangCallableNV) {
-                if (id == "shaderrecordnv") {
-                    publicType.qualifier.layoutShaderRecordNV = true;
+            if (language == EShLangRayGen || language == EShLangIntersect ||
+                language == EShLangAnyHit || language == EShLangClosestHit ||
+                language == EShLangMiss || language == EShLangCallable) {
+                if (id == "shaderrecordnv" || id == "shaderrecordext") {
+                    if (id == "shaderrecordnv") {
+                        requireExtensions(loc, 1, &E_GL_NV_ray_tracing, "shader record NV");
+                    } else {
+                        requireExtensions(loc, 1, &E_GL_EXT_ray_tracing, "shader record EXT");
+                    }
+                    publicType.qualifier.layoutShaderRecord = true;
                     return;
                 }
+
             }
         }
         if (language == EShLangCompute) {
@@ -5088,6 +5211,12 @@ void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publi
             }
         }
     }
+
+    if (id == "primitive_culling") {
+        requireExtensions(loc, 1, &E_GL_EXT_ray_flags_primitive_culling, "primitive culling");
+        publicType.shaderQualifiers.layoutPrimitiveCulling = true;
+        return;
+    }
#endif

     error(loc, "unrecognized layout identifier, or qualifier requires assignment (e.g., binding = 4)", id.c_str(), "");
@@ -5135,6 +5264,7 @@ void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publi
             profileRequires(loc, EEsProfile, 310, nullptr, feature);
         }
         publicType.qualifier.layoutOffset = value;
+        publicType.qualifier.explicitOffset = true;
         if (nonLiteral)
             error(loc, "needs a literal integer", "offset", "");
         return;
@@ -5347,10 +5477,10 @@ void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publi

     case EShLangFragment:
         if (id == "index") {
-            requireProfile(loc, ECompatibilityProfile | ECoreProfile, "index layout qualifier on fragment output");
+            requireProfile(loc, ECompatibilityProfile | ECoreProfile | EEsProfile, "index layout qualifier on fragment output");
             const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
             profileRequires(loc, ECompatibilityProfile | ECoreProfile, 330, 2, exts, "index layout qualifier on fragment output");
-
+            profileRequires(loc, EEsProfile, 310, E_GL_EXT_blend_func_extended, "index layout qualifier on fragment output");
             // "It is also a compile-time error if a fragment shader sets a layout index to less than 0 or greater than 1."
             if (value < 0 || value > 1) {
                 value = 0;
@@ -5514,8 +5644,8 @@ void TParseContext::mergeObjectLayoutQualifiers(TQualifier& dst, const TQualifie
         dst.layoutViewportRelative = true;
     if (src.layoutSecondaryViewportRelativeOffset != -2048)
         dst.layoutSecondaryViewportRelativeOffset = src.layoutSecondaryViewportRelativeOffset;
-    if (src.layoutShaderRecordNV)
-        dst.layoutShaderRecordNV = true;
+    if (src.layoutShaderRecord)
+        dst.layoutShaderRecord = true;
     if (src.pervertexNV)
         dst.pervertexNV = true;
#endif
@@ -5583,7 +5713,7 @@ void TParseContext::layoutObjectCheck(const TSourceLoc& loc, const TSymbol& symb
             error(loc, "cannot specify on a variable declaration", "align", "");
             if (qualifier.isPushConstant())
                 error(loc, "can only specify on a uniform block", "push_constant", "");
-            if (qualifier.isShaderRecordNV())
+            if (qualifier.isShaderRecord())
                 error(loc, "can only specify on a buffer block", "shaderRecordNV", "");
         }
         break;
@@ -5657,11 +5787,11 @@ void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type)
             error(loc, "cannot apply to uniform or buffer block", "location", "");
         break;
#ifndef GLSLANG_WEB
-    case EvqPayloadNV:
-    case EvqPayloadInNV:
-    case EvqHitAttrNV:
-    case EvqCallableDataNV:
-    case EvqCallableDataInNV:
+    case EvqPayload:
+    case EvqPayloadIn:
+    case EvqHitAttr:
+    case EvqCallableData:
+    case EvqCallableDataIn:
         break;
#endif
     default:
@@ -5684,6 +5814,8 @@ void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type)
         int repeated = intermediate.addXfbBufferOffset(type);
         if (repeated >= 0)
             error(loc, "overlapping offsets at", "xfb_offset", "offset %d in buffer %d", repeated, qualifier.layoutXfbBuffer);
+        if (type.isUnsizedArray())
+            error(loc, "unsized array", "xfb_offset", "in buffer %d", qualifier.layoutXfbBuffer);

         // "The offset must be a multiple of the size of the first component of the first
         // qualified variable or block member, or a compile-time error results. Further, if applied to an aggregate
@@ -5756,7 +5888,7 @@ void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type)
     if (spvVersion.spv > 0) {
         if (qualifier.isUniformOrBuffer()) {
             if (type.getBasicType() == EbtBlock && !qualifier.isPushConstant() &&
-                   !qualifier.isShaderRecordNV() &&
+                   !qualifier.isShaderRecord() &&
                    !qualifier.hasAttachment() &&
                    !qualifier.hasBufferReference())
                 error(loc, "uniform/buffer blocks require layout(binding=X)", "binding", "");
@@ -5813,7 +5945,7 @@ void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type)
     if (qualifier.hasBufferReference() && type.getBasicType() != EbtBlock)
         error(loc, "can only be used with a block", "buffer_reference", "");

-    if (qualifier.isShaderRecordNV() && type.getBasicType() != EbtBlock)
+    if (qualifier.isShaderRecord() && type.getBasicType() != EbtBlock)
         error(loc, "can only be used with a block", "shaderRecordNV", "");

     // input attachment
@@ -5958,7 +6090,7 @@ void TParseContext::layoutQualifierCheck(const TSourceLoc& loc, const TQualifier
         if (qualifier.storage != EvqBuffer)
             error(loc, "can only be used with buffer", "buffer_reference", "");
     }
-    if (qualifier.isShaderRecordNV()) {
+    if (qualifier.isShaderRecord()) {
         if (qualifier.storage != EvqBuffer)
             error(loc, "can only be used with a buffer", "shaderRecordNV", "");
         if (qualifier.hasBinding())
@@ -5967,7 +6099,7 @@ void TParseContext::layoutQualifierCheck(const TSourceLoc& loc, const TQualifier
             error(loc, "cannot be used with shaderRecordNV", "set", "");

     }
-    if (qualifier.storage == EvqHitAttrNV && qualifier.hasLayout()) {
+    if (qualifier.storage == EvqHitAttr && qualifier.hasLayout()) {
         error(loc, "cannot apply layout qualifiers to hitAttributeNV variable", "hitAttributeNV", "");
     }
 }
@@ -6018,6 +6150,8 @@ void TParseContext::checkNoShaderLayouts(const TSourceLoc& loc, const TShaderQua
         error(loc, message, "num_views", "");
     if (shaderQualifiers.interlockOrdering != EioNone)
         error(loc, message, TQualifier::getInterlockOrderingString(shaderQualifiers.interlockOrdering), "");
+    if (shaderQualifiers.layoutPrimitiveCulling)
+        error(loc, "can only be applied as standalone", "primitive_culling", "");
#endif
 }

@@ -6079,6 +6213,15 @@ const TFunction* TParseContext::findFunction(const TSourceLoc& loc, const TFunct
#endif

     const TFunction* function = nullptr;
+
+    // debugPrintfEXT has var args and is in the symbol table as "debugPrintfEXT()",
+    // mangled to "debugPrintfEXT("
+    if (call.getName() == "debugPrintfEXT") {
+        TSymbol* symbol = symbolTable.find("debugPrintfEXT(", &builtIn);
+        if (symbol)
+            return symbol->getAsFunction();
+    }
+
     bool explicitTypesEnabled = extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
                                 extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int8) ||
                                 extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int16) ||
@@ -6088,7 +6231,10 @@ const TFunction* TParseContext::findFunction(const TSourceLoc& loc, const TFunct
                                 extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float32) ||
                                 extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float64);

-    if (isEsProfile() || version < 120)
+    if (isEsProfile())
+        function = (extensionTurnedOn(E_GL_EXT_shader_implicit_conversions) && version >= 310) ?
+            findFunction120(loc, call, builtIn) : findFunctionExact(loc, call, builtIn);
+    else if (version < 120)
         function = findFunctionExact(loc, call, builtIn);
     else if (version < 400)
         function = extensionTurnedOn(E_GL_ARB_gpu_shader_fp64) ? findFunction400(loc, call, builtIn) : findFunction120(loc, call, builtIn);
@@ -6422,6 +6568,12 @@ TIntermNode* TParseContext::declareVariable(const TSourceLoc& loc, TString& iden
     type.copyArrayInnerSizes(publicType.arraySizes);
     arrayOfArrayVersionCheck(loc, type.getArraySizes());

+    if (initializer) {
+        if (type.getBasicType() == EbtRayQuery) {
+            error(loc, "ray queries can only be initialized by using the rayQueryInitializeEXT intrinsic:", "=", identifier.c_str());
+        }
+    }
+
     if (type.isCoopMat()) {
         intermediate.setUseVulkanMemoryModel();
         intermediate.setUseStorageBuffer();
@@ -6461,7 +6613,7 @@ TIntermNode* TParseContext::declareVariable(const TSourceLoc& loc, TString& iden
     transparentOpaqueCheck(loc, type, identifier);
#ifndef GLSLANG_WEB
     atomicUintCheck(loc, type, identifier);
-    accStructNVCheck(loc, type, identifier);
+    accStructCheck(loc, type, identifier);
     checkAndResizeMeshViewDim(loc, type, /*isBlockMember*/ false);
#endif
     if (type.getQualifier().storage == EvqConst && type.containsReference()) {
@@ -7177,6 +7329,8 @@ TIntermTyped* TParseContext::constructBuiltIn(const TType& type, TOperator op, T
         if (!node->getType().isCoopMat()) {
             if (type.getBasicType() != node->getType().getBasicType()) {
                 node = intermediate.addConversion(type.getBasicType(), node);
+                if (node == nullptr)
+                    return nullptr;
             }
             node = intermediate.setAggregateOperator(node, EOpConstructCooperativeMatrix, type, node->getLoc());
         } else {
@@ -7355,8 +7509,8 @@ void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, con
         arraySizesCheck(memberLoc, currentBlockQualifier, memberType.getArraySizes(), nullptr, member == typeList.size() - 1);
         if (memberQualifier.hasOffset()) {
             if (spvVersion.spv == 0) {
-                requireProfile(memberLoc, ~EEsProfile, "offset on block member");
-                profileRequires(memberLoc, ~EEsProfile, 440, E_GL_ARB_enhanced_layouts, "offset on block member");
+                profileRequires(memberLoc, ~EEsProfile, 440, E_GL_ARB_enhanced_layouts, "\"offset\" on block member");
+                profileRequires(memberLoc, EEsProfile, 300, E_GL_ARB_enhanced_layouts, "\"offset\" on block member");
             }
         }

@@ -7395,7 +7549,7 @@ void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, con
     // Special case for "push_constant uniform", which has a default of std430,
     // contrary to normal uniform defaults, and can't have a default tracked for it.
     if ((currentBlockQualifier.isPushConstant() && !currentBlockQualifier.hasPacking()) ||
-        (currentBlockQualifier.isShaderRecordNV() && !currentBlockQualifier.hasPacking()))
+        (currentBlockQualifier.isShaderRecord() && !currentBlockQualifier.hasPacking()))
         currentBlockQualifier.layoutPacking = ElpStd430;

     // Special case for "taskNV in/out", which has a default of std430,
@@ -7495,6 +7649,8 @@ void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, con
     fixBlockLocations(loc, currentBlockQualifier, typeList, memberWithLocation, memberWithoutLocation);
     fixXfbOffsets(currentBlockQualifier, typeList);
     fixBlockUniformOffsets(currentBlockQualifier, typeList);
+    fixBlockUniformLayoutMatrix(currentBlockQualifier, &typeList, nullptr);
+    fixBlockUniformLayoutPacking(currentBlockQualifier, &typeList, nullptr);
     for (unsigned int member = 0; member < typeList.size(); ++member)
         layoutTypeCheck(typeList[member].loc, *typeList[member].type);

@@ -7612,6 +7768,7 @@ void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, con
 // with a particular stage.
 void TParseContext::blockStageIoCheck(const TSourceLoc& loc, const TQualifier& qualifier)
 {
+    const char *extsrt[2] = { E_GL_NV_ray_tracing, E_GL_EXT_ray_tracing };
     switch (qualifier.storage) {
     case EvqUniform:
         profileRequires(loc, EEsProfile, 300, nullptr, "uniform block");
@@ -7621,7 +7778,7 @@ void TParseContext::blockStageIoCheck(const TSourceLoc& loc, const TQualifier& q
         break;
     case EvqBuffer:
         requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "buffer block");
-        profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, "buffer block");
+        profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_shader_storage_buffer_object, "buffer block");
         profileRequires(loc, EEsProfile, 310, nullptr, "buffer block");
         break;
     case EvqVaryingIn:
@@ -7650,28 +7807,28 @@ void TParseContext::blockStageIoCheck(const TSourceLoc& loc, const TQualifier& q
         }
         break;
#ifndef GLSLANG_WEB
-    case EvqPayloadNV:
-        profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV block");
-        requireStage(loc, (EShLanguageMask)(EShLangRayGenNVMask | EShLangAnyHitNVMask | EShLangClosestHitNVMask | EShLangMissNVMask),
+    case EvqPayload:
+        profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "rayPayloadNV block");
+        requireStage(loc, (EShLanguageMask)(EShLangRayGenMask | EShLangAnyHitMask | EShLangClosestHitMask | EShLangMissMask),
             "rayPayloadNV block");
         break;
-    case EvqPayloadInNV:
-        profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV block");
-        requireStage(loc, (EShLanguageMask)(EShLangAnyHitNVMask | EShLangClosestHitNVMask | EShLangMissNVMask),
+    case EvqPayloadIn:
+        profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "rayPayloadInNV block");
+        requireStage(loc, (EShLanguageMask)(EShLangAnyHitMask | EShLangClosestHitMask | EShLangMissMask),
             "rayPayloadInNV block");
         break;
-    case EvqHitAttrNV:
-        profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV block");
-        requireStage(loc, (EShLanguageMask)(EShLangIntersectNVMask | EShLangAnyHitNVMask | EShLangClosestHitNVMask), "hitAttributeNV block");
+    case EvqHitAttr:
+        profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "hitAttributeNV block");
+        requireStage(loc, (EShLanguageMask)(EShLangIntersectMask | EShLangAnyHitMask | EShLangClosestHitMask), "hitAttributeNV block");
         break;
-    case EvqCallableDataNV:
-        profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "callableDataNV block");
-        requireStage(loc, (EShLanguageMask)(EShLangRayGenNVMask | EShLangClosestHitNVMask | EShLangMissNVMask | EShLangCallableNVMask),
+    case EvqCallableData:
+        profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "callableDataNV block");
+        requireStage(loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask),
             "callableDataNV block");
         break;
-    case EvqCallableDataInNV:
-        profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV block");
-        requireStage(loc, (EShLanguageMask)(EShLangCallableNVMask), "callableDataInNV block");
+    case EvqCallableDataIn:
+        profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "callableDataInNV block");
+        requireStage(loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInNV block");
         break;
#endif
     default:
@@ -7710,8 +7867,8 @@ void TParseContext::blockQualifierCheck(const TSourceLoc& loc, const TQualifier&
         error(loc, "cannot use invariant qualifier on an interface block", "invariant", "");
     if (qualifier.isPushConstant())
         intermediate.addPushConstantCount();
-    if (qualifier.isShaderRecordNV())
-        intermediate.addShaderRecordNVCount();
+    if (qualifier.isShaderRecord())
+        intermediate.addShaderRecordCount();
     if (qualifier.isTaskMemory())
         intermediate.addTaskNVCount();
 }
@@ -7864,6 +8021,101 @@ void TParseContext::fixBlockUniformOffsets(TQualifier& qualifier, TTypeList& typ
     }
 }

+//
+// Spread LayoutMatrix to uniform block members; if a uniform block member is a struct,
+// spread LayoutMatrix to that struct's members as well, applying this rule recursively.
+//
+void TParseContext::fixBlockUniformLayoutMatrix(TQualifier& qualifier, TTypeList* originTypeList,
+                                                TTypeList* tmpTypeList)
+{
+    assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size());
+    for (unsigned int member = 0; member < originTypeList->size(); ++member) {
+        if (qualifier.layoutPacking != ElpNone) {
+            if (tmpTypeList == nullptr) {
+                if (((*originTypeList)[member].type->isMatrix() ||
+                     (*originTypeList)[member].type->getBasicType() == EbtStruct) &&
+                    (*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
+                    (*originTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix;
+                }
+            } else {
+                if (((*tmpTypeList)[member].type->isMatrix() ||
+                     (*tmpTypeList)[member].type->getBasicType() == EbtStruct) &&
+                    (*tmpTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
+                    (*tmpTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix;
+                }
+            }
+        }
+
+        if ((*originTypeList)[member].type->getBasicType() == EbtStruct) {
+            TQualifier* memberQualifier = nullptr;
+            // a block member may declare its own matrix layout; if it does, use the member's layout instead
+            if ((*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
+                memberQualifier = &qualifier;
+            } else {
+                memberQualifier = &((*originTypeList)[member].type->getQualifier());
+            }
+
+            const TType* tmpType = tmpTypeList == nullptr ?
+                (*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type;
+
+            fixBlockUniformLayoutMatrix(*memberQualifier, (*originTypeList)[member].type->getWritableStruct(),
+                                        tmpType->getWritableStruct());
+
+            const TTypeList* structure = recordStructCopy(matrixFixRecord, (*originTypeList)[member].type, tmpType);
+
+            if (tmpTypeList == nullptr) {
+                (*originTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
+            }
+            if (tmpTypeList != nullptr) {
+                (*tmpTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
+            }
+        }
+    }
+}
+
+//
+// Spread LayoutPacking to block members; if a block member is a struct, spread LayoutPacking
+// to that struct's members as well, applying this rule recursively.
+//
+void TParseContext::fixBlockUniformLayoutPacking(TQualifier& qualifier, TTypeList* originTypeList,
+                                                 TTypeList* tmpTypeList)
+{
+    assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size());
+    for (unsigned int member = 0; member < originTypeList->size(); ++member) {
+        if (qualifier.layoutPacking != ElpNone) {
+            if (tmpTypeList == nullptr) {
+                if ((*originTypeList)[member].type->getQualifier().layoutPacking == ElpNone) {
+                    (*originTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking;
+                }
+            } else {
+                if ((*tmpTypeList)[member].type->getQualifier().layoutPacking == ElpNone) {
+                    (*tmpTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking;
+                }
+            }
+        }
+
+        if ((*originTypeList)[member].type->getBasicType() == EbtStruct) {
+            // Deep copy the type in the pool; a struct used in different blocks
+            // may carry different layout qualifiers, so allocate a new object
+            // to distinguish between them.
+            const TType* tmpType = tmpTypeList == nullptr ?
+                (*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type;
+
+            fixBlockUniformLayoutPacking(qualifier, (*originTypeList)[member].type->getWritableStruct(),
+                                         tmpType->getWritableStruct());
+
+            const TTypeList* structure = recordStructCopy(packingFixRecord, (*originTypeList)[member].type, tmpType);
+
+            if (tmpTypeList == nullptr) {
+                (*originTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
+            }
+            if (tmpTypeList != nullptr) {
+                (*tmpTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
+            }
+        }
+    }
+}
+
 // For an identifier that is already declared, add more qualification to it.
 void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, const TString& identifier)
 {
@@ -8172,6 +8424,16 @@ void TParseContext::updateStandaloneQualifierDefaults(const TSourceLoc& loc, con
     {
         checkIoArraysConsistency(loc);
     }
+
+    if (publicType.shaderQualifiers.layoutPrimitiveCulling) {
+        if (publicType.qualifier.storage != EvqTemporary)
+            error(loc, "layout qualifier can not have storage qualifiers", "primitive_culling", "", "");
+        else {
+            intermediate.setLayoutPrimitiveCulling();
+        }
+        // Exit early as further checks are not valid
+        return;
+    }
#endif
     const TQualifier& qualifier = publicType.qualifier;

@@ -8233,7 +8495,7 @@ void TParseContext::updateStandaloneQualifierDefaults(const TSourceLoc& loc, con
         error(loc, "cannot declare a default, can only be used on a block", "buffer_reference", "");
     if (qualifier.hasSpecConstantId())
         error(loc, "cannot declare a default, can only be used on a scalar", "constant_id", "");
-    if (qualifier.isShaderRecordNV())
+    if (qualifier.isShaderRecord())
         error(loc, "cannot declare a default, can only be used on a block", "shaderRecordNV", "");
 }

@@ -8322,5 +8584,43 @@ TIntermNode* TParseContext::addSwitch(const TSourceLoc& loc, TIntermTyped* expre
     return switchNode;
 }

+//
+// When a struct is used in a block and carries its own layout packing or layout matrix,
+// record the original structure in the map, and record the structure copy in the copy table.
+//
+const TTypeList* TParseContext::recordStructCopy(TStructRecord& record, const TType* originType, const TType* tmpType)
+{
+    size_t memberCount = tmpType->getStruct()->size();
+    size_t originHash = 0, tmpHash = 0;
+    std::hash<size_t> hasher;
+    for (size_t i = 0; i < memberCount; i++) {
+        size_t originMemberHash = hasher(originType->getStruct()->at(i).type->getQualifier().layoutPacking +
+                                         originType->getStruct()->at(i).type->getQualifier().layoutMatrix);
+        size_t tmpMemberHash = hasher(tmpType->getStruct()->at(i).type->getQualifier().layoutPacking +
+                                      tmpType->getStruct()->at(i).type->getQualifier().layoutMatrix);
+        originHash = hasher((originHash ^ originMemberHash) << 1);
+        tmpHash = hasher((tmpHash ^ tmpMemberHash) << 1);
+    }
+    const TTypeList* originStruct = originType->getStruct();
+    const TTypeList* tmpStruct = tmpType->getStruct();
+    if (originHash != tmpHash) {
+        auto fixRecords = record.find(originStruct);
+        if (fixRecords != record.end()) {
+            auto fixRecord = fixRecords->second.find(tmpHash);
+            if (fixRecord != fixRecords->second.end()) {
+                return fixRecord->second;
+            } else {
+                record[originStruct][tmpHash] = tmpStruct;
+                return tmpStruct;
+            }
+        } else {
+            record[originStruct] = std::map<size_t, const TTypeList*>();
+            record[originStruct][tmpHash] = tmpStruct;
+            return tmpStruct;
+        }
+    }
+    return originStruct;
+}
+
 } // end namespace glslang
