@@ -93,72 +93,128 @@ public:
            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
            // the dereference chain expected by blowUpActiveAggregate.
            TList<TIntermBinary*> derefs;
-           blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0);
+           blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0, 0,
+                                 base.getQualifier().storage, true);
        }
    }

-   void addAttribute(const TIntermSymbol& base)
+   void addPipeIOVariable(const TIntermSymbol& base)
    {
        if (processedDerefs.find(&base) == processedDerefs.end()) {
            processedDerefs.insert(&base);

            const TString &name = base.getName();
            const TType &type = base.getType();
+           const bool input = base.getQualifier().isPipeInput();

-           TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
-           if (it == reflection.nameToIndex.end()) {
-               reflection.nameToIndex[name] = (int)reflection.indexToAttribute.size();
-               reflection.indexToAttribute.push_back(TObjectReflection(name, type, 0, mapToGlType(type), 0, 0));
+           TReflection::TMapIndexToReflection &ioItems =
+               input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
+
+           if (reflection.options & EShReflectionUnwrapIOBlocks) {
+               bool anonymous = IsAnonymous(name);
+
+               TString baseName;
+               if (type.getBasicType() == EbtBlock) {
+                   baseName = anonymous ? TString() : type.getTypeName();
+               } else {
+                   baseName = anonymous ? TString() : name;
+               }
+
+               // by convention, if this is an arrayed block we ignore the array in the reflection
+               if (type.isArray() && type.getBasicType() == EbtBlock) {
+                   blowUpIOAggregate(input, baseName, TType(type, 0));
+               } else {
+                   blowUpIOAggregate(input, baseName, type);
+               }
+           } else {
+               TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+               if (it == reflection.nameToIndex.end()) {
+                   reflection.nameToIndex[name.c_str()] = (int)ioItems.size();
+                   ioItems.push_back(
+                       TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
+
+                   EShLanguageMask& stages = ioItems.back().stages;
+                   stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+               } else {
+                   EShLanguageMask& stages = ioItems[it->second].stages;
+                   stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+               }
            }
        }
    }

-   // Lookup or calculate the offset of a block member, using the recursively
+   // Look up or calculate the offsets of all block members at once, using the recursively
    // defined block offset rules.
-   int getOffset(const TType& type, int index)
+   void getOffsets(const TType& type, TVector<int>& offsets)
    {
        const TTypeList& memberList = *type.getStruct();

-       // Don't calculate offset if one is present, it could be user supplied
-       // and different than what would be calculated. That is, this is faster,
-       // but not just an optimization.
-       if (memberList[index].type->getQualifier().hasOffset())
-           return memberList[index].type->getQualifier().layoutOffset;
-
-       int memberSize;
-       int dummyStride;
+       int memberSize = 0;
        int offset = 0;
-       for (int m = 0; m <= index; ++m) {
-           // modify just the children's view of matrix layout, if there is one for this member
-           TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
-           int memberAlignment = intermediate.getMemberAlignment(*memberList[m].type, memberSize, dummyStride,
-                                                                 type.getQualifier().layoutPacking,
-                                                                 subMatrixLayout != ElmNone
-                                                                     ? subMatrixLayout == ElmRowMajor
-                                                                     : type.getQualifier().layoutMatrix == ElmRowMajor);
-           RoundToPow2(offset, memberAlignment);
-           if (m < index)
-               offset += memberSize;
+       for (size_t m = 0; m < offsets.size(); ++m) {
+           // if the user supplied an offset, snap to it now
+           if (memberList[m].type->getQualifier().hasOffset())
+               offset = memberList[m].type->getQualifier().layoutOffset;
+
+           // calculate the offset of the next member and align the current offset to this member
+           intermediate.updateOffset(type, *memberList[m].type, offset, memberSize);
+
+           // save the offset of this member
+           offsets[m] = offset;
+
+           // update for the next member
+           offset += memberSize;
        }
+   }
+
+   // Calculate the stride of an array type.
+   int getArrayStride(const TType& baseType, const TType& type)
+   {
+       int dummySize;
+       int stride;
+
+       // consider blocks to have 0 stride, so that all offsets are relative to the start of their block
+       if (type.getBasicType() == EbtBlock)
+           return 0;
+
+       TLayoutMatrix subMatrixLayout = type.getQualifier().layoutMatrix;
+       intermediate.getMemberAlignment(type, dummySize, stride,
+                                       baseType.getQualifier().layoutPacking,
+                                       subMatrixLayout != ElmNone
+                                           ? subMatrixLayout == ElmRowMajor
+                                           : baseType.getQualifier().layoutMatrix == ElmRowMajor);

-       return offset;
+       return stride;
    }

-   // Calculate the block data size.
-   // Block arrayness is not taken into account, each element is backed by a separate buffer.
-   int getBlockSize(const TType& blockType)
+   // Count the total number of leaf members by recursively expanding a block type.
+   int countAggregateMembers(const TType& parentType)
    {
-       const TTypeList& memberList = *blockType.getStruct();
-       int lastIndex = (int)memberList.size() - 1;
-       int lastOffset = getOffset(blockType, lastIndex);
+       if (! parentType.isStruct())
+           return 1;

-       int lastMemberSize;
-       int dummyStride;
-       intermediate.getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
-                                       blockType.getQualifier().layoutPacking,
-                                       blockType.getQualifier().layoutMatrix == ElmRowMajor);
+       const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);

-       return lastOffset + lastMemberSize;
+       bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer);
+
+       const TTypeList &memberList = *parentType.getStruct();
+
+       int ret = 0;
+
+       for (size_t i = 0; i < memberList.size(); i++)
+       {
+           const TType &memberType = *memberList[i].type;
+           int numMembers = countAggregateMembers(memberType);
+           // for sized arrays of structs, apply logic to expand out the same as we would below in
+           // blowUpActiveAggregate
+           if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) {
+               if (! strictArraySuffix || ! blockParent)
+                   numMembers *= memberType.getArraySizes()->getCumulativeSize();
+           }
+           ret += numMembers;
+       }
+
+       return ret;
    }

    // Traverse the provided deref chain, including the base, and
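The new getOffsets() above follows a simple contract: the caller sizes the output vector to the member count, a user-supplied layout offset snaps the running cursor, and each member then advances the cursor by its own size. A minimal standalone sketch of that contract (Member, roundUp, and getOffsetsSketch are illustrative stand-ins, not glslang API):

    #include <vector>

    // Illustrative stand-in, not glslang API: explicitOffset < 0 means "no layout(offset=...)".
    struct Member { int explicitOffset; int size; int alignment; };

    static int roundUp(int value, int alignment) { return (value + alignment - 1) / alignment * alignment; }

    void getOffsetsSketch(const std::vector<Member>& members, std::vector<int>& offsets)
    {
        int offset = 0;
        for (size_t m = 0; m < offsets.size(); ++m) {
            if (members[m].explicitOffset >= 0)
                offset = members[m].explicitOffset;         // snap to the user-supplied offset
            offset = roundUp(offset, members[m].alignment); // align, as updateOffset() does internally
            offsets[m] = offset;                            // record this member's offset
            offset += members[m].size;                      // advance past the member
        }
    }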
@@ -169,8 +225,19 @@ public:
    // arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
    // A value of 0 for arraySize will mean to use the full array's size.
    void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
-                              TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize)
+                              TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
+                              int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
    {
+       // When strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
+       // Broadly:
+       // * arrays-of-structs always have a [x] suffix.
+       // * for array-of-struct variables in the root of a buffer block, only ever return [0].
+       // * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array.
+       const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+
+       // Is this variable inside a buffer block? The flag is set back to false once we iterate inside the first array element.
+       bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer);
+
        // process the part of the dereference chain that was explicit in the shader
        TString name = baseName;
        const TType* terminalType = &baseType;
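The strict-suffix rules above boil down to a three-way choice at each array dereference. A minimal sketch of that decision as it appears in the EOpIndexDirect case below (simplified stand-in parameters, not glslang's types):

    #include <string>

    // Illustrative only; mirrors the suffix branch in blowUpActiveAggregate's EOpIndexDirect case.
    std::string arraySuffix(bool strictArraySuffix, bool blockParent, bool baseIsBlock, int index)
    {
        if (strictArraySuffix && blockParent)
            return "[0]";                             // root of a buffer block: always [0]
        if (strictArraySuffix || !baseIsBlock)
            return "[" + std::to_string(index) + "]"; // otherwise expand the concrete index
        return "";                                    // legacy behavior: no suffix directly under a block
    }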
@@ -179,29 +246,54 @@ public:
            terminalType = &visitNode->getType();
            int index;
            switch (visitNode->getOp()) {
-           case EOpIndexIndirect:
+           case EOpIndexIndirect: {
+               int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
+
+               if (topLevelArrayStride == 0)
+                   topLevelArrayStride = stride;
+
                // Visit all the indices of this array, and for each one add on the remaining dereferencing
                for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
                    TString newBaseName = name;
-                   if (baseType.getBasicType() != EbtBlock)
+                   if (strictArraySuffix && blockParent)
+                       newBaseName.append(TString("[0]"));
+                   else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
                        newBaseName.append(TString("[") + String(i) + "]");
                    TList<TIntermBinary*>::const_iterator nextDeref = deref;
                    ++nextDeref;
-                   TType derefType(*terminalType, 0);
-                   blowUpActiveAggregate(derefType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize);
+                   blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
+                                         topLevelArrayStride, baseStorage, active);
+
+                   if (offset >= 0)
+                       offset += stride;
                }

                // it was all completed in the recursive calls above
                return;
-           case EOpIndexDirect:
+           }
+           case EOpIndexDirect: {
+               int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
+
                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
-               if (baseType.getBasicType() != EbtBlock)
+               if (strictArraySuffix && blockParent) {
+                   name.append(TString("[0]"));
+               } else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
                    name.append(TString("[") + String(index) + "]");
+
+                   if (offset >= 0)
+                       offset += stride * index;
+               }
+
+               if (topLevelArrayStride == 0)
+                   topLevelArrayStride = stride;
+
+               blockParent = false;
                break;
+           }
            case EOpIndexDirectStruct:
                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
                if (offset >= 0)
-                   offset += getOffset(visitNode->getLeft()->getType(), index);
+                   offset += intermediate.getOffset(visitNode->getLeft()->getType(), index);
                if (name.size() > 0)
                    name.append(".");
                name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
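Once the array stride is known, advancing the byte offset for a constant index is plain arithmetic, as in the `offset += stride * index` line above. A self-contained sketch (the numbers are illustrative; std140 is assumed only for the example values):

    #include <cassert>

    // Illustrative only: an element's byte offset is the enclosing offset plus stride times index.
    int elementOffset(int baseOffset, int arrayStride, int index)
    {
        return baseOffset + arrayStride * index;
    }

    int main()
    {
        // e.g. with a 16-byte stride (a std140 vec4 array), element 3 starts 48 bytes in
        assert(elementOffset(0, 16, 3) == 48);
        return 0;
    }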
@@ -213,24 +305,65 @@ public:

        // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
        if (! isReflectionGranularity(*terminalType)) {
+           // the base offset of this node, which children are relative to
+           int baseOffset = offset;
+
            if (terminalType->isArray()) {
                // Visit all the indices of this array, and for each one,
                // fully explode the remaining aggregate to dereference
-               for (int i = 0; i < std::max(terminalType->getOuterArraySize(), 1); ++i) {
+
+               int stride = 0;
+               if (offset >= 0)
+                   stride = getArrayStride(baseType, *terminalType);
+
+               if (topLevelArrayStride == 0)
+                   topLevelArrayStride = stride;
+
+               int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);
+
+               // for top-level arrays in blocks, only expand [0] to avoid an explosion of items
+               if (strictArraySuffix && blockParent)
+                   arrayIterateSize = 1;
+
+               for (int i = 0; i < arrayIterateSize; ++i) {
                    TString newBaseName = name;
                    newBaseName.append(TString("[") + String(i) + "]");
                    TType derefType(*terminalType, 0);
-                   blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
+                   if (offset >= 0)
+                       offset = baseOffset + stride * i;
+
+                   blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
+                                         topLevelArrayStride, baseStorage, active);
                }
            } else {
                // Visit all members of this aggregate, and for each one,
                // fully explode the remaining aggregate to dereference
                const TTypeList& typeList = *terminalType->getStruct();
+
+               TVector<int> memberOffsets;
+
+               if (baseOffset >= 0) {
+                   memberOffsets.resize(typeList.size());
+                   getOffsets(*terminalType, memberOffsets);
+               }
+
                for (int i = 0; i < (int)typeList.size(); ++i) {
                    TString newBaseName = name;
-                   newBaseName.append(TString(".") + typeList[i].type->getFieldName());
+                   if (newBaseName.size() > 0)
+                       newBaseName.append(".");
+                   newBaseName.append(typeList[i].type->getFieldName());
                    TType derefType(*terminalType, i);
-                   blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
+                   if (offset >= 0)
+                       offset = baseOffset + memberOffsets[i];
+
+                   int arrayStride = topLevelArrayStride;
+                   if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer &&
+                       derefType.isArray()) {
+                       arrayStride = getArrayStride(baseType, derefType);
+                   }
+
+                   blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
+                                         arrayStride, baseStorage, active);
                }
            }

@@ -238,6 +371,10 @@ public:
            return;
        }

+       if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) {
+           name.append(TString("[0]"));
+       }
+
        // Finally, add a full string to the reflection database, and update the array size if necessary.
        // If the dereferenced entity to record is an array, compute the size and update the maximum size.

@@ -245,15 +382,100 @@ public:
        if (arraySize == 0)
            arraySize = mapToGlArraySize(*terminalType);

-       TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
+       TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage);
+
+       TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+       if (it == reflection.nameToIndex.end()) {
+           int uniformIndex = (int)variables.size();
+           reflection.nameToIndex[name.c_str()] = uniformIndex;
+           variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType),
+                                                 arraySize, blockIndex));
+           if (terminalType->isArray()) {
+               variables.back().arrayStride = getArrayStride(baseType, *terminalType);
+               if (topLevelArrayStride == 0)
+                   topLevelArrayStride = variables.back().arrayStride;
+           }
+
+           if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->getBasicType() == EbtAtomicUint)
+               reflection.atomicCounterUniformIndices.push_back(uniformIndex);
+
+           variables.back().topLevelArrayStride = topLevelArrayStride;
+
+           if ((reflection.options & EShReflectionAllBlockVariables) && active) {
+               EShLanguageMask& stages = variables.back().stages;
+               stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+           }
+       } else {
+           if (arraySize > 1) {
+               int& reflectedArraySize = variables[it->second].size;
+               reflectedArraySize = std::max(arraySize, reflectedArraySize);
+           }
+
+           if ((reflection.options & EShReflectionAllBlockVariables) && active) {
+               EShLanguageMask& stages = variables[it->second].stages;
+               stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+           }
+       }
+   }
+
+   // Similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow.
+   void blowUpIOAggregate(bool input, const TString &baseName, const TType &type)
+   {
+       TString name = baseName;
+
+       // if the type is still too coarse a granularity, this is still an aggregate to expand, expand it...
+       if (! isReflectionGranularity(type)) {
+           if (type.isArray()) {
+               // Visit all the indices of this array, and for each one,
+               // fully explode the remaining aggregate to dereference
+               for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) {
+                   TString newBaseName = name;
+                   newBaseName.append(TString("[") + String(i) + "]");
+                   TType derefType(type, 0);
+
+                   blowUpIOAggregate(input, newBaseName, derefType);
+               }
+           } else {
+               // Visit all members of this aggregate, and for each one,
+               // fully explode the remaining aggregate to dereference
+               const TTypeList& typeList = *type.getStruct();
+
+               for (int i = 0; i < (int)typeList.size(); ++i) {
+                   TString newBaseName = name;
+                   if (newBaseName.size() > 0)
+                       newBaseName.append(".");
+                   newBaseName.append(typeList[i].type->getFieldName());
+                   TType derefType(type, i);
+
+                   blowUpIOAggregate(input, newBaseName, derefType);
+               }
+           }
+
+           // it was all completed in the recursive calls above
+           return;
+       }
+
+       if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) {
+           name.append(TString("[0]"));
+       }
+
+       TReflection::TMapIndexToReflection &ioItems =
+           input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
+
+       std::string namespacedName = input ? "in " : "out ";
+       namespacedName += name.c_str();
+
+       TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName);
        if (it == reflection.nameToIndex.end()) {
-           reflection.nameToIndex[name] = (int)reflection.indexToUniform.size();
-           reflection.indexToUniform.push_back(TObjectReflection(name, *terminalType, offset,
-                                                                 mapToGlType(*terminalType),
-                                                                 arraySize, blockIndex));
-       } else if (arraySize > 1) {
-           int& reflectedArraySize = reflection.indexToUniform[it->second].size;
-           reflectedArraySize = std::max(arraySize, reflectedArraySize);
+           reflection.nameToIndex[namespacedName] = (int)ioItems.size();
+           ioItems.push_back(
+               TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
+
+           EShLanguageMask& stages = ioItems.back().stages;
+           stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+       } else {
+           EShLanguageMask& stages = ioItems[it->second].stages;
+           stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
        }
    }
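Both blowUpActiveAggregate and blowUpIOAggregate end with the same add-or-update pattern against nameToIndex: the first sighting of a name appends a new entry, and later sightings only grow the recorded array size or OR in the stage bit. A minimal sketch of that pattern with standard containers (Item and record are illustrative, not the TReflection API):

    #include <algorithm>
    #include <map>
    #include <string>
    #include <vector>

    struct Item { std::string name; int size; };

    void record(std::map<std::string, int>& nameToIndex, std::vector<Item>& items,
                const std::string& name, int arraySize)
    {
        std::map<std::string, int>::const_iterator it = nameToIndex.find(name);
        if (it == nameToIndex.end()) {
            nameToIndex[name] = (int)items.size();  // first sighting: append a new entry
            Item item = { name, arraySize };
            items.push_back(item);
        } else if (arraySize > 1) {
            // later sightings only grow the recorded array size
            items[it->second].size = std::max(arraySize, items[it->second].size);
        }
    }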
@@ -303,6 +525,10 @@ public:
            anonymous = IsAnonymous(base->getName());

        const TString& blockName = base->getType().getTypeName();
+       TString baseName;
+
+       if (! anonymous)
+           baseName = blockName;

        if (base->getType().isArray()) {
            TType derefType(base->getType(), 0);
@@ -310,9 +536,57 @@ public:
            assert(! anonymous);
            for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
                blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
-                                         getBlockSize(base->getType()));
+                                         intermediate.getBlockSize(base->getType()));
+           baseName.append(TString("[0]"));
        } else
-           blockIndex = addBlockName(blockName, base->getType(), getBlockSize(base->getType()));
+           blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
+
+       if (reflection.options & EShReflectionAllBlockVariables) {
+           // Use a degenerate (empty) set of dereferences to immediately put us at the end of
+           // the dereference chain expected by blowUpActiveAggregate.
+           TList<TIntermBinary*> derefs;
+
+           // Because we don't have any derefs, the first thing blowUpActiveAggregate will do is iterate over each
+           // member in the struct definition. That loses any information about whether the parent was a buffer
+           // block. So if we're using strict array rules, which don't expand the first child of a buffer block,
+           // we instead iterate over the children here.
+           const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+           bool blockParent = (base->getType().getBasicType() == EbtBlock && base->getQualifier().storage == EvqBuffer);
+
+           if (strictArraySuffix && blockParent) {
+               const TTypeList& typeList = *base->getType().getStruct();
+
+               TVector<int> memberOffsets;
+
+               memberOffsets.resize(typeList.size());
+               getOffsets(base->getType(), memberOffsets);
+
+               for (int i = 0; i < (int)typeList.size(); ++i) {
+                   TType derefType(base->getType(), i);
+                   TString name = baseName;
+                   if (name.size() > 0)
+                       name.append(".");
+                   name.append(typeList[i].type->getFieldName());
+
+                   // if this member is an array, store the top-level array stride but start the explosion from
+                   // the inner struct type.
+                   if (derefType.isArray() && derefType.isStruct()) {
+                       name.append("[0]");
+                       blowUpActiveAggregate(TType(derefType, 0), name, derefs, derefs.end(), memberOffsets[i],
+                                             blockIndex, 0, getArrayStride(base->getType(), derefType),
+                                             base->getQualifier().storage, false);
+                   } else {
+                       blowUpActiveAggregate(derefType, name, derefs, derefs.end(), memberOffsets[i], blockIndex,
+                                             0, 0, base->getQualifier().storage, false);
+                   }
+               }
+           } else {
+               // Otherwise - if we're not using strict array suffix rules, or this isn't a block so we're
+               // expanding root arrays anyway - just start the iteration from the base block type.
+               blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, 0,
+                                     base->getQualifier().storage, false);
+           }
+       }
    }

    // Process the dereference chain, backward, accumulating the pieces for later forward traversal.
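The stage bookkeeping used in addBlockName below and throughout these changes is a plain bitmask keyed by stage index: each stage that references an object ORs `1 << stage` into the object's mask. A small sketch, with an illustrative stage enumeration standing in for the real EShLanguage values:

    #include <cstdio>

    // Illustrative subset following glslang's stage ordering (EShLangVertex == 0, EShLangFragment == 4).
    enum Stage { StageVertex = 0, StageFragment = 4 };
    typedef unsigned int StageMask;

    StageMask addStage(StageMask mask, Stage stage)
    {
        return mask | (1u << stage); // OR this stage's bit into the accumulated mask
    }

    int main()
    {
        StageMask mask = 0;
        mask = addStage(mask, StageVertex);
        mask = addStage(mask, StageFragment);
        printf("mask = 0x%x\n", mask); // prints 0x11
        return 0;
    }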
@@ -342,27 +616,39 @@ public:
            else
                baseName = base->getName();
        }
-       blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize);
+       blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, 0,
+                             base->getQualifier().storage, true);
    }

    int addBlockName(const TString& name, const TType& type, int size)
    {
+       TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
+
        int blockIndex;
-       TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
-       if (reflection.nameToIndex.find(name) == reflection.nameToIndex.end()) {
-           blockIndex = (int)reflection.indexToUniformBlock.size();
-           reflection.nameToIndex[name] = blockIndex;
-           reflection.indexToUniformBlock.push_back(TObjectReflection(name, type, -1, -1, size, -1));
-       } else
+       TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+       if (reflection.nameToIndex.find(name.c_str()) == reflection.nameToIndex.end()) {
+           blockIndex = (int)blocks.size();
+           reflection.nameToIndex[name.c_str()] = blockIndex;
+           blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, -1));
+
+           blocks.back().numMembers = countAggregateMembers(type);
+
+           EShLanguageMask& stages = blocks.back().stages;
+           stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+       } else {
            blockIndex = it->second;

+           EShLanguageMask& stages = blocks[blockIndex].stages;
+           stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+       }
+
        return blockIndex;
    }

    // Are we at a level in a dereference chain at which individual active uniform queries are made?
    bool isReflectionGranularity(const TType& type)
    {
-       return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct;
+       return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays();
    }

    // For a binary operation indexing into an aggregate, chase down the base of the aggregate.
@@ -746,8 +1032,47 @@ void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
    if (base->getQualifier().storage == EvqUniform)
        addUniform(*base);

-   if (intermediate.getStage() == EShLangVertex && base->getQualifier().isPipeInput())
-       addAttribute(*base);
+   if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
+       (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
+       addPipeIOVariable(*base);
+}
+
+//
+// Implement TObjectReflection methods.
+//
+
+TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType,
+                                     int pSize, int pIndex)
+    : name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1),
+      numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone())
+{
+}
+
+int TObjectReflection::getBinding() const
+{
+    if (type == nullptr || !type->getQualifier().hasBinding())
+        return -1;
+    return type->getQualifier().layoutBinding;
+}
+
+void TObjectReflection::dump() const
+{
+    printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size,
+           index, getBinding(), stages);
+
+    if (counterIndex != -1)
+        printf(", counter %d", counterIndex);
+
+    if (numMembers != -1)
+        printf(", numMembers %d", numMembers);
+
+    if (arrayStride != 0)
+        printf(", arrayStride %d", arrayStride);
+
+    if (topLevelArrayStride != 0)
+        printf(", topLevelArrayStride %d", topLevelArrayStride);
+
+    printf("\n");
}

//
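The visitSymbol change above generalizes vertex-attribute reflection into pipe I/O reflection: inputs are recorded only for the first stage of the linked program and outputs only for the last, so interfaces internal to the pipeline are skipped. A one-function sketch of that predicate (parameter names are illustrative; glslang tracks firstStage/lastStage on TReflection):

    // Illustrative stand-in for the condition in visitSymbol, not glslang API.
    bool reflectAsPipeIO(int stage, int firstStage, int lastStage, bool isInput, bool isOutput)
    {
        // inputs only matter on the program's first stage, outputs only on its last
        return (stage == firstStage && isInput) || (stage == lastStage && isOutput);
    }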
@@ -770,7 +1095,7 @@ void TReflection::buildCounterIndices(const TIntermediate& intermediate)
{
    // search for ones that have counters
    for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
-       const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name));
+       const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
        const int index = getIndex(counterName);

        if (index >= 0)
@@ -781,9 +1106,17 @@ void TReflection::buildCounterIndices(const TIntermediate& intermediate)
// build Shader Stages mask for all uniforms
void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
{
+   if (options & EShReflectionAllBlockVariables)
+       return;
+
    for (int i = 0; i < int(indexToUniform.size()); ++i) {
        indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
    }
+
+   for (int i = 0; i < int(indexToBufferVariable.size()); ++i) {
+       indexToBufferVariable[i].stages =
+           static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage());
+   }
}

// Merge live symbols from 'intermediate' into the existing reflection database.
@@ -828,9 +1161,24 @@ void TReflection::dump()
        indexToUniformBlock[i].dump();
    printf("\n");

-   printf("Vertex attribute reflection:\n");
-   for (size_t i = 0; i < indexToAttribute.size(); ++i)
-       indexToAttribute[i].dump();
+   printf("Buffer variable reflection:\n");
+   for (size_t i = 0; i < indexToBufferVariable.size(); ++i)
+       indexToBufferVariable[i].dump();
+   printf("\n");
+
+   printf("Buffer block reflection:\n");
+   for (size_t i = 0; i < indexToBufferBlock.size(); ++i)
+       indexToBufferBlock[i].dump();
+   printf("\n");
+
+   printf("Pipeline input reflection:\n");
+   for (size_t i = 0; i < indexToPipeInput.size(); ++i)
+       indexToPipeInput[i].dump();
+   printf("\n");
+
+   printf("Pipeline output reflection:\n");
+   for (size_t i = 0; i < indexToPipeOutput.size(); ++i)
+       indexToPipeOutput[i].dump();
    printf("\n");

    if (getLocalSize(0) > 1) {