
//
// Copyright (C) 2013-2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#if !defined(GLSLANG_WEB)

#include "../Include/Common.h"
#include "reflection.h"
#include "LiveTraverser.h"
#include "localintermediate.h"
#include "gl_types.h"

//
// Grow the reflection database through a friend traverser class of TReflection and a
// collection of functions to do a liveness traversal that notes which uniforms are used
// in semantically non-dead code.
//
// Can be used multiple times, once per stage, to grow a program reflection.
//
// High-level algorithm for one stage:
//
// 1. Put the entry point on the list of live functions.
//
// 2. Traverse any live function, while skipping if-tests with a compile-time constant
//    condition of false, and while adding any encountered function calls to the live
//    function list.
//
//    Repeat until the live function list is empty.
//
// 3. Add any encountered uniform variables and blocks to the reflection database.
//
// Can be attempted with a failed link, but will return false if recursion was detected, or
// there wasn't exactly one entry point.
//
namespace glslang {

//
// The traverser: mostly pass through, except
// - processing binary nodes to see if they are dereferences of aggregates to track
// - processing symbol nodes to see if they are non-aggregate objects to track
//
// This ignores semantically dead code by using TLiveTraverser.
//
// This is in the glslang namespace directly so it can be a friend of TReflection.
//
class TReflectionTraverser : public TIntermTraverser {
public:
    TReflectionTraverser(const TIntermediate& i, TReflection& r) :
        TIntermTraverser(), intermediate(i), reflection(r), updateStageMasks(true) { }

    virtual bool visitBinary(TVisit, TIntermBinary* node);
    virtual void visitSymbol(TIntermSymbol* base);

    // Add a simple reference to a uniform variable to the uniform database, no dereference involved.
    // However, no dereference doesn't mean simple... it could be a complex aggregate.
    void addUniform(const TIntermSymbol& base)
    {
        if (processedDerefs.find(&base) == processedDerefs.end()) {
            processedDerefs.insert(&base);

            int blockIndex = -1;
            int offset = -1;
            TList<TIntermBinary*> derefs;
            TString baseName = base.getName();

            if (base.getType().getBasicType() == EbtBlock) {
                offset = 0;
                bool anonymous = IsAnonymous(baseName);
                const TString& blockName = base.getType().getTypeName();

                if (!anonymous)
                    baseName = blockName;
                else
                    baseName = "";

                blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType()));
            }

            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
            // the dereference chain expected by blowUpActiveAggregate.
            blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, -1, 0,
                                  base.getQualifier().storage, updateStageMasks);
        }
    }

    void addPipeIOVariable(const TIntermSymbol& base)
    {
        if (processedDerefs.find(&base) == processedDerefs.end()) {
            processedDerefs.insert(&base);

            const TString &name = base.getName();
            const TType &type = base.getType();
            const bool input = base.getQualifier().isPipeInput();

            TReflection::TMapIndexToReflection &ioItems =
                input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;

            TReflection::TNameToIndex &ioMapper =
                input ? reflection.pipeInNameToIndex : reflection.pipeOutNameToIndex;

            if (reflection.options & EShReflectionUnwrapIOBlocks) {
                bool anonymous = IsAnonymous(name);

                TString baseName;
                if (type.getBasicType() == EbtBlock) {
                    baseName = anonymous ? TString() : type.getTypeName();
                } else {
                    baseName = anonymous ? TString() : name;
                }

                // by convention, if this is an arrayed block we ignore the array in the reflection
                if (type.isArray() && type.getBasicType() == EbtBlock) {
                    blowUpIOAggregate(input, baseName, TType(type, 0));
                } else {
                    blowUpIOAggregate(input, baseName, type);
                }
            } else {
                TReflection::TNameToIndex::const_iterator it = ioMapper.find(name.c_str());
                if (it == ioMapper.end()) {
                    // separate pipe I/O params from uniforms and blocks;
                    // 'in' applies only to the first stage's inputs and 'out' only to the last
                    // stage's outputs -- see the traversal in the call stack.
                    ioMapper[name.c_str()] = static_cast<int>(ioItems.size());
                    ioItems.push_back(
                        TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
                    EShLanguageMask& stages = ioItems.back().stages;
                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
                } else {
                    EShLanguageMask& stages = ioItems[it->second].stages;
                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
                }
            }
        }
    }

    // Lookup or calculate the offset of all block members at once, using the recursively
    // defined block offset rules.
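    // Note: the caller is expected to have sized 'offsets' to the member count already.
    // (Illustrative: under std140, the member pair 'float f; vec3 v;' lands at offsets
    // 0 and 16, since a vec3's base alignment is 16 bytes.)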
    void getOffsets(const TType& type, TVector<int>& offsets)
    {
        const TTypeList& memberList = *type.getStruct();
        int memberSize = 0;
        int offset = 0;

        for (size_t m = 0; m < offsets.size(); ++m) {
            // if the user supplied an offset, snap to it now
            if (memberList[m].type->getQualifier().hasOffset())
                offset = memberList[m].type->getQualifier().layoutOffset;

            // calculate the offset of the next member and align the current offset to this member
            intermediate.updateOffset(type, *memberList[m].type, offset, memberSize);

            // save the offset of this member
            offsets[m] = offset;

            // update for the next member
            offset += memberSize;
        }
    }

    // Calculate the stride of an array type
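    // (Illustrative: in a std140 block, 'float a[3]' has a 16-byte array stride because
    // std140 rounds element alignment up to that of a vec4; std430 would give 4.)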
    int getArrayStride(const TType& baseType, const TType& type)
    {
        int dummySize;
        int stride;

        // consider blocks to have 0 stride, so that all offsets are relative to the start of their block
        if (type.getBasicType() == EbtBlock)
            return 0;

        TLayoutMatrix subMatrixLayout = type.getQualifier().layoutMatrix;
        intermediate.getMemberAlignment(type, dummySize, stride,
                                        baseType.getQualifier().layoutPacking,
                                        subMatrixLayout != ElmNone
                                            ? subMatrixLayout == ElmRowMajor
                                            : baseType.getQualifier().layoutMatrix == ElmRowMajor);

        return stride;
    }

    // Count the total number of leaf members reachable by iterating through a block type
    int countAggregateMembers(const TType& parentType)
    {
        if (! parentType.isStruct())
            return 1;

        const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);

        bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer);

        const TTypeList &memberList = *parentType.getStruct();

        int ret = 0;

        for (size_t i = 0; i < memberList.size(); i++)
        {
            const TType &memberType = *memberList[i].type;
            int numMembers = countAggregateMembers(memberType);

            // for sized arrays of structs, apply logic to expand out the same as we would below in
            // blowUpActiveAggregate
            if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) {
                if (! strictArraySuffix || ! blockParent)
                    numMembers *= memberType.getArraySizes()->getCumulativeSize();
            }

            ret += numMembers;
        }

        return ret;
    }

    // Traverse the provided deref chain, including the base, and
    // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
    // - recursively expand any variable array index in the middle of that traversal
    // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
    //
    // arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
    // A value of 0 for arraySize will mean to use the full array's size.
    void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
                               TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
                               int topLevelArraySize, int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
    {
        // when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
        // Broadly:
        // * arrays-of-structs always have a [x] suffix.
        // * with array-of-struct variables in the root of a buffer block, only ever return [0].
        // * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array.
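        //
        // For example (illustrative): given 'buffer B { S s[4]; } b;' where S has a member
        // 'float f', strict mode reports the single variable "s[0].f" with topLevelArraySize 4,
        // while non-strict mode expands "s[0].f" through "s[3].f".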
        const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);

        // Is this variable inside a buffer block? This flag is cleared once we iterate inside the first array element.
        bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer);

        // process the part of the dereference chain that was explicit in the shader
        TString name = baseName;
        const TType* terminalType = &baseType;
        for (; deref != derefs.end(); ++deref) {
            TIntermBinary* visitNode = *deref;
            terminalType = &visitNode->getType();
            int index;
            switch (visitNode->getOp()) {
            case EOpIndexIndirect: {
                int stride = getArrayStride(baseType, visitNode->getLeft()->getType());

                if (topLevelArrayStride == 0)
                    topLevelArrayStride = stride;

                // Visit all the indices of this array, and for each one add on the remaining dereferencing
                for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
                    TString newBaseName = name;
                    if (terminalType->getBasicType() == EbtBlock) {}
                    else if (strictArraySuffix && blockParent)
                        newBaseName.append(TString("[0]"));
                    else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
                        newBaseName.append(TString("[") + String(i) + "]");
                    TList<TIntermBinary*>::const_iterator nextDeref = deref;
                    ++nextDeref;
                    blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
                                          topLevelArraySize, topLevelArrayStride, baseStorage, active);

                    if (offset >= 0)
                        offset += stride;
                }

                // it was all completed in the recursive calls above
                return;
            }
            case EOpIndexDirect: {
                int stride = getArrayStride(baseType, visitNode->getLeft()->getType());

                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
                if (terminalType->getBasicType() == EbtBlock) {}
                else if (strictArraySuffix && blockParent)
                    name.append(TString("[0]"));
                else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
                    name.append(TString("[") + String(index) + "]");

                    if (offset >= 0)
                        offset += stride * index;
                }

                if (topLevelArrayStride == 0)
                    topLevelArrayStride = stride;

                // expand top-level arrays in blocks with [0] suffix
                if (topLevelArrayStride != 0 && visitNode->getLeft()->getType().isArray()) {
                    blockParent = false;
                }
                break;
            }
            case EOpIndexDirectStruct:
                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
                if (offset >= 0)
                    offset += intermediate.getOffset(visitNode->getLeft()->getType(), index);
                if (name.size() > 0)
                    name.append(".");
                name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());

                // expand non-top-level arrays with [x] suffix
                if (visitNode->getLeft()->getType().getBasicType() != EbtBlock && terminalType->isArray())
                {
                    blockParent = false;
                }
                break;
            default:
                break;
            }
        }

        // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
        if (! isReflectionGranularity(*terminalType)) {
            // the base offset of this node, that children are relative to
            int baseOffset = offset;

            if (terminalType->isArray()) {
                // Visit all the indices of this array, and for each one,
                // fully explode the remaining aggregate to dereference
                int stride = 0;
                if (offset >= 0)
                    stride = getArrayStride(baseType, *terminalType);

                int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);

                // for top-level arrays in blocks, only expand [0] to avoid explosion of items
                if ((strictArraySuffix && blockParent) ||
                    ((topLevelArraySize == arrayIterateSize) && (topLevelArrayStride == 0))) {
                    arrayIterateSize = 1;
                }

                if (topLevelArrayStride == 0)
                    topLevelArrayStride = stride;

                for (int i = 0; i < arrayIterateSize; ++i) {
                    TString newBaseName = name;
                    if (terminalType->getBasicType() != EbtBlock)
                        newBaseName.append(TString("[") + String(i) + "]");
                    TType derefType(*terminalType, 0);
                    if (offset >= 0)
                        offset = baseOffset + stride * i;

                    blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
                                          topLevelArraySize, topLevelArrayStride, baseStorage, active);
                }
            } else {
                // Visit all members of this aggregate, and for each one,
                // fully explode the remaining aggregate to dereference
                const TTypeList& typeList = *terminalType->getStruct();

                TVector<int> memberOffsets;

                if (baseOffset >= 0) {
                    memberOffsets.resize(typeList.size());
                    getOffsets(*terminalType, memberOffsets);
                }

                for (int i = 0; i < (int)typeList.size(); ++i) {
                    TString newBaseName = name;
                    if (newBaseName.size() > 0)
                        newBaseName.append(".");
                    newBaseName.append(typeList[i].type->getFieldName());
                    TType derefType(*terminalType, i);
                    if (offset >= 0)
                        offset = baseOffset + memberOffsets[i];

                    int arrayStride = topLevelArrayStride;
                    if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer &&
                        derefType.isArray()) {
                        arrayStride = getArrayStride(baseType, derefType);
                    }

                    if (topLevelArraySize == -1 && arrayStride == 0 && blockParent)
                        topLevelArraySize = 1;

                    if (strictArraySuffix && blockParent) {
                        // if this member is an array, store the top-level array stride but start the explosion from
                        // the inner struct type.
                        if (derefType.isArray() && derefType.isStruct()) {
                            newBaseName.append("[0]");
                            auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
                            blowUpActiveAggregate(TType(derefType, 0), newBaseName, derefs, derefs.end(), memberOffsets[i],
                                                  blockIndex, 0, dimSize, arrayStride, terminalType->getQualifier().storage, false);
                        }
                        else if (derefType.isArray()) {
                            auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
                            blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
                                                  0, dimSize, 0, terminalType->getQualifier().storage, false);
                        }
                        else {
                            blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
                                                  0, 1, 0, terminalType->getQualifier().storage, false);
                        }
                    } else {
                        blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
                                              topLevelArraySize, arrayStride, baseStorage, active);
                    }
                }
            }

            // it was all completed in the recursive calls above
            return;
        }

        if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) {
            name.append(TString("[0]"));
        }

        // Finally, add a full string to the reflection database, and update the array size if necessary.
        // If the dereferenced entity to record is an array, compute the size and update the maximum size.
        // there might not be a final array dereference, it could have been copied as an array object
        if (arraySize == 0)
            arraySize = mapToGlArraySize(*terminalType);

        TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage);

        TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
        if (it == reflection.nameToIndex.end()) {
            int uniformIndex = (int)variables.size();
            reflection.nameToIndex[name.c_str()] = uniformIndex;
            variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType),
                                                  arraySize, blockIndex));

            if (terminalType->isArray()) {
                variables.back().arrayStride = getArrayStride(baseType, *terminalType);
                if (topLevelArrayStride == 0)
                    topLevelArrayStride = variables.back().arrayStride;
            }

            if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic())
                reflection.atomicCounterUniformIndices.push_back(uniformIndex);

            variables.back().topLevelArraySize = topLevelArraySize;
            variables.back().topLevelArrayStride = topLevelArrayStride;

            if ((reflection.options & EShReflectionAllBlockVariables) && active) {
                EShLanguageMask& stages = variables.back().stages;
                stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
            }
        } else {
            if (arraySize > 1) {
                int& reflectedArraySize = variables[it->second].size;
                reflectedArraySize = std::max(arraySize, reflectedArraySize);
            }

            if ((reflection.options & EShReflectionAllBlockVariables) && active) {
                EShLanguageMask& stages = variables[it->second].stages;
                stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
            }
        }
    }

    // similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow.
    void blowUpIOAggregate(bool input, const TString &baseName, const TType &type)
    {
        TString name = baseName;

        // if the type is still too coarse a granularity, this is still an aggregate to expand, expand it...
        if (! isReflectionGranularity(type)) {
            if (type.isArray()) {
                // Visit all the indices of this array, and for each one,
                // fully explode the remaining aggregate to dereference
                for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) {
                    TString newBaseName = name;
                    newBaseName.append(TString("[") + String(i) + "]");
                    TType derefType(type, 0);

                    blowUpIOAggregate(input, newBaseName, derefType);
                }
            } else {
                // Visit all members of this aggregate, and for each one,
                // fully explode the remaining aggregate to dereference
                const TTypeList& typeList = *type.getStruct();

                for (int i = 0; i < (int)typeList.size(); ++i) {
                    TString newBaseName = name;
                    if (newBaseName.size() > 0)
                        newBaseName.append(".");
                    newBaseName.append(typeList[i].type->getFieldName());
                    TType derefType(type, i);

                    blowUpIOAggregate(input, newBaseName, derefType);
                }
            }

            // it was all completed in the recursive calls above
            return;
        }

        if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) {
            name.append(TString("[0]"));
        }

        TReflection::TMapIndexToReflection &ioItems =
            input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
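
        // Prefix the lookup key with "in " or "out " so that an input and an output sharing
        // the same name get distinct entries in nameToIndex.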
        std::string namespacedName = input ? "in " : "out ";
        namespacedName += name.c_str();

        TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName);
        if (it == reflection.nameToIndex.end()) {
            reflection.nameToIndex[namespacedName] = (int)ioItems.size();
            ioItems.push_back(
                TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));

            EShLanguageMask& stages = ioItems.back().stages;
            stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
        } else {
            EShLanguageMask& stages = ioItems[it->second].stages;
            stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
        }
    }

    // Add a uniform dereference where blocks/struct/arrays are involved in the access.
    // Handles the situation where the left node is at the correct or too coarse a
    // granularity for reflection. (That is, further dereferences up the tree will be
    // skipped.) Earlier dereferences, down the tree, will be handled
    // at the same time, and logged to prevent reprocessing as the tree is traversed.
    //
    // Note: Other things like the following must be caught elsewhere:
    //  - a simple non-array, non-struct variable (no dereference even conceivable)
    //  - an aggregate consumed en masse, without a dereference
    //
    // So, this code is for cases like
    //  - a struct/block dereferencing a member (whether the member is array or not)
    //  - an array of struct
    //  - structs/arrays containing the above
    //
    void addDereferencedUniform(TIntermBinary* topNode)
    {
        // See if too fine-grained to process (wait to get further down the tree)
        const TType& leftType = topNode->getLeft()->getType();
        if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray())
            return;

        // We have an array or structure or block dereference, see if it's a uniform
        // based dereference (if not, skip it).
        TIntermSymbol* base = findBase(topNode);
        if (! base || ! base->getQualifier().isUniformOrBuffer())
            return;

        // See if we've already processed this (e.g., in the middle of something
        // we did earlier), and if so skip it
        if (processedDerefs.find(topNode) != processedDerefs.end())
            return;

        // Process this uniform dereference

        int offset = -1;
        int blockIndex = -1;
        bool anonymous = false;

        // See if we need to record the block itself
        bool block = base->getBasicType() == EbtBlock;
        if (block) {
            offset = 0;
            anonymous = IsAnonymous(base->getName());

            const TString& blockName = base->getType().getTypeName();
            TString baseName;

            if (! anonymous)
                baseName = blockName;

            blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));

            if (reflection.options & EShReflectionAllBlockVariables) {
                // Use a degenerate (empty) set of dereferences to immediately put us at the end of
                // the dereference chain expected by blowUpActiveAggregate.
                TList<TIntermBinary*> derefs;

                // Otherwise -- if we're not using strict array suffix rules, or this isn't a block
                // (so root arrays are expanded anyway) -- just start the iteration from the base block type.
                blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, -1, 0,
                                      base->getQualifier().storage, false);
            }
        }

        // Process the dereference chain, backward, accumulating the pieces for later forward traversal.
        // If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
        TList<TIntermBinary*> derefs;
        for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
            if (isReflectionGranularity(visitNode->getLeft()->getType()))
                continue;

            derefs.push_front(visitNode);
            processedDerefs.insert(visitNode);
        }
        processedDerefs.insert(base);

        // See if we have a specific array size to stick to while enumerating the explosion of the aggregate
        int arraySize = 0;
        if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
            if (topNode->getOp() == EOpIndexDirect)
                arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
        }

        // Put the dereference chain together, forward
        TString baseName;
        if (! anonymous) {
            if (block)
                baseName = base->getType().getTypeName();
            else
                baseName = base->getName();
        }
        blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, -1, 0,
                              base->getQualifier().storage, true);
    }

    int addBlockName(const TString& name, const TType& type, int size)
    {
        int blockIndex = 0;
        if (type.isArray()) {
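            // arrays of blocks reflect one entry per element -- name[0], name[1], ... --
            // and the index returned below is the first element's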
            TType derefType(type, 0);
            for (int e = 0; e < type.getOuterArraySize(); ++e) {
                int memberBlockIndex = addBlockName(name + "[" + String(e) + "]", derefType, size);
                if (e == 0)
                    blockIndex = memberBlockIndex;
            }
        } else {
            TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);

            TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
            if (it == reflection.nameToIndex.end()) {
                blockIndex = (int)blocks.size();
                reflection.nameToIndex[name.c_str()] = blockIndex;
                blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, blockIndex));

                blocks.back().numMembers = countAggregateMembers(type);

                if (updateStageMasks) {
                    EShLanguageMask& stages = blocks.back().stages;
                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
                }
            }
            else {
                blockIndex = it->second;

                if (updateStageMasks) {
                    EShLanguageMask& stages = blocks[blockIndex].stages;
                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
                }
            }
        }

        return blockIndex;
    }

    // Are we at a level in a dereference chain at which individual active uniform queries are made?
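    // (Illustrative: a float, vec4, mat3, sampler, or a single-dimension array of any of
    // these is at reflection granularity; a struct, a block, or an array-of-arrays is not.)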
    bool isReflectionGranularity(const TType& type)
    {
        return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays();
    }

    // For a binary operation indexing into an aggregate, chase down the base of the aggregate.
    // Return nullptr if the topology does not fit this situation.
    TIntermSymbol* findBase(const TIntermBinary* node)
    {
        TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
        if (base)
            return base;

        TIntermBinary* left = node->getLeft()->getAsBinaryNode();
        if (! left)
            return nullptr;

        return findBase(left);
    }

    //
    // Translate a glslang sampler type into the GL API #define number.
    //
    int mapSamplerToGlType(TSampler sampler)
    {
        if (! sampler.image) {
            // a sampler...
            switch (sampler.type) {
            case EbtFloat:
                switch ((int)sampler.dim) {
                case Esd1D:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D;
                    case true:  return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW;
                    }
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:
                        switch ((int)sampler.shadow) {
                        case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D;
                        case true:  return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW;
                        }
                    case true: return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_SAMPLER_3D;
                case EsdCube:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE;
                    case true:  return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW;
                    }
                case EsdRect:
                    return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_SAMPLER_BUFFER;
                }
            case EbtFloat16:
                switch ((int)sampler.dim) {
                case Esd1D:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_AMD : GL_FLOAT16_SAMPLER_1D_AMD;
                    case true:  return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_1D_SHADOW_AMD;
                    }
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:
                        switch ((int)sampler.shadow) {
                        case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_AMD;
                        case true:  return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_SHADOW_AMD;
                        }
                    case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD;
                    }
                case Esd3D:
                    return GL_FLOAT16_SAMPLER_3D_AMD;
                case EsdCube:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_SAMPLER_CUBE_AMD;
                    case true:  return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD;
                    }
                case EsdRect:
                    return sampler.shadow ? GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_RECT_AMD;
                case EsdBuffer:
                    return GL_FLOAT16_SAMPLER_BUFFER_AMD;
                }
            case EbtInt:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D;
                    case true:  return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
                                                       : GL_INT_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_INT_SAMPLER_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE;
                case EsdRect:
                    return GL_INT_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_INT_SAMPLER_BUFFER;
                }
            case EbtUint:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D;
                    case true:  return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
                                                       : GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_UNSIGNED_INT_SAMPLER_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE;
                case EsdRect:
                    return GL_UNSIGNED_INT_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_UNSIGNED_INT_SAMPLER_BUFFER;
                }
            default:
                return 0;
            }
        } else {
            // an image...
            switch (sampler.type) {
            case EbtFloat:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D;
                    case true:  return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE;
                case EsdRect:
                    return GL_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_IMAGE_BUFFER;
                }
            case EbtFloat16:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_FLOAT16_IMAGE_1D_ARRAY_AMD : GL_FLOAT16_IMAGE_1D_AMD;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_AMD;
                    case true:  return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD;
                    }
                case Esd3D:
                    return GL_FLOAT16_IMAGE_3D_AMD;
                case EsdCube:
                    return sampler.arrayed ? GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_IMAGE_CUBE_AMD;
                case EsdRect:
                    return GL_FLOAT16_IMAGE_2D_RECT_AMD;
                case EsdBuffer:
                    return GL_FLOAT16_IMAGE_BUFFER_AMD;
                }
            case EbtInt:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D;
                    case true:  return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_INT_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE;
                case EsdRect:
                    return GL_INT_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_INT_IMAGE_BUFFER;
                }
            case EbtUint:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D;
                    case true:  return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY
                                                       : GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_UNSIGNED_INT_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE;
                case EsdRect:
                    return GL_UNSIGNED_INT_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_UNSIGNED_INT_IMAGE_BUFFER;
                }
            default:
                return 0;
            }
        }
    }

    //
    // Translate a glslang type into the GL API #define number.
    // Ignores arrayness.
    //
    int mapToGlType(const TType& type)
    {
        switch (type.getBasicType()) {
        case EbtSampler:
            return mapSamplerToGlType(type.getSampler());
        case EbtStruct:
        case EbtBlock:
        case EbtVoid:
            return 0;
        default:
            break;
        }
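
        // The vector cases below lean on the GL #defines being contiguous per base type
        // (e.g., GL_FLOAT_VEC2, GL_FLOAT_VEC3, GL_FLOAT_VEC4), so the vector size maps to
        // a simple additive offset.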
        if (type.isVector()) {
            int offset = type.getVectorSize() - 2;
            switch (type.getBasicType()) {
            case EbtFloat:      return GL_FLOAT_VEC2                  + offset;
            case EbtDouble:     return GL_DOUBLE_VEC2                 + offset;
            case EbtFloat16:    return GL_FLOAT16_VEC2_NV             + offset;
            case EbtInt:        return GL_INT_VEC2                    + offset;
            case EbtUint:       return GL_UNSIGNED_INT_VEC2           + offset;
            case EbtInt64:      return GL_INT64_VEC2_ARB              + offset;
            case EbtUint64:     return GL_UNSIGNED_INT64_VEC2_ARB     + offset;
            case EbtBool:       return GL_BOOL_VEC2                   + offset;
            case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset;
            default:            return 0;
            }
        }
        if (type.isMatrix()) {
            switch (type.getBasicType()) {
            case EbtFloat:
                switch (type.getMatrixCols()) {
                case 2:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT_MAT2;
                    case 3:  return GL_FLOAT_MAT2x3;
                    case 4:  return GL_FLOAT_MAT2x4;
                    default: return 0;
                    }
                case 3:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT_MAT3x2;
                    case 3:  return GL_FLOAT_MAT3;
                    case 4:  return GL_FLOAT_MAT3x4;
                    default: return 0;
                    }
                case 4:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT_MAT4x2;
                    case 3:  return GL_FLOAT_MAT4x3;
                    case 4:  return GL_FLOAT_MAT4;
                    default: return 0;
                    }
                }
            case EbtDouble:
                switch (type.getMatrixCols()) {
                case 2:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_DOUBLE_MAT2;
                    case 3:  return GL_DOUBLE_MAT2x3;
                    case 4:  return GL_DOUBLE_MAT2x4;
                    default: return 0;
                    }
                case 3:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_DOUBLE_MAT3x2;
                    case 3:  return GL_DOUBLE_MAT3;
                    case 4:  return GL_DOUBLE_MAT3x4;
                    default: return 0;
                    }
                case 4:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_DOUBLE_MAT4x2;
                    case 3:  return GL_DOUBLE_MAT4x3;
                    case 4:  return GL_DOUBLE_MAT4;
                    default: return 0;
                    }
                }
            case EbtFloat16:
                switch (type.getMatrixCols()) {
                case 2:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT16_MAT2_AMD;
                    case 3:  return GL_FLOAT16_MAT2x3_AMD;
                    case 4:  return GL_FLOAT16_MAT2x4_AMD;
                    default: return 0;
                    }
                case 3:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT16_MAT3x2_AMD;
                    case 3:  return GL_FLOAT16_MAT3_AMD;
                    case 4:  return GL_FLOAT16_MAT3x4_AMD;
                    default: return 0;
                    }
                case 4:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT16_MAT4x2_AMD;
                    case 3:  return GL_FLOAT16_MAT4x3_AMD;
                    case 4:  return GL_FLOAT16_MAT4_AMD;
                    default: return 0;
                    }
                }
            default:
                return 0;
            }
        }
        if (type.getVectorSize() == 1) {
            switch (type.getBasicType()) {
            case EbtFloat:      return GL_FLOAT;
            case EbtDouble:     return GL_DOUBLE;
            case EbtFloat16:    return GL_FLOAT16_NV;
            case EbtInt:        return GL_INT;
            case EbtUint:       return GL_UNSIGNED_INT;
            case EbtInt64:      return GL_INT64_ARB;
            case EbtUint64:     return GL_UNSIGNED_INT64_ARB;
            case EbtBool:       return GL_BOOL;
            case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER;
            default:            return 0;
            }
        }

        return 0;
    }

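    // (Illustrative: for 'uniform vec4 v[8]' the GL array size below is 8; a non-array reports 1.)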
    int mapToGlArraySize(const TType& type)
    {
        return type.isArray() ? type.getOuterArraySize() : 1;
    }

    const TIntermediate& intermediate;
    TReflection& reflection;
    std::set<const TIntermNode*> processedDerefs;
    bool updateStageMasks;

protected:
    TReflectionTraverser(TReflectionTraverser&);
    TReflectionTraverser& operator=(TReflectionTraverser&);
};

//
// Implement the traversal functions of interest.
//

// To catch dereferenced aggregates that must be reflected.
// This catches them at the highest level possible in the tree.
bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
{
    switch (node->getOp()) {
    case EOpIndexDirect:
    case EOpIndexIndirect:
    case EOpIndexDirectStruct:
        addDereferencedUniform(node);
        break;
    default:
        break;
    }

    // still need to visit everything below, which could contain sub-expressions
    // containing different uniforms
    return true;
}

// To reflect non-dereferenced objects.
void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
{
    if (base->getQualifier().storage == EvqUniform) {
        if (base->getBasicType() == EbtBlock) {
            if (reflection.options & EShReflectionSharedStd140UBO) {
                addUniform(*base);
            }
        } else {
            addUniform(*base);
        }
    }

    // #TODO add std140/layout active rules for ssbo, same with ubo.
    // Storage buffer blocks will be collected and expanded in this part.
    if ((reflection.options & EShReflectionSharedStd140SSBO) &&
        (base->getQualifier().storage == EvqBuffer && base->getBasicType() == EbtBlock &&
         (base->getQualifier().layoutPacking == ElpStd140 || base->getQualifier().layoutPacking == ElpShared)))
        addUniform(*base);

    if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
        (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
        addPipeIOVariable(*base);
}

//
// Implement TObjectReflection methods.
//

TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType,
                                     int pSize, int pIndex)
    : name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1),
      numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone())
{
}

int TObjectReflection::getBinding() const
{
    if (type == nullptr || !type->getQualifier().hasBinding())
        return -1;
    return type->getQualifier().layoutBinding;
}

void TObjectReflection::dump() const
{
    printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size,
           index, getBinding(), stages);

    if (counterIndex != -1)
        printf(", counter %d", counterIndex);

    if (numMembers != -1)
        printf(", numMembers %d", numMembers);

    if (arrayStride != 0)
        printf(", arrayStride %d", arrayStride);

    if (topLevelArrayStride != 0)
        printf(", topLevelArrayStride %d", topLevelArrayStride);

    printf("\n");
}

//
// Implement TReflection methods.
//

// Track any required attribute reflection, such as compute shader numthreads.
//
void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediate& intermediate)
{
    if (stage == EShLangCompute) {
        // Remember thread dimensions
        for (int dim = 0; dim < 3; ++dim)
            localSize[dim] = intermediate.getLocalSize(dim);
    }
}

// build counter block index associations for buffers
void TReflection::buildCounterIndices(const TIntermediate& intermediate)
{
#ifdef ENABLE_HLSL
    // search for ones that have counters
    for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
        const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
        const int index = getIndex(counterName);

        if (index >= 0)
            indexToUniformBlock[i].counterIndex = index;
    }
#else
    (void)intermediate;
#endif
}

// build the shader-stage mask for all uniforms
void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
{
    if (options & EShReflectionAllBlockVariables)
        return;

    for (int i = 0; i < int(indexToUniform.size()); ++i) {
        indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
    }

    for (int i = 0; i < int(indexToBufferVariable.size()); ++i) {
        indexToBufferVariable[i].stages =
            static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage());
    }
}

// Merge live symbols from 'intermediate' into the existing reflection database.
//
// Returns false if the input is too malformed to do this.
bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
{
    if (intermediate.getTreeRoot() == nullptr ||
        intermediate.getNumEntryPoints() != 1 ||
        intermediate.isRecursive())
        return false;

    buildAttributeReflection(stage, intermediate);

    TReflectionTraverser it(intermediate, *this);

    for (auto& sequence : intermediate.getTreeRoot()->getAsAggregate()->getSequence()) {
        if (sequence->getAsAggregate() != nullptr) {
            if (sequence->getAsAggregate()->getOp() == glslang::EOpLinkerObjects) {
                it.updateStageMasks = false;
                TIntermAggregate* linkerObjects = sequence->getAsAggregate();
                for (auto& linkerObject : linkerObjects->getSequence()) {
                    auto pNode = linkerObject->getAsSymbolNode();
                    if (pNode != nullptr) {
                        if ((pNode->getQualifier().storage == EvqUniform &&
                             (options & EShReflectionSharedStd140UBO)) ||
                            (pNode->getQualifier().storage == EvqBuffer &&
                             (options & EShReflectionSharedStd140SSBO))) {
                            // collect std140 and shared uniform blocks from the AST
                            if ((pNode->getBasicType() == EbtBlock) &&
                                ((pNode->getQualifier().layoutPacking == ElpStd140) ||
                                 (pNode->getQualifier().layoutPacking == ElpShared))) {
                                pNode->traverse(&it);
                            }
                        }
                        else if ((options & EShReflectionAllIOVariables) &&
                                 (pNode->getQualifier().isPipeInput() || pNode->getQualifier().isPipeOutput()))
                        {
                            pNode->traverse(&it);
                        }
                    }
                }
            } else {
                // This traverser visits every function in the AST.
                // To reflect uncalled functions as well, set the link message EShMsgKeepUncalled.
                // With EShMsgKeepUncalled set, all functions are kept in the AST, even uncalled ones,
                // which also keeps any uniform variables used only by those functions in the reflection.
                //
                // To reflect only live nodes, use a default link message or leave EShMsgKeepUncalled
                // unset; the linker then drops uncalled functions from the AST, so traversing every
                // function node is equivalent to traversing only live functions.
                it.updateStageMasks = true;
                sequence->getAsAggregate()->traverse(&it);
            }
        }
    }
    it.updateStageMasks = true;

    buildCounterIndices(intermediate);
    buildUniformStageMask(intermediate);

    return true;
}

void TReflection::dump()
{
    printf("Uniform reflection:\n");
    for (size_t i = 0; i < indexToUniform.size(); ++i)
        indexToUniform[i].dump();
    printf("\n");

    printf("Uniform block reflection:\n");
    for (size_t i = 0; i < indexToUniformBlock.size(); ++i)
        indexToUniformBlock[i].dump();
    printf("\n");

    printf("Buffer variable reflection:\n");
    for (size_t i = 0; i < indexToBufferVariable.size(); ++i)
        indexToBufferVariable[i].dump();
    printf("\n");

    printf("Buffer block reflection:\n");
    for (size_t i = 0; i < indexToBufferBlock.size(); ++i)
        indexToBufferBlock[i].dump();
    printf("\n");

    printf("Pipeline input reflection:\n");
    for (size_t i = 0; i < indexToPipeInput.size(); ++i)
        indexToPipeInput[i].dump();
    printf("\n");

    printf("Pipeline output reflection:\n");
    for (size_t i = 0; i < indexToPipeOutput.size(); ++i)
        indexToPipeOutput[i].dump();
    printf("\n");

    if (getLocalSize(0) > 1) {
        static const char* axis[] = { "X", "Y", "Z" };

        for (int dim = 0; dim < 3; ++dim)
            if (getLocalSize(dim) > 1)
                printf("Local size %s: %u\n", axis[dim], getLocalSize(dim));

        printf("\n");
    }

    // printf("Live names\n");
    // for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it)
    //     printf("%s: %d\n", it->first.c_str(), it->second);
    // printf("\n");
}

} // end namespace glslang

#endif // !GLSLANG_WEB