  1. //
  2. // Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
  3. // Copyright (C) 2012-2016 LunarG, Inc.
  4. // Copyright (C) 2017 ARM Limited.
  5. // Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
  6. //
  7. // All rights reserved.
  8. //
  9. // Redistribution and use in source and binary forms, with or without
  10. // modification, are permitted provided that the following conditions
  11. // are met:
  12. //
  13. // Redistributions of source code must retain the above copyright
  14. // notice, this list of conditions and the following disclaimer.
  15. //
  16. // Redistributions in binary form must reproduce the above
  17. // copyright notice, this list of conditions and the following
  18. // disclaimer in the documentation and/or other materials provided
  19. // with the distribution.
  20. //
  21. // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
  22. // contributors may be used to endorse or promote products derived
  23. // from this software without specific prior written permission.
  24. //
  25. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  26. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  27. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  28. // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  29. // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  30. // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  31. // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  32. // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  33. // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  34. // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  35. // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  36. // POSSIBILITY OF SUCH DAMAGE.
  37. //
  38. //
  39. // Definition of the in-memory high-level intermediate representation
  40. // of shaders. This is a tree that the parser creates.
  41. //
  42. // Nodes in the tree are defined as a hierarchy of classes derived from
  43. // TIntermNode. Each is a node in a tree. There is no preset branching factor;
  44. // each node can have its own type of list of children.
  45. //
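//
// For orientation, a minimal sketch (illustrative only, not part of this interface) of how a
// consumer typically narrows a generic node to a concrete class while walking the tree; each
// getAs*() method declared below returns nullptr when the node is not of the requested kind,
// and the TIntermTraverser machinery declared later automates this pattern:
//
//   void visit(TIntermNode* node)
//   {
//       if (glslang::TIntermBinary* bin = node->getAsBinaryNode()) {
//           visit(bin->getLeft());
//           visit(bin->getRight());
//       } else if (glslang::TIntermSymbol* sym = node->getAsSymbolNode()) {
//           // leaf node: inspect sym->getName(), sym->getType(), ...
//       }
//   }
//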
  46. #ifndef __INTERMEDIATE_H
  47. #define __INTERMEDIATE_H
  48. #if defined(_MSC_VER) && _MSC_VER >= 1900
  49. #pragma warning(disable : 4464) // relative include path contains '..'
  50. #pragma warning(disable : 5026) // 'glslang::TIntermUnary': move constructor was implicitly defined as deleted
  51. #endif
  52. #include "../Include/Common.h"
  53. #include "../Include/Types.h"
  54. #include "../Include/ConstantUnion.h"
  55. namespace glslang {
  56. class TIntermediate;
  57. //
  58. // Operators used by the high-level (parse tree) representation.
  59. //
  60. enum TOperator {
  61. EOpNull, // if in a node, should only mean a node is still being built
  62. EOpSequence, // denotes a list of statements, or parameters, etc.
  63. EOpScope, // Used by debugging to denote a scoped list of statements
  64. EOpLinkerObjects, // for an aggregate node of objects the linker may need, if not referenced by the rest of the AST
  65. EOpFunctionCall,
  66. EOpFunction, // For function definition
  67. EOpParameters, // an aggregate listing the parameters to a function
  68. #ifndef GLSLANG_WEB
  69. EOpSpirvInst,
  70. #endif
  71. //
  72. // Unary operators
  73. //
  74. EOpNegative,
  75. EOpLogicalNot,
  76. EOpVectorLogicalNot,
  77. EOpBitwiseNot,
  78. EOpPostIncrement,
  79. EOpPostDecrement,
  80. EOpPreIncrement,
  81. EOpPreDecrement,
  82. EOpCopyObject,
  83. EOpDeclare, // Used by debugging to force declaration of variable in correct scope
  84. // (u)int* -> bool
  85. EOpConvInt8ToBool,
  86. EOpConvUint8ToBool,
  87. EOpConvInt16ToBool,
  88. EOpConvUint16ToBool,
  89. EOpConvIntToBool,
  90. EOpConvUintToBool,
  91. EOpConvInt64ToBool,
  92. EOpConvUint64ToBool,
  93. // float* -> bool
  94. EOpConvFloat16ToBool,
  95. EOpConvFloatToBool,
  96. EOpConvDoubleToBool,
  97. // bool -> (u)int*
  98. EOpConvBoolToInt8,
  99. EOpConvBoolToUint8,
  100. EOpConvBoolToInt16,
  101. EOpConvBoolToUint16,
  102. EOpConvBoolToInt,
  103. EOpConvBoolToUint,
  104. EOpConvBoolToInt64,
  105. EOpConvBoolToUint64,
  106. // bool -> float*
  107. EOpConvBoolToFloat16,
  108. EOpConvBoolToFloat,
  109. EOpConvBoolToDouble,
  110. // int8_t -> (u)int*
  111. EOpConvInt8ToInt16,
  112. EOpConvInt8ToInt,
  113. EOpConvInt8ToInt64,
  114. EOpConvInt8ToUint8,
  115. EOpConvInt8ToUint16,
  116. EOpConvInt8ToUint,
  117. EOpConvInt8ToUint64,
  118. // uint8_t -> (u)int*
  119. EOpConvUint8ToInt8,
  120. EOpConvUint8ToInt16,
  121. EOpConvUint8ToInt,
  122. EOpConvUint8ToInt64,
  123. EOpConvUint8ToUint16,
  124. EOpConvUint8ToUint,
  125. EOpConvUint8ToUint64,
  126. // int8_t -> float*
  127. EOpConvInt8ToFloat16,
  128. EOpConvInt8ToFloat,
  129. EOpConvInt8ToDouble,
  130. // uint8_t -> float*
  131. EOpConvUint8ToFloat16,
  132. EOpConvUint8ToFloat,
  133. EOpConvUint8ToDouble,
  134. // int16_t -> (u)int*
  135. EOpConvInt16ToInt8,
  136. EOpConvInt16ToInt,
  137. EOpConvInt16ToInt64,
  138. EOpConvInt16ToUint8,
  139. EOpConvInt16ToUint16,
  140. EOpConvInt16ToUint,
  141. EOpConvInt16ToUint64,
  142. // uint16_t -> (u)int*
  143. EOpConvUint16ToInt8,
  144. EOpConvUint16ToInt16,
  145. EOpConvUint16ToInt,
  146. EOpConvUint16ToInt64,
  147. EOpConvUint16ToUint8,
  148. EOpConvUint16ToUint,
  149. EOpConvUint16ToUint64,
  150. // int16_t -> float*
  151. EOpConvInt16ToFloat16,
  152. EOpConvInt16ToFloat,
  153. EOpConvInt16ToDouble,
  154. // uint16_t -> float*
  155. EOpConvUint16ToFloat16,
  156. EOpConvUint16ToFloat,
  157. EOpConvUint16ToDouble,
  158. // int32_t -> (u)int*
  159. EOpConvIntToInt8,
  160. EOpConvIntToInt16,
  161. EOpConvIntToInt64,
  162. EOpConvIntToUint8,
  163. EOpConvIntToUint16,
  164. EOpConvIntToUint,
  165. EOpConvIntToUint64,
  166. // uint32_t -> (u)int*
  167. EOpConvUintToInt8,
  168. EOpConvUintToInt16,
  169. EOpConvUintToInt,
  170. EOpConvUintToInt64,
  171. EOpConvUintToUint8,
  172. EOpConvUintToUint16,
  173. EOpConvUintToUint64,
  174. // int32_t -> float*
  175. EOpConvIntToFloat16,
  176. EOpConvIntToFloat,
  177. EOpConvIntToDouble,
  178. // uint32_t -> float*
  179. EOpConvUintToFloat16,
  180. EOpConvUintToFloat,
  181. EOpConvUintToDouble,
  182. // int64_t -> (u)int*
  183. EOpConvInt64ToInt8,
  184. EOpConvInt64ToInt16,
  185. EOpConvInt64ToInt,
  186. EOpConvInt64ToUint8,
  187. EOpConvInt64ToUint16,
  188. EOpConvInt64ToUint,
  189. EOpConvInt64ToUint64,
  190. // uint64_t -> (u)int*
  191. EOpConvUint64ToInt8,
  192. EOpConvUint64ToInt16,
  193. EOpConvUint64ToInt,
  194. EOpConvUint64ToInt64,
  195. EOpConvUint64ToUint8,
  196. EOpConvUint64ToUint16,
  197. EOpConvUint64ToUint,
  198. // int64_t -> float*
  199. EOpConvInt64ToFloat16,
  200. EOpConvInt64ToFloat,
  201. EOpConvInt64ToDouble,
  202. // uint64_t -> float*
  203. EOpConvUint64ToFloat16,
  204. EOpConvUint64ToFloat,
  205. EOpConvUint64ToDouble,
  206. // float16_t -> (u)int*
  207. EOpConvFloat16ToInt8,
  208. EOpConvFloat16ToInt16,
  209. EOpConvFloat16ToInt,
  210. EOpConvFloat16ToInt64,
  211. EOpConvFloat16ToUint8,
  212. EOpConvFloat16ToUint16,
  213. EOpConvFloat16ToUint,
  214. EOpConvFloat16ToUint64,
  215. // float16_t -> float*
  216. EOpConvFloat16ToFloat,
  217. EOpConvFloat16ToDouble,
  218. // float -> (u)int*
  219. EOpConvFloatToInt8,
  220. EOpConvFloatToInt16,
  221. EOpConvFloatToInt,
  222. EOpConvFloatToInt64,
  223. EOpConvFloatToUint8,
  224. EOpConvFloatToUint16,
  225. EOpConvFloatToUint,
  226. EOpConvFloatToUint64,
  227. // float -> float*
  228. EOpConvFloatToFloat16,
  229. EOpConvFloatToDouble,
  230. // float64_t -> (u)int*
  231. EOpConvDoubleToInt8,
  232. EOpConvDoubleToInt16,
  233. EOpConvDoubleToInt,
  234. EOpConvDoubleToInt64,
  235. EOpConvDoubleToUint8,
  236. EOpConvDoubleToUint16,
  237. EOpConvDoubleToUint,
  238. EOpConvDoubleToUint64,
  239. // float64_t -> float*
  240. EOpConvDoubleToFloat16,
  241. EOpConvDoubleToFloat,
  242. // uint64_t <-> pointer
  243. EOpConvUint64ToPtr,
  244. EOpConvPtrToUint64,
  245. // uvec2 <-> pointer
  246. EOpConvUvec2ToPtr,
  247. EOpConvPtrToUvec2,
  248. // uint64_t -> accelerationStructureEXT
  249. EOpConvUint64ToAccStruct,
  250. // uvec2 -> accelerationStructureEXT
  251. EOpConvUvec2ToAccStruct,
  252. //
  253. // binary operations
  254. //
  255. EOpAdd,
  256. EOpSub,
  257. EOpMul,
  258. EOpDiv,
  259. EOpMod,
  260. EOpRightShift,
  261. EOpLeftShift,
  262. EOpAnd,
  263. EOpInclusiveOr,
  264. EOpExclusiveOr,
  265. EOpEqual,
  266. EOpNotEqual,
  267. EOpVectorEqual,
  268. EOpVectorNotEqual,
  269. EOpLessThan,
  270. EOpGreaterThan,
  271. EOpLessThanEqual,
  272. EOpGreaterThanEqual,
  273. EOpComma,
  274. EOpVectorTimesScalar,
  275. EOpVectorTimesMatrix,
  276. EOpMatrixTimesVector,
  277. EOpMatrixTimesScalar,
  278. EOpLogicalOr,
  279. EOpLogicalXor,
  280. EOpLogicalAnd,
  281. EOpIndexDirect,
  282. EOpIndexIndirect,
  283. EOpIndexDirectStruct,
  284. EOpVectorSwizzle,
  285. EOpMethod,
  286. EOpScoping,
  287. //
  288. // Built-in functions mapped to operators
  289. //
  290. EOpRadians,
  291. EOpDegrees,
  292. EOpSin,
  293. EOpCos,
  294. EOpTan,
  295. EOpAsin,
  296. EOpAcos,
  297. EOpAtan,
  298. EOpSinh,
  299. EOpCosh,
  300. EOpTanh,
  301. EOpAsinh,
  302. EOpAcosh,
  303. EOpAtanh,
  304. EOpPow,
  305. EOpExp,
  306. EOpLog,
  307. EOpExp2,
  308. EOpLog2,
  309. EOpSqrt,
  310. EOpInverseSqrt,
  311. EOpAbs,
  312. EOpSign,
  313. EOpFloor,
  314. EOpTrunc,
  315. EOpRound,
  316. EOpRoundEven,
  317. EOpCeil,
  318. EOpFract,
  319. EOpModf,
  320. EOpMin,
  321. EOpMax,
  322. EOpClamp,
  323. EOpMix,
  324. EOpStep,
  325. EOpSmoothStep,
  326. EOpIsNan,
  327. EOpIsInf,
  328. EOpFma,
  329. EOpFrexp,
  330. EOpLdexp,
  331. EOpFloatBitsToInt,
  332. EOpFloatBitsToUint,
  333. EOpIntBitsToFloat,
  334. EOpUintBitsToFloat,
  335. EOpDoubleBitsToInt64,
  336. EOpDoubleBitsToUint64,
  337. EOpInt64BitsToDouble,
  338. EOpUint64BitsToDouble,
  339. EOpFloat16BitsToInt16,
  340. EOpFloat16BitsToUint16,
  341. EOpInt16BitsToFloat16,
  342. EOpUint16BitsToFloat16,
  343. EOpPackSnorm2x16,
  344. EOpUnpackSnorm2x16,
  345. EOpPackUnorm2x16,
  346. EOpUnpackUnorm2x16,
  347. EOpPackSnorm4x8,
  348. EOpUnpackSnorm4x8,
  349. EOpPackUnorm4x8,
  350. EOpUnpackUnorm4x8,
  351. EOpPackHalf2x16,
  352. EOpUnpackHalf2x16,
  353. EOpPackDouble2x32,
  354. EOpUnpackDouble2x32,
  355. EOpPackInt2x32,
  356. EOpUnpackInt2x32,
  357. EOpPackUint2x32,
  358. EOpUnpackUint2x32,
  359. EOpPackFloat2x16,
  360. EOpUnpackFloat2x16,
  361. EOpPackInt2x16,
  362. EOpUnpackInt2x16,
  363. EOpPackUint2x16,
  364. EOpUnpackUint2x16,
  365. EOpPackInt4x16,
  366. EOpUnpackInt4x16,
  367. EOpPackUint4x16,
  368. EOpUnpackUint4x16,
  369. EOpPack16,
  370. EOpPack32,
  371. EOpPack64,
  372. EOpUnpack32,
  373. EOpUnpack16,
  374. EOpUnpack8,
  375. EOpLength,
  376. EOpDistance,
  377. EOpDot,
  378. EOpCross,
  379. EOpNormalize,
  380. EOpFaceForward,
  381. EOpReflect,
  382. EOpRefract,
  383. EOpMin3,
  384. EOpMax3,
  385. EOpMid3,
  386. EOpDPdx, // Fragment only
  387. EOpDPdy, // Fragment only
  388. EOpFwidth, // Fragment only
  389. EOpDPdxFine, // Fragment only
  390. EOpDPdyFine, // Fragment only
  391. EOpFwidthFine, // Fragment only
  392. EOpDPdxCoarse, // Fragment only
  393. EOpDPdyCoarse, // Fragment only
  394. EOpFwidthCoarse, // Fragment only
  395. EOpInterpolateAtCentroid, // Fragment only
  396. EOpInterpolateAtSample, // Fragment only
  397. EOpInterpolateAtOffset, // Fragment only
  398. EOpInterpolateAtVertex,
  399. EOpMatrixTimesMatrix,
  400. EOpOuterProduct,
  401. EOpDeterminant,
  402. EOpMatrixInverse,
  403. EOpTranspose,
  404. EOpFtransform,
  405. EOpNoise,
  406. EOpEmitVertex, // geometry only
  407. EOpEndPrimitive, // geometry only
  408. EOpEmitStreamVertex, // geometry only
  409. EOpEndStreamPrimitive, // geometry only
  410. EOpBarrier,
  411. EOpMemoryBarrier,
  412. EOpMemoryBarrierAtomicCounter,
  413. EOpMemoryBarrierBuffer,
  414. EOpMemoryBarrierImage,
  415. EOpMemoryBarrierShared, // compute only
  416. EOpGroupMemoryBarrier, // compute only
  417. EOpBallot,
  418. EOpReadInvocation,
  419. EOpReadFirstInvocation,
  420. EOpAnyInvocation,
  421. EOpAllInvocations,
  422. EOpAllInvocationsEqual,
  423. EOpSubgroupGuardStart,
  424. EOpSubgroupBarrier,
  425. EOpSubgroupMemoryBarrier,
  426. EOpSubgroupMemoryBarrierBuffer,
  427. EOpSubgroupMemoryBarrierImage,
  428. EOpSubgroupMemoryBarrierShared, // compute only
  429. EOpSubgroupElect,
  430. EOpSubgroupAll,
  431. EOpSubgroupAny,
  432. EOpSubgroupAllEqual,
  433. EOpSubgroupBroadcast,
  434. EOpSubgroupBroadcastFirst,
  435. EOpSubgroupBallot,
  436. EOpSubgroupInverseBallot,
  437. EOpSubgroupBallotBitExtract,
  438. EOpSubgroupBallotBitCount,
  439. EOpSubgroupBallotInclusiveBitCount,
  440. EOpSubgroupBallotExclusiveBitCount,
  441. EOpSubgroupBallotFindLSB,
  442. EOpSubgroupBallotFindMSB,
  443. EOpSubgroupShuffle,
  444. EOpSubgroupShuffleXor,
  445. EOpSubgroupShuffleUp,
  446. EOpSubgroupShuffleDown,
  447. EOpSubgroupAdd,
  448. EOpSubgroupMul,
  449. EOpSubgroupMin,
  450. EOpSubgroupMax,
  451. EOpSubgroupAnd,
  452. EOpSubgroupOr,
  453. EOpSubgroupXor,
  454. EOpSubgroupInclusiveAdd,
  455. EOpSubgroupInclusiveMul,
  456. EOpSubgroupInclusiveMin,
  457. EOpSubgroupInclusiveMax,
  458. EOpSubgroupInclusiveAnd,
  459. EOpSubgroupInclusiveOr,
  460. EOpSubgroupInclusiveXor,
  461. EOpSubgroupExclusiveAdd,
  462. EOpSubgroupExclusiveMul,
  463. EOpSubgroupExclusiveMin,
  464. EOpSubgroupExclusiveMax,
  465. EOpSubgroupExclusiveAnd,
  466. EOpSubgroupExclusiveOr,
  467. EOpSubgroupExclusiveXor,
  468. EOpSubgroupClusteredAdd,
  469. EOpSubgroupClusteredMul,
  470. EOpSubgroupClusteredMin,
  471. EOpSubgroupClusteredMax,
  472. EOpSubgroupClusteredAnd,
  473. EOpSubgroupClusteredOr,
  474. EOpSubgroupClusteredXor,
  475. EOpSubgroupQuadBroadcast,
  476. EOpSubgroupQuadSwapHorizontal,
  477. EOpSubgroupQuadSwapVertical,
  478. EOpSubgroupQuadSwapDiagonal,
  479. EOpSubgroupPartition,
  480. EOpSubgroupPartitionedAdd,
  481. EOpSubgroupPartitionedMul,
  482. EOpSubgroupPartitionedMin,
  483. EOpSubgroupPartitionedMax,
  484. EOpSubgroupPartitionedAnd,
  485. EOpSubgroupPartitionedOr,
  486. EOpSubgroupPartitionedXor,
  487. EOpSubgroupPartitionedInclusiveAdd,
  488. EOpSubgroupPartitionedInclusiveMul,
  489. EOpSubgroupPartitionedInclusiveMin,
  490. EOpSubgroupPartitionedInclusiveMax,
  491. EOpSubgroupPartitionedInclusiveAnd,
  492. EOpSubgroupPartitionedInclusiveOr,
  493. EOpSubgroupPartitionedInclusiveXor,
  494. EOpSubgroupPartitionedExclusiveAdd,
  495. EOpSubgroupPartitionedExclusiveMul,
  496. EOpSubgroupPartitionedExclusiveMin,
  497. EOpSubgroupPartitionedExclusiveMax,
  498. EOpSubgroupPartitionedExclusiveAnd,
  499. EOpSubgroupPartitionedExclusiveOr,
  500. EOpSubgroupPartitionedExclusiveXor,
  501. EOpSubgroupGuardStop,
  502. EOpMinInvocations,
  503. EOpMaxInvocations,
  504. EOpAddInvocations,
  505. EOpMinInvocationsNonUniform,
  506. EOpMaxInvocationsNonUniform,
  507. EOpAddInvocationsNonUniform,
  508. EOpMinInvocationsInclusiveScan,
  509. EOpMaxInvocationsInclusiveScan,
  510. EOpAddInvocationsInclusiveScan,
  511. EOpMinInvocationsInclusiveScanNonUniform,
  512. EOpMaxInvocationsInclusiveScanNonUniform,
  513. EOpAddInvocationsInclusiveScanNonUniform,
  514. EOpMinInvocationsExclusiveScan,
  515. EOpMaxInvocationsExclusiveScan,
  516. EOpAddInvocationsExclusiveScan,
  517. EOpMinInvocationsExclusiveScanNonUniform,
  518. EOpMaxInvocationsExclusiveScanNonUniform,
  519. EOpAddInvocationsExclusiveScanNonUniform,
  520. EOpSwizzleInvocations,
  521. EOpSwizzleInvocationsMasked,
  522. EOpWriteInvocation,
  523. EOpMbcnt,
  524. EOpCubeFaceIndex,
  525. EOpCubeFaceCoord,
  526. EOpTime,
  527. EOpAtomicAdd,
  528. EOpAtomicSubtract,
  529. EOpAtomicMin,
  530. EOpAtomicMax,
  531. EOpAtomicAnd,
  532. EOpAtomicOr,
  533. EOpAtomicXor,
  534. EOpAtomicExchange,
  535. EOpAtomicCompSwap,
  536. EOpAtomicLoad,
  537. EOpAtomicStore,
  538. EOpAtomicCounterIncrement, // results in pre-increment value
  539. EOpAtomicCounterDecrement, // results in post-decrement value
  540. EOpAtomicCounter,
  541. EOpAtomicCounterAdd,
  542. EOpAtomicCounterSubtract,
  543. EOpAtomicCounterMin,
  544. EOpAtomicCounterMax,
  545. EOpAtomicCounterAnd,
  546. EOpAtomicCounterOr,
  547. EOpAtomicCounterXor,
  548. EOpAtomicCounterExchange,
  549. EOpAtomicCounterCompSwap,
  550. EOpAny,
  551. EOpAll,
  552. EOpCooperativeMatrixLoad,
  553. EOpCooperativeMatrixStore,
  554. EOpCooperativeMatrixMulAdd,
  555. EOpBeginInvocationInterlock, // Fragment only
  556. EOpEndInvocationInterlock, // Fragment only
  557. EOpIsHelperInvocation,
  558. EOpDebugPrintf,
  559. //
  560. // Branch
  561. //
  562. EOpKill, // Fragment only
  563. EOpTerminateInvocation, // Fragment only
  564. EOpDemote, // Fragment only
  565. EOpTerminateRayKHR, // Any-hit only
  566. EOpIgnoreIntersectionKHR, // Any-hit only
  567. EOpReturn,
  568. EOpBreak,
  569. EOpContinue,
  570. EOpCase,
  571. EOpDefault,
  572. //
  573. // Constructors
  574. //
  575. EOpConstructGuardStart,
  576. EOpConstructInt, // these first scalar forms also identify what implicit conversion is needed
  577. EOpConstructUint,
  578. EOpConstructInt8,
  579. EOpConstructUint8,
  580. EOpConstructInt16,
  581. EOpConstructUint16,
  582. EOpConstructInt64,
  583. EOpConstructUint64,
  584. EOpConstructBool,
  585. EOpConstructFloat,
  586. EOpConstructDouble,
  587. // Keep vector and matrix constructors in a consistent relative order for
  588. // TParseContext::constructBuiltIn, which converts between 8/16/32 bit
  589. // vector constructors
  590. EOpConstructVec2,
  591. EOpConstructVec3,
  592. EOpConstructVec4,
  593. EOpConstructMat2x2,
  594. EOpConstructMat2x3,
  595. EOpConstructMat2x4,
  596. EOpConstructMat3x2,
  597. EOpConstructMat3x3,
  598. EOpConstructMat3x4,
  599. EOpConstructMat4x2,
  600. EOpConstructMat4x3,
  601. EOpConstructMat4x4,
  602. EOpConstructDVec2,
  603. EOpConstructDVec3,
  604. EOpConstructDVec4,
  605. EOpConstructBVec2,
  606. EOpConstructBVec3,
  607. EOpConstructBVec4,
  608. EOpConstructI8Vec2,
  609. EOpConstructI8Vec3,
  610. EOpConstructI8Vec4,
  611. EOpConstructU8Vec2,
  612. EOpConstructU8Vec3,
  613. EOpConstructU8Vec4,
  614. EOpConstructI16Vec2,
  615. EOpConstructI16Vec3,
  616. EOpConstructI16Vec4,
  617. EOpConstructU16Vec2,
  618. EOpConstructU16Vec3,
  619. EOpConstructU16Vec4,
  620. EOpConstructIVec2,
  621. EOpConstructIVec3,
  622. EOpConstructIVec4,
  623. EOpConstructUVec2,
  624. EOpConstructUVec3,
  625. EOpConstructUVec4,
  626. EOpConstructI64Vec2,
  627. EOpConstructI64Vec3,
  628. EOpConstructI64Vec4,
  629. EOpConstructU64Vec2,
  630. EOpConstructU64Vec3,
  631. EOpConstructU64Vec4,
  632. EOpConstructDMat2x2,
  633. EOpConstructDMat2x3,
  634. EOpConstructDMat2x4,
  635. EOpConstructDMat3x2,
  636. EOpConstructDMat3x3,
  637. EOpConstructDMat3x4,
  638. EOpConstructDMat4x2,
  639. EOpConstructDMat4x3,
  640. EOpConstructDMat4x4,
  641. EOpConstructIMat2x2,
  642. EOpConstructIMat2x3,
  643. EOpConstructIMat2x4,
  644. EOpConstructIMat3x2,
  645. EOpConstructIMat3x3,
  646. EOpConstructIMat3x4,
  647. EOpConstructIMat4x2,
  648. EOpConstructIMat4x3,
  649. EOpConstructIMat4x4,
  650. EOpConstructUMat2x2,
  651. EOpConstructUMat2x3,
  652. EOpConstructUMat2x4,
  653. EOpConstructUMat3x2,
  654. EOpConstructUMat3x3,
  655. EOpConstructUMat3x4,
  656. EOpConstructUMat4x2,
  657. EOpConstructUMat4x3,
  658. EOpConstructUMat4x4,
  659. EOpConstructBMat2x2,
  660. EOpConstructBMat2x3,
  661. EOpConstructBMat2x4,
  662. EOpConstructBMat3x2,
  663. EOpConstructBMat3x3,
  664. EOpConstructBMat3x4,
  665. EOpConstructBMat4x2,
  666. EOpConstructBMat4x3,
  667. EOpConstructBMat4x4,
  668. EOpConstructFloat16,
  669. EOpConstructF16Vec2,
  670. EOpConstructF16Vec3,
  671. EOpConstructF16Vec4,
  672. EOpConstructF16Mat2x2,
  673. EOpConstructF16Mat2x3,
  674. EOpConstructF16Mat2x4,
  675. EOpConstructF16Mat3x2,
  676. EOpConstructF16Mat3x3,
  677. EOpConstructF16Mat3x4,
  678. EOpConstructF16Mat4x2,
  679. EOpConstructF16Mat4x3,
  680. EOpConstructF16Mat4x4,
  681. EOpConstructStruct,
  682. EOpConstructTextureSampler,
  683. EOpConstructNonuniform, // expected to be transformed away, not present in final AST
  684. EOpConstructReference,
  685. EOpConstructCooperativeMatrix,
  686. EOpConstructAccStruct,
  687. EOpConstructGuardEnd,
  688. //
  689. // moves
  690. //
  691. EOpAssign,
  692. EOpAddAssign,
  693. EOpSubAssign,
  694. EOpMulAssign,
  695. EOpVectorTimesMatrixAssign,
  696. EOpVectorTimesScalarAssign,
  697. EOpMatrixTimesScalarAssign,
  698. EOpMatrixTimesMatrixAssign,
  699. EOpDivAssign,
  700. EOpModAssign,
  701. EOpAndAssign,
  702. EOpInclusiveOrAssign,
  703. EOpExclusiveOrAssign,
  704. EOpLeftShiftAssign,
  705. EOpRightShiftAssign,
  706. //
  707. // Array operators
  708. //
  709. // Can apply to arrays, vectors, or matrices.
  710. // Can be decomposed to a constant at compile time, but this does not always happen,
  711. // due to link-time effects. So, the consumer can expect either a link-time sized or
  712. // run-time sized array.
  713. EOpArrayLength,
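// For example, a consumer inspecting an EOpArrayLength result might see either form.
// (An illustrative sketch; 'lengthNode' is a hypothetical TIntermTyped* produced for 'arr.length()'.)
//   if (const TIntermConstantUnion* c = lengthNode->getAsConstantUnion())
//       int n = c->getConstArray()[0].getIConst();  // folded: link-time sized array
//   else
//       ;                                           // not folded: run-time sized array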
  714. //
  715. // Image operations
  716. //
  717. EOpImageGuardBegin,
  718. EOpImageQuerySize,
  719. EOpImageQuerySamples,
  720. EOpImageLoad,
  721. EOpImageStore,
  722. EOpImageLoadLod,
  723. EOpImageStoreLod,
  724. EOpImageAtomicAdd,
  725. EOpImageAtomicMin,
  726. EOpImageAtomicMax,
  727. EOpImageAtomicAnd,
  728. EOpImageAtomicOr,
  729. EOpImageAtomicXor,
  730. EOpImageAtomicExchange,
  731. EOpImageAtomicCompSwap,
  732. EOpImageAtomicLoad,
  733. EOpImageAtomicStore,
  734. EOpSubpassLoad,
  735. EOpSubpassLoadMS,
  736. EOpSparseImageLoad,
  737. EOpSparseImageLoadLod,
  738. EOpImageGuardEnd,
  739. //
  740. // Texture operations
  741. //
  742. EOpTextureGuardBegin,
  743. EOpTextureQuerySize,
  744. EOpTextureQueryLod,
  745. EOpTextureQueryLevels,
  746. EOpTextureQuerySamples,
  747. EOpSamplingGuardBegin,
  748. EOpTexture,
  749. EOpTextureProj,
  750. EOpTextureLod,
  751. EOpTextureOffset,
  752. EOpTextureFetch,
  753. EOpTextureFetchOffset,
  754. EOpTextureProjOffset,
  755. EOpTextureLodOffset,
  756. EOpTextureProjLod,
  757. EOpTextureProjLodOffset,
  758. EOpTextureGrad,
  759. EOpTextureGradOffset,
  760. EOpTextureProjGrad,
  761. EOpTextureProjGradOffset,
  762. EOpTextureGather,
  763. EOpTextureGatherOffset,
  764. EOpTextureGatherOffsets,
  765. EOpTextureClamp,
  766. EOpTextureOffsetClamp,
  767. EOpTextureGradClamp,
  768. EOpTextureGradOffsetClamp,
  769. EOpTextureGatherLod,
  770. EOpTextureGatherLodOffset,
  771. EOpTextureGatherLodOffsets,
  772. EOpFragmentMaskFetch,
  773. EOpFragmentFetch,
  774. EOpSparseTextureGuardBegin,
  775. EOpSparseTexture,
  776. EOpSparseTextureLod,
  777. EOpSparseTextureOffset,
  778. EOpSparseTextureFetch,
  779. EOpSparseTextureFetchOffset,
  780. EOpSparseTextureLodOffset,
  781. EOpSparseTextureGrad,
  782. EOpSparseTextureGradOffset,
  783. EOpSparseTextureGather,
  784. EOpSparseTextureGatherOffset,
  785. EOpSparseTextureGatherOffsets,
  786. EOpSparseTexelsResident,
  787. EOpSparseTextureClamp,
  788. EOpSparseTextureOffsetClamp,
  789. EOpSparseTextureGradClamp,
  790. EOpSparseTextureGradOffsetClamp,
  791. EOpSparseTextureGatherLod,
  792. EOpSparseTextureGatherLodOffset,
  793. EOpSparseTextureGatherLodOffsets,
  794. EOpSparseTextureGuardEnd,
  795. EOpImageFootprintGuardBegin,
  796. EOpImageSampleFootprintNV,
  797. EOpImageSampleFootprintClampNV,
  798. EOpImageSampleFootprintLodNV,
  799. EOpImageSampleFootprintGradNV,
  800. EOpImageSampleFootprintGradClampNV,
  801. EOpImageFootprintGuardEnd,
  802. EOpSamplingGuardEnd,
  803. EOpTextureGuardEnd,
  804. //
  805. // Integer operations
  806. //
  807. EOpAddCarry,
  808. EOpSubBorrow,
  809. EOpUMulExtended,
  810. EOpIMulExtended,
  811. EOpBitfieldExtract,
  812. EOpBitfieldInsert,
  813. EOpBitFieldReverse,
  814. EOpBitCount,
  815. EOpFindLSB,
  816. EOpFindMSB,
  817. EOpCountLeadingZeros,
  818. EOpCountTrailingZeros,
  819. EOpAbsDifference,
  820. EOpAddSaturate,
  821. EOpSubSaturate,
  822. EOpAverage,
  823. EOpAverageRounded,
  824. EOpMul32x16,
  825. EOpTraceNV,
  826. EOpTraceRayMotionNV,
  827. EOpTraceKHR,
  828. EOpReportIntersection,
  829. EOpIgnoreIntersectionNV,
  830. EOpTerminateRayNV,
  831. EOpExecuteCallableNV,
  832. EOpExecuteCallableKHR,
  833. EOpWritePackedPrimitiveIndices4x8NV,
  834. EOpEmitMeshTasksEXT,
  835. EOpSetMeshOutputsEXT,
  836. //
  837. // GL_EXT_ray_query operations
  838. //
  839. EOpRayQueryInitialize,
  840. EOpRayQueryTerminate,
  841. EOpRayQueryGenerateIntersection,
  842. EOpRayQueryConfirmIntersection,
  843. EOpRayQueryProceed,
  844. EOpRayQueryGetIntersectionType,
  845. EOpRayQueryGetRayTMin,
  846. EOpRayQueryGetRayFlags,
  847. EOpRayQueryGetIntersectionT,
  848. EOpRayQueryGetIntersectionInstanceCustomIndex,
  849. EOpRayQueryGetIntersectionInstanceId,
  850. EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset,
  851. EOpRayQueryGetIntersectionGeometryIndex,
  852. EOpRayQueryGetIntersectionPrimitiveIndex,
  853. EOpRayQueryGetIntersectionBarycentrics,
  854. EOpRayQueryGetIntersectionFrontFace,
  855. EOpRayQueryGetIntersectionCandidateAABBOpaque,
  856. EOpRayQueryGetIntersectionObjectRayDirection,
  857. EOpRayQueryGetIntersectionObjectRayOrigin,
  858. EOpRayQueryGetWorldRayDirection,
  859. EOpRayQueryGetWorldRayOrigin,
  860. EOpRayQueryGetIntersectionObjectToWorld,
  861. EOpRayQueryGetIntersectionWorldToObject,
  862. //
  863. // HLSL operations
  864. //
  865. EOpClip, // discard if input value < 0
  866. EOpIsFinite,
  867. EOpLog10, // base 10 log
  868. EOpRcp, // 1/x
  869. EOpSaturate, // clamp from 0 to 1
  870. EOpSinCos, // sin and cos in out parameters
  871. EOpGenMul, // mul(x,y) on any of mat/vec/scalars
  872. EOpDst, // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w
  873. EOpInterlockedAdd, // atomic ops, but uses [optional] out arg instead of return
  874. EOpInterlockedAnd, // ...
  875. EOpInterlockedCompareExchange, // ...
  876. EOpInterlockedCompareStore, // ...
  877. EOpInterlockedExchange, // ...
  878. EOpInterlockedMax, // ...
  879. EOpInterlockedMin, // ...
  880. EOpInterlockedOr, // ...
  881. EOpInterlockedXor, // ...
  882. EOpAllMemoryBarrierWithGroupSync, // memory barriers without non-hlsl AST equivalents
  883. EOpDeviceMemoryBarrier, // ...
  884. EOpDeviceMemoryBarrierWithGroupSync, // ...
  885. EOpWorkgroupMemoryBarrier, // ...
  886. EOpWorkgroupMemoryBarrierWithGroupSync, // ...
  887. EOpEvaluateAttributeSnapped, // InterpolateAtOffset with int position on 16x16 grid
  888. EOpF32tof16, // HLSL conversion: half of a PackHalf2x16
  889. EOpF16tof32, // HLSL conversion: half of an UnpackHalf2x16
  890. EOpLit, // HLSL lighting coefficient vector
  891. EOpTextureBias, // HLSL texture bias: will be lowered to EOpTexture
  892. EOpAsDouble, // slightly different from EOpUint64BitsToDouble
  893. EOpD3DCOLORtoUBYTE4, // convert and swizzle 4-component color to UBYTE4 range
  894. EOpMethodSample, // Texture object methods. These are translated to existing
  895. EOpMethodSampleBias, // AST methods, and exist to represent HLSL semantics until that
  896. EOpMethodSampleCmp, // translation is performed. See HlslParseContext::decomposeSampleMethods().
  897. EOpMethodSampleCmpLevelZero, // ...
  898. EOpMethodSampleGrad, // ...
  899. EOpMethodSampleLevel, // ...
  900. EOpMethodLoad, // ...
  901. EOpMethodGetDimensions, // ...
  902. EOpMethodGetSamplePosition, // ...
  903. EOpMethodGather, // ...
  904. EOpMethodCalculateLevelOfDetail, // ...
  905. EOpMethodCalculateLevelOfDetailUnclamped, // ...
  906. // Load already defined above for textures
  907. EOpMethodLoad2, // Structured buffer object methods. These are translated to existing
  908. EOpMethodLoad3, // AST methods, and exist to represent HLSL semantics until that
  909. EOpMethodLoad4, // translation is performed. See HlslParseContext::decomposeSampleMethods().
  910. EOpMethodStore, // ...
  911. EOpMethodStore2, // ...
  912. EOpMethodStore3, // ...
  913. EOpMethodStore4, // ...
  914. EOpMethodIncrementCounter, // ...
  915. EOpMethodDecrementCounter, // ...
  916. // EOpMethodAppend is defined for geo shaders below
  917. EOpMethodConsume,
  918. // SM5 texture methods
  919. EOpMethodGatherRed, // These are covered under the above EOpMethodSample comment about
  920. EOpMethodGatherGreen, // translation to existing AST opcodes. They exist temporarily
  921. EOpMethodGatherBlue, // because HLSL arguments are slightly different.
  922. EOpMethodGatherAlpha, // ...
  923. EOpMethodGatherCmp, // ...
  924. EOpMethodGatherCmpRed, // ...
  925. EOpMethodGatherCmpGreen, // ...
  926. EOpMethodGatherCmpBlue, // ...
  927. EOpMethodGatherCmpAlpha, // ...
  928. // geometry methods
  929. EOpMethodAppend, // Geometry shader methods
  930. EOpMethodRestartStrip, // ...
  931. // matrix
  932. EOpMatrixSwizzle, // select multiple matrix components (non-column)
  933. // SM6 wave ops
  934. EOpWaveGetLaneCount, // Will decompose to gl_SubgroupSize.
  935. EOpWaveGetLaneIndex, // Will decompose to gl_SubgroupInvocationID.
  936. EOpWaveActiveCountBits, // Will decompose to subgroupBallotBitCount(subgroupBallot()).
  937. EOpWavePrefixCountBits, // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).
  938. // Shader Clock Ops
  939. EOpReadClockSubgroupKHR,
  940. EOpReadClockDeviceKHR,
  941. };
  942. class TIntermTraverser;
  943. class TIntermOperator;
  944. class TIntermAggregate;
  945. class TIntermUnary;
  946. class TIntermBinary;
  947. class TIntermConstantUnion;
  948. class TIntermSelection;
  949. class TIntermSwitch;
  950. class TIntermBranch;
  951. class TIntermTyped;
  952. class TIntermMethod;
  953. class TIntermSymbol;
  954. class TIntermLoop;
  955. } // end namespace glslang
  956. //
  957. // Base class for the tree nodes
  958. //
  959. // (Put outside the glslang namespace, as it's used as part of the external interface.)
  960. //
  961. class TIntermNode {
  962. public:
  963. POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
  964. TIntermNode() { loc.init(); }
  965. virtual const glslang::TSourceLoc& getLoc() const { return loc; }
  966. virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
  967. virtual void traverse(glslang::TIntermTraverser*) = 0;
  968. virtual glslang::TIntermTyped* getAsTyped() { return nullptr; }
  969. virtual glslang::TIntermOperator* getAsOperator() { return nullptr; }
  970. virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return nullptr; }
  971. virtual glslang::TIntermAggregate* getAsAggregate() { return nullptr; }
  972. virtual glslang::TIntermUnary* getAsUnaryNode() { return nullptr; }
  973. virtual glslang::TIntermBinary* getAsBinaryNode() { return nullptr; }
  974. virtual glslang::TIntermSelection* getAsSelectionNode() { return nullptr; }
  975. virtual glslang::TIntermSwitch* getAsSwitchNode() { return nullptr; }
  976. virtual glslang::TIntermMethod* getAsMethodNode() { return nullptr; }
  977. virtual glslang::TIntermSymbol* getAsSymbolNode() { return nullptr; }
  978. virtual glslang::TIntermBranch* getAsBranchNode() { return nullptr; }
  979. virtual glslang::TIntermLoop* getAsLoopNode() { return nullptr; }
  980. virtual const glslang::TIntermTyped* getAsTyped() const { return nullptr; }
  981. virtual const glslang::TIntermOperator* getAsOperator() const { return nullptr; }
  982. virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return nullptr; }
  983. virtual const glslang::TIntermAggregate* getAsAggregate() const { return nullptr; }
  984. virtual const glslang::TIntermUnary* getAsUnaryNode() const { return nullptr; }
  985. virtual const glslang::TIntermBinary* getAsBinaryNode() const { return nullptr; }
  986. virtual const glslang::TIntermSelection* getAsSelectionNode() const { return nullptr; }
  987. virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return nullptr; }
  988. virtual const glslang::TIntermMethod* getAsMethodNode() const { return nullptr; }
  989. virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return nullptr; }
  990. virtual const glslang::TIntermBranch* getAsBranchNode() const { return nullptr; }
  991. virtual const glslang::TIntermLoop* getAsLoopNode() const { return nullptr; }
  992. virtual ~TIntermNode() { }
  993. protected:
  994. TIntermNode(const TIntermNode&);
  995. TIntermNode& operator=(const TIntermNode&);
  996. glslang::TSourceLoc loc;
  997. };
  998. namespace glslang {
  999. //
  1000. // This is just to help yacc.
  1001. //
  1002. struct TIntermNodePair {
  1003. TIntermNode* node1;
  1004. TIntermNode* node2;
  1005. };
  1006. //
  1007. // Intermediate class for nodes that have a type.
  1008. //
  1009. class TIntermTyped : public TIntermNode {
  1010. public:
  1011. TIntermTyped(const TType& t) { type.shallowCopy(t); }
  1012. TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }
  1013. virtual TIntermTyped* getAsTyped() { return this; }
  1014. virtual const TIntermTyped* getAsTyped() const { return this; }
  1015. virtual void setType(const TType& t) { type.shallowCopy(t); }
  1016. virtual const TType& getType() const { return type; }
  1017. virtual TType& getWritableType() { return type; }
  1018. virtual TBasicType getBasicType() const { return type.getBasicType(); }
  1019. virtual TQualifier& getQualifier() { return type.getQualifier(); }
  1020. virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
  1021. virtual TArraySizes* getArraySizes() { return type.getArraySizes(); }
  1022. virtual const TArraySizes* getArraySizes() const { return type.getArraySizes(); }
  1023. virtual void propagatePrecision(TPrecisionQualifier);
  1024. virtual int getVectorSize() const { return type.getVectorSize(); }
  1025. virtual int getMatrixCols() const { return type.getMatrixCols(); }
  1026. virtual int getMatrixRows() const { return type.getMatrixRows(); }
  1027. virtual bool isMatrix() const { return type.isMatrix(); }
  1028. virtual bool isArray() const { return type.isArray(); }
  1029. virtual bool isVector() const { return type.isVector(); }
  1030. virtual bool isScalar() const { return type.isScalar(); }
  1031. virtual bool isStruct() const { return type.isStruct(); }
  1032. virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
  1033. virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
  1034. bool isAtomic() const { return type.isAtomic(); }
  1035. bool isReference() const { return type.isReference(); }
  1036. TString getCompleteString(bool enhanced = false) const { return type.getCompleteString(enhanced); }
  1037. protected:
  1038. TIntermTyped& operator=(const TIntermTyped&);
  1039. TType type;
  1040. };
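// A small usage sketch (illustrative only): query the result type of any typed node.
//   if (TIntermTyped* typed = node->getAsTyped()) {
//       if (typed->isVector() && typed->getBasicType() == EbtFloat)
//           int components = typed->getVectorSize();  // 2, 3, or 4
//   }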
  1041. //
  1042. // Handle for, do-while, and while loops.
  1043. //
  1044. class TIntermLoop : public TIntermNode {
  1045. public:
  1046. TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
  1047. body(aBody),
  1048. test(aTest),
  1049. terminal(aTerminal),
  1050. first(testFirst),
  1051. unroll(false),
  1052. dontUnroll(false),
  1053. dependency(0),
  1054. minIterations(0),
  1055. maxIterations(iterationsInfinite),
  1056. iterationMultiple(1),
  1057. peelCount(0),
  1058. partialCount(0)
  1059. { }
  1060. virtual TIntermLoop* getAsLoopNode() { return this; }
  1061. virtual const TIntermLoop* getAsLoopNode() const { return this; }
  1062. virtual void traverse(TIntermTraverser*);
  1063. TIntermNode* getBody() const { return body; }
  1064. TIntermTyped* getTest() const { return test; }
  1065. TIntermTyped* getTerminal() const { return terminal; }
  1066. bool testFirst() const { return first; }
  1067. void setUnroll() { unroll = true; }
  1068. void setDontUnroll() {
  1069. dontUnroll = true;
  1070. peelCount = 0;
  1071. partialCount = 0;
  1072. }
  1073. bool getUnroll() const { return unroll; }
  1074. bool getDontUnroll() const { return dontUnroll; }
  1075. static const unsigned int dependencyInfinite = 0xFFFFFFFF;
  1076. static const unsigned int iterationsInfinite = 0xFFFFFFFF;
  1077. void setLoopDependency(int d) { dependency = d; }
  1078. int getLoopDependency() const { return dependency; }
  1079. void setMinIterations(unsigned int v) { minIterations = v; }
  1080. unsigned int getMinIterations() const { return minIterations; }
  1081. void setMaxIterations(unsigned int v) { maxIterations = v; }
  1082. unsigned int getMaxIterations() const { return maxIterations; }
  1083. void setIterationMultiple(unsigned int v) { iterationMultiple = v; }
  1084. unsigned int getIterationMultiple() const { return iterationMultiple; }
  1085. void setPeelCount(unsigned int v) {
  1086. peelCount = v;
  1087. dontUnroll = false;
  1088. }
  1089. unsigned int getPeelCount() const { return peelCount; }
  1090. void setPartialCount(unsigned int v) {
  1091. partialCount = v;
  1092. dontUnroll = false;
  1093. }
  1094. unsigned int getPartialCount() const { return partialCount; }
  1095. protected:
  1096. TIntermNode* body; // code to loop over
  1097. TIntermTyped* test; // exit condition associated with loop, could be 0 for 'for' loops
  1098. TIntermTyped* terminal; // exists for for-loops
  1099. bool first; // true for while and for, not for do-while
  1100. bool unroll; // true if unroll requested
  1101. bool dontUnroll; // true if request to not unroll
  1102. unsigned int dependency; // loop dependency hint; 0 means not set or unknown
  1103. unsigned int minIterations; // as per the SPIR-V specification
  1104. unsigned int maxIterations; // as per the SPIR-V specification
  1105. unsigned int iterationMultiple; // as per the SPIR-V specification
  1106. unsigned int peelCount; // as per the SPIR-V specification
  1107. unsigned int partialCount; // as per the SPIR-V specification
  1108. };
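// Construction sketch (illustrative): the same class represents all three loop forms,
// distinguished by 'testFirst' and the optional terminal expression:
//   new TIntermLoop(body, cond, increment, /*testFirst=*/true);   // for (...; cond; increment) body
//   new TIntermLoop(body, cond, nullptr,   /*testFirst=*/true);   // while (cond) body
//   new TIntermLoop(body, cond, nullptr,   /*testFirst=*/false);  // do body while (cond)
// Note that setPeelCount()/setPartialCount() clear dontUnroll, and setDontUnroll() clears
// both counts, so the most recently applied hint wins.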
  1109. //
  1110. // Handle case, break, continue, return, and kill.
  1111. //
  1112. class TIntermBranch : public TIntermNode {
  1113. public:
  1114. TIntermBranch(TOperator op, TIntermTyped* e) :
  1115. flowOp(op),
  1116. expression(e) { }
  1117. virtual TIntermBranch* getAsBranchNode() { return this; }
  1118. virtual const TIntermBranch* getAsBranchNode() const { return this; }
  1119. virtual void traverse(TIntermTraverser*);
  1120. TOperator getFlowOp() const { return flowOp; }
  1121. TIntermTyped* getExpression() const { return expression; }
  1122. void setExpression(TIntermTyped* pExpression) { expression = pExpression; }
  1123. void updatePrecision(TPrecisionQualifier parentPrecision);
  1124. protected:
  1125. TOperator flowOp;
  1126. TIntermTyped* expression;
  1127. };
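// Usage sketch (illustrative): the expression operand is only meaningful for some flow ops.
//   new TIntermBranch(EOpReturn, returnValue);  // 'return returnValue;'
//   new TIntermBranch(EOpBreak,  nullptr);      // 'break;'
//   new TIntermBranch(EOpCase,   labelValue);   // 'case labelValue:'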
  1128. //
  1129. // Represent method names before seeing their calling signature
  1130. // or resolving them to operations. Just an expression as the base object
  1131. // and a textual name.
  1132. //
  1133. class TIntermMethod : public TIntermTyped {
  1134. public:
  1135. TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
  1136. virtual TIntermMethod* getAsMethodNode() { return this; }
  1137. virtual const TIntermMethod* getAsMethodNode() const { return this; }
  1138. virtual const TString& getMethodName() const { return method; }
  1139. virtual TIntermTyped* getObject() const { return object; }
  1140. virtual void traverse(TIntermTraverser*);
  1141. protected:
  1142. TIntermTyped* object;
  1143. TString method;
  1144. };
  1145. //
  1146. // Nodes that correspond to symbols or constants in the source code.
  1147. //
  1148. class TIntermSymbol : public TIntermTyped {
  1149. public:
  1150. // If a symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym. If sym comes from
  1151. // the per-process threadPoolAllocator, this causes increased memory usage per compile;
  1152. // it is essential to use "symbol = sym" to assign to symbol.
  1153. TIntermSymbol(long long i, const TString& n, const TType& t)
  1154. : TIntermTyped(t), id(i),
  1155. #ifndef GLSLANG_WEB
  1156. flattenSubset(-1),
  1157. #endif
  1158. constSubtree(nullptr)
  1159. { name = n; }
  1160. virtual long long getId() const { return id; }
  1161. virtual void changeId(long long i) { id = i; }
  1162. virtual const TString& getName() const { return name; }
  1163. virtual void traverse(TIntermTraverser*);
  1164. virtual TIntermSymbol* getAsSymbolNode() { return this; }
  1165. virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
  1166. void setConstArray(const TConstUnionArray& c) { constArray = c; }
  1167. const TConstUnionArray& getConstArray() const { return constArray; }
  1168. void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
  1169. TIntermTyped* getConstSubtree() const { return constSubtree; }
  1170. #ifndef GLSLANG_WEB
  1171. void setFlattenSubset(int subset) { flattenSubset = subset; }
  1172. virtual const TString& getAccessName() const;
  1173. int getFlattenSubset() const { return flattenSubset; } // -1 means full object
  1174. #endif
  1175. // This is meant for cases where a node has already been constructed, and
  1176. // later on, it becomes necessary to switch to a different symbol.
  1177. virtual void switchId(long long newId) { id = newId; }
  1178. protected:
  1179. long long id; // the unique id of the symbol this node represents
  1180. #ifndef GLSLANG_WEB
  1181. int flattenSubset; // how deeply the flattened object rooted at id has been dereferenced
  1182. #endif
  1183. TString name; // the name of the symbol this node represents
  1184. TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
  1185. TIntermTyped* constSubtree;
  1186. };
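// Construction sketch (illustrative; the id, name, and type here are invented for illustration):
//   TIntermSymbol* sym = new TIntermSymbol(/*id*/ 42, TString("color"), TType(EbtFloat, EvqTemporary, 4));
//   sym->getId();    // 42
//   sym->getName();  // "color"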
  1187. class TIntermConstantUnion : public TIntermTyped {
  1188. public:
  1189. TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
  1190. const TConstUnionArray& getConstArray() const { return constArray; }
  1191. virtual TIntermConstantUnion* getAsConstantUnion() { return this; }
  1192. virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
  1193. virtual void traverse(TIntermTraverser*);
  1194. virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
  1195. virtual TIntermTyped* fold(TOperator, const TType&) const;
  1196. void setLiteral() { literal = true; }
  1197. void setExpression() { literal = false; }
  1198. bool isLiteral() const { return literal; }
  1199. protected:
  1200. TIntermConstantUnion& operator=(const TIntermConstantUnion&);
  1201. const TConstUnionArray constArray;
  1202. bool literal; // true if node represents a literal in the source code
  1203. };
  1204. // Represent the independent aspects of a texturing TOperator
  1205. struct TCrackedTextureOp {
  1206. bool query;
  1207. bool proj;
  1208. bool lod;
  1209. bool fetch;
  1210. bool offset;
  1211. bool offsets;
  1212. bool gather;
  1213. bool grad;
  1214. bool subpass;
  1215. bool lodClamp;
  1216. bool fragMask;
  1217. };
  1218. //
  1219. // Intermediate class for node types that hold operators.
  1220. //
  1221. class TIntermOperator : public TIntermTyped {
  1222. public:
  1223. virtual TIntermOperator* getAsOperator() { return this; }
  1224. virtual const TIntermOperator* getAsOperator() const { return this; }
  1225. TOperator getOp() const { return op; }
  1226. void setOp(TOperator newOp) { op = newOp; }
  1227. bool modifiesState() const;
  1228. bool isConstructor() const;
  1229. bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; }
  1230. bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
  1231. #ifdef GLSLANG_WEB
  1232. bool isImage() const { return false; }
  1233. bool isSparseTexture() const { return false; }
  1234. bool isImageFootprint() const { return false; }
  1235. bool isSparseImage() const { return false; }
  1236. bool isSubgroup() const { return false; }
  1237. #else
  1238. bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
  1239. bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
  1240. bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
  1241. bool isSparseImage() const { return op == EOpSparseImageLoad; }
  1242. bool isSubgroup() const { return op > EOpSubgroupGuardStart && op < EOpSubgroupGuardStop; }
  1243. #endif
  1244. void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
  1245. TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
  1246. operationPrecision :
  1247. type.getQualifier().precision; }
  1248. TString getCompleteString() const
  1249. {
  1250. TString cs = type.getCompleteString();
  1251. if (getOperationPrecision() != type.getQualifier().precision) {
  1252. cs += ", operation at ";
  1253. cs += GetPrecisionQualifierString(getOperationPrecision());
  1254. }
  1255. return cs;
  1256. }
  1257. // Crack the op into the individual dimensions of texturing operation.
  1258. void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
  1259. {
  1260. cracked.query = false;
  1261. cracked.proj = false;
  1262. cracked.lod = false;
  1263. cracked.fetch = false;
  1264. cracked.offset = false;
  1265. cracked.offsets = false;
  1266. cracked.gather = false;
  1267. cracked.grad = false;
  1268. cracked.subpass = false;
  1269. cracked.lodClamp = false;
  1270. cracked.fragMask = false;
  1271. switch (op) {
  1272. case EOpImageQuerySize:
  1273. case EOpImageQuerySamples:
  1274. case EOpTextureQuerySize:
  1275. case EOpTextureQueryLod:
  1276. case EOpTextureQueryLevels:
  1277. case EOpTextureQuerySamples:
  1278. case EOpSparseTexelsResident:
  1279. cracked.query = true;
  1280. break;
  1281. case EOpTexture:
  1282. case EOpSparseTexture:
  1283. break;
  1284. case EOpTextureProj:
  1285. cracked.proj = true;
  1286. break;
  1287. case EOpTextureLod:
  1288. case EOpSparseTextureLod:
  1289. cracked.lod = true;
  1290. break;
  1291. case EOpTextureOffset:
  1292. case EOpSparseTextureOffset:
            cracked.offset = true;
            break;
        case EOpTextureFetch:
        case EOpSparseTextureFetch:
            cracked.fetch = true;
            if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureFetchOffset:
        case EOpSparseTextureFetchOffset:
            cracked.fetch = true;
            cracked.offset = true;
            if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureProjOffset:
            cracked.offset = true;
            cracked.proj = true;
            break;
        case EOpTextureLodOffset:
        case EOpSparseTextureLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureProjLod:
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureProjLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureGrad:
        case EOpSparseTextureGrad:
            cracked.grad = true;
            break;
        case EOpTextureGradOffset:
        case EOpSparseTextureGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            break;
        case EOpTextureProjGrad:
            cracked.grad = true;
            cracked.proj = true;
            break;
        case EOpTextureProjGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            cracked.proj = true;
            break;
#ifndef GLSLANG_WEB
        case EOpTextureClamp:
        case EOpSparseTextureClamp:
            cracked.lodClamp = true;
            break;
        case EOpTextureOffsetClamp:
        case EOpSparseTextureOffsetClamp:
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGradClamp:
        case EOpSparseTextureGradClamp:
            cracked.grad = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGradOffsetClamp:
        case EOpSparseTextureGradOffsetClamp:
            cracked.grad = true;
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGather:
        case EOpSparseTextureGather:
            cracked.gather = true;
            break;
        case EOpTextureGatherOffset:
        case EOpSparseTextureGatherOffset:
            cracked.gather = true;
            cracked.offset = true;
            break;
        case EOpTextureGatherOffsets:
        case EOpSparseTextureGatherOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            break;
        case EOpTextureGatherLod:
        case EOpSparseTextureGatherLod:
            cracked.gather = true;
            cracked.lod = true;
            break;
        case EOpTextureGatherLodOffset:
        case EOpSparseTextureGatherLodOffset:
            cracked.gather = true;
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureGatherLodOffsets:
        case EOpSparseTextureGatherLodOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            cracked.lod = true;
            break;
        case EOpImageLoadLod:
        case EOpImageStoreLod:
        case EOpSparseImageLoadLod:
            cracked.lod = true;
            break;
        case EOpFragmentMaskFetch:
            cracked.subpass = sampler.dim == EsdSubpass;
            cracked.fragMask = true;
            break;
        case EOpFragmentFetch:
            cracked.subpass = sampler.dim == EsdSubpass;
            cracked.fragMask = true;
            break;
        case EOpImageSampleFootprintNV:
            break;
        case EOpImageSampleFootprintClampNV:
            cracked.lodClamp = true;
            break;
        case EOpImageSampleFootprintLodNV:
            cracked.lod = true;
            break;
        case EOpImageSampleFootprintGradNV:
            cracked.grad = true;
            break;
        case EOpImageSampleFootprintGradClampNV:
            cracked.lodClamp = true;
            cracked.grad = true;
            break;
        case EOpSubpassLoad:
        case EOpSubpassLoadMS:
            cracked.subpass = true;
            break;
#endif
        default:
            break;
        }
    }

protected:
    TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {}
    TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {}

    TOperator op;
    // The result precision is in the inherited TType, and is usually meant to be both
    // the operation precision and the result precision. However, some more complex things,
    // like built-in function calls, distinguish between the two, in which case a non-EpqNone
    // 'operationPrecision' overrides the result precision as far as operation precision
    // is concerned.
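    // For example (illustrative only, not a normative case from this header): a built-in
    // call whose declared result type is highp may still carry operationPrecision ==
    // EpqMedium, meaning the operation itself may be evaluated at medium precision even
    // though its result is treated as highp.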
    TPrecisionQualifier operationPrecision;
};

//
// Nodes for all the basic binary math operators.
//
class TIntermBinary : public TIntermOperator {
public:
    TIntermBinary(TOperator o) : TIntermOperator(o) {}
    virtual void traverse(TIntermTraverser*);
    virtual void setLeft(TIntermTyped* n) { left = n; }
    virtual void setRight(TIntermTyped* n) { right = n; }
    virtual TIntermTyped* getLeft() const { return left; }
    virtual TIntermTyped* getRight() const { return right; }
    virtual TIntermBinary* getAsBinaryNode() { return this; }
    virtual const TIntermBinary* getAsBinaryNode() const { return this; }
    virtual void updatePrecision();

protected:
    TIntermTyped* left;
    TIntermTyped* right;
};

//
// Nodes for unary math operators.
//
class TIntermUnary : public TIntermOperator {
public:
    TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(nullptr) {}
    TIntermUnary(TOperator o) : TIntermOperator(o), operand(nullptr) {}
    virtual void traverse(TIntermTraverser*);
    virtual void setOperand(TIntermTyped* o) { operand = o; }
    virtual TIntermTyped* getOperand() { return operand; }
    virtual const TIntermTyped* getOperand() const { return operand; }
    virtual TIntermUnary* getAsUnaryNode() { return this; }
    virtual const TIntermUnary* getAsUnaryNode() const { return this; }
    virtual void updatePrecision();
#ifndef GLSLANG_WEB
    void setSpirvInstruction(const TSpirvInstruction& inst) { spirvInst = inst; }
    const TSpirvInstruction& getSpirvInstruction() const { return spirvInst; }
#endif

protected:
    TIntermTyped* operand;
#ifndef GLSLANG_WEB
    TSpirvInstruction spirvInst;
#endif
};

typedef TVector<TIntermNode*> TIntermSequence;
typedef TVector<TStorageQualifier> TQualifierList;

//
// Nodes that operate on an arbitrary sized set of children.
//
class TIntermAggregate : public TIntermOperator {
public:
    TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) { }
    TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) { }
    ~TIntermAggregate() { delete pragmaTable; }
    virtual TIntermAggregate* getAsAggregate() { return this; }
    virtual const TIntermAggregate* getAsAggregate() const { return this; }
    virtual void updatePrecision();
    virtual void setOperator(TOperator o) { op = o; }
    virtual TIntermSequence& getSequence() { return sequence; }
    virtual const TIntermSequence& getSequence() const { return sequence; }
    virtual void setName(const TString& n) { name = n; }
    virtual const TString& getName() const { return name; }
    virtual void traverse(TIntermTraverser*);
    virtual void setUserDefined() { userDefined = true; }
    virtual bool isUserDefined() { return userDefined; }
    virtual TQualifierList& getQualifierList() { return qualifier; }
    virtual const TQualifierList& getQualifierList() const { return qualifier; }
    void setOptimize(bool o) { optimize = o; }
    void setDebug(bool d) { debug = d; }
    bool getOptimize() const { return optimize; }
    bool getDebug() const { return debug; }
    void setPragmaTable(const TPragmaTable& pTable);
    const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
#ifndef GLSLANG_WEB
    void setSpirvInstruction(const TSpirvInstruction& inst) { spirvInst = inst; }
    const TSpirvInstruction& getSpirvInstruction() const { return spirvInst; }
#endif

protected:
    TIntermAggregate(const TIntermAggregate&); // disallow copy constructor
    TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
    TIntermSequence sequence;
    TQualifierList qualifier;
    TString name;
    bool userDefined; // used for user defined function names
    bool optimize;
    bool debug;
    TPragmaTable* pragmaTable;
#ifndef GLSLANG_WEB
    TSpirvInstruction spirvInst;
#endif
};

//
// For if tests.
//
class TIntermSelection : public TIntermTyped {
public:
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
        TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB),
        shortCircuit(true),
        flatten(false), dontFlatten(false) {}
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
        TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB),
        shortCircuit(true),
        flatten(false), dontFlatten(false) {}
    virtual void traverse(TIntermTraverser*);
    virtual TIntermTyped* getCondition() const { return condition; }
    virtual void setCondition(TIntermTyped* c) { condition = c; }
    virtual TIntermNode* getTrueBlock() const { return trueBlock; }
    virtual void setTrueBlock(TIntermTyped* tb) { trueBlock = tb; }
    virtual TIntermNode* getFalseBlock() const { return falseBlock; }
    virtual void setFalseBlock(TIntermTyped* fb) { falseBlock = fb; }
    virtual TIntermSelection* getAsSelectionNode() { return this; }
    virtual const TIntermSelection* getAsSelectionNode() const { return this; }

    void setNoShortCircuit() { shortCircuit = false; }
    bool getShortCircuit() const { return shortCircuit; }

    void setFlatten() { flatten = true; }
    void setDontFlatten() { dontFlatten = true; }
    bool getFlatten() const { return flatten; }
    bool getDontFlatten() const { return dontFlatten; }

protected:
    TIntermTyped* condition;
    TIntermNode* trueBlock;
    TIntermNode* falseBlock;
    bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not
    bool flatten;      // true if flatten requested
    bool dontFlatten;  // true if requested to not flatten
};

//
// For switch statements. Designed use is that a switch will have a sequence of nodes
// that are either case/default nodes or a *single* node that represents all the code
// (if any) in between consecutive case/defaults. So, a traversal need only deal with
// 0 or 1 nodes per case/default statement.
//
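// An illustrative sketch of that layout (hypothetical shader snippet, not normative): a switch like
//
//     switch (i) {
//     case 0:  a = 1; b = 2; break;
//     default: a = 0;        break;
//     }
//
// yields a body whose sequence is roughly
//     [ case(0), { a = 1; b = 2; break; }, default, { a = 0; break; } ],
// i.e. each case/default node is followed by at most one node holding the code for that label.
//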
class TIntermSwitch : public TIntermNode {
public:
    TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b),
        flatten(false), dontFlatten(false) {}
    virtual void traverse(TIntermTraverser*);
    virtual TIntermNode* getCondition() const { return condition; }
    virtual TIntermAggregate* getBody() const { return body; }
    virtual TIntermSwitch* getAsSwitchNode() { return this; }
    virtual const TIntermSwitch* getAsSwitchNode() const { return this; }

    void setFlatten() { flatten = true; }
    void setDontFlatten() { dontFlatten = true; }
    bool getFlatten() const { return flatten; }
    bool getDontFlatten() const { return dontFlatten; }

protected:
    TIntermTyped* condition;
    TIntermAggregate* body;
    bool flatten;     // true if flatten requested
    bool dontFlatten; // true if requested to not flatten
};

enum TVisit
{
    EvPreVisit,
    EvInVisit,
    EvPostVisit
};

//
// For traversing the tree. Users should derive from this,
// put their traversal-specific data in it, and then pass
// it to a Traverse method.
//
// When using this, just fill in the methods for the nodes you want visited.
// Return false from a pre-visit to skip visiting that node's subtree.
//
// Explicitly set postVisit to true if you want post visiting; otherwise,
// filled-in methods will only be called at pre-visit time (before processing
// the subtree). Similarly, set inVisit for in-order visiting of nodes with
// multiple children.
//
// If you only want post-visits, explicitly turn off preVisit (and inVisit)
// and turn on postVisit.
//
// In general, for the visit*() methods, return true from interior nodes
// to have the traversal continue on to children.
//
// If you process children yourself, or don't want them processed, return false.
//
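// As an illustrative sketch only (the derived class and 'root' below are hypothetical,
// not part of this header), a traverser that counts binary nodes could look like:
//
//     class TCountBinaryTraverser : public TIntermTraverser {
//     public:
//         TCountBinaryTraverser() : TIntermTraverser(true, false, false), count(0) { }
//         virtual bool visitBinary(TVisit, TIntermBinary*) { ++count; return true; }
//         int count;
//     };
//
//     TCountBinaryTraverser counter;
//     root->traverse(&counter);  // pre-visit only; returning true descends into children
//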
class TIntermTraverser {
public:
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
    TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
            preVisit(preVisit),
            inVisit(inVisit),
            postVisit(postVisit),
            rightToLeft(rightToLeft),
            depth(0),
            maxDepth(0) { }
    virtual ~TIntermTraverser() { }

    virtual void visitSymbol(TIntermSymbol*) { }
    virtual void visitConstantUnion(TIntermConstantUnion*) { }
    virtual bool visitBinary(TVisit, TIntermBinary*) { return true; }
    virtual bool visitUnary(TVisit, TIntermUnary*) { return true; }
    virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
    virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
    virtual bool visitLoop(TVisit, TIntermLoop*) { return true; }
    virtual bool visitBranch(TVisit, TIntermBranch*) { return true; }
    virtual bool visitSwitch(TVisit, TIntermSwitch*) { return true; }

    int getMaxDepth() const { return maxDepth; }

    void incrementDepth(TIntermNode *current)
    {
        depth++;
        maxDepth = (std::max)(maxDepth, depth);
        path.push_back(current);
    }

    void decrementDepth()
    {
        depth--;
        path.pop_back();
    }

    TIntermNode *getParentNode()
    {
        return path.size() == 0 ? nullptr : path.back();
    }

    const bool preVisit;
    const bool inVisit;
    const bool postVisit;
    const bool rightToLeft;

protected:
    TIntermTraverser& operator=(TIntermTraverser&);

    int depth;
    int maxDepth;

    // All the nodes from root to the current node's parent during traversing.
    TVector<TIntermNode *> path;
};

// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
// sized with the same symbol, involving no operations"
inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
{
    return node1->getAsSymbolNode() && node2->getAsSymbolNode() &&
           node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId();
}
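
// An illustrative example of the rule above (hypothetical shader snippet, not part of this header):
//
//     layout(constant_id = 0) const int N = 8;
//     float a[N];      // sized with the symbol N
//     float b[N];      // same symbol, no operations -> same array type as 'a'
//     float c[N + 1];  // size involves an operation -> not the same type as 'a'
//
// SameSpecializationConstants() returns true only for size expressions like those of 'a' and 'b',
// where both nodes are plain references to the same specialization-constant symbol.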

} // end namespace glslang

#endif // __INTERMEDIATE_H