spirv_parser.cpp 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466
  1. /*
  2. * Copyright 2018-2021 Arm Limited
  3. * SPDX-License-Identifier: Apache-2.0 OR MIT
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. */
  17. /*
  18. * At your option, you may choose to accept this material under either:
  19. * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
  20. * 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
  21. */
  22. #include "spirv_parser.hpp"
  23. #include <assert.h>
  24. using namespace std;
  25. using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
  26. namespace SPIRV_CROSS_NAMESPACE
  27. {
// Takes ownership of a SPIR-V binary; the word vector is moved into the IR.
Parser::Parser(vector<uint32_t> spirv)
{
	ir.spirv = std::move(spirv);
}
// Copies word_count 32-bit words from spirv_data into the IR's SPIR-V buffer.
Parser::Parser(const uint32_t *spirv_data, size_t word_count)
{
	ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
}
  36. static bool decoration_is_string(Decoration decoration)
  37. {
  38. switch (decoration)
  39. {
  40. case DecorationHlslSemanticGOOGLE:
  41. return true;
  42. default:
  43. return false;
  44. }
  45. }
  46. static inline uint32_t swap_endian(uint32_t v)
  47. {
  48. return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
  49. }
  50. static bool is_valid_spirv_version(uint32_t version)
  51. {
  52. switch (version)
  53. {
  54. // Allow v99 since it tends to just work.
  55. case 99:
  56. case 0x10000: // SPIR-V 1.0
  57. case 0x10100: // SPIR-V 1.1
  58. case 0x10200: // SPIR-V 1.2
  59. case 0x10300: // SPIR-V 1.3
  60. case 0x10400: // SPIR-V 1.4
  61. case 0x10500: // SPIR-V 1.5
  62. case 0x10600: // SPIR-V 1.6
  63. return true;
  64. default:
  65. return false;
  66. }
  67. }
// Top-level driver: validates the module header, slices the word stream into
// instructions, parses each one, then resolves forward-pointer type fixups and
// sanity-checks the final parser state.
void Parser::parse()
{
	auto &spirv = ir.spirv;

	auto len = spirv.size();
	// A SPIR-V module must contain at least the 5-word header.
	if (len < 5)
		SPIRV_CROSS_THROW("SPIRV file too small.");

	auto s = spirv.data();

	// Endian-swap if we need to.
	if (s[0] == swap_endian(MagicNumber))
		transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });

	if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
		SPIRV_CROSS_THROW("Invalid SPIRV format.");

	// Header word 3 is the ID bound (all IDs in the module are < bound).
	uint32_t bound = s[3];

	const uint32_t MaximumNumberOfIDs = 0x3fffff;
	if (bound > MaximumNumberOfIDs)
		SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n");

	ir.set_id_bounds(bound);

	// Instruction words start right after the 5-word header.
	uint32_t offset = 5;

	SmallVector<Instruction> instructions;

	// First pass: split the word stream into (opcode, operand range) records,
	// checking that every instruction stays within the binary.
	while (offset < len)
	{
		Instruction instr = {};
		// Each instruction's first word packs opcode (low 16 bits) and total
		// word count including that word (high 16 bits).
		instr.op = spirv[offset] & 0xffff;
		instr.count = (spirv[offset] >> 16) & 0xffff;

		if (instr.count == 0)
			SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");

		// Operands begin after the opcode word.
		instr.offset = offset + 1;
		instr.length = instr.count - 1;

		offset += instr.count;

		if (offset > spirv.size())
			SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");

		instructions.push_back(instr);
	}

	// Second pass: dispatch every instruction to the opcode-specific parser.
	for (auto &i : instructions)
		parse(i);

	// Types that copied from a base with forward_pointer set were recorded as
	// fixups during parsing; now that all types are known, copy the resolved
	// member/base information into the targets.
	for (auto &fixup : forward_pointer_fixups)
	{
		auto &target = get<SPIRType>(fixup.first);
		auto &source = get<SPIRType>(fixup.second);
		target.member_types = source.member_types;
		target.basetype = source.basetype;
		target.self = source.self;
	}
	forward_pointer_fixups.clear();

	// A well-formed module leaves no open function/block and has an entry point.
	if (current_function)
		SPIRV_CROSS_THROW("Function was not terminated.");
	if (current_block)
		SPIRV_CROSS_THROW("Block was not terminated.");
	if (ir.default_entry_point == 0)
		SPIRV_CROSS_THROW("There is no entry point in the SPIR-V module.");
}
  119. const uint32_t *Parser::stream(const Instruction &instr) const
  120. {
  121. // If we're not going to use any arguments, just return nullptr.
  122. // We want to avoid case where we return an out of range pointer
  123. // that trips debug assertions on some platforms.
  124. if (!instr.length)
  125. return nullptr;
  126. if (instr.offset + instr.length > ir.spirv.size())
  127. SPIRV_CROSS_THROW("Compiler::stream() out of range.");
  128. return &ir.spirv[instr.offset];
  129. }
  130. static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
  131. {
  132. string ret;
  133. for (uint32_t i = offset; i < spirv.size(); i++)
  134. {
  135. uint32_t w = spirv[i];
  136. for (uint32_t j = 0; j < 4; j++, w >>= 8)
  137. {
  138. char c = w & 0xff;
  139. if (c == '\0')
  140. return ret;
  141. ret += c;
  142. }
  143. }
  144. SPIRV_CROSS_THROW("String was not terminated before EOF");
  145. }
  146. void Parser::parse(const Instruction &instruction)
  147. {
  148. auto *ops = stream(instruction);
  149. auto op = static_cast<Op>(instruction.op);
  150. uint32_t length = instruction.length;
  151. // HACK for glslang that might emit OpEmitMeshTasksEXT followed by return / branch.
  152. // Instead of failing hard, just ignore it.
  153. if (ignore_trailing_block_opcodes)
  154. {
  155. ignore_trailing_block_opcodes = false;
  156. if (op == OpReturn || op == OpBranch || op == OpUnreachable)
  157. return;
  158. }
  159. switch (op)
  160. {
  161. case OpSourceContinued:
  162. case OpSourceExtension:
  163. case OpNop:
  164. case OpModuleProcessed:
  165. break;
  166. case OpString:
  167. {
  168. set<SPIRString>(ops[0], extract_string(ir.spirv, instruction.offset + 1));
  169. break;
  170. }
  171. case OpMemoryModel:
  172. ir.addressing_model = static_cast<AddressingModel>(ops[0]);
  173. ir.memory_model = static_cast<MemoryModel>(ops[1]);
  174. break;
  175. case OpSource:
  176. {
  177. ir.source.lang = static_cast<SourceLanguage>(ops[0]);
  178. switch (ir.source.lang)
  179. {
  180. case SourceLanguageESSL:
  181. ir.source.es = true;
  182. ir.source.version = ops[1];
  183. ir.source.known = true;
  184. ir.source.hlsl = false;
  185. break;
  186. case SourceLanguageGLSL:
  187. ir.source.es = false;
  188. ir.source.version = ops[1];
  189. ir.source.known = true;
  190. ir.source.hlsl = false;
  191. break;
  192. case SourceLanguageHLSL:
  193. // For purposes of cross-compiling, this is GLSL 450.
  194. ir.source.es = false;
  195. ir.source.version = 450;
  196. ir.source.known = true;
  197. ir.source.hlsl = true;
  198. break;
  199. default:
  200. ir.source.known = false;
  201. break;
  202. }
  203. break;
  204. }
  205. case OpUndef:
  206. {
  207. uint32_t result_type = ops[0];
  208. uint32_t id = ops[1];
  209. set<SPIRUndef>(id, result_type);
  210. if (current_block)
  211. current_block->ops.push_back(instruction);
  212. break;
  213. }
  214. case OpCapability:
  215. {
  216. uint32_t cap = ops[0];
  217. if (cap == CapabilityKernel)
  218. SPIRV_CROSS_THROW("Kernel capability not supported.");
  219. ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
  220. break;
  221. }
  222. case OpExtension:
  223. {
  224. auto ext = extract_string(ir.spirv, instruction.offset);
  225. ir.declared_extensions.push_back(std::move(ext));
  226. break;
  227. }
  228. case OpExtInstImport:
  229. {
  230. uint32_t id = ops[0];
  231. SPIRExtension::Extension spirv_ext = SPIRExtension::Unsupported;
  232. auto ext = extract_string(ir.spirv, instruction.offset + 1);
  233. if (ext == "GLSL.std.450")
  234. spirv_ext = SPIRExtension::GLSL;
  235. else if (ext == "DebugInfo")
  236. spirv_ext = SPIRExtension::SPV_debug_info;
  237. else if (ext == "SPV_AMD_shader_ballot")
  238. spirv_ext = SPIRExtension::SPV_AMD_shader_ballot;
  239. else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
  240. spirv_ext = SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter;
  241. else if (ext == "SPV_AMD_shader_trinary_minmax")
  242. spirv_ext = SPIRExtension::SPV_AMD_shader_trinary_minmax;
  243. else if (ext == "SPV_AMD_gcn_shader")
  244. spirv_ext = SPIRExtension::SPV_AMD_gcn_shader;
  245. else if (ext == "NonSemantic.DebugPrintf")
  246. spirv_ext = SPIRExtension::NonSemanticDebugPrintf;
  247. else if (ext == "NonSemantic.Shader.DebugInfo.100")
  248. spirv_ext = SPIRExtension::NonSemanticShaderDebugInfo;
  249. else if (ext.find("NonSemantic.") == 0)
  250. spirv_ext = SPIRExtension::NonSemanticGeneric;
  251. set<SPIRExtension>(id, spirv_ext);
  252. // Other SPIR-V extensions which have ExtInstrs are currently not supported.
  253. break;
  254. }
  255. case OpExtInst:
  256. case OpExtInstWithForwardRefsKHR:
  257. {
  258. // The SPIR-V debug information extended instructions might come at global scope.
  259. if (current_block)
  260. {
  261. current_block->ops.push_back(instruction);
  262. if (length >= 2)
  263. {
  264. const auto *type = maybe_get<SPIRType>(ops[0]);
  265. if (type)
  266. ir.load_type_width.insert({ ops[1], type->width });
  267. }
  268. }
  269. else if (op == OpExtInst)
  270. {
  271. // Don't want to deal with ForwardRefs here.
  272. auto &ext = get<SPIRExtension>(ops[2]);
  273. if (ext.ext == SPIRExtension::NonSemanticShaderDebugInfo)
  274. {
  275. // Parse global ShaderDebugInfo we care about.
  276. // Just forward the string information.
  277. if (ops[3] == SPIRExtension::DebugSource)
  278. set<SPIRString>(ops[1], get<SPIRString>(ops[4]).str);
  279. }
  280. }
  281. break;
  282. }
  283. case OpEntryPoint:
  284. {
  285. auto itr =
  286. ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
  287. extract_string(ir.spirv, instruction.offset + 2))));
  288. auto &e = itr.first->second;
  289. // Strings need nul-terminator and consume the whole word.
  290. uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);
  291. for (uint32_t i = strlen_words + 2; i < instruction.length; i++)
  292. e.interface_variables.push_back(ops[i]);
  293. // Set the name of the entry point in case OpName is not provided later.
  294. ir.set_name(ops[1], e.name);
  295. // If we don't have an entry, make the first one our "default".
  296. if (!ir.default_entry_point)
  297. ir.default_entry_point = ops[1];
  298. break;
  299. }
  300. case OpExecutionMode:
  301. {
  302. auto &execution = ir.entry_points[ops[0]];
  303. auto mode = static_cast<ExecutionMode>(ops[1]);
  304. execution.flags.set(mode);
  305. switch (mode)
  306. {
  307. case ExecutionModeInvocations:
  308. execution.invocations = ops[2];
  309. break;
  310. case ExecutionModeLocalSize:
  311. execution.workgroup_size.x = ops[2];
  312. execution.workgroup_size.y = ops[3];
  313. execution.workgroup_size.z = ops[4];
  314. break;
  315. case ExecutionModeOutputVertices:
  316. execution.output_vertices = ops[2];
  317. break;
  318. case ExecutionModeOutputPrimitivesEXT:
  319. execution.output_primitives = ops[2];
  320. break;
  321. case ExecutionModeSignedZeroInfNanPreserve:
  322. switch (ops[2])
  323. {
  324. case 8:
  325. execution.signed_zero_inf_nan_preserve_8 = true;
  326. break;
  327. case 16:
  328. execution.signed_zero_inf_nan_preserve_16 = true;
  329. break;
  330. case 32:
  331. execution.signed_zero_inf_nan_preserve_32 = true;
  332. break;
  333. case 64:
  334. execution.signed_zero_inf_nan_preserve_64 = true;
  335. break;
  336. default:
  337. SPIRV_CROSS_THROW("Invalid bit-width for SignedZeroInfNanPreserve.");
  338. }
  339. break;
  340. default:
  341. break;
  342. }
  343. break;
  344. }
  345. case OpExecutionModeId:
  346. {
  347. auto &execution = ir.entry_points[ops[0]];
  348. auto mode = static_cast<ExecutionMode>(ops[1]);
  349. execution.flags.set(mode);
  350. switch (mode)
  351. {
  352. case ExecutionModeLocalSizeId:
  353. execution.workgroup_size.id_x = ops[2];
  354. execution.workgroup_size.id_y = ops[3];
  355. execution.workgroup_size.id_z = ops[4];
  356. break;
  357. case ExecutionModeFPFastMathDefault:
  358. execution.fp_fast_math_defaults[ops[2]] = ops[3];
  359. break;
  360. default:
  361. break;
  362. }
  363. break;
  364. }
  365. case OpName:
  366. {
  367. uint32_t id = ops[0];
  368. ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
  369. break;
  370. }
  371. case OpMemberName:
  372. {
  373. uint32_t id = ops[0];
  374. uint32_t member = ops[1];
  375. ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
  376. break;
  377. }
  378. case OpDecorationGroup:
  379. {
  380. // Noop, this simply means an ID should be a collector of decorations.
  381. // The meta array is already a flat array of decorations which will contain the relevant decorations.
  382. break;
  383. }
  384. case OpGroupDecorate:
  385. {
  386. uint32_t group_id = ops[0];
  387. auto &decorations = ir.meta[group_id].decoration;
  388. auto &flags = decorations.decoration_flags;
  389. // Copies decorations from one ID to another. Only copy decorations which are set in the group,
  390. // i.e., we cannot just copy the meta structure directly.
  391. for (uint32_t i = 1; i < length; i++)
  392. {
  393. uint32_t target = ops[i];
  394. flags.for_each_bit([&](uint32_t bit) {
  395. auto decoration = static_cast<Decoration>(bit);
  396. if (decoration_is_string(decoration))
  397. {
  398. ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
  399. }
  400. else
  401. {
  402. ir.meta[target].decoration_word_offset[decoration] =
  403. ir.meta[group_id].decoration_word_offset[decoration];
  404. ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
  405. }
  406. });
  407. }
  408. break;
  409. }
  410. case OpGroupMemberDecorate:
  411. {
  412. uint32_t group_id = ops[0];
  413. auto &flags = ir.meta[group_id].decoration.decoration_flags;
  414. // Copies decorations from one ID to another. Only copy decorations which are set in the group,
  415. // i.e., we cannot just copy the meta structure directly.
  416. for (uint32_t i = 1; i + 1 < length; i += 2)
  417. {
  418. uint32_t target = ops[i + 0];
  419. uint32_t index = ops[i + 1];
  420. flags.for_each_bit([&](uint32_t bit) {
  421. auto decoration = static_cast<Decoration>(bit);
  422. if (decoration_is_string(decoration))
  423. ir.set_member_decoration_string(target, index, decoration,
  424. ir.get_decoration_string(group_id, decoration));
  425. else
  426. ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
  427. });
  428. }
  429. break;
  430. }
  431. case OpDecorate:
  432. case OpDecorateId:
  433. {
  434. // OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
  435. // so merge decorate and decorate-id here.
  436. uint32_t id = ops[0];
  437. auto decoration = static_cast<Decoration>(ops[1]);
  438. if (length >= 3)
  439. {
  440. ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
  441. ir.set_decoration(id, decoration, ops[2]);
  442. }
  443. else
  444. ir.set_decoration(id, decoration);
  445. break;
  446. }
  447. case OpDecorateStringGOOGLE:
  448. {
  449. uint32_t id = ops[0];
  450. auto decoration = static_cast<Decoration>(ops[1]);
  451. ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
  452. break;
  453. }
  454. case OpMemberDecorate:
  455. {
  456. uint32_t id = ops[0];
  457. uint32_t member = ops[1];
  458. auto decoration = static_cast<Decoration>(ops[2]);
  459. if (length >= 4)
  460. ir.set_member_decoration(id, member, decoration, ops[3]);
  461. else
  462. ir.set_member_decoration(id, member, decoration);
  463. break;
  464. }
  465. case OpMemberDecorateStringGOOGLE:
  466. {
  467. uint32_t id = ops[0];
  468. uint32_t member = ops[1];
  469. auto decoration = static_cast<Decoration>(ops[2]);
  470. ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
  471. break;
  472. }
  473. // Build up basic types.
  474. case OpTypeVoid:
  475. {
  476. uint32_t id = ops[0];
  477. auto &type = set<SPIRType>(id, op);
  478. type.basetype = SPIRType::Void;
  479. break;
  480. }
  481. case OpTypeBool:
  482. {
  483. uint32_t id = ops[0];
  484. auto &type = set<SPIRType>(id, op);
  485. type.basetype = SPIRType::Boolean;
  486. type.width = 1;
  487. break;
  488. }
  489. case OpTypeFloat:
  490. {
  491. uint32_t id = ops[0];
  492. uint32_t width = ops[1];
  493. auto &type = set<SPIRType>(id, op);
  494. if (width != 16 && width != 8 && length > 2)
  495. SPIRV_CROSS_THROW("Unrecognized FP encoding mode for OpTypeFloat.");
  496. if (width == 64)
  497. type.basetype = SPIRType::Double;
  498. else if (width == 32)
  499. type.basetype = SPIRType::Float;
  500. else if (width == 16)
  501. {
  502. if (length > 2)
  503. {
  504. if (ops[2] == FPEncodingBFloat16KHR)
  505. type.basetype = SPIRType::BFloat16;
  506. else
  507. SPIRV_CROSS_THROW("Unrecognized encoding for OpTypeFloat 16.");
  508. }
  509. else
  510. type.basetype = SPIRType::Half;
  511. }
  512. else if (width == 8)
  513. {
  514. if (length < 2)
  515. SPIRV_CROSS_THROW("Missing encoding for OpTypeFloat 8.");
  516. else if (ops[2] == FPEncodingFloat8E4M3EXT)
  517. type.basetype = SPIRType::FloatE4M3;
  518. else if (ops[2] == FPEncodingFloat8E5M2EXT)
  519. type.basetype = SPIRType::FloatE5M2;
  520. else
  521. SPIRV_CROSS_THROW("Invalid encoding for OpTypeFloat 8.");
  522. }
  523. else
  524. SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
  525. type.width = width;
  526. break;
  527. }
  528. case OpTypeInt:
  529. {
  530. uint32_t id = ops[0];
  531. uint32_t width = ops[1];
  532. bool signedness = ops[2] != 0;
  533. auto &type = set<SPIRType>(id, op);
  534. type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width);
  535. type.width = width;
  536. break;
  537. }
  538. // Build composite types by "inheriting".
  539. // NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
  540. // since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
  541. case OpTypeVector:
  542. {
  543. uint32_t id = ops[0];
  544. uint32_t vecsize = ops[2];
  545. auto &base = get<SPIRType>(ops[1]);
  546. auto &vecbase = set<SPIRType>(id, base);
  547. vecbase.op = op;
  548. vecbase.vecsize = vecsize;
  549. vecbase.self = id;
  550. vecbase.parent_type = ops[1];
  551. break;
  552. }
  553. case OpTypeMatrix:
  554. {
  555. uint32_t id = ops[0];
  556. uint32_t colcount = ops[2];
  557. auto &base = get<SPIRType>(ops[1]);
  558. auto &matrixbase = set<SPIRType>(id, base);
  559. matrixbase.op = op;
  560. matrixbase.columns = colcount;
  561. matrixbase.self = id;
  562. matrixbase.parent_type = ops[1];
  563. break;
  564. }
  565. case OpTypeCooperativeMatrixKHR:
  566. {
  567. uint32_t id = ops[0];
  568. auto &base = get<SPIRType>(ops[1]);
  569. auto &matrixbase = set<SPIRType>(id, base);
  570. matrixbase.op = op;
  571. matrixbase.ext.cooperative.scope_id = ops[2];
  572. matrixbase.ext.cooperative.rows_id = ops[3];
  573. matrixbase.ext.cooperative.columns_id = ops[4];
  574. matrixbase.ext.cooperative.use_id = ops[5];
  575. matrixbase.self = id;
  576. matrixbase.parent_type = ops[1];
  577. break;
  578. }
  579. case OpTypeCooperativeVectorNV:
  580. {
  581. uint32_t id = ops[0];
  582. auto &type = set<SPIRType>(id, op);
  583. type.basetype = SPIRType::CoopVecNV;
  584. type.op = op;
  585. type.ext.coopVecNV.component_type_id = ops[1];
  586. type.ext.coopVecNV.component_count_id = ops[2];
  587. type.parent_type = ops[1];
  588. // CoopVec-Nv can be used with integer operations like SMax where
  589. // where spirv-opt does explicit checks on integer bitwidth
  590. auto component_type = get<SPIRType>(type.ext.coopVecNV.component_type_id);
  591. type.width = component_type.width;
  592. break;
  593. }
  594. case OpTypeArray:
  595. {
  596. uint32_t id = ops[0];
  597. uint32_t tid = ops[1];
  598. auto &base = get<SPIRType>(tid);
  599. auto &arraybase = set<SPIRType>(id, base);
  600. arraybase.op = op;
  601. arraybase.parent_type = tid;
  602. uint32_t cid = ops[2];
  603. ir.mark_used_as_array_length(cid);
  604. auto *c = maybe_get<SPIRConstant>(cid);
  605. bool literal = c && !c->specialization;
  606. // We're copying type information into Array types, so we'll need a fixup for any physical pointer
  607. // references.
  608. if (base.forward_pointer)
  609. forward_pointer_fixups.push_back({ id, tid });
  610. arraybase.array_size_literal.push_back(literal);
  611. arraybase.array.push_back(literal ? c->scalar() : cid);
  612. // .self resolves down to non-array/non-pointer type.
  613. arraybase.self = base.self;
  614. break;
  615. }
  616. case OpTypeRuntimeArray:
  617. {
  618. uint32_t id = ops[0];
  619. auto &base = get<SPIRType>(ops[1]);
  620. auto &arraybase = set<SPIRType>(id, base);
  621. // We're copying type information into Array types, so we'll need a fixup for any physical pointer
  622. // references.
  623. if (base.forward_pointer)
  624. forward_pointer_fixups.push_back({ id, ops[1] });
  625. arraybase.op = op;
  626. arraybase.array.push_back(0);
  627. arraybase.array_size_literal.push_back(true);
  628. arraybase.parent_type = ops[1];
  629. // .self resolves down to non-array/non-pointer type.
  630. arraybase.self = base.self;
  631. break;
  632. }
  633. case OpTypeImage:
  634. {
  635. uint32_t id = ops[0];
  636. auto &type = set<SPIRType>(id, op);
  637. type.basetype = SPIRType::Image;
  638. type.image.type = ops[1];
  639. type.image.dim = static_cast<Dim>(ops[2]);
  640. type.image.depth = ops[3] == 1;
  641. type.image.arrayed = ops[4] != 0;
  642. type.image.ms = ops[5] != 0;
  643. type.image.sampled = ops[6];
  644. type.image.format = static_cast<ImageFormat>(ops[7]);
  645. type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;
  646. break;
  647. }
  648. case OpTypeSampledImage:
  649. {
  650. uint32_t id = ops[0];
  651. uint32_t imagetype = ops[1];
  652. auto &type = set<SPIRType>(id, op);
  653. type = get<SPIRType>(imagetype);
  654. type.basetype = SPIRType::SampledImage;
  655. type.self = id;
  656. break;
  657. }
  658. case OpTypeSampler:
  659. {
  660. uint32_t id = ops[0];
  661. auto &type = set<SPIRType>(id, op);
  662. type.basetype = SPIRType::Sampler;
  663. break;
  664. }
  665. case OpTypePointer:
  666. {
  667. uint32_t id = ops[0];
  668. // Very rarely, we might receive a FunctionPrototype here.
  669. // We won't be able to compile it, but we shouldn't crash when parsing.
  670. // We should be able to reflect.
  671. auto *base = maybe_get<SPIRType>(ops[2]);
  672. auto &ptrbase = set<SPIRType>(id, op);
  673. if (base)
  674. {
  675. ptrbase = *base;
  676. ptrbase.op = op;
  677. }
  678. ptrbase.pointer = true;
  679. ptrbase.pointer_depth++;
  680. ptrbase.storage = static_cast<StorageClass>(ops[1]);
  681. if (ptrbase.storage == StorageClassAtomicCounter)
  682. ptrbase.basetype = SPIRType::AtomicCounter;
  683. if (base && base->forward_pointer)
  684. forward_pointer_fixups.push_back({ id, ops[2] });
  685. ptrbase.parent_type = ops[2];
  686. // Do NOT set ptrbase.self!
  687. break;
  688. }
  689. case OpTypeForwardPointer:
  690. {
  691. uint32_t id = ops[0];
  692. auto &ptrbase = set<SPIRType>(id, op);
  693. ptrbase.pointer = true;
  694. ptrbase.pointer_depth++;
  695. ptrbase.storage = static_cast<StorageClass>(ops[1]);
  696. ptrbase.forward_pointer = true;
  697. if (ptrbase.storage == StorageClassAtomicCounter)
  698. ptrbase.basetype = SPIRType::AtomicCounter;
  699. break;
  700. }
  701. case OpTypeStruct:
  702. {
  703. uint32_t id = ops[0];
  704. auto &type = set<SPIRType>(id, op);
  705. type.basetype = SPIRType::Struct;
  706. for (uint32_t i = 1; i < length; i++)
  707. type.member_types.push_back(ops[i]);
  708. // Check if we have seen this struct type before, with just different
  709. // decorations.
  710. //
  711. // Add workaround for issue #17 as well by looking at OpName for the struct
  712. // types, which we shouldn't normally do.
  713. // We should not normally have to consider type aliases like this to begin with
  714. // however ... glslang issues #304, #307 cover this.
  715. // For stripped names, never consider struct type aliasing.
  716. // We risk declaring the same struct multiple times, but type-punning is not allowed
  717. // so this is safe.
  718. bool consider_aliasing = !ir.get_name(type.self).empty();
  719. if (consider_aliasing)
  720. {
  721. for (auto &other : global_struct_cache)
  722. {
  723. if (ir.get_name(type.self) == ir.get_name(other) &&
  724. types_are_logically_equivalent(type, get<SPIRType>(other)))
  725. {
  726. type.type_alias = other;
  727. break;
  728. }
  729. }
  730. if (type.type_alias == TypeID(0))
  731. global_struct_cache.push_back(id);
  732. }
  733. break;
  734. }
  735. case OpTypeFunction:
  736. {
  737. uint32_t id = ops[0];
  738. uint32_t ret = ops[1];
  739. auto &func = set<SPIRFunctionPrototype>(id, ret);
  740. for (uint32_t i = 2; i < length; i++)
  741. func.parameter_types.push_back(ops[i]);
  742. break;
  743. }
  744. case OpTypeAccelerationStructureKHR:
  745. {
  746. uint32_t id = ops[0];
  747. auto &type = set<SPIRType>(id, op);
  748. type.basetype = SPIRType::AccelerationStructure;
  749. break;
  750. }
  751. case OpTypeRayQueryKHR:
  752. {
  753. uint32_t id = ops[0];
  754. auto &type = set<SPIRType>(id, op);
  755. type.basetype = SPIRType::RayQuery;
  756. break;
  757. }
  758. case OpTypeTensorARM:
  759. {
  760. uint32_t id = ops[0];
  761. auto &type = set<SPIRType>(id, op);
  762. type.basetype = SPIRType::Tensor;
  763. type.ext.tensor = {};
  764. type.ext.tensor.type = ops[1];
  765. if (length >= 3)
  766. type.ext.tensor.rank = ops[2];
  767. if (length >= 4)
  768. type.ext.tensor.shape = ops[3];
  769. break;
  770. }
  771. // Variable declaration
  772. // All variables are essentially pointers with a storage qualifier.
  773. case OpVariable:
  774. {
  775. uint32_t type = ops[0];
  776. uint32_t id = ops[1];
  777. auto storage = static_cast<StorageClass>(ops[2]);
  778. uint32_t initializer = length == 4 ? ops[3] : 0;
  779. if (storage == StorageClassFunction)
  780. {
  781. if (!current_function)
  782. SPIRV_CROSS_THROW("No function currently in scope");
  783. current_function->add_local_variable(id);
  784. }
  785. set<SPIRVariable>(id, type, storage, initializer);
  786. break;
  787. }
  788. // OpPhi
  789. // OpPhi is a fairly magical opcode.
  790. // It selects temporary variables based on which parent block we *came from*.
  791. // In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local
  792. // variable to emulate SSA Phi.
  793. case OpPhi:
  794. {
  795. if (!current_function)
  796. SPIRV_CROSS_THROW("No function currently in scope");
  797. if (!current_block)
  798. SPIRV_CROSS_THROW("No block currently in scope");
  799. uint32_t result_type = ops[0];
  800. uint32_t id = ops[1];
  801. // Instead of a temporary, create a new function-wide temporary with this ID instead.
  802. auto &var = set<SPIRVariable>(id, result_type, StorageClassFunction);
  803. var.phi_variable = true;
  804. current_function->add_local_variable(id);
  805. for (uint32_t i = 2; i + 2 <= length; i += 2)
  806. current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
  807. break;
  808. }
  809. // Constants
  810. case OpSpecConstant:
  811. case OpConstant:
  812. case OpConstantCompositeReplicateEXT:
  813. case OpSpecConstantCompositeReplicateEXT:
  814. {
  815. uint32_t id = ops[1];
  816. auto &type = get<SPIRType>(ops[0]);
  817. if (op == OpConstantCompositeReplicateEXT || op == OpSpecConstantCompositeReplicateEXT)
  818. {
  819. auto subconstant = uint32_t(ops[2]);
  820. set<SPIRConstant>(id, ops[0], &subconstant, 1, op == OpSpecConstantCompositeReplicateEXT, true);
  821. }
  822. else
  823. {
  824. if (type.width > 32)
  825. set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
  826. else
  827. set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
  828. }
  829. break;
  830. }
  831. case OpSpecConstantFalse:
  832. case OpConstantFalse:
  833. {
  834. uint32_t id = ops[1];
  835. set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
  836. break;
  837. }
  838. case OpSpecConstantTrue:
  839. case OpConstantTrue:
  840. {
  841. uint32_t id = ops[1];
  842. set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
  843. break;
  844. }
  845. case OpConstantNull:
  846. {
  847. uint32_t id = ops[1];
  848. uint32_t type = ops[0];
  849. ir.make_constant_null(id, type, true);
  850. break;
  851. }
  852. case OpSpecConstantComposite:
  853. case OpConstantComposite:
  854. {
  855. uint32_t id = ops[1];
  856. uint32_t type = ops[0];
  857. auto &ctype = get<SPIRType>(type);
  858. // We can have constants which are structs and arrays.
  859. // In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
  860. // can refer to.
  861. if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
  862. {
  863. set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
  864. }
  865. else
  866. {
  867. uint32_t elements = length - 2;
  868. if (elements > 4)
  869. SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");
  870. SPIRConstant remapped_constant_ops[4];
  871. const SPIRConstant *c[4];
  872. for (uint32_t i = 0; i < elements; i++)
  873. {
  874. // Specialization constants operations can also be part of this.
  875. // We do not know their value, so any attempt to query SPIRConstant later
  876. // will fail. We can only propagate the ID of the expression and use to_expression on it.
  877. auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
  878. auto *undef_op = maybe_get<SPIRUndef>(ops[2 + i]);
  879. if (constant_op)
  880. {
  881. if (op == OpConstantComposite)
  882. SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");
  883. remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
  884. remapped_constant_ops[i].self = constant_op->self;
  885. remapped_constant_ops[i].constant_type = constant_op->basetype;
  886. remapped_constant_ops[i].specialization = true;
  887. c[i] = &remapped_constant_ops[i];
  888. }
  889. else if (undef_op)
  890. {
  891. // Undefined, just pick 0.
  892. remapped_constant_ops[i].make_null(get<SPIRType>(undef_op->basetype));
  893. remapped_constant_ops[i].constant_type = undef_op->basetype;
  894. c[i] = &remapped_constant_ops[i];
  895. }
  896. else
  897. c[i] = &get<SPIRConstant>(ops[2 + i]);
  898. }
  899. set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
  900. }
  901. break;
  902. }
  903. // Functions
  904. case OpFunction:
  905. {
  906. uint32_t res = ops[0];
  907. uint32_t id = ops[1];
  908. // Control
  909. uint32_t type = ops[3];
  910. if (current_function)
  911. SPIRV_CROSS_THROW("Must end a function before starting a new one!");
  912. current_function = &set<SPIRFunction>(id, res, type);
  913. break;
  914. }
  915. case OpFunctionParameter:
  916. {
  917. uint32_t type = ops[0];
  918. uint32_t id = ops[1];
  919. if (!current_function)
  920. SPIRV_CROSS_THROW("Must be in a function!");
  921. current_function->add_parameter(type, id);
  922. set<SPIRVariable>(id, type, StorageClassFunction);
  923. break;
  924. }
  925. case OpFunctionEnd:
  926. {
  927. if (current_block)
  928. {
  929. // Very specific error message, but seems to come up quite often.
  930. SPIRV_CROSS_THROW(
  931. "Cannot end a function before ending the current block.\n"
  932. "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
  933. }
  934. current_function = nullptr;
  935. break;
  936. }
  937. // Blocks
  938. case OpLabel:
  939. {
  940. // OpLabel always starts a block.
  941. if (!current_function)
  942. SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");
  943. uint32_t id = ops[0];
  944. current_function->blocks.push_back(id);
  945. if (!current_function->entry_block)
  946. current_function->entry_block = id;
  947. if (current_block)
  948. SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");
  949. current_block = &set<SPIRBlock>(id);
  950. break;
  951. }
  952. // Branch instructions end blocks.
  953. case OpBranch:
  954. {
  955. if (!current_block)
  956. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  957. uint32_t target = ops[0];
  958. current_block->terminator = SPIRBlock::Direct;
  959. current_block->next_block = target;
  960. current_block = nullptr;
  961. break;
  962. }
  963. case OpBranchConditional:
  964. {
  965. if (!current_block)
  966. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  967. current_block->condition = ops[0];
  968. current_block->true_block = ops[1];
  969. current_block->false_block = ops[2];
  970. current_block->terminator = SPIRBlock::Select;
  971. if (current_block->true_block == current_block->false_block)
  972. {
  973. // Bogus conditional, translate to a direct branch.
  974. // Avoids some ugly edge cases later when analyzing CFGs.
  975. // There are some super jank cases where the merge block is different from the true/false,
  976. // and later branches can "break" out of the selection construct this way.
  977. // This is complete nonsense, but CTS hits this case.
  978. // In this scenario, we should see the selection construct as more of a Switch with one default case.
  979. // The problem here is that this breaks any attempt to break out of outer switch statements,
  980. // but it's theoretically solvable if this ever comes up using the ladder breaking system ...
  981. if (current_block->true_block != current_block->next_block &&
  982. current_block->merge == SPIRBlock::MergeSelection)
  983. {
  984. uint32_t ids = ir.increase_bound_by(2);
  985. auto &type = set<SPIRType>(ids, OpTypeInt);
  986. type.basetype = SPIRType::Int;
  987. type.width = 32;
  988. auto &c = set<SPIRConstant>(ids + 1, ids);
  989. current_block->condition = c.self;
  990. current_block->default_block = current_block->true_block;
  991. current_block->terminator = SPIRBlock::MultiSelect;
  992. ir.block_meta[current_block->next_block] &= ~ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;
  993. ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;
  994. }
  995. else
  996. {
  997. // Collapse loops if we have to.
  998. bool collapsed_loop = current_block->true_block == current_block->merge_block &&
  999. current_block->merge == SPIRBlock::MergeLoop;
  1000. if (collapsed_loop)
  1001. {
  1002. ir.block_meta[current_block->merge_block] &= ~ParsedIR::BLOCK_META_LOOP_MERGE_BIT;
  1003. ir.block_meta[current_block->continue_block] &= ~ParsedIR::BLOCK_META_CONTINUE_BIT;
  1004. }
  1005. current_block->next_block = current_block->true_block;
  1006. current_block->condition = 0;
  1007. current_block->true_block = 0;
  1008. current_block->false_block = 0;
  1009. current_block->merge_block = 0;
  1010. current_block->merge = SPIRBlock::MergeNone;
  1011. current_block->terminator = SPIRBlock::Direct;
  1012. }
  1013. }
  1014. current_block = nullptr;
  1015. break;
  1016. }
  1017. case OpSwitch:
  1018. {
  1019. if (!current_block)
  1020. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  1021. current_block->terminator = SPIRBlock::MultiSelect;
  1022. current_block->condition = ops[0];
  1023. current_block->default_block = ops[1];
  1024. uint32_t remaining_ops = length - 2;
  1025. if ((remaining_ops % 2) == 0)
  1026. {
  1027. for (uint32_t i = 2; i + 2 <= length; i += 2)
  1028. current_block->cases_32bit.push_back({ ops[i], ops[i + 1] });
  1029. }
  1030. if ((remaining_ops % 3) == 0)
  1031. {
  1032. for (uint32_t i = 2; i + 3 <= length; i += 3)
  1033. {
  1034. uint64_t value = (static_cast<uint64_t>(ops[i + 1]) << 32) | ops[i];
  1035. current_block->cases_64bit.push_back({ value, ops[i + 2] });
  1036. }
  1037. }
  1038. // If we jump to next block, make it break instead since we're inside a switch case block at that point.
  1039. ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;
  1040. current_block = nullptr;
  1041. break;
  1042. }
  1043. case OpKill:
  1044. case OpTerminateInvocation:
  1045. {
  1046. if (!current_block)
  1047. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  1048. current_block->terminator = SPIRBlock::Kill;
  1049. current_block = nullptr;
  1050. break;
  1051. }
  1052. case OpTerminateRayKHR:
  1053. // NV variant is not a terminator.
  1054. if (!current_block)
  1055. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  1056. current_block->terminator = SPIRBlock::TerminateRay;
  1057. current_block = nullptr;
  1058. break;
  1059. case OpIgnoreIntersectionKHR:
  1060. // NV variant is not a terminator.
  1061. if (!current_block)
  1062. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  1063. current_block->terminator = SPIRBlock::IgnoreIntersection;
  1064. current_block = nullptr;
  1065. break;
  1066. case OpEmitMeshTasksEXT:
  1067. if (!current_block)
  1068. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  1069. current_block->terminator = SPIRBlock::EmitMeshTasks;
  1070. for (uint32_t i = 0; i < 3; i++)
  1071. current_block->mesh.groups[i] = ops[i];
  1072. current_block->mesh.payload = length >= 4 ? ops[3] : 0;
  1073. current_block = nullptr;
  1074. // Currently glslang is bugged and does not treat EmitMeshTasksEXT as a terminator.
  1075. ignore_trailing_block_opcodes = true;
  1076. break;
  1077. case OpReturn:
  1078. {
  1079. if (!current_block)
  1080. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  1081. current_block->terminator = SPIRBlock::Return;
  1082. current_block = nullptr;
  1083. break;
  1084. }
  1085. case OpReturnValue:
  1086. {
  1087. if (!current_block)
  1088. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  1089. current_block->terminator = SPIRBlock::Return;
  1090. current_block->return_value = ops[0];
  1091. current_block = nullptr;
  1092. break;
  1093. }
  1094. case OpUnreachable:
  1095. {
  1096. if (!current_block)
  1097. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  1098. current_block->terminator = SPIRBlock::Unreachable;
  1099. current_block = nullptr;
  1100. break;
  1101. }
  1102. case OpSelectionMerge:
  1103. {
  1104. if (!current_block)
  1105. SPIRV_CROSS_THROW("Trying to modify a non-existing block.");
  1106. current_block->next_block = ops[0];
  1107. current_block->merge = SPIRBlock::MergeSelection;
  1108. ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;
  1109. if (length >= 2)
  1110. {
  1111. if (ops[1] & SelectionControlFlattenMask)
  1112. current_block->hint = SPIRBlock::HintFlatten;
  1113. else if (ops[1] & SelectionControlDontFlattenMask)
  1114. current_block->hint = SPIRBlock::HintDontFlatten;
  1115. }
  1116. break;
  1117. }
  1118. case OpLoopMerge:
  1119. {
  1120. if (!current_block)
  1121. SPIRV_CROSS_THROW("Trying to modify a non-existing block.");
  1122. current_block->merge_block = ops[0];
  1123. current_block->continue_block = ops[1];
  1124. current_block->merge = SPIRBlock::MergeLoop;
  1125. ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
  1126. ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;
  1127. ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self);
  1128. // Don't add loop headers to continue blocks,
  1129. // which would make it impossible branch into the loop header since
  1130. // they are treated as continues.
  1131. if (current_block->continue_block != BlockID(current_block->self))
  1132. ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;
  1133. if (length >= 3)
  1134. {
  1135. if (ops[2] & LoopControlUnrollMask)
  1136. current_block->hint = SPIRBlock::HintUnroll;
  1137. else if (ops[2] & LoopControlDontUnrollMask)
  1138. current_block->hint = SPIRBlock::HintDontUnroll;
  1139. }
  1140. break;
  1141. }
  1142. case OpSpecConstantOp:
  1143. {
  1144. if (length < 3)
  1145. SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");
  1146. uint32_t result_type = ops[0];
  1147. uint32_t id = ops[1];
  1148. auto spec_op = static_cast<Op>(ops[2]);
  1149. set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
  1150. break;
  1151. }
  1152. case OpLine:
  1153. {
  1154. // OpLine might come at global scope, but we don't care about those since they will not be declared in any
  1155. // meaningful correct order.
  1156. // Ignore all OpLine directives which live outside a function.
  1157. if (current_block)
  1158. current_block->ops.push_back(instruction);
  1159. // Line directives may arrive before first OpLabel.
  1160. // Treat this as the line of the function declaration,
  1161. // so warnings for arguments can propagate properly.
  1162. if (current_function)
  1163. {
  1164. // Store the first one we find and emit it before creating the function prototype.
  1165. if (current_function->entry_line.file_id == 0)
  1166. {
  1167. current_function->entry_line.file_id = ops[0];
  1168. current_function->entry_line.line_literal = ops[1];
  1169. }
  1170. }
  1171. break;
  1172. }
  1173. case OpNoLine:
  1174. {
  1175. // OpNoLine might come at global scope.
  1176. if (current_block)
  1177. current_block->ops.push_back(instruction);
  1178. break;
  1179. }
  1180. // Actual opcodes.
  1181. default:
  1182. {
  1183. if (length >= 2)
  1184. {
  1185. const auto *type = maybe_get<SPIRType>(ops[0]);
  1186. if (type)
  1187. ir.load_type_width.insert({ ops[1], type->width });
  1188. }
  1189. if (!current_block)
  1190. SPIRV_CROSS_THROW("Currently no block to insert opcode.");
  1191. current_block->ops.push_back(instruction);
  1192. break;
  1193. }
  1194. }
  1195. }
  1196. bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
  1197. {
  1198. if (a.basetype != b.basetype)
  1199. return false;
  1200. if (a.width != b.width)
  1201. return false;
  1202. if (a.vecsize != b.vecsize)
  1203. return false;
  1204. if (a.columns != b.columns)
  1205. return false;
  1206. if (a.array.size() != b.array.size())
  1207. return false;
  1208. size_t array_count = a.array.size();
  1209. if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
  1210. return false;
  1211. if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
  1212. {
  1213. if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
  1214. return false;
  1215. }
  1216. if (a.member_types.size() != b.member_types.size())
  1217. return false;
  1218. size_t member_types = a.member_types.size();
  1219. for (size_t i = 0; i < member_types; i++)
  1220. {
  1221. if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
  1222. return false;
  1223. }
  1224. return true;
  1225. }
  1226. bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
  1227. {
  1228. auto &type = get<SPIRType>(v.basetype);
  1229. auto *type_meta = ir.find_meta(type.self);
  1230. bool ssbo = v.storage == StorageClassStorageBuffer ||
  1231. (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
  1232. bool image = type.basetype == SPIRType::Image;
  1233. bool counter = type.basetype == SPIRType::AtomicCounter;
  1234. bool is_restrict;
  1235. if (ssbo)
  1236. is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
  1237. else
  1238. is_restrict = ir.has_decoration(v.self, DecorationRestrict);
  1239. return !is_restrict && (ssbo || image || counter);
  1240. }
  1241. } // namespace SPIRV_CROSS_NAMESPACE