spirv_parser.cpp 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332
  1. /*
  2. * Copyright 2018-2021 Arm Limited
  3. * SPDX-License-Identifier: Apache-2.0 OR MIT
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. */
  17. /*
  18. * At your option, you may choose to accept this material under either:
  19. * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
  20. * 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
  21. */
  22. #include "spirv_parser.hpp"
  23. #include <assert.h>
  24. using namespace std;
  25. using namespace spv;
  26. namespace SPIRV_CROSS_NAMESPACE
  27. {
// Takes ownership of an existing SPIR-V word stream by moving it into the IR container.
Parser::Parser(vector<uint32_t> spirv)
{
	ir.spirv = std::move(spirv);
}
  32. Parser::Parser(const uint32_t *spirv_data, size_t word_count)
  33. {
  34. ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
  35. }
  36. static bool decoration_is_string(Decoration decoration)
  37. {
  38. switch (decoration)
  39. {
  40. case DecorationHlslSemanticGOOGLE:
  41. return true;
  42. default:
  43. return false;
  44. }
  45. }
  46. static inline uint32_t swap_endian(uint32_t v)
  47. {
  48. return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
  49. }
  50. static bool is_valid_spirv_version(uint32_t version)
  51. {
  52. switch (version)
  53. {
  54. // Allow v99 since it tends to just work.
  55. case 99:
  56. case 0x10000: // SPIR-V 1.0
  57. case 0x10100: // SPIR-V 1.1
  58. case 0x10200: // SPIR-V 1.2
  59. case 0x10300: // SPIR-V 1.3
  60. case 0x10400: // SPIR-V 1.4
  61. case 0x10500: // SPIR-V 1.5
  62. case 0x10600: // SPIR-V 1.6
  63. return true;
  64. default:
  65. return false;
  66. }
  67. }
// Top-level parse driver: validates the SPIR-V header, slices the word stream
// into instructions, dispatches each instruction to parse(const Instruction &),
// then resolves forward-pointer type fixups and sanity-checks the final state.
// Throws on any malformed module.
void Parser::parse()
{
	auto &spirv = ir.spirv;
	auto len = spirv.size();
	// A SPIR-V module is at least the 5-word header (magic, version, generator, bound, schema).
	if (len < 5)
		SPIRV_CROSS_THROW("SPIRV file too small.");
	auto s = spirv.data();
	// Endian-swap if we need to.
	if (s[0] == swap_endian(MagicNumber))
		transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });
	// After the optional swap, the magic number and version must both check out.
	if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
		SPIRV_CROSS_THROW("Invalid SPIRV format.");
	// Word 3 of the header is the ID bound: all result IDs are < bound.
	uint32_t bound = s[3];
	const uint32_t MaximumNumberOfIDs = 0x3fffff;
	if (bound > MaximumNumberOfIDs)
		SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n");
	ir.set_id_bounds(bound);
	// Instruction stream starts right after the 5-word header.
	uint32_t offset = 5;
	SmallVector<Instruction> instructions;
	while (offset < len)
	{
		Instruction instr = {};
		// Each instruction's first word packs opcode (low 16 bits) and
		// total word count including this word (high 16 bits).
		instr.op = spirv[offset] & 0xffff;
		instr.count = (spirv[offset] >> 16) & 0xffff;
		if (instr.count == 0)
			SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");
		// Operands begin after the opcode word; length excludes the opcode word itself.
		instr.offset = offset + 1;
		instr.length = instr.count - 1;
		offset += instr.count;
		// Bounds check AFTER advancing so a truncated final instruction is caught.
		if (offset > spirv.size())
			SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");
		instructions.push_back(instr);
	}
	for (auto &i : instructions)
		parse(i);
	// Forward pointers only become fully known once the pointee type is parsed,
	// so copy the resolved type information over now that all types exist.
	for (auto &fixup : forward_pointer_fixups)
	{
		auto &target = get<SPIRType>(fixup.first);
		auto &source = get<SPIRType>(fixup.second);
		target.member_types = source.member_types;
		target.basetype = source.basetype;
		target.self = source.self;
	}
	forward_pointer_fixups.clear();
	// A well-formed module must have closed every function and block,
	// and must declare at least one entry point.
	if (current_function)
		SPIRV_CROSS_THROW("Function was not terminated.");
	if (current_block)
		SPIRV_CROSS_THROW("Block was not terminated.");
	if (ir.default_entry_point == 0)
		SPIRV_CROSS_THROW("There is no entry point in the SPIR-V module.");
}
  119. const uint32_t *Parser::stream(const Instruction &instr) const
  120. {
  121. // If we're not going to use any arguments, just return nullptr.
  122. // We want to avoid case where we return an out of range pointer
  123. // that trips debug assertions on some platforms.
  124. if (!instr.length)
  125. return nullptr;
  126. if (instr.offset + instr.length > ir.spirv.size())
  127. SPIRV_CROSS_THROW("Compiler::stream() out of range.");
  128. return &ir.spirv[instr.offset];
  129. }
  130. static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
  131. {
  132. string ret;
  133. for (uint32_t i = offset; i < spirv.size(); i++)
  134. {
  135. uint32_t w = spirv[i];
  136. for (uint32_t j = 0; j < 4; j++, w >>= 8)
  137. {
  138. char c = w & 0xff;
  139. if (c == '\0')
  140. return ret;
  141. ret += c;
  142. }
  143. }
  144. SPIRV_CROSS_THROW("String was not terminated before EOF");
  145. }
  146. void Parser::parse(const Instruction &instruction)
  147. {
  148. auto *ops = stream(instruction);
  149. auto op = static_cast<Op>(instruction.op);
  150. uint32_t length = instruction.length;
  151. // HACK for glslang that might emit OpEmitMeshTasksEXT followed by return / branch.
  152. // Instead of failing hard, just ignore it.
  153. if (ignore_trailing_block_opcodes)
  154. {
  155. ignore_trailing_block_opcodes = false;
  156. if (op == OpReturn || op == OpBranch || op == OpUnreachable)
  157. return;
  158. }
  159. switch (op)
  160. {
  161. case OpSourceContinued:
  162. case OpSourceExtension:
  163. case OpNop:
  164. case OpModuleProcessed:
  165. break;
  166. case OpString:
  167. {
  168. set<SPIRString>(ops[0], extract_string(ir.spirv, instruction.offset + 1));
  169. break;
  170. }
  171. case OpMemoryModel:
  172. ir.addressing_model = static_cast<AddressingModel>(ops[0]);
  173. ir.memory_model = static_cast<MemoryModel>(ops[1]);
  174. break;
  175. case OpSource:
  176. {
  177. auto lang = static_cast<SourceLanguage>(ops[0]);
  178. switch (lang)
  179. {
  180. case SourceLanguageESSL:
  181. ir.source.es = true;
  182. ir.source.version = ops[1];
  183. ir.source.known = true;
  184. ir.source.hlsl = false;
  185. break;
  186. case SourceLanguageGLSL:
  187. ir.source.es = false;
  188. ir.source.version = ops[1];
  189. ir.source.known = true;
  190. ir.source.hlsl = false;
  191. break;
  192. case SourceLanguageHLSL:
  193. // For purposes of cross-compiling, this is GLSL 450.
  194. ir.source.es = false;
  195. ir.source.version = 450;
  196. ir.source.known = true;
  197. ir.source.hlsl = true;
  198. break;
  199. default:
  200. ir.source.known = false;
  201. break;
  202. }
  203. break;
  204. }
  205. case OpUndef:
  206. {
  207. uint32_t result_type = ops[0];
  208. uint32_t id = ops[1];
  209. set<SPIRUndef>(id, result_type);
  210. if (current_block)
  211. current_block->ops.push_back(instruction);
  212. break;
  213. }
  214. case OpCapability:
  215. {
  216. uint32_t cap = ops[0];
  217. if (cap == CapabilityKernel)
  218. SPIRV_CROSS_THROW("Kernel capability not supported.");
  219. ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
  220. break;
  221. }
  222. case OpExtension:
  223. {
  224. auto ext = extract_string(ir.spirv, instruction.offset);
  225. ir.declared_extensions.push_back(std::move(ext));
  226. break;
  227. }
  228. case OpExtInstImport:
  229. {
  230. uint32_t id = ops[0];
  231. SPIRExtension::Extension spirv_ext = SPIRExtension::Unsupported;
  232. auto ext = extract_string(ir.spirv, instruction.offset + 1);
  233. if (ext == "GLSL.std.450")
  234. spirv_ext = SPIRExtension::GLSL;
  235. else if (ext == "DebugInfo")
  236. spirv_ext = SPIRExtension::SPV_debug_info;
  237. else if (ext == "SPV_AMD_shader_ballot")
  238. spirv_ext = SPIRExtension::SPV_AMD_shader_ballot;
  239. else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
  240. spirv_ext = SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter;
  241. else if (ext == "SPV_AMD_shader_trinary_minmax")
  242. spirv_ext = SPIRExtension::SPV_AMD_shader_trinary_minmax;
  243. else if (ext == "SPV_AMD_gcn_shader")
  244. spirv_ext = SPIRExtension::SPV_AMD_gcn_shader;
  245. else if (ext == "NonSemantic.DebugPrintf")
  246. spirv_ext = SPIRExtension::NonSemanticDebugPrintf;
  247. else if (ext == "NonSemantic.Shader.DebugInfo.100")
  248. spirv_ext = SPIRExtension::NonSemanticShaderDebugInfo;
  249. else if (ext.find("NonSemantic.") == 0)
  250. spirv_ext = SPIRExtension::NonSemanticGeneric;
  251. set<SPIRExtension>(id, spirv_ext);
  252. // Other SPIR-V extensions which have ExtInstrs are currently not supported.
  253. break;
  254. }
  255. case OpExtInst:
  256. {
  257. // The SPIR-V debug information extended instructions might come at global scope.
  258. if (current_block)
  259. {
  260. current_block->ops.push_back(instruction);
  261. if (length >= 2)
  262. {
  263. const auto *type = maybe_get<SPIRType>(ops[0]);
  264. if (type)
  265. ir.load_type_width.insert({ ops[1], type->width });
  266. }
  267. }
  268. break;
  269. }
  270. case OpEntryPoint:
  271. {
  272. auto itr =
  273. ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
  274. extract_string(ir.spirv, instruction.offset + 2))));
  275. auto &e = itr.first->second;
  276. // Strings need nul-terminator and consume the whole word.
  277. uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);
  278. for (uint32_t i = strlen_words + 2; i < instruction.length; i++)
  279. e.interface_variables.push_back(ops[i]);
  280. // Set the name of the entry point in case OpName is not provided later.
  281. ir.set_name(ops[1], e.name);
  282. // If we don't have an entry, make the first one our "default".
  283. if (!ir.default_entry_point)
  284. ir.default_entry_point = ops[1];
  285. break;
  286. }
  287. case OpExecutionMode:
  288. {
  289. auto &execution = ir.entry_points[ops[0]];
  290. auto mode = static_cast<ExecutionMode>(ops[1]);
  291. execution.flags.set(mode);
  292. switch (mode)
  293. {
  294. case ExecutionModeInvocations:
  295. execution.invocations = ops[2];
  296. break;
  297. case ExecutionModeLocalSize:
  298. execution.workgroup_size.x = ops[2];
  299. execution.workgroup_size.y = ops[3];
  300. execution.workgroup_size.z = ops[4];
  301. break;
  302. case ExecutionModeOutputVertices:
  303. execution.output_vertices = ops[2];
  304. break;
  305. case ExecutionModeOutputPrimitivesEXT:
  306. execution.output_primitives = ops[2];
  307. break;
  308. default:
  309. break;
  310. }
  311. break;
  312. }
  313. case OpExecutionModeId:
  314. {
  315. auto &execution = ir.entry_points[ops[0]];
  316. auto mode = static_cast<ExecutionMode>(ops[1]);
  317. execution.flags.set(mode);
  318. if (mode == ExecutionModeLocalSizeId)
  319. {
  320. execution.workgroup_size.id_x = ops[2];
  321. execution.workgroup_size.id_y = ops[3];
  322. execution.workgroup_size.id_z = ops[4];
  323. }
  324. break;
  325. }
  326. case OpName:
  327. {
  328. uint32_t id = ops[0];
  329. ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
  330. break;
  331. }
  332. case OpMemberName:
  333. {
  334. uint32_t id = ops[0];
  335. uint32_t member = ops[1];
  336. ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
  337. break;
  338. }
  339. case OpDecorationGroup:
  340. {
  341. // Noop, this simply means an ID should be a collector of decorations.
  342. // The meta array is already a flat array of decorations which will contain the relevant decorations.
  343. break;
  344. }
  345. case OpGroupDecorate:
  346. {
  347. uint32_t group_id = ops[0];
  348. auto &decorations = ir.meta[group_id].decoration;
  349. auto &flags = decorations.decoration_flags;
  350. // Copies decorations from one ID to another. Only copy decorations which are set in the group,
  351. // i.e., we cannot just copy the meta structure directly.
  352. for (uint32_t i = 1; i < length; i++)
  353. {
  354. uint32_t target = ops[i];
  355. flags.for_each_bit([&](uint32_t bit) {
  356. auto decoration = static_cast<Decoration>(bit);
  357. if (decoration_is_string(decoration))
  358. {
  359. ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
  360. }
  361. else
  362. {
  363. ir.meta[target].decoration_word_offset[decoration] =
  364. ir.meta[group_id].decoration_word_offset[decoration];
  365. ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
  366. }
  367. });
  368. }
  369. break;
  370. }
  371. case OpGroupMemberDecorate:
  372. {
  373. uint32_t group_id = ops[0];
  374. auto &flags = ir.meta[group_id].decoration.decoration_flags;
  375. // Copies decorations from one ID to another. Only copy decorations which are set in the group,
  376. // i.e., we cannot just copy the meta structure directly.
  377. for (uint32_t i = 1; i + 1 < length; i += 2)
  378. {
  379. uint32_t target = ops[i + 0];
  380. uint32_t index = ops[i + 1];
  381. flags.for_each_bit([&](uint32_t bit) {
  382. auto decoration = static_cast<Decoration>(bit);
  383. if (decoration_is_string(decoration))
  384. ir.set_member_decoration_string(target, index, decoration,
  385. ir.get_decoration_string(group_id, decoration));
  386. else
  387. ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
  388. });
  389. }
  390. break;
  391. }
  392. case OpDecorate:
  393. case OpDecorateId:
  394. {
  395. // OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
  396. // so merge decorate and decorate-id here.
  397. uint32_t id = ops[0];
  398. auto decoration = static_cast<Decoration>(ops[1]);
  399. if (length >= 3)
  400. {
  401. ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
  402. ir.set_decoration(id, decoration, ops[2]);
  403. }
  404. else
  405. ir.set_decoration(id, decoration);
  406. break;
  407. }
  408. case OpDecorateStringGOOGLE:
  409. {
  410. uint32_t id = ops[0];
  411. auto decoration = static_cast<Decoration>(ops[1]);
  412. ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
  413. break;
  414. }
  415. case OpMemberDecorate:
  416. {
  417. uint32_t id = ops[0];
  418. uint32_t member = ops[1];
  419. auto decoration = static_cast<Decoration>(ops[2]);
  420. if (length >= 4)
  421. ir.set_member_decoration(id, member, decoration, ops[3]);
  422. else
  423. ir.set_member_decoration(id, member, decoration);
  424. break;
  425. }
  426. case OpMemberDecorateStringGOOGLE:
  427. {
  428. uint32_t id = ops[0];
  429. uint32_t member = ops[1];
  430. auto decoration = static_cast<Decoration>(ops[2]);
  431. ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
  432. break;
  433. }
  434. // Build up basic types.
  435. case OpTypeVoid:
  436. {
  437. uint32_t id = ops[0];
  438. auto &type = set<SPIRType>(id);
  439. type.basetype = SPIRType::Void;
  440. break;
  441. }
  442. case OpTypeBool:
  443. {
  444. uint32_t id = ops[0];
  445. auto &type = set<SPIRType>(id);
  446. type.basetype = SPIRType::Boolean;
  447. type.width = 1;
  448. break;
  449. }
  450. case OpTypeFloat:
  451. {
  452. uint32_t id = ops[0];
  453. uint32_t width = ops[1];
  454. auto &type = set<SPIRType>(id);
  455. if (width == 64)
  456. type.basetype = SPIRType::Double;
  457. else if (width == 32)
  458. type.basetype = SPIRType::Float;
  459. else if (width == 16)
  460. type.basetype = SPIRType::Half;
  461. else
  462. SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
  463. type.width = width;
  464. break;
  465. }
  466. case OpTypeInt:
  467. {
  468. uint32_t id = ops[0];
  469. uint32_t width = ops[1];
  470. bool signedness = ops[2] != 0;
  471. auto &type = set<SPIRType>(id);
  472. type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width);
  473. type.width = width;
  474. break;
  475. }
  476. // Build composite types by "inheriting".
  477. // NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
  478. // since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
  479. case OpTypeVector:
  480. {
  481. uint32_t id = ops[0];
  482. uint32_t vecsize = ops[2];
  483. auto &base = get<SPIRType>(ops[1]);
  484. auto &vecbase = set<SPIRType>(id);
  485. vecbase = base;
  486. vecbase.vecsize = vecsize;
  487. vecbase.self = id;
  488. vecbase.parent_type = ops[1];
  489. break;
  490. }
  491. case OpTypeMatrix:
  492. {
  493. uint32_t id = ops[0];
  494. uint32_t colcount = ops[2];
  495. auto &base = get<SPIRType>(ops[1]);
  496. auto &matrixbase = set<SPIRType>(id);
  497. matrixbase = base;
  498. matrixbase.columns = colcount;
  499. matrixbase.self = id;
  500. matrixbase.parent_type = ops[1];
  501. break;
  502. }
  503. case OpTypeArray:
  504. {
  505. uint32_t id = ops[0];
  506. auto &arraybase = set<SPIRType>(id);
  507. uint32_t tid = ops[1];
  508. auto &base = get<SPIRType>(tid);
  509. arraybase = base;
  510. arraybase.parent_type = tid;
  511. uint32_t cid = ops[2];
  512. ir.mark_used_as_array_length(cid);
  513. auto *c = maybe_get<SPIRConstant>(cid);
  514. bool literal = c && !c->specialization;
  515. // We're copying type information into Array types, so we'll need a fixup for any physical pointer
  516. // references.
  517. if (base.forward_pointer)
  518. forward_pointer_fixups.push_back({ id, tid });
  519. arraybase.array_size_literal.push_back(literal);
  520. arraybase.array.push_back(literal ? c->scalar() : cid);
  521. // Do NOT set arraybase.self!
  522. break;
  523. }
  524. case OpTypeRuntimeArray:
  525. {
  526. uint32_t id = ops[0];
  527. auto &base = get<SPIRType>(ops[1]);
  528. auto &arraybase = set<SPIRType>(id);
  529. // We're copying type information into Array types, so we'll need a fixup for any physical pointer
  530. // references.
  531. if (base.forward_pointer)
  532. forward_pointer_fixups.push_back({ id, ops[1] });
  533. arraybase = base;
  534. arraybase.array.push_back(0);
  535. arraybase.array_size_literal.push_back(true);
  536. arraybase.parent_type = ops[1];
  537. // Do NOT set arraybase.self!
  538. break;
  539. }
  540. case OpTypeImage:
  541. {
  542. uint32_t id = ops[0];
  543. auto &type = set<SPIRType>(id);
  544. type.basetype = SPIRType::Image;
  545. type.image.type = ops[1];
  546. type.image.dim = static_cast<Dim>(ops[2]);
  547. type.image.depth = ops[3] == 1;
  548. type.image.arrayed = ops[4] != 0;
  549. type.image.ms = ops[5] != 0;
  550. type.image.sampled = ops[6];
  551. type.image.format = static_cast<ImageFormat>(ops[7]);
  552. type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;
  553. break;
  554. }
  555. case OpTypeSampledImage:
  556. {
  557. uint32_t id = ops[0];
  558. uint32_t imagetype = ops[1];
  559. auto &type = set<SPIRType>(id);
  560. type = get<SPIRType>(imagetype);
  561. type.basetype = SPIRType::SampledImage;
  562. type.self = id;
  563. break;
  564. }
  565. case OpTypeSampler:
  566. {
  567. uint32_t id = ops[0];
  568. auto &type = set<SPIRType>(id);
  569. type.basetype = SPIRType::Sampler;
  570. break;
  571. }
  572. case OpTypePointer:
  573. {
  574. uint32_t id = ops[0];
  575. // Very rarely, we might receive a FunctionPrototype here.
  576. // We won't be able to compile it, but we shouldn't crash when parsing.
  577. // We should be able to reflect.
  578. auto *base = maybe_get<SPIRType>(ops[2]);
  579. auto &ptrbase = set<SPIRType>(id);
  580. if (base)
  581. ptrbase = *base;
  582. ptrbase.pointer = true;
  583. ptrbase.pointer_depth++;
  584. ptrbase.storage = static_cast<StorageClass>(ops[1]);
  585. if (ptrbase.storage == StorageClassAtomicCounter)
  586. ptrbase.basetype = SPIRType::AtomicCounter;
  587. if (base && base->forward_pointer)
  588. forward_pointer_fixups.push_back({ id, ops[2] });
  589. ptrbase.parent_type = ops[2];
  590. // Do NOT set ptrbase.self!
  591. break;
  592. }
  593. case OpTypeForwardPointer:
  594. {
  595. uint32_t id = ops[0];
  596. auto &ptrbase = set<SPIRType>(id);
  597. ptrbase.pointer = true;
  598. ptrbase.pointer_depth++;
  599. ptrbase.storage = static_cast<StorageClass>(ops[1]);
  600. ptrbase.forward_pointer = true;
  601. if (ptrbase.storage == StorageClassAtomicCounter)
  602. ptrbase.basetype = SPIRType::AtomicCounter;
  603. break;
  604. }
  605. case OpTypeStruct:
  606. {
  607. uint32_t id = ops[0];
  608. auto &type = set<SPIRType>(id);
  609. type.basetype = SPIRType::Struct;
  610. for (uint32_t i = 1; i < length; i++)
  611. type.member_types.push_back(ops[i]);
  612. // Check if we have seen this struct type before, with just different
  613. // decorations.
  614. //
  615. // Add workaround for issue #17 as well by looking at OpName for the struct
  616. // types, which we shouldn't normally do.
  617. // We should not normally have to consider type aliases like this to begin with
  618. // however ... glslang issues #304, #307 cover this.
  619. // For stripped names, never consider struct type aliasing.
  620. // We risk declaring the same struct multiple times, but type-punning is not allowed
  621. // so this is safe.
  622. bool consider_aliasing = !ir.get_name(type.self).empty();
  623. if (consider_aliasing)
  624. {
  625. for (auto &other : global_struct_cache)
  626. {
  627. if (ir.get_name(type.self) == ir.get_name(other) &&
  628. types_are_logically_equivalent(type, get<SPIRType>(other)))
  629. {
  630. type.type_alias = other;
  631. break;
  632. }
  633. }
  634. if (type.type_alias == TypeID(0))
  635. global_struct_cache.push_back(id);
  636. }
  637. break;
  638. }
  639. case OpTypeFunction:
  640. {
  641. uint32_t id = ops[0];
  642. uint32_t ret = ops[1];
  643. auto &func = set<SPIRFunctionPrototype>(id, ret);
  644. for (uint32_t i = 2; i < length; i++)
  645. func.parameter_types.push_back(ops[i]);
  646. break;
  647. }
  648. case OpTypeAccelerationStructureKHR:
  649. {
  650. uint32_t id = ops[0];
  651. auto &type = set<SPIRType>(id);
  652. type.basetype = SPIRType::AccelerationStructure;
  653. break;
  654. }
  655. case OpTypeRayQueryKHR:
  656. {
  657. uint32_t id = ops[0];
  658. auto &type = set<SPIRType>(id);
  659. type.basetype = SPIRType::RayQuery;
  660. break;
  661. }
  662. // Variable declaration
  663. // All variables are essentially pointers with a storage qualifier.
  664. case OpVariable:
  665. {
  666. uint32_t type = ops[0];
  667. uint32_t id = ops[1];
  668. auto storage = static_cast<StorageClass>(ops[2]);
  669. uint32_t initializer = length == 4 ? ops[3] : 0;
  670. if (storage == StorageClassFunction)
  671. {
  672. if (!current_function)
  673. SPIRV_CROSS_THROW("No function currently in scope");
  674. current_function->add_local_variable(id);
  675. }
  676. set<SPIRVariable>(id, type, storage, initializer);
  677. break;
  678. }
  679. // OpPhi
  680. // OpPhi is a fairly magical opcode.
  681. // It selects temporary variables based on which parent block we *came from*.
  682. // In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local
  683. // variable to emulate SSA Phi.
  684. case OpPhi:
  685. {
  686. if (!current_function)
  687. SPIRV_CROSS_THROW("No function currently in scope");
  688. if (!current_block)
  689. SPIRV_CROSS_THROW("No block currently in scope");
  690. uint32_t result_type = ops[0];
  691. uint32_t id = ops[1];
  692. // Instead of a temporary, create a new function-wide temporary with this ID instead.
  693. auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction);
  694. var.phi_variable = true;
  695. current_function->add_local_variable(id);
  696. for (uint32_t i = 2; i + 2 <= length; i += 2)
  697. current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
  698. break;
  699. }
  700. // Constants
  701. case OpSpecConstant:
  702. case OpConstant:
  703. {
  704. uint32_t id = ops[1];
  705. auto &type = get<SPIRType>(ops[0]);
  706. if (type.width > 32)
  707. set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
  708. else
  709. set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
  710. break;
  711. }
  712. case OpSpecConstantFalse:
  713. case OpConstantFalse:
  714. {
  715. uint32_t id = ops[1];
  716. set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
  717. break;
  718. }
  719. case OpSpecConstantTrue:
  720. case OpConstantTrue:
  721. {
  722. uint32_t id = ops[1];
  723. set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
  724. break;
  725. }
  726. case OpConstantNull:
  727. {
  728. uint32_t id = ops[1];
  729. uint32_t type = ops[0];
  730. ir.make_constant_null(id, type, true);
  731. break;
  732. }
  733. case OpSpecConstantComposite:
  734. case OpConstantComposite:
  735. {
  736. uint32_t id = ops[1];
  737. uint32_t type = ops[0];
  738. auto &ctype = get<SPIRType>(type);
  739. // We can have constants which are structs and arrays.
  740. // In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
  741. // can refer to.
  742. if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
  743. {
  744. set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
  745. }
  746. else
  747. {
  748. uint32_t elements = length - 2;
  749. if (elements > 4)
  750. SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");
  751. SPIRConstant remapped_constant_ops[4];
  752. const SPIRConstant *c[4];
  753. for (uint32_t i = 0; i < elements; i++)
  754. {
  755. // Specialization constants operations can also be part of this.
  756. // We do not know their value, so any attempt to query SPIRConstant later
  757. // will fail. We can only propagate the ID of the expression and use to_expression on it.
  758. auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
  759. auto *undef_op = maybe_get<SPIRUndef>(ops[2 + i]);
  760. if (constant_op)
  761. {
  762. if (op == OpConstantComposite)
  763. SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");
  764. remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
  765. remapped_constant_ops[i].self = constant_op->self;
  766. remapped_constant_ops[i].constant_type = constant_op->basetype;
  767. remapped_constant_ops[i].specialization = true;
  768. c[i] = &remapped_constant_ops[i];
  769. }
  770. else if (undef_op)
  771. {
  772. // Undefined, just pick 0.
  773. remapped_constant_ops[i].make_null(get<SPIRType>(undef_op->basetype));
  774. remapped_constant_ops[i].constant_type = undef_op->basetype;
  775. c[i] = &remapped_constant_ops[i];
  776. }
  777. else
  778. c[i] = &get<SPIRConstant>(ops[2 + i]);
  779. }
  780. set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
  781. }
  782. break;
  783. }
  784. // Functions
  785. case OpFunction:
  786. {
  787. uint32_t res = ops[0];
  788. uint32_t id = ops[1];
  789. // Control
  790. uint32_t type = ops[3];
  791. if (current_function)
  792. SPIRV_CROSS_THROW("Must end a function before starting a new one!");
  793. current_function = &set<SPIRFunction>(id, res, type);
  794. break;
  795. }
  796. case OpFunctionParameter:
  797. {
  798. uint32_t type = ops[0];
  799. uint32_t id = ops[1];
  800. if (!current_function)
  801. SPIRV_CROSS_THROW("Must be in a function!");
  802. current_function->add_parameter(type, id);
  803. set<SPIRVariable>(id, type, StorageClassFunction);
  804. break;
  805. }
  806. case OpFunctionEnd:
  807. {
  808. if (current_block)
  809. {
  810. // Very specific error message, but seems to come up quite often.
  811. SPIRV_CROSS_THROW(
  812. "Cannot end a function before ending the current block.\n"
  813. "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
  814. }
  815. current_function = nullptr;
  816. break;
  817. }
  818. // Blocks
  819. case OpLabel:
  820. {
  821. // OpLabel always starts a block.
  822. if (!current_function)
  823. SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");
  824. uint32_t id = ops[0];
  825. current_function->blocks.push_back(id);
  826. if (!current_function->entry_block)
  827. current_function->entry_block = id;
  828. if (current_block)
  829. SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");
  830. current_block = &set<SPIRBlock>(id);
  831. break;
  832. }
  833. // Branch instructions end blocks.
  834. case OpBranch:
  835. {
  836. if (!current_block)
  837. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  838. uint32_t target = ops[0];
  839. current_block->terminator = SPIRBlock::Direct;
  840. current_block->next_block = target;
  841. current_block = nullptr;
  842. break;
  843. }
  844. case OpBranchConditional:
  845. {
  846. if (!current_block)
  847. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  848. current_block->condition = ops[0];
  849. current_block->true_block = ops[1];
  850. current_block->false_block = ops[2];
  851. current_block->terminator = SPIRBlock::Select;
  852. if (current_block->true_block == current_block->false_block)
  853. {
  854. // Bogus conditional, translate to a direct branch.
  855. // Avoids some ugly edge cases later when analyzing CFGs.
  856. // There are some super jank cases where the merge block is different from the true/false,
  857. // and later branches can "break" out of the selection construct this way.
  858. // This is complete nonsense, but CTS hits this case.
  859. // In this scenario, we should see the selection construct as more of a Switch with one default case.
  860. // The problem here is that this breaks any attempt to break out of outer switch statements,
  861. // but it's theoretically solvable if this ever comes up using the ladder breaking system ...
  862. if (current_block->true_block != current_block->next_block &&
  863. current_block->merge == SPIRBlock::MergeSelection)
  864. {
  865. uint32_t ids = ir.increase_bound_by(2);
  866. SPIRType type;
  867. type.basetype = SPIRType::Int;
  868. type.width = 32;
  869. set<SPIRType>(ids, type);
  870. auto &c = set<SPIRConstant>(ids + 1, ids);
  871. current_block->condition = c.self;
  872. current_block->default_block = current_block->true_block;
  873. current_block->terminator = SPIRBlock::MultiSelect;
  874. ir.block_meta[current_block->next_block] &= ~ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;
  875. ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;
  876. }
  877. else
  878. {
  879. // Collapse loops if we have to.
  880. bool collapsed_loop = current_block->true_block == current_block->merge_block &&
  881. current_block->merge == SPIRBlock::MergeLoop;
  882. if (collapsed_loop)
  883. {
  884. ir.block_meta[current_block->merge_block] &= ~ParsedIR::BLOCK_META_LOOP_MERGE_BIT;
  885. ir.block_meta[current_block->continue_block] &= ~ParsedIR::BLOCK_META_CONTINUE_BIT;
  886. }
  887. current_block->next_block = current_block->true_block;
  888. current_block->condition = 0;
  889. current_block->true_block = 0;
  890. current_block->false_block = 0;
  891. current_block->merge_block = 0;
  892. current_block->merge = SPIRBlock::MergeNone;
  893. current_block->terminator = SPIRBlock::Direct;
  894. }
  895. }
  896. current_block = nullptr;
  897. break;
  898. }
  899. case OpSwitch:
  900. {
  901. if (!current_block)
  902. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  903. current_block->terminator = SPIRBlock::MultiSelect;
  904. current_block->condition = ops[0];
  905. current_block->default_block = ops[1];
  906. uint32_t remaining_ops = length - 2;
  907. if ((remaining_ops % 2) == 0)
  908. {
  909. for (uint32_t i = 2; i + 2 <= length; i += 2)
  910. current_block->cases_32bit.push_back({ ops[i], ops[i + 1] });
  911. }
  912. if ((remaining_ops % 3) == 0)
  913. {
  914. for (uint32_t i = 2; i + 3 <= length; i += 3)
  915. {
  916. uint64_t value = (static_cast<uint64_t>(ops[i + 1]) << 32) | ops[i];
  917. current_block->cases_64bit.push_back({ value, ops[i + 2] });
  918. }
  919. }
  920. // If we jump to next block, make it break instead since we're inside a switch case block at that point.
  921. ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;
  922. current_block = nullptr;
  923. break;
  924. }
  925. case OpKill:
  926. case OpTerminateInvocation:
  927. {
  928. if (!current_block)
  929. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  930. current_block->terminator = SPIRBlock::Kill;
  931. current_block = nullptr;
  932. break;
  933. }
  934. case OpTerminateRayKHR:
  935. // NV variant is not a terminator.
  936. if (!current_block)
  937. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  938. current_block->terminator = SPIRBlock::TerminateRay;
  939. current_block = nullptr;
  940. break;
  941. case OpIgnoreIntersectionKHR:
  942. // NV variant is not a terminator.
  943. if (!current_block)
  944. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  945. current_block->terminator = SPIRBlock::IgnoreIntersection;
  946. current_block = nullptr;
  947. break;
  948. case OpEmitMeshTasksEXT:
  949. if (!current_block)
  950. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  951. current_block->terminator = SPIRBlock::EmitMeshTasks;
  952. for (uint32_t i = 0; i < 3; i++)
  953. current_block->mesh.groups[i] = ops[i];
  954. current_block->mesh.payload = length >= 4 ? ops[3] : 0;
  955. current_block = nullptr;
  956. // Currently glslang is bugged and does not treat EmitMeshTasksEXT as a terminator.
  957. ignore_trailing_block_opcodes = true;
  958. break;
  959. case OpReturn:
  960. {
  961. if (!current_block)
  962. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  963. current_block->terminator = SPIRBlock::Return;
  964. current_block = nullptr;
  965. break;
  966. }
  967. case OpReturnValue:
  968. {
  969. if (!current_block)
  970. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  971. current_block->terminator = SPIRBlock::Return;
  972. current_block->return_value = ops[0];
  973. current_block = nullptr;
  974. break;
  975. }
  976. case OpUnreachable:
  977. {
  978. if (!current_block)
  979. SPIRV_CROSS_THROW("Trying to end a non-existing block.");
  980. current_block->terminator = SPIRBlock::Unreachable;
  981. current_block = nullptr;
  982. break;
  983. }
  984. case OpSelectionMerge:
  985. {
  986. if (!current_block)
  987. SPIRV_CROSS_THROW("Trying to modify a non-existing block.");
  988. current_block->next_block = ops[0];
  989. current_block->merge = SPIRBlock::MergeSelection;
  990. ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;
  991. if (length >= 2)
  992. {
  993. if (ops[1] & SelectionControlFlattenMask)
  994. current_block->hint = SPIRBlock::HintFlatten;
  995. else if (ops[1] & SelectionControlDontFlattenMask)
  996. current_block->hint = SPIRBlock::HintDontFlatten;
  997. }
  998. break;
  999. }
  1000. case OpLoopMerge:
  1001. {
  1002. if (!current_block)
  1003. SPIRV_CROSS_THROW("Trying to modify a non-existing block.");
  1004. current_block->merge_block = ops[0];
  1005. current_block->continue_block = ops[1];
  1006. current_block->merge = SPIRBlock::MergeLoop;
  1007. ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
  1008. ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;
  1009. ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self);
  1010. // Don't add loop headers to continue blocks,
  1011. // which would make it impossible branch into the loop header since
  1012. // they are treated as continues.
  1013. if (current_block->continue_block != BlockID(current_block->self))
  1014. ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;
  1015. if (length >= 3)
  1016. {
  1017. if (ops[2] & LoopControlUnrollMask)
  1018. current_block->hint = SPIRBlock::HintUnroll;
  1019. else if (ops[2] & LoopControlDontUnrollMask)
  1020. current_block->hint = SPIRBlock::HintDontUnroll;
  1021. }
  1022. break;
  1023. }
  1024. case OpSpecConstantOp:
  1025. {
  1026. if (length < 3)
  1027. SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");
  1028. uint32_t result_type = ops[0];
  1029. uint32_t id = ops[1];
  1030. auto spec_op = static_cast<Op>(ops[2]);
  1031. set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
  1032. break;
  1033. }
  1034. case OpLine:
  1035. {
  1036. // OpLine might come at global scope, but we don't care about those since they will not be declared in any
  1037. // meaningful correct order.
  1038. // Ignore all OpLine directives which live outside a function.
  1039. if (current_block)
  1040. current_block->ops.push_back(instruction);
  1041. // Line directives may arrive before first OpLabel.
  1042. // Treat this as the line of the function declaration,
  1043. // so warnings for arguments can propagate properly.
  1044. if (current_function)
  1045. {
  1046. // Store the first one we find and emit it before creating the function prototype.
  1047. if (current_function->entry_line.file_id == 0)
  1048. {
  1049. current_function->entry_line.file_id = ops[0];
  1050. current_function->entry_line.line_literal = ops[1];
  1051. }
  1052. }
  1053. break;
  1054. }
  1055. case OpNoLine:
  1056. {
  1057. // OpNoLine might come at global scope.
  1058. if (current_block)
  1059. current_block->ops.push_back(instruction);
  1060. break;
  1061. }
  1062. // Actual opcodes.
  1063. default:
  1064. {
  1065. if (length >= 2)
  1066. {
  1067. const auto *type = maybe_get<SPIRType>(ops[0]);
  1068. if (type)
  1069. ir.load_type_width.insert({ ops[1], type->width });
  1070. }
  1071. if (!current_block)
  1072. SPIRV_CROSS_THROW("Currently no block to insert opcode.");
  1073. current_block->ops.push_back(instruction);
  1074. break;
  1075. }
  1076. }
  1077. }
  1078. bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
  1079. {
  1080. if (a.basetype != b.basetype)
  1081. return false;
  1082. if (a.width != b.width)
  1083. return false;
  1084. if (a.vecsize != b.vecsize)
  1085. return false;
  1086. if (a.columns != b.columns)
  1087. return false;
  1088. if (a.array.size() != b.array.size())
  1089. return false;
  1090. size_t array_count = a.array.size();
  1091. if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
  1092. return false;
  1093. if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
  1094. {
  1095. if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
  1096. return false;
  1097. }
  1098. if (a.member_types.size() != b.member_types.size())
  1099. return false;
  1100. size_t member_types = a.member_types.size();
  1101. for (size_t i = 0; i < member_types; i++)
  1102. {
  1103. if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
  1104. return false;
  1105. }
  1106. return true;
  1107. }
  1108. bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
  1109. {
  1110. auto &type = get<SPIRType>(v.basetype);
  1111. auto *type_meta = ir.find_meta(type.self);
  1112. bool ssbo = v.storage == StorageClassStorageBuffer ||
  1113. (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
  1114. bool image = type.basetype == SPIRType::Image;
  1115. bool counter = type.basetype == SPIRType::AtomicCounter;
  1116. bool is_restrict;
  1117. if (ssbo)
  1118. is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
  1119. else
  1120. is_restrict = ir.has_decoration(v.self, DecorationRestrict);
  1121. return !is_restrict && (ssbo || image || counter);
  1122. }
  1123. } // namespace SPIRV_CROSS_NAMESPACE