/*
 * Copyright 2015-2021 Arm Limited
 * SPDX-License-Identifier: Apache-2.0 OR MIT
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * At your option, you may choose to accept this material under either:
 * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
 * 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
 */
  22. #ifndef SPIRV_CROSS_GLSL_HPP
  23. #define SPIRV_CROSS_GLSL_HPP
  24. #include "GLSL.std.450.h"
  25. #include "spirv_cross.hpp"
  26. #include <unordered_map>
  27. #include <unordered_set>
  28. #include <utility>
  29. namespace SPIRV_CROSS_NAMESPACE
  30. {
// Pixel local storage data formats, used when remapping variables to
// GL_EXT_shader_pixel_local_storage (see remap_pixel_local_storage()).
enum PlsFormat
{
    PlsNone = 0,

    PlsR11FG11FB10F,
    PlsR32F,
    PlsRG16F,
    PlsRGB10A2,
    PlsRGBA8,
    PlsRG16,

    PlsRGBA8I,
    PlsRG16I,

    PlsRGB10A2UI,
    PlsRGBA8UI,
    PlsRG16UI,
    PlsR32UI
};
// Associates a SPIR-V variable ID with the pixel local storage format
// it should be remapped to.
struct PlsRemap
{
    uint32_t id;      // SPIR-V ID of the variable being remapped.
    PlsFormat format; // Target PLS format for that variable.
};
// Bit flags which control how access chain expressions are built and emitted.
enum AccessChainFlagBits
{
    // Indices in the chain are literal constants rather than SPIR-V IDs.
    ACCESS_CHAIN_INDEX_IS_LITERAL_BIT = 1 << 0,
    // Build only the chain expression itself, without loading through it.
    ACCESS_CHAIN_CHAIN_ONLY_BIT = 1 << 1,
    // First index dereferences a pointer (OpPtrAccessChain semantics).
    ACCESS_CHAIN_PTR_CHAIN_BIT = 1 << 2,
    // Do not register the expression as a read for usage tracking.
    ACCESS_CHAIN_SKIP_REGISTER_EXPRESSION_READ_BIT = 1 << 3,
    // When literal indexing, MSB set in an index forces it to be treated as an ID.
    ACCESS_CHAIN_LITERAL_MSB_FORCE_ID = 1 << 4,
    // Flatten all struct members while chaining.
    ACCESS_CHAIN_FLATTEN_ALL_MEMBERS_BIT = 1 << 5,
    // Force the result to be treated as a composite expression.
    ACCESS_CHAIN_FORCE_COMPOSITE_BIT = 1 << 6
};
// A bitmask of AccessChainFlagBits.
typedef uint32_t AccessChainFlags;
  63. class CompilerGLSL : public Compiler
  64. {
  65. public:
// User-facing options common to all GLSL-family backends.
struct Options
{
    // The shading language version. Corresponds to #version $VALUE.
    uint32_t version = 450;

    // Emit the OpenGL ES shading language instead of desktop OpenGL.
    bool es = false;

    // Debug option to always emit temporary variables for all expressions.
    bool force_temporary = false;
    // Debug option, can be increased in an attempt to workaround SPIRV-Cross bugs temporarily.
    // If this limit has to be increased, it points to an implementation bug.
    // In certain scenarios, the maximum number of debug iterations may increase beyond this limit
    // as long as we can prove we're making certain kinds of forward progress.
    uint32_t force_recompile_max_debug_iterations = 3;

    // If true, Vulkan GLSL features are used instead of GL-compatible features.
    // Mostly useful for debugging SPIR-V files.
    bool vulkan_semantics = false;

    // If true, gl_PerVertex is explicitly redeclared in vertex, geometry and tessellation shaders.
    // The members of gl_PerVertex is determined by which built-ins are declared by the shader.
    // This option is ignored in ES versions, as redeclaration in ES is not required, and it depends on a different extension
    // (EXT_shader_io_blocks) which makes things a bit more fuzzy.
    bool separate_shader_objects = false;

    // Flattens multidimensional arrays, e.g. float foo[a][b][c] into single-dimensional arrays,
    // e.g. float foo[a * b * c].
    // This function does not change the actual SPIRType of any object.
    // Only the generated code, including declarations of interface variables are changed to be single array dimension.
    bool flatten_multidimensional_arrays = false;

    // For older desktop GLSL targets than version 420, the
    // GL_ARB_shading_language_420pack extensions is used to be able to support
    // layout(binding) on UBOs and samplers.
    // If disabled on older targets, binding decorations will be stripped.
    bool enable_420pack_extension = true;

    // In non-Vulkan GLSL, emit push constant blocks as UBOs rather than plain uniforms.
    bool emit_push_constant_as_uniform_buffer = false;

    // Always emit uniform blocks as plain uniforms, regardless of the GLSL version, even when UBOs are supported.
    // Does not apply to shader storage or push constant blocks.
    bool emit_uniform_buffer_as_plain_uniforms = false;

    // Emit OpLine directives if present in the module.
    // May not correspond exactly to original source, but should be a good approximation.
    bool emit_line_directives = false;

    // In cases where readonly/writeonly decoration are not used at all,
    // we try to deduce which qualifier(s) we should actually used, since actually emitting
    // read-write decoration is very rare, and older glslang/HLSL compilers tend to just emit readwrite as a matter of fact.
    // The default (true) is to enable automatic deduction for these cases, but if you trust the decorations set
    // by the SPIR-V, it's recommended to set this to false.
    bool enable_storage_image_qualifier_deduction = true;

    // On some targets (WebGPU), uninitialized variables are banned.
    // If this is enabled, all variables (temporaries, Private, Function)
    // which would otherwise be uninitialized will now be initialized to 0 instead.
    bool force_zero_initialized_variables = false;

    // In GLSL, force use of I/O block flattening, similar to
    // what happens on legacy GLSL targets for blocks and structs.
    bool force_flattened_io_blocks = false;

    // For opcodes where we have to perform explicit additional nan checks, very ugly code is generated.
    // If we opt-in, ignore these requirements.
    // In opcodes like NClamp/NMin/NMax and FP compare, ignore NaN behavior.
    // Use FClamp/FMin/FMax semantics for clamps and lets implementation choose ordered or unordered
    // compares.
    bool relax_nan_checks = false;

    // Loading row-major matrices from UBOs on older AMD Windows OpenGL drivers is problematic.
    // To load these types correctly, we must wrap the load in a dummy function whose only purpose is to
    // ensure row_major decoration is actually respected.
    // This workaround may cause significant performance degeneration on some Android devices.
    bool enable_row_major_load_workaround = true;

    // If non-zero, controls layout(num_views = N) in; in GL_OVR_multiview2.
    uint32_t ovr_multiview_view_count = 0;

    // Precision qualifier levels used for ESSL default precision statements.
    enum Precision
    {
        DontCare,
        Lowp,
        Mediump,
        Highp
    };

    struct VertexOptions
    {
        // "Vertex-like shader" here is any shader stage that can write BuiltInPosition.
        // GLSL: In vertex-like shaders, rewrite [0, w] depth (Vulkan/D3D style) to [-w, w] depth (GL style).
        // MSL: In vertex-like shaders, rewrite [-w, w] depth (GL style) to [0, w] depth.
        // HLSL: In vertex-like shaders, rewrite [-w, w] depth (GL style) to [0, w] depth.
        bool fixup_clipspace = false;

        // In vertex-like shaders, inverts gl_Position.y or equivalent.
        bool flip_vert_y = false;

        // GLSL only, for HLSL version of this option, see CompilerHLSL.
        // If true, the backend will assume that InstanceIndex will need to apply
        // a base instance offset. Set to false if you know you will never use base instance
        // functionality as it might remove some internal uniforms.
        bool support_nonzero_base_instance = true;
    } vertex;

    struct FragmentOptions
    {
        // Add precision mediump float in ES targets when emitting GLES source.
        // Add precision highp int in ES targets when emitting GLES source.
        Precision default_float_precision = Mediump;
        Precision default_int_precision = Highp;
    } fragment;
};
  161. void remap_pixel_local_storage(std::vector<PlsRemap> inputs, std::vector<PlsRemap> outputs)
  162. {
  163. pls_inputs = std::move(inputs);
  164. pls_outputs = std::move(outputs);
  165. remap_pls_variables();
  166. }
// Redirect a subpassInput reading from input_attachment_index to instead load its value from
// the color attachment at location = color_location. Requires ESSL.
// If coherent, uses GL_EXT_shader_framebuffer_fetch, if not, uses noncoherent variant.
void remap_ext_framebuffer_fetch(uint32_t input_attachment_index, uint32_t color_location, bool coherent);

// Construct from a SPIR-V binary, taking ownership of the word stream.
explicit CompilerGLSL(std::vector<uint32_t> spirv_)
    : Compiler(std::move(spirv_))
{
    init();
}

// Construct from a raw SPIR-V word pointer and word count (no ownership taken).
CompilerGLSL(const uint32_t *ir_, size_t word_count)
    : Compiler(ir_, word_count)
{
    init();
}

// Construct from an already-parsed IR module (copied).
explicit CompilerGLSL(const ParsedIR &ir_)
    : Compiler(ir_)
{
    init();
}

// Construct from an already-parsed IR module (moved).
explicit CompilerGLSL(ParsedIR &&ir_)
    : Compiler(std::move(ir_))
{
    init();
}

// Accessors for the options shared by all GLSL-family backends.
const Options &get_common_options() const
{
    return options;
}

void set_common_options(const Options &opts)
{
    options = opts;
}
// Compile the SPIR-V module to GLSL source and return it.
std::string compile() override;

// Returns the current string held in the conversion buffer. Useful for
// capturing what has been converted so far when compile() throws an error.
std::string get_partial_source();

// Adds a line to be added right after #version in GLSL backend.
// This is useful for enabling custom extensions which are outside the scope of SPIRV-Cross.
// This can be combined with variable remapping.
// A new-line will be added.
//
// While add_header_line() is a more generic way of adding arbitrary text to the header
// of a GLSL file, require_extension() should be used when adding extensions since it will
// avoid creating collisions with SPIRV-Cross generated extensions.
//
// Code added via add_header_line() is typically backend-specific.
void add_header_line(const std::string &str);

// Adds an extension which is required to run this shader, e.g.
// require_extension("GL_KHR_my_extension");
void require_extension(const std::string &ext);

// Returns the list of required extensions. After compilation this will contains any other
// extensions that the compiler used automatically, in addition to the user specified ones.
const SmallVector<std::string> &get_required_extensions() const;

// Legacy GLSL compatibility method.
// Takes a uniform or push constant variable and flattens it into a (i|u)vec4 array[N]; array instead.
// For this to work, all types in the block must be the same basic type, e.g. mixing vec2 and vec4 is fine, but
// mixing int and float is not.
// The name of the uniform array will be the same as the interface block name.
void flatten_buffer_block(VariableID id);

// After compilation, query if a variable ID was used as a depth resource.
// This is meaningful for MSL since descriptor types depend on this knowledge.
// Cases which return true:
// - Images which are declared with depth = 1 image type.
// - Samplers which are statically used at least once with Dref opcodes.
// - Images which are statically used at least once with Dref opcodes.
bool variable_is_depth_or_compare(VariableID id) const;

// If a shader output is active in this stage, but inactive in a subsequent stage,
// this can be signalled here. This can be used to work around certain cross-stage matching problems
// which plagues MSL and HLSL in certain scenarios.
// An output which matches one of these will not be emitted in stage output interfaces, but rather treated as a private
// variable.
// This option is only meaningful for MSL and HLSL, since GLSL matches by location directly.
// Masking builtins only takes effect if the builtin in question is part of the stage output interface.
void mask_stage_output_by_location(uint32_t location, uint32_t component);
void mask_stage_output_by_builtin(spv::BuiltIn builtin);
protected:
// Tracks which subgroup features a shader requires and resolves the best
// combination of GLSL extensions able to implement them.
struct ShaderSubgroupSupportHelper
{
    // lower enum value = greater priority
    enum Candidate
    {
        KHR_shader_subgroup_ballot,
        KHR_shader_subgroup_basic,
        KHR_shader_subgroup_vote,
        KHR_shader_subgroup_arithmetic,
        NV_gpu_shader_5,
        NV_shader_thread_group,
        NV_shader_thread_shuffle,
        ARB_shader_ballot,
        ARB_shader_group_vote,
        AMD_gcn_shader,

        CandidateCount
    };

    // Maps a candidate to its "#extension" name string.
    static const char *get_extension_name(Candidate c);
    // Extra extensions some candidates additionally require.
    static SmallVector<std::string> get_extra_required_extension_names(Candidate c);
    // Preprocessor predicate guarding the extra extensions, if any.
    static const char *get_extra_required_extension_predicate(Candidate c);

    // Individual subgroup capabilities that may be requested by the shader.
    enum Feature
    {
        SubgroupMask = 0,
        SubgroupSize = 1,
        SubgroupInvocationID = 2,
        SubgroupID = 3,
        NumSubgroups = 4,
        SubgroupBroadcast_First = 5,
        SubgroupBallotFindLSB_MSB = 6,
        SubgroupAll_Any_AllEqualBool = 7,
        SubgroupAllEqualT = 8,
        SubgroupElect = 9,
        SubgroupBarrier = 10,
        SubgroupMemBarrier = 11,
        SubgroupBallot = 12,
        SubgroupInverseBallot_InclBitCount_ExclBitCout = 13,
        SubgroupBallotBitExtract = 14,
        SubgroupBallotBitCount = 15,
        SubgroupArithmeticIAddReduce = 16,
        SubgroupArithmeticIAddExclusiveScan = 17,
        SubgroupArithmeticIAddInclusiveScan = 18,
        SubgroupArithmeticFAddReduce = 19,
        SubgroupArithmeticFAddExclusiveScan = 20,
        SubgroupArithmeticFAddInclusiveScan = 21,
        SubgroupArithmeticIMulReduce = 22,
        SubgroupArithmeticIMulExclusiveScan = 23,
        SubgroupArithmeticIMulInclusiveScan = 24,
        SubgroupArithmeticFMulReduce = 25,
        SubgroupArithmeticFMulExclusiveScan = 26,
        SubgroupArithmeticFMulInclusiveScan = 27,
        FeatureCount
    };

    // One bit per Feature; static_assert ensures the mask is wide enough.
    using FeatureMask = uint32_t;
    static_assert(sizeof(FeatureMask) * 8u >= FeatureCount, "Mask type needs more bits.");

    using CandidateVector = SmallVector<Candidate, CandidateCount>;
    using FeatureVector = SmallVector<Feature>;

    // Features which must also be available for 'feature' to work.
    static FeatureVector get_feature_dependencies(Feature feature);
    static FeatureMask get_feature_dependency_mask(Feature feature);
    static bool can_feature_be_implemented_without_extensions(Feature feature);
    static Candidate get_KHR_extension_for_feature(Feature feature);

    // Per-candidate weights produced when resolving which extensions to use.
    struct Result
    {
        Result();
        uint32_t weights[CandidateCount];
    };

    void request_feature(Feature feature);
    bool is_feature_requested(Feature feature) const;
    Result resolve() const;

    static CandidateVector get_candidates_for_feature(Feature ft, const Result &r);

private:
    static CandidateVector get_candidates_for_feature(Feature ft);
    static FeatureMask build_mask(const SmallVector<Feature> &features);
    FeatureMask feature_mask = 0;
};
// TODO remove this function when all subgroup ops are supported (or make it always return true)
static bool is_supported_subgroup_op_in_opengl(spv::Op op, const uint32_t *ops);

// Reset compiler state between compile() iterations.
void reset(uint32_t iteration_count);
void emit_function(SPIRFunction &func, const Bitset &return_flags);

// Extension bookkeeping.
bool has_extension(const std::string &ext) const;
void require_extension_internal(const std::string &ext);

// Virtualize methods which need to be overridden by subclass targets like C++ and such.
virtual void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags);

// State tracked while emitting blocks and switch statements.
SPIRBlock *current_emitting_block = nullptr;
SmallVector<SPIRBlock *> current_emitting_switch_stack;
bool current_emitting_switch_fallthrough = false;

virtual void emit_instruction(const Instruction &instr);

// Describes a deferred copy between two temporaries (used for precision handling).
struct TemporaryCopy
{
    uint32_t dst_id;
    uint32_t src_id;
};
TemporaryCopy handle_instruction_precision(const Instruction &instr);
void emit_block_instructions(SPIRBlock &block);
void emit_block_instructions_with_masked_debug(SPIRBlock &block);

// For relax_nan_checks.
GLSLstd450 get_remapped_glsl_op(GLSLstd450 std450_op) const;
spv::Op get_remapped_spirv_op(spv::Op op) const;

// Emitters for GLSL.std.450 and vendor extension instruction sets.
virtual void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args,
                          uint32_t count);
virtual void emit_spv_amd_shader_ballot_op(uint32_t result_type, uint32_t result_id, uint32_t op,
                                           const uint32_t *args, uint32_t count);
virtual void emit_spv_amd_shader_explicit_vertex_parameter_op(uint32_t result_type, uint32_t result_id, uint32_t op,
                                                              const uint32_t *args, uint32_t count);
virtual void emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t result_id, uint32_t op,
                                                   const uint32_t *args, uint32_t count);
virtual void emit_spv_amd_gcn_shader_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args,
                                        uint32_t count);

virtual void emit_header();
void emit_line_directive(uint32_t file_id, uint32_t line_literal);
void build_workgroup_size(SmallVector<std::string> &arguments, const SpecializationConstant &x,
                          const SpecializationConstant &y, const SpecializationConstant &z);

void request_subgroup_feature(ShaderSubgroupSupportHelper::Feature feature);

// Texture and image emission hooks, overridable per target language.
virtual void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id);
virtual void emit_texture_op(const Instruction &i, bool sparse);
virtual std::string to_texture_op(const Instruction &i, bool sparse, bool *forward,
                                  SmallVector<uint32_t> &inherited_expressions);
virtual void emit_subgroup_op(const Instruction &i);
virtual std::string type_to_glsl(const SPIRType &type, uint32_t id = 0);
virtual std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage);
virtual void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index,
                                const std::string &qualifier = "", uint32_t base_offset = 0);
virtual void emit_struct_padding_target(const SPIRType &type);
virtual std::string image_type_glsl(const SPIRType &type, uint32_t id = 0);

// Constant emission.
std::string constant_expression(const SPIRConstant &c,
                                bool inside_block_like_struct_scope = false,
                                bool inside_struct_scope = false);
virtual std::string constant_op_expression(const SPIRConstantOp &cop);
virtual std::string constant_expression_vector(const SPIRConstant &c, uint32_t vector);
virtual void emit_fixup();
virtual std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id = 0);
virtual bool variable_decl_is_remapped_storage(const SPIRVariable &var, spv::StorageClass storage) const;
virtual std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id);

// Argument bundles describing a texture sampling/fetch call being built.
struct TextureFunctionBaseArguments
{
    // GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor.
    TextureFunctionBaseArguments() = default;
    VariableID img = 0;
    const SPIRType *imgtype = nullptr;
    bool is_fetch = false, is_gather = false, is_proj = false;
};

struct TextureFunctionNameArguments
{
    // GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor.
    TextureFunctionNameArguments() = default;
    TextureFunctionBaseArguments base;
    bool has_array_offsets = false, has_offset = false, has_grad = false;
    bool has_dref = false, is_sparse_feedback = false, has_min_lod = false;
    uint32_t lod = 0;
};
virtual std::string to_function_name(const TextureFunctionNameArguments &args);

struct TextureFunctionArguments
{
    // GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor.
    TextureFunctionArguments() = default;
    TextureFunctionBaseArguments base;
    uint32_t coord = 0, coord_components = 0, dref = 0;
    uint32_t grad_x = 0, grad_y = 0, lod = 0, offset = 0;
    uint32_t bias = 0, component = 0, sample = 0, sparse_texel = 0, min_lod = 0;
    bool nonuniform_expression = false;
};
virtual std::string to_function_args(const TextureFunctionArguments &args, bool *p_forward);

// Sparse-feedback temporaries used for OpImageSparse* results.
void emit_sparse_feedback_temporaries(uint32_t result_type_id, uint32_t id, uint32_t &feedback_id,
                                      uint32_t &texel_id);
uint32_t get_sparse_feedback_texel_id(uint32_t id) const;

// Buffer/uniform emission hooks.
virtual void emit_buffer_block(const SPIRVariable &type);
virtual void emit_push_constant_block(const SPIRVariable &var);
virtual void emit_uniform(const SPIRVariable &var);
virtual std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t physical_type_id,
                                           bool packed_type, bool row_major);
virtual bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const;

virtual bool is_user_type_structured(uint32_t id) const;

void emit_copy_logical_type(uint32_t lhs_id, uint32_t lhs_type_id, uint32_t rhs_id, uint32_t rhs_type_id,
                            SmallVector<uint32_t> chain);

// Output buffer all generated source text is appended into.
StringStream<> buffer;
  417. template <typename T>
  418. inline void statement_inner(T &&t)
  419. {
  420. buffer << std::forward<T>(t);
  421. statement_count++;
  422. }
  423. template <typename T, typename... Ts>
  424. inline void statement_inner(T &&t, Ts &&... ts)
  425. {
  426. buffer << std::forward<T>(t);
  427. statement_count++;
  428. statement_inner(std::forward<Ts>(ts)...);
  429. }
  430. template <typename... Ts>
  431. inline void statement(Ts &&... ts)
  432. {
  433. if (is_forcing_recompilation())
  434. {
  435. // Do not bother emitting code while force_recompile is active.
  436. // We will compile again.
  437. statement_count++;
  438. return;
  439. }
  440. if (redirect_statement)
  441. {
  442. redirect_statement->push_back(join(std::forward<Ts>(ts)...));
  443. statement_count++;
  444. }
  445. else
  446. {
  447. for (uint32_t i = 0; i < indent; i++)
  448. buffer << " ";
  449. statement_inner(std::forward<Ts>(ts)...);
  450. buffer << '\n';
  451. }
  452. }
  453. template <typename... Ts>
  454. inline void statement_no_indent(Ts &&... ts)
  455. {
  456. auto old_indent = indent;
  457. indent = 0;
  458. statement(std::forward<Ts>(ts)...);
  459. indent = old_indent;
  460. }
// Used for implementing continue blocks where
// we want to obtain a list of statements we can merge
// on a single line separated by comma.
SmallVector<std::string> *redirect_statement = nullptr;
const SPIRBlock *current_continue_block = nullptr;
bool block_temporary_hoisting = false;
bool block_debug_directives = false;

// Scope helpers: emit opening/closing braces and adjust indentation.
void begin_scope();
void end_scope();
void end_scope(const std::string &trailer);
void end_scope_decl();
void end_scope_decl(const std::string &decl);

Options options;

// Allow Metal to use the array<T> template to make arrays a value type
virtual std::string type_to_array_glsl(const SPIRType &type);
std::string to_array_size(const SPIRType &type, uint32_t index);
uint32_t to_array_size_literal(const SPIRType &type, uint32_t index) const;
uint32_t to_array_size_literal(const SPIRType &type) const;
virtual std::string variable_decl(const SPIRVariable &variable); // Threadgroup arrays can't have a wrapper type
std::string variable_decl_function_local(SPIRVariable &variable);

// Naming helpers which register names while avoiding collisions.
void add_local_variable_name(uint32_t id);
void add_resource_name(uint32_t id);
void add_member_name(SPIRType &type, uint32_t name);
void add_function_overload(const SPIRFunction &func);

// Row-major / packed matrix handling.
virtual bool is_non_native_row_major_matrix(uint32_t id);
virtual bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index);
bool member_is_remapped_physical_type(const SPIRType &type, uint32_t index) const;
bool member_is_packed_physical_type(const SPIRType &type, uint32_t index) const;
virtual std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type,
                                             uint32_t physical_type_id, bool is_packed,
                                             bool relaxed = false);

// Name caches used to keep emitted identifiers unique per category.
std::unordered_set<std::string> local_variable_names;
std::unordered_set<std::string> resource_names;
std::unordered_set<std::string> block_input_names;
std::unordered_set<std::string> block_output_names;
std::unordered_set<std::string> block_ubo_names;
std::unordered_set<std::string> block_ssbo_names;
std::unordered_set<std::string> block_names; // A union of all block_*_names.
std::unordered_map<std::string, std::unordered_set<uint64_t>> function_overloads;
std::unordered_map<uint32_t, std::string> preserved_aliases;
void preserve_alias_on_reset(uint32_t id);
void reset_name_caches();

bool processing_entry_point = false;
// Can be overriden by subclass backends for trivial things which
// shouldn't need polymorphism.
struct BackendVariations
{
    // Literals/keywords used for discard-style operations.
    std::string discard_literal = "discard";
    std::string demote_literal = "demote";
    std::string null_pointer_literal = "";

    // Numeric literal suffix handling.
    bool float_literal_suffix = false;
    bool double_literal_suffix = true;
    bool uint32_t_literal_suffix = true;
    bool long_long_literal_suffix = false;

    // Names of the basic scalar types in the target language.
    const char *basic_int_type = "int";
    const char *basic_uint_type = "uint";
    const char *basic_int8_type = "int8_t";
    const char *basic_uint8_type = "uint8_t";
    const char *basic_int16_type = "int16_t";
    const char *basic_uint16_type = "uint16_t";
    const char *int16_t_literal_suffix = "s";
    const char *uint16_t_literal_suffix = "us";
    const char *nonuniform_qualifier = "nonuniformEXT";
    const char *boolean_mix_function = "mix";
    SPIRType::BaseType boolean_in_struct_remapped_type = SPIRType::Boolean;

    // Capability/feature switches describing the target language dialect.
    bool swizzle_is_function = false;
    bool shared_is_implied = false;
    bool unsized_array_supported = true;
    bool explicit_struct_type = false;
    bool use_initializer_list = false;
    bool use_typed_initializer_list = false;
    bool can_declare_struct_inline = true;
    bool can_declare_arrays_inline = true;
    bool native_row_major_matrix = true;
    bool use_constructor_splatting = true;
    bool allow_precision_qualifiers = false;
    bool can_swizzle_scalar = false;
    bool force_gl_in_out_block = false;
    bool force_merged_mesh_block = false;
    bool can_return_array = true;
    bool allow_truncated_access_chain = false;
    bool supports_extensions = false;
    bool supports_empty_struct = false;
    bool array_is_value_type = true;
    bool array_is_value_type_in_buffer_blocks = true;
    bool comparison_image_samples_scalar = false;
    bool native_pointers = false;
    bool support_small_type_sampling_result = false;
    bool support_case_fallthrough = true;
    bool use_array_constructor = false;
    bool needs_row_major_load_workaround = false;
    bool support_pointer_to_pointer = false;
    bool support_precise_qualifier = false;
    bool support_64bit_switch = false;
    bool workgroup_size_is_hidden = false;
    bool requires_relaxed_precision_analysis = false;
    bool implicit_c_integer_promotion_rules = false;
} backend;
// --- Top-level declaration emission --------------------------------------
void emit_struct(SPIRType &type);
void emit_resources();
// Emit helper code for features the target dialect lacks natively.
void emit_extension_workarounds(spv::ExecutionModel model);
void emit_subgroup_arithmetic_workaround(const std::string &func, spv::Op op, spv::GroupOperation group_op);
void emit_polyfills(uint32_t polyfills, bool relaxed);
// Buffer blocks (UBO/SSBO) in their different emission strategies:
// native blocks, buffer-reference (physical pointer) blocks, legacy
// (pre-block GLSL) emulation, and flattened emission.
void emit_buffer_block_native(const SPIRVariable &var);
void emit_buffer_reference_block(uint32_t type_id, bool forward_declaration);
void emit_buffer_block_legacy(const SPIRVariable &var);
void emit_buffer_block_flattened(const SPIRVariable &type);
void fixup_implicit_builtin_block_names(spv::ExecutionModel model);
void emit_declared_builtin_block(spv::StorageClass storage, spv::ExecutionModel model);
bool should_force_emit_builtin_block(spv::StorageClass storage);
void emit_push_constant_block_vulkan(const SPIRVariable &var);
void emit_push_constant_block_glsl(const SPIRVariable &var);
// Stage IO blocks. The "flattened" variants emit one variable per member
// chain (basename + member indices) instead of a single block.
void emit_interface_block(const SPIRVariable &type);
void emit_flattened_io_block(const SPIRVariable &var, const char *qual);
void emit_flattened_io_block_struct(const std::string &basename, const SPIRType &type, const char *qual,
                                    const SmallVector<uint32_t> &indices);
void emit_flattened_io_block_member(const std::string &basename, const SPIRType &type, const char *qual,
                                    const SmallVector<uint32_t> &indices);
// --- Control-flow emission -----------------------------------------------
void emit_block_chain(SPIRBlock &block);
void emit_hoisted_temporaries(SmallVector<std::pair<TypeID, ID>> &temporaries);
std::string constant_value_macro_name(uint32_t id);
int get_constant_mapping_to_workgroup_component(const SPIRConstant &constant) const;
void emit_constant(const SPIRConstant &constant);
void emit_specialization_constant_op(const SPIRConstantOp &constant);
std::string emit_continue_block(uint32_t continue_block, bool follow_true_block, bool follow_false_block);
bool attempt_emit_loop_header(SPIRBlock &block, SPIRBlock::Method method);
void branch(BlockID from, BlockID to);
void branch_to_continue(BlockID from, BlockID to);
void branch(BlockID from, uint32_t cond, BlockID true_block, BlockID false_block);
// Copies for PHI-style temporaries are flushed when branching between blocks.
void flush_phi(BlockID from, BlockID to);
void flush_variable_declaration(uint32_t id);
void flush_undeclared_variables(SPIRBlock &block);
void emit_variable_temporary_copies(const SPIRVariable &var);
// Expression-forwarding policy queries.
bool should_dereference(uint32_t id);
bool should_forward(uint32_t id) const;
bool should_suppress_usage_tracking(uint32_t id) const;
// --- Opcode emission helpers ---------------------------------------------
// The unary/binary/trinary/quaternary helpers receive the textual GLSL
// operator or function name in `op`. The *_cast variants additionally take
// SPIRType::BaseType arguments so operands can be bitcast to the input
// types the operation expects before emission.
void emit_mix_op(uint32_t result_type, uint32_t id, uint32_t left, uint32_t right, uint32_t lerp);
void emit_nminmax_op(uint32_t result_type, uint32_t id, uint32_t op0, uint32_t op1, GLSLstd450 op);
void emit_emulated_ahyper_op(uint32_t result_type, uint32_t result_id, uint32_t op0, GLSLstd450 op);
bool to_trivial_mix_op(const SPIRType &type, std::string &op, uint32_t left, uint32_t right, uint32_t lerp);
void emit_quaternary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2,
                             uint32_t op3, const char *op);
void emit_trinary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2,
                          const char *op);
void emit_binary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op);
// Two overloads: two-operand atomics, and three-operand (e.g. compare-exchange style).
void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op);
void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, const char *op);
void emit_unary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op,
                             SPIRType::BaseType input_type, SPIRType::BaseType expected_result_type);
void emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op,
                              SPIRType::BaseType input_type, bool skip_cast_if_equal_type);
void emit_binary_func_op_cast_clustered(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
                                        const char *op, SPIRType::BaseType input_type);
void emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2,
                               const char *op, SPIRType::BaseType input_type);
void emit_trinary_func_op_bitextract(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
                                     uint32_t op2, const char *op, SPIRType::BaseType expected_result_type,
                                     SPIRType::BaseType input_type0, SPIRType::BaseType input_type1,
                                     SPIRType::BaseType input_type2);
void emit_bitfield_insert_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2,
                             uint32_t op3, const char *op, SPIRType::BaseType offset_count_type);
void emit_unary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op);
// "Unrolled" variants emit the operation per-component rather than vector-wide.
void emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op);
void emit_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op);
void emit_unrolled_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op,
                             bool negate, SPIRType::BaseType expected_type);
void emit_binary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op,
                         SPIRType::BaseType input_type, bool skip_cast_if_equal_type, bool implicit_integer_promotion);
// Builds the bitcast expressions for both operands; returns the expected type.
SPIRType binary_op_bitcast_helper(std::string &cast_op0, std::string &cast_op1, SPIRType::BaseType &input_type,
                                  uint32_t op0, uint32_t op1, bool skip_cast_if_equal_type);
virtual bool emit_complex_bitcast(uint32_t result_type, uint32_t id, uint32_t op0);
std::string to_ternary_expression(const SPIRType &result_type, uint32_t select, uint32_t true_value,
                                  uint32_t false_value);
void emit_unary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op);
void emit_unary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op);
virtual void emit_mesh_tasks(SPIRBlock &block);
// Forwarding state queries for already-emitted expressions.
bool expression_is_forwarded(uint32_t id) const;
bool expression_suppresses_usage_tracking(uint32_t id) const;
bool expression_read_implies_multiple_reads(uint32_t id) const;
SPIRExpression &emit_op(uint32_t result_type, uint32_t result_id, const std::string &rhs, bool forward_rhs,
                        bool suppress_usage_tracking = false);
// --- Access chain lowering -----------------------------------------------
// Translates SPIR-V OpAccessChain index lists into GLSL member/array/swizzle
// syntax. The "flattened" family addresses into buffer blocks that were
// flattened away, computing explicit byte offsets/strides instead.
void access_chain_internal_append_index(std::string &expr, uint32_t base, const SPIRType *type,
                                        AccessChainFlags flags, bool &access_chain_is_arrayed, uint32_t index);
std::string access_chain_internal(uint32_t base, const uint32_t *indices, uint32_t count, AccessChainFlags flags,
                                  AccessChainMeta *meta);
spv::StorageClass get_expression_effective_storage_class(uint32_t ptr);
virtual bool access_chain_needs_stage_io_builtin_translation(uint32_t base);
virtual void check_physical_type_cast(std::string &expr, const SPIRType *type, uint32_t physical_type);
virtual void prepare_access_chain_for_scalar_access(std::string &expr, const SPIRType &type,
                                                    spv::StorageClass storage, bool &is_packed);
std::string access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type,
                         AccessChainMeta *meta = nullptr, bool ptr_chain = false);
std::string flattened_access_chain(uint32_t base, const uint32_t *indices, uint32_t count,
                                   const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride,
                                   uint32_t array_stride, bool need_transpose);
std::string flattened_access_chain_struct(uint32_t base, const uint32_t *indices, uint32_t count,
                                          const SPIRType &target_type, uint32_t offset);
std::string flattened_access_chain_matrix(uint32_t base, const uint32_t *indices, uint32_t count,
                                          const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride,
                                          bool need_transpose);
std::string flattened_access_chain_vector(uint32_t base, const uint32_t *indices, uint32_t count,
                                          const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride,
                                          bool need_transpose);
// Returns the access expression plus the accumulated offset for a chain.
std::pair<std::string, uint32_t> flattened_access_chain_offset(const SPIRType &basetype, const uint32_t *indices,
                                                               uint32_t count, uint32_t offset,
                                                               uint32_t word_stride, bool *need_transpose = nullptr,
                                                               uint32_t *matrix_stride = nullptr,
                                                               uint32_t *array_stride = nullptr,
                                                               bool ptr_chain = false);
// --- Expression stringification ------------------------------------------
// The to_*_expression family converts a SPIR-V result ID into GLSL source
// text. Variants differ in whether the result is parenthesized ("enclosed"),
// unpacked from a packed physical layout, or dereferenced as a pointer.
const char *index_to_swizzle(uint32_t index);
std::string remap_swizzle(const SPIRType &result_type, uint32_t input_components, const std::string &expr);
std::string declare_temporary(uint32_t type, uint32_t id);
void emit_uninitialized_temporary(uint32_t type, uint32_t id);
SPIRExpression &emit_uninitialized_temporary_expression(uint32_t type, uint32_t id);
void append_global_func_args(const SPIRFunction &func, uint32_t index, SmallVector<std::string> &arglist);
std::string to_non_uniform_aware_expression(uint32_t id);
// register_expression_read controls whether this lookup counts as a use for
// usage tracking (see track_expression_read).
std::string to_expression(uint32_t id, bool register_expression_read = true);
std::string to_composite_constructor_expression(const SPIRType &parent_type, uint32_t id, bool block_like_type);
std::string to_rerolled_array_expression(const SPIRType &parent_type, const std::string &expr, const SPIRType &type);
std::string to_enclosed_expression(uint32_t id, bool register_expression_read = true);
std::string to_unpacked_expression(uint32_t id, bool register_expression_read = true);
std::string to_unpacked_row_major_matrix_expression(uint32_t id);
std::string to_enclosed_unpacked_expression(uint32_t id, bool register_expression_read = true);
std::string to_dereferenced_expression(uint32_t id, bool register_expression_read = true);
std::string to_pointer_expression(uint32_t id, bool register_expression_read = true);
std::string to_enclosed_pointer_expression(uint32_t id, bool register_expression_read = true);
std::string to_extract_component_expression(uint32_t id, uint32_t index);
std::string to_extract_constant_composite_expression(uint32_t result_type, const SPIRConstant &c,
                                                     const uint32_t *chain, uint32_t length);
// Parenthesization and pointer-syntax utilities over raw expression strings.
static bool needs_enclose_expression(const std::string &expr);
std::string enclose_expression(const std::string &expr);
std::string dereference_expression(const SPIRType &expression_type, const std::string &expr);
std::string address_of_expression(const std::string &expr);
void strip_enclosed_expression(std::string &expr);
std::string to_member_name(const SPIRType &type, uint32_t index);
virtual std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain_is_resolved);
std::string to_multi_member_reference(const SPIRType &type, const SmallVector<uint32_t> &indices);
// --- Qualifiers, layout and packing --------------------------------------
std::string type_to_glsl_constructor(const SPIRType &type);
std::string argument_decl(const SPIRFunction::Parameter &arg);
virtual std::string to_qualifiers_glsl(uint32_t id);
void fixup_io_block_patch_primitive_qualifiers(const SPIRVariable &var);
void emit_output_variable_initializer(const SPIRVariable &var);
std::string to_precision_qualifiers_glsl(uint32_t id);
virtual const char *to_storage_qualifiers_glsl(const SPIRVariable &var);
std::string flags_to_qualifiers_glsl(const SPIRType &type, const Bitset &flags);
const char *format_to_glsl(spv::ImageFormat format);
virtual std::string layout_for_member(const SPIRType &type, uint32_t index);
virtual std::string to_interpolation_qualifiers(const Bitset &flags);
std::string layout_for_variable(const SPIRVariable &variable);
std::string to_combined_image_sampler(VariableID image_id, VariableID samp_id);
virtual bool skip_argument(uint32_t id) const;
virtual bool emit_array_copy(const char *expr, uint32_t lhs_id, uint32_t rhs_id,
                             spv::StorageClass lhs_storage, spv::StorageClass rhs_storage);
virtual void emit_block_hints(const SPIRBlock &block);
virtual std::string to_initializer_expression(const SPIRVariable &var);
virtual std::string to_zero_initialized_expression(uint32_t type_id);
bool type_can_zero_initialize(const SPIRType &type) const;
// Buffer packing rule validation/derivation (std140/std430/scalar, etc.).
// On failure, failed_index receives the offending member (if non-null).
bool buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing,
                                uint32_t *failed_index = nullptr, uint32_t start_offset = 0,
                                uint32_t end_offset = ~(0u));
std::string buffer_to_packing_standard(const SPIRType &type, bool support_std430_without_scalar_layout);
uint32_t type_to_packed_base_size(const SPIRType &type, BufferPackingStandard packing);
uint32_t type_to_packed_alignment(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
uint32_t type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
uint32_t type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
uint32_t type_to_location_count(const SPIRType &type) const;
// --- Bitcasts and composites ---------------------------------------------
std::string bitcast_glsl(const SPIRType &result_type, uint32_t arg);
virtual std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type);
std::string bitcast_expression(SPIRType::BaseType target_type, uint32_t arg);
std::string bitcast_expression(const SPIRType &target_type, SPIRType::BaseType expr_type, const std::string &expr);
std::string build_composite_combiner(uint32_t result_type, const uint32_t *elems, uint32_t length);
bool remove_duplicate_swizzle(std::string &op);
bool remove_unity_swizzle(uint32_t base, std::string &op);
// Can modify flags to remove readonly/writeonly if image type
// and force recompile.
bool check_atomic_image(uint32_t id);
// Reserved-identifier sanitation; the keyword-set overload renames against
// an explicit blocklist.
virtual void replace_illegal_names();
void replace_illegal_names(const std::unordered_set<std::string> &keywords);
virtual void emit_entry_point_declarations();
void replace_fragment_output(SPIRVariable &var);
void replace_fragment_outputs();
std::string legacy_tex_op(const std::string &op, const SPIRType &imgtype, uint32_t id);
// RelaxedPrecision propagation and analysis.
void forward_relaxed_precision(uint32_t dst_id, const uint32_t *args, uint32_t length);
void analyze_precision_requirements(uint32_t type_id, uint32_t dst_id, uint32_t *args, uint32_t length);
Options::Precision analyze_expression_precision(const uint32_t *args, uint32_t length) const;
// Current statement indentation depth used while emitting source text.
uint32_t indent = 0;
std::unordered_set<uint32_t> emitted_functions;
// Ensure that we declare phi-variable copies even if the original declaration isn't deferred.
std::unordered_set<uint32_t> flushed_phi_variables;
std::unordered_set<uint32_t> flattened_buffer_blocks;
std::unordered_map<uint32_t, bool> flattened_structs;
ShaderSubgroupSupportHelper shader_subgroup_supporter;
// Load/store of structs which were flattened into individual variables.
std::string load_flattened_struct(const std::string &basename, const SPIRType &type);
std::string to_flattened_struct_member(const std::string &basename, const SPIRType &type, uint32_t index);
void store_flattened_struct(uint32_t lhs_id, uint32_t value);
void store_flattened_struct(const std::string &basename, uint32_t rhs, const SPIRType &type,
                            const SmallVector<uint32_t> &indices);
std::string to_flattened_access_chain_expression(uint32_t id);
// Usage tracking. If a temporary is used more than once, use the temporary instead to
// avoid AST explosion when SPIRV is generated with pure SSA and doesn't write stuff to variables.
std::unordered_map<uint32_t, uint32_t> expression_usage_counts;
void track_expression_read(uint32_t id);
// Extensions and raw header lines injected into the generated shader.
SmallVector<std::string> forced_extensions;
SmallVector<std::string> header_lines;
// Used when expressions emit extra opcodes with their own unique IDs,
// and we need to reuse the IDs across recompilation loops.
// Currently used by NMin/Max/Clamp implementations.
std::unordered_map<uint32_t, uint32_t> extra_sub_expressions;
// Workaround wrappers for loading row-major matrices from UBOs.
SmallVector<TypeID> workaround_ubo_load_overload_types;
void request_workaround_wrapper_overload(TypeID id);
void rewrite_load_for_wrapped_row_major(std::string &expr, TypeID loaded_type, ID ptr);
uint32_t statement_count = 0;
  773. inline bool is_legacy() const
  774. {
  775. return (options.es && options.version < 300) || (!options.es && options.version < 130);
  776. }
  777. inline bool is_legacy_es() const
  778. {
  779. return options.es && options.version < 300;
  780. }
  781. inline bool is_legacy_desktop() const
  782. {
  783. return !options.es && options.version < 130;
  784. }
// Bitmask of helper functions that must be emitted manually because the
// target dialect lacks the corresponding built-in; consumed by
// emit_polyfills() and accumulated via require_polyfill().
enum Polyfill : uint32_t
{
	PolyfillTranspose2x2 = 1 << 0,
	PolyfillTranspose3x3 = 1 << 1,
	PolyfillTranspose4x4 = 1 << 2,
	PolyfillDeterminant2x2 = 1 << 3,
	PolyfillDeterminant3x3 = 1 << 4,
	PolyfillDeterminant4x4 = 1 << 5,
	PolyfillMatrixInverse2x2 = 1 << 6,
	PolyfillMatrixInverse3x3 = 1 << 7,
	PolyfillMatrixInverse4x4 = 1 << 8,
};
// Accumulated Polyfill bits; the _relaxed set is for RelaxedPrecision
// (mediump) variants.
uint32_t required_polyfills = 0;
uint32_t required_polyfills_relaxed = 0;
void require_polyfill(Polyfill polyfill, bool relaxed);
// Which vendor flavor of ray tracing / barycentrics is in use.
bool ray_tracing_is_khr = false;
bool barycentric_is_nv = false;
void ray_tracing_khr_fixup_locations();
// Function-call bookkeeping for expression forwarding.
bool args_will_forward(uint32_t id, const uint32_t *args, uint32_t num_args, bool pure);
void register_call_out_argument(uint32_t id);
void register_impure_function_call();
void register_control_dependent_expression(uint32_t expr);
// GL_EXT_shader_pixel_local_storage support.
std::vector<PlsRemap> pls_inputs;
std::vector<PlsRemap> pls_outputs;
std::string pls_decl(const PlsRemap &variable);
const char *to_pls_qualifiers_glsl(const SPIRVariable &variable);
void emit_pls();
void remap_pls_variables();
// GL_EXT_shader_framebuffer_fetch support.
// Maps subpass inputs to fetched color attachments; the bool in
// inout_color_attachments presumably marks coherent vs. non-coherent fetch.
std::vector<std::pair<uint32_t, uint32_t>> subpass_to_framebuffer_fetch_attachment;
std::vector<std::pair<uint32_t, bool>> inout_color_attachments;
bool location_is_framebuffer_fetch(uint32_t location) const;
bool location_is_non_coherent_framebuffer_fetch(uint32_t location) const;
bool subpass_input_is_framebuffer_fetch(uint32_t id) const;
void emit_inout_fragment_outputs_copy_to_subpass_inputs();
const SPIRVariable *find_subpass_input_by_attachment_index(uint32_t index) const;
const SPIRVariable *find_color_output_by_location(uint32_t location) const;
// A variant which takes two sets of names. The secondary is only used to verify there are no collisions,
// but the set is not updated when we have found a new name.
// Used primarily when adding block interface names.
void add_variable(std::unordered_set<std::string> &variables_primary,
                  const std::unordered_set<std::string> &variables_secondary, std::string &name);
void check_function_call_constraints(const uint32_t *args, uint32_t length);
// Invalid expressions force a temporary and trigger recompilation.
void handle_invalid_expression(uint32_t id);
void force_temporary_and_recompile(uint32_t id);
void find_static_extensions();
// Precision mirroring: a temporary may need a copy at a different precision.
uint32_t consume_temporary_in_precision_context(uint32_t type_id, uint32_t id, Options::Precision precision);
std::unordered_map<uint32_t, uint32_t> temporary_to_mirror_precision_alias;
std::unordered_set<uint32_t> composite_insert_overwritten;
std::unordered_set<uint32_t> block_composite_insert_overwrite;
// Loop-initializer emission for for/while lowering.
std::string emit_for_loop_initializers(const SPIRBlock &block);
void emit_while_loop_initializers(const SPIRBlock &block);
bool for_loop_initializers_are_same_type(const SPIRBlock &block);
bool optimize_read_modify_write(const SPIRType &type, const std::string &lhs, const std::string &rhs);
void fixup_image_load_store_access();
bool type_is_empty(const SPIRType &type);
bool can_use_io_location(spv::StorageClass storage, bool block);
const Instruction *get_next_instruction_in_block(const Instruction &instr);
static uint32_t mask_relevant_memory_semantics(uint32_t semantics);
// Constant-to-literal formatting per float width.
std::string convert_half_to_string(const SPIRConstant &value, uint32_t col, uint32_t row);
std::string convert_float_to_string(const SPIRConstant &value, uint32_t col, uint32_t row);
std::string convert_double_to_string(const SPIRConstant &value, uint32_t col, uint32_t row);
std::string convert_separate_image_to_expression(uint32_t id);
// Builtins in GLSL are always specific signedness, but the SPIR-V can declare them
// as either unsigned or signed.
// Sometimes we will need to automatically perform casts on load and store to make this work.
virtual void cast_to_variable_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type);
virtual void cast_from_variable_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type);
void unroll_array_from_complex_load(uint32_t target_id, uint32_t source_id, std::string &expr);
bool unroll_array_to_complex_store(uint32_t target_id, uint32_t source_id);
void convert_non_uniform_expression(std::string &expr, uint32_t ptr_id);
void handle_store_to_invariant_variable(uint32_t store_id, uint32_t value_id);
void disallow_forwarding_in_expression_chain(const SPIRExpression &expr);
bool expression_is_constant_null(uint32_t id) const;
bool expression_is_non_value_type_array(uint32_t ptr);
virtual void emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression);
uint32_t get_integer_width_for_instruction(const Instruction &instr) const;
uint32_t get_integer_width_for_glsl_instruction(GLSLstd450 op, const uint32_t *arguments, uint32_t length) const;
bool variable_is_lut(const SPIRVariable &var) const;
// Radix character for float formatting; '.' regardless of locale.
char current_locale_radix_character = '.';
void fixup_type_alias();
void reorder_type_alias();
void fixup_anonymous_struct_names();
void fixup_anonymous_struct_names(std::unordered_set<uint32_t> &visited, const SPIRType &type);
static const char *vector_swizzle(int vecsize, int index);
// Stage-output masking: outputs suppressed by location/component or builtin.
bool is_stage_output_location_masked(uint32_t location, uint32_t component) const;
bool is_stage_output_builtin_masked(spv::BuiltIn builtin) const;
bool is_stage_output_variable_masked(const SPIRVariable &var) const;
bool is_stage_output_block_member_masked(const SPIRVariable &var, uint32_t index, bool strip_array) const;
bool is_per_primitive_variable(const SPIRVariable &var) const;
uint32_t get_accumulated_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array) const;
uint32_t get_declared_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array) const;
std::unordered_set<LocationComponentPair, InternalHasher> masked_output_locations;
std::unordered_set<uint32_t> masked_output_builtins;
private:
void init();
// Helpers for tracking OpCompositeInsert chains on constants: decompose a
// composite constant into member IDs, rebuild/replace members, and query
// member types, caching pending inserts in const_composite_insert_ids.
SmallVector<ConstantID> get_composite_constant_ids(ConstantID const_id);
void fill_composite_constant(SPIRConstant &constant, TypeID type_id, const SmallVector<ConstantID> &initializers);
void set_composite_constant(ConstantID const_id, TypeID type_id, const SmallVector<ConstantID> &initializers);
TypeID get_composite_member_type(TypeID type_id, uint32_t member_idx);
std::unordered_map<uint32_t, SmallVector<ConstantID>> const_composite_insert_ids;
  887. };
  888. } // namespace SPIRV_CROSS_NAMESPACE
  889. #endif