spirv_glsl.hpp 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074
  1. /*
  2. * Copyright 2015-2021 Arm Limited
  3. * SPDX-License-Identifier: Apache-2.0 OR MIT
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. */
  17. /*
  18. * At your option, you may choose to accept this material under either:
  19. * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
  20. * 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
  21. */
  22. #ifndef SPIRV_CROSS_GLSL_HPP
  23. #define SPIRV_CROSS_GLSL_HPP
  24. #include "GLSL.std.450.h"
  25. #include "spirv_cross.hpp"
  26. #include <unordered_map>
  27. #include <unordered_set>
  28. #include <utility>
  29. namespace SPIRV_CROSS_NAMESPACE
  30. {
// Pixel-local-storage formats usable with remap_pixel_local_storage().
// NOTE(review): names mirror GL ES pixel-local-storage layout qualifiers — confirm
// against the GL_EXT_shader_pixel_local_storage spec when adding entries.
enum PlsFormat
{
	PlsNone = 0,

	PlsR11FG11FB10F,
	PlsR32F,
	PlsRG16F,
	PlsRGB10A2,
	PlsRGBA8,
	PlsRG16,

	PlsRGBA8I,
	PlsRG16I,

	PlsRGB10A2UI,
	PlsRGBA8UI,
	PlsRG16UI,
	PlsR32UI
};
// Associates a SPIR-V variable ID with the pixel-local-storage format it should be
// remapped to. Consumed by remap_pixel_local_storage().
struct PlsRemap
{
	// SPIR-V <id> of the variable to remap.
	uint32_t id;
	// Target pixel-local-storage format.
	PlsFormat format;
};
// Bit flags which modify how access-chain expressions are generated.
// Combined into an AccessChainFlags bitmask.
enum AccessChainFlagBits
{
	ACCESS_CHAIN_INDEX_IS_LITERAL_BIT = 1 << 0,
	ACCESS_CHAIN_CHAIN_ONLY_BIT = 1 << 1,
	ACCESS_CHAIN_PTR_CHAIN_BIT = 1 << 2,
	ACCESS_CHAIN_SKIP_REGISTER_EXPRESSION_READ_BIT = 1 << 3,
	ACCESS_CHAIN_LITERAL_MSB_FORCE_ID = 1 << 4,
	ACCESS_CHAIN_FLATTEN_ALL_MEMBERS_BIT = 1 << 5,
	ACCESS_CHAIN_FORCE_COMPOSITE_BIT = 1 << 6,
	ACCESS_CHAIN_PTR_CHAIN_POINTER_ARITH_BIT = 1 << 7,
	ACCESS_CHAIN_PTR_CHAIN_CAST_TO_SCALAR_BIT = 1 << 8
};
  64. typedef uint32_t AccessChainFlags;
  65. class CompilerGLSL : public Compiler
  66. {
  67. public:
	// User-facing configuration shared by the GLSL backend (and inherited by targets
	// built on it). Set before compile() via set_common_options().
	struct Options
	{
		// The shading language version. Corresponds to #version $VALUE.
		uint32_t version = 450;

		// Emit the OpenGL ES shading language instead of desktop OpenGL.
		bool es = false;

		// Debug option to always emit temporary variables for all expressions.
		bool force_temporary = false;
		// Debug option, can be increased in an attempt to workaround SPIRV-Cross bugs temporarily.
		// If this limit has to be increased, it points to an implementation bug.
		// In certain scenarios, the maximum number of debug iterations may increase beyond this limit
		// as long as we can prove we're making certain kinds of forward progress.
		uint32_t force_recompile_max_debug_iterations = 3;

		// If true, Vulkan GLSL features are used instead of GL-compatible features.
		// Mostly useful for debugging SPIR-V files.
		bool vulkan_semantics = false;

		// If true, gl_PerVertex is explicitly redeclared in vertex, geometry and tessellation shaders.
		// The members of gl_PerVertex are determined by which built-ins are declared by the shader.
		// This option is ignored in ES versions, as redeclaration in ES is not required, and it depends on a different extension
		// (EXT_shader_io_blocks) which makes things a bit more fuzzy.
		bool separate_shader_objects = false;

		// Flattens multidimensional arrays, e.g. float foo[a][b][c] into single-dimensional arrays,
		// e.g. float foo[a * b * c].
		// This function does not change the actual SPIRType of any object.
		// Only the generated code, including declarations of interface variables are changed to be single array dimension.
		bool flatten_multidimensional_arrays = false;

		// For older desktop GLSL targets than version 420, the
		// GL_ARB_shading_language_420pack extension is used to be able to support
		// layout(binding) on UBOs and samplers.
		// If disabled on older targets, binding decorations will be stripped.
		bool enable_420pack_extension = true;

		// In non-Vulkan GLSL, emit push constant blocks as UBOs rather than plain uniforms.
		bool emit_push_constant_as_uniform_buffer = false;

		// Always emit uniform blocks as plain uniforms, regardless of the GLSL version, even when UBOs are supported.
		// Does not apply to shader storage or push constant blocks.
		bool emit_uniform_buffer_as_plain_uniforms = false;

		// Emit OpLine directives if present in the module.
		// May not correspond exactly to original source, but should be a good approximation.
		bool emit_line_directives = false;

		// In cases where readonly/writeonly decoration are not used at all,
		// we try to deduce which qualifier(s) we should actually use, since actually emitting
		// read-write decoration is very rare, and older glslang/HLSL compilers tend to just emit readwrite as a matter of fact.
		// The default (true) is to enable automatic deduction for these cases, but if you trust the decorations set
		// by the SPIR-V, it's recommended to set this to false.
		bool enable_storage_image_qualifier_deduction = true;

		// On some targets (WebGPU), uninitialized variables are banned.
		// If this is enabled, all variables (temporaries, Private, Function)
		// which would otherwise be uninitialized will now be initialized to 0 instead.
		bool force_zero_initialized_variables = false;

		// In GLSL, force use of I/O block flattening, similar to
		// what happens on legacy GLSL targets for blocks and structs.
		bool force_flattened_io_blocks = false;

		// For opcodes where we have to perform explicit additional nan checks, very ugly code is generated.
		// If we opt-in, ignore these requirements.
		// In opcodes like NClamp/NMin/NMax and FP compare, ignore NaN behavior.
		// Use FClamp/FMin/FMax semantics for clamps and lets implementation choose ordered or unordered
		// compares.
		bool relax_nan_checks = false;

		// Loading row-major matrices from UBOs on older AMD Windows OpenGL drivers is problematic.
		// To load these types correctly, we must wrap the loads in a dummy function whose only purpose is to
		// ensure row_major decoration is actually respected.
		// This workaround may cause significant performance degeneration on some Android devices.
		bool enable_row_major_load_workaround = true;

		// If non-zero, controls layout(num_views = N) in; in GL_OVR_multiview2.
		uint32_t ovr_multiview_view_count = 0;

		// Precision qualifiers applied to emitted declarations (ES targets).
		enum Precision
		{
			DontCare,
			Lowp,
			Mediump,
			Highp
		};

		struct VertexOptions
		{
			// "Vertex-like shader" here is any shader stage that can write BuiltInPosition.

			// GLSL: In vertex-like shaders, rewrite [0, w] depth (Vulkan/D3D style) to [-w, w] depth (GL style).
			// MSL: In vertex-like shaders, rewrite [-w, w] depth (GL style) to [0, w] depth.
			// HLSL: In vertex-like shaders, rewrite [-w, w] depth (GL style) to [0, w] depth.
			bool fixup_clipspace = false;

			// In vertex-like shaders, inverts gl_Position.y or equivalent.
			bool flip_vert_y = false;

			// GLSL only, for HLSL version of this option, see CompilerHLSL.
			// If true, the backend will assume that InstanceIndex will need to apply
			// a base instance offset. Set to false if you know you will never use base instance
			// functionality as it might remove some internal uniforms.
			bool support_nonzero_base_instance = true;
		} vertex;

		struct FragmentOptions
		{
			// Add precision mediump float in ES targets when emitting GLES source.
			// Add precision highp int in ES targets when emitting GLES source.
			Precision default_float_precision = Mediump;
			Precision default_int_precision = Highp;
		} fragment;
	};
	// Registers pixel-local-storage remappings for input and output variables.
	// Takes ownership of both lists; remap_pls_variables() then applies the remapping
	// to the matching variables in the IR.
	void remap_pixel_local_storage(std::vector<PlsRemap> inputs, std::vector<PlsRemap> outputs)
	{
		pls_inputs = std::move(inputs);
		pls_outputs = std::move(outputs);
		remap_pls_variables();
	}
	// Redirect a subpassInput reading from input_attachment_index to instead load its value from
	// the color attachment at location = color_location. Requires ESSL.
	// If coherent, uses GL_EXT_shader_framebuffer_fetch, if not, uses noncoherent variant.
	void remap_ext_framebuffer_fetch(uint32_t input_attachment_index, uint32_t color_location, bool coherent);

	// Constructors accept the SPIR-V module in several forms and forward to the base
	// Compiler before running GLSL-specific initialization.
	explicit CompilerGLSL(std::vector<uint32_t> spirv_)
	    : Compiler(std::move(spirv_))
	{
		init();
	}

	CompilerGLSL(const uint32_t *ir_, size_t word_count)
	    : Compiler(ir_, word_count)
	{
		init();
	}

	explicit CompilerGLSL(const ParsedIR &ir_)
	    : Compiler(ir_)
	{
		init();
	}

	explicit CompilerGLSL(ParsedIR &&ir_)
	    : Compiler(std::move(ir_))
	{
		init();
	}

	// Accessors for the GLSL-specific options (see struct Options above).
	const Options &get_common_options() const
	{
		return options;
	}

	void set_common_options(const Options &opts)
	{
		options = opts;
	}

	std::string compile() override;

	// Returns the current string held in the conversion buffer. Useful for
	// capturing what has been converted so far when compile() throws an error.
	std::string get_partial_source();

	// Adds a line to be added right after #version in GLSL backend.
	// This is useful for enabling custom extensions which are outside the scope of SPIRV-Cross.
	// This can be combined with variable remapping.
	// A new-line will be added.
	//
	// While add_header_line() is a more generic way of adding arbitrary text to the header
	// of a GLSL file, require_extension() should be used when adding extensions since it will
	// avoid creating collisions with SPIRV-Cross generated extensions.
	//
	// Code added via add_header_line() is typically backend-specific.
	void add_header_line(const std::string &str);

	// Adds an extension which is required to run this shader, e.g.
	// require_extension("GL_KHR_my_extension");
	void require_extension(const std::string &ext);

	// Returns the list of required extensions. After compilation this will contain any other
	// extensions that the compiler used automatically, in addition to the user specified ones.
	const SmallVector<std::string> &get_required_extensions() const;

	// Legacy GLSL compatibility method.
	// Takes a uniform or push constant variable and flattens it into a (i|u)vec4 array[N]; array instead.
	// For this to work, all types in the block must be the same basic type, e.g. mixing vec2 and vec4 is fine, but
	// mixing int and float is not.
	// The name of the uniform array will be the same as the interface block name.
	void flatten_buffer_block(VariableID id);

	// After compilation, query if a variable ID was used as a depth resource.
	// This is meaningful for MSL since descriptor types depend on this knowledge.
	// Cases which return true:
	// - Images which are declared with depth = 1 image type.
	// - Samplers which are statically used at least once with Dref opcodes.
	// - Images which are statically used at least once with Dref opcodes.
	bool variable_is_depth_or_compare(VariableID id) const;

	// If a shader output is active in this stage, but inactive in a subsequent stage,
	// this can be signalled here. This can be used to work around certain cross-stage matching problems
	// which plagues MSL and HLSL in certain scenarios.
	// An output which matches one of these will not be emitted in stage output interfaces, but rather treated as a private
	// variable.
	// This option is only meaningful for MSL and HLSL, since GLSL matches by location directly.
	// Masking builtins only takes effect if the builtin in question is part of the stage output interface.
	void mask_stage_output_by_location(uint32_t location, uint32_t component);
	void mask_stage_output_by_builtin(spv::BuiltIn builtin);

	// Allow to control how to format float literals in the output.
	// Set to "nullptr" to use the default "convert_to_string" function.
	// This handle is not owned by SPIRV-Cross and must remain valid until compile() has been called.
	void set_float_formatter(FloatFormatter *formatter)
	{
		float_formatter = formatter;
	}
protected:
	// Tracks which subgroup features the module requires and resolves which
	// candidate GL extensions can provide them (see resolve()).
	struct ShaderSubgroupSupportHelper
	{
		// lower enum value = greater priority
		enum Candidate
		{
			KHR_shader_subgroup_ballot,
			KHR_shader_subgroup_basic,
			KHR_shader_subgroup_vote,
			KHR_shader_subgroup_arithmetic,
			NV_gpu_shader_5,
			NV_shader_thread_group,
			NV_shader_thread_shuffle,
			ARB_shader_ballot,
			ARB_shader_group_vote,
			AMD_gcn_shader,

			CandidateCount
		};

		static const char *get_extension_name(Candidate c);
		static SmallVector<std::string> get_extra_required_extension_names(Candidate c);
		static const char *get_extra_required_extension_predicate(Candidate c);

		// Individual subgroup capabilities a shader may request. Explicit values
		// because these index into FeatureMask bit positions.
		enum Feature
		{
			SubgroupMask = 0,
			SubgroupSize = 1,
			SubgroupInvocationID = 2,
			SubgroupID = 3,
			NumSubgroups = 4,
			SubgroupBroadcast_First = 5,
			SubgroupBallotFindLSB_MSB = 6,
			SubgroupAll_Any_AllEqualBool = 7,
			SubgroupAllEqualT = 8,
			SubgroupElect = 9,
			SubgroupBarrier = 10,
			SubgroupMemBarrier = 11,
			SubgroupBallot = 12,
			SubgroupInverseBallot_InclBitCount_ExclBitCout = 13,
			SubgroupBallotBitExtract = 14,
			SubgroupBallotBitCount = 15,
			SubgroupArithmeticIAddReduce = 16,
			SubgroupArithmeticIAddExclusiveScan = 17,
			SubgroupArithmeticIAddInclusiveScan = 18,
			SubgroupArithmeticFAddReduce = 19,
			SubgroupArithmeticFAddExclusiveScan = 20,
			SubgroupArithmeticFAddInclusiveScan = 21,
			SubgroupArithmeticIMulReduce = 22,
			SubgroupArithmeticIMulExclusiveScan = 23,
			SubgroupArithmeticIMulInclusiveScan = 24,
			SubgroupArithmeticFMulReduce = 25,
			SubgroupArithmeticFMulExclusiveScan = 26,
			SubgroupArithmeticFMulInclusiveScan = 27,
			FeatureCount
		};

		using FeatureMask = uint32_t;
		static_assert(sizeof(FeatureMask) * 8u >= FeatureCount, "Mask type needs more bits.");

		using CandidateVector = SmallVector<Candidate, CandidateCount>;
		using FeatureVector = SmallVector<Feature>;

		static FeatureVector get_feature_dependencies(Feature feature);
		static FeatureMask get_feature_dependency_mask(Feature feature);
		static bool can_feature_be_implemented_without_extensions(Feature feature);
		static Candidate get_KHR_extension_for_feature(Feature feature);

		// Per-candidate weighting produced by resolve().
		struct Result
		{
			Result();
			uint32_t weights[CandidateCount];
		};

		void request_feature(Feature feature);
		bool is_feature_requested(Feature feature) const;
		Result resolve() const;

		static CandidateVector get_candidates_for_feature(Feature ft, const Result &r);

	private:
		static CandidateVector get_candidates_for_feature(Feature ft);
		static FeatureMask build_mask(const SmallVector<Feature> &features);
		// Bitmask of requested Feature values.
		FeatureMask feature_mask = 0;
	};
	// TODO remove this function when all subgroup ops are supported (or make it always return true)
	static bool is_supported_subgroup_op_in_opengl(spv::Op op, const uint32_t *ops);

	void reset(uint32_t iteration_count);
	void emit_function(SPIRFunction &func, const Bitset &return_flags);

	bool has_extension(const std::string &ext) const;
	void require_extension_internal(const std::string &ext);

	// Virtualize methods which need to be overridden by subclass targets like C++ and such.
	virtual void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags);

	// State tracked while emitting blocks; switch emission uses a stack since
	// switches can nest.
	SPIRBlock *current_emitting_block = nullptr;
	SmallVector<SPIRBlock *> current_emitting_switch_stack;
	bool current_emitting_switch_fallthrough = false;

	virtual void emit_instruction(const Instruction &instr);

	// Pair of IDs describing a temporary-to-temporary copy.
	struct TemporaryCopy
	{
		uint32_t dst_id;
		uint32_t src_id;
	};
	TemporaryCopy handle_instruction_precision(const Instruction &instr);
	void emit_block_instructions(SPIRBlock &block);
	void emit_block_instructions_with_masked_debug(SPIRBlock &block);

	// For relax_nan_checks.
	GLSLstd450 get_remapped_glsl_op(GLSLstd450 std450_op) const;
	spv::Op get_remapped_spirv_op(spv::Op op) const;

	virtual void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args,
	                          uint32_t count);
	virtual void emit_spv_amd_shader_ballot_op(uint32_t result_type, uint32_t result_id, uint32_t op,
	                                           const uint32_t *args, uint32_t count);
	virtual void emit_spv_amd_shader_explicit_vertex_parameter_op(uint32_t result_type, uint32_t result_id, uint32_t op,
	                                                              const uint32_t *args, uint32_t count);
	virtual void emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t result_id, uint32_t op,
	                                                   const uint32_t *args, uint32_t count);
	virtual void emit_spv_amd_gcn_shader_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args,
	                                        uint32_t count);
	virtual void emit_header();
	void emit_line_directive(uint32_t file_id, uint32_t line_literal);
	void build_workgroup_size(SmallVector<std::string> &arguments, const SpecializationConstant &x,
	                          const SpecializationConstant &y, const SpecializationConstant &z);
	void request_subgroup_feature(ShaderSubgroupSupportHelper::Feature feature);

	virtual void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id);
	virtual void emit_texture_op(const Instruction &i, bool sparse);
	virtual std::string to_texture_op(const Instruction &i, bool sparse, bool *forward,
	                                  SmallVector<uint32_t> &inherited_expressions);
	virtual void emit_subgroup_op(const Instruction &i);
	virtual std::string type_to_glsl(const SPIRType &type, uint32_t id = 0);
	virtual std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage);
	virtual void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index,
	                                const std::string &qualifier = "", uint32_t base_offset = 0);
	virtual void emit_struct_padding_target(const SPIRType &type);
	virtual std::string image_type_glsl(const SPIRType &type, uint32_t id = 0, bool member = false);
	std::string constant_expression(const SPIRConstant &c,
	                                bool inside_block_like_struct_scope = false,
	                                bool inside_struct_scope = false);
	virtual std::string constant_op_expression(const SPIRConstantOp &cop);
	virtual std::string constant_expression_vector(const SPIRConstant &c, uint32_t vector);
	virtual void emit_fixup();
	virtual std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id = 0);
	virtual bool variable_decl_is_remapped_storage(const SPIRVariable &var, spv::StorageClass storage) const;
	virtual std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id);

	struct TextureFunctionBaseArguments
	{
		// GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor.
		TextureFunctionBaseArguments() = default;
		VariableID img = 0;
		const SPIRType *imgtype = nullptr;
		bool is_fetch = false, is_gather = false, is_proj = false;
	};

	struct TextureFunctionNameArguments
	{
		// GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor.
		TextureFunctionNameArguments() = default;
		TextureFunctionBaseArguments base;
		bool has_array_offsets = false, has_offset = false, has_grad = false;
		bool has_dref = false, is_sparse_feedback = false, has_min_lod = false;
		uint32_t lod = 0;
	};
	virtual std::string to_function_name(const TextureFunctionNameArguments &args);

	struct TextureFunctionArguments
	{
		// GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor.
		TextureFunctionArguments() = default;
		TextureFunctionBaseArguments base;
		uint32_t coord = 0, coord_components = 0, dref = 0;
		uint32_t grad_x = 0, grad_y = 0, lod = 0, offset = 0;
		uint32_t bias = 0, component = 0, sample = 0, sparse_texel = 0, min_lod = 0;
		bool nonuniform_expression = false, has_array_offsets = false;
	};
	virtual std::string to_function_args(const TextureFunctionArguments &args, bool *p_forward);

	void emit_sparse_feedback_temporaries(uint32_t result_type_id, uint32_t id, uint32_t &feedback_id,
	                                      uint32_t &texel_id);
	uint32_t get_sparse_feedback_texel_id(uint32_t id) const;
	virtual void emit_buffer_block(const SPIRVariable &type);
	virtual void emit_push_constant_block(const SPIRVariable &var);
	virtual void emit_uniform(const SPIRVariable &var);
	virtual std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t physical_type_id,
	                                           bool packed_type, bool row_major);
	virtual bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const;
	virtual bool is_user_type_structured(uint32_t id) const;

	void emit_copy_logical_type(uint32_t lhs_id, uint32_t lhs_type_id, uint32_t rhs_id, uint32_t rhs_type_id,
	                            SmallVector<uint32_t> chain);

	// Buffer the emitted source text is accumulated into (see statement()).
	StringStream<> buffer;
  426. template <typename T>
  427. inline void statement_inner(T &&t)
  428. {
  429. buffer << std::forward<T>(t);
  430. statement_count++;
  431. }
  432. template <typename T, typename... Ts>
  433. inline void statement_inner(T &&t, Ts &&... ts)
  434. {
  435. buffer << std::forward<T>(t);
  436. statement_count++;
  437. statement_inner(std::forward<Ts>(ts)...);
  438. }
  439. template <typename... Ts>
  440. inline void statement(Ts &&... ts)
  441. {
  442. if (is_forcing_recompilation())
  443. {
  444. // Do not bother emitting code while force_recompile is active.
  445. // We will compile again.
  446. statement_count++;
  447. return;
  448. }
  449. if (redirect_statement)
  450. {
  451. redirect_statement->push_back(join(std::forward<Ts>(ts)...));
  452. statement_count++;
  453. }
  454. else
  455. {
  456. for (uint32_t i = 0; i < indent; i++)
  457. buffer << " ";
  458. statement_inner(std::forward<Ts>(ts)...);
  459. buffer << '\n';
  460. }
  461. }
  462. template <typename... Ts>
  463. inline void statement_no_indent(Ts &&... ts)
  464. {
  465. auto old_indent = indent;
  466. indent = 0;
  467. statement(std::forward<Ts>(ts)...);
  468. indent = old_indent;
  469. }
	// Used for implementing continue blocks where
	// we want to obtain a list of statements we can merge
	// on a single line separated by comma.
	SmallVector<std::string> *redirect_statement = nullptr;
	const SPIRBlock *current_continue_block = nullptr;
	bool block_temporary_hoisting = false;
	bool block_debug_directives = false;

	// Scope helpers which emit braces and adjust indentation.
	void begin_scope();
	void end_scope();
	void end_scope(const std::string &trailer);
	void end_scope_decl();
	void end_scope_decl(const std::string &decl);

	Options options;

	// Allow Metal to use the array<T> template to make arrays a value type
	virtual std::string type_to_array_glsl(const SPIRType &type, uint32_t variable_id);
	std::string to_array_size(const SPIRType &type, uint32_t index);
	uint32_t to_array_size_literal(const SPIRType &type, uint32_t index) const;
	uint32_t to_array_size_literal(const SPIRType &type) const;
	virtual std::string variable_decl(const SPIRVariable &variable); // Threadgroup arrays can't have a wrapper type
	std::string variable_decl_function_local(SPIRVariable &variable);

	void add_local_variable_name(uint32_t id);
	void add_resource_name(uint32_t id);
	void add_member_name(SPIRType &type, uint32_t name);
	void add_function_overload(const SPIRFunction &func);

	virtual bool is_non_native_row_major_matrix(uint32_t id);
	virtual bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index);
	bool member_is_remapped_physical_type(const SPIRType &type, uint32_t index) const;
	bool member_is_packed_physical_type(const SPIRType &type, uint32_t index) const;
	virtual std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type,
	                                             uint32_t physical_type_id, bool is_packed,
	                                             bool relaxed = false);

	// Name caches used to keep generated identifiers unique within their scopes.
	std::unordered_set<std::string> local_variable_names;
	std::unordered_set<std::string> resource_names;
	std::unordered_set<std::string> block_input_names;
	std::unordered_set<std::string> block_output_names;
	std::unordered_set<std::string> block_ubo_names;
	std::unordered_set<std::string> block_ssbo_names;
	std::unordered_set<std::string> block_names; // A union of all block_*_names.
	std::unordered_map<std::string, std::unordered_set<uint64_t>> function_overloads;
	std::unordered_map<uint32_t, std::string> preserved_aliases;
	void preserve_alias_on_reset(uint32_t id);
	void reset_name_caches();

	bool processing_entry_point = false;
	// Can be overridden by subclass backends for trivial things which
	// shouldn't need polymorphism.
	struct BackendVariations
	{
		std::string discard_literal = "discard";
		std::string demote_literal = "demote";
		std::string null_pointer_literal = "";
		bool float_literal_suffix = false;
		bool double_literal_suffix = true;
		bool uint32_t_literal_suffix = true;
		bool long_long_literal_suffix = false;
		const char *basic_int_type = "int";
		const char *basic_uint_type = "uint";
		const char *basic_int8_type = "int8_t";
		const char *basic_uint8_type = "uint8_t";
		const char *basic_int16_type = "int16_t";
		const char *basic_uint16_type = "uint16_t";
		const char *int16_t_literal_suffix = "s";
		const char *uint16_t_literal_suffix = "us";
		const char *nonuniform_qualifier = "nonuniformEXT";
		const char *boolean_mix_function = "mix";
		SPIRType::BaseType boolean_in_struct_remapped_type = SPIRType::Boolean;
		bool swizzle_is_function = false;
		bool shared_is_implied = false;
		bool unsized_array_supported = true;
		bool explicit_struct_type = false;
		bool use_initializer_list = false;
		bool use_typed_initializer_list = false;
		bool can_declare_struct_inline = true;
		bool can_declare_arrays_inline = true;
		bool native_row_major_matrix = true;
		bool use_constructor_splatting = true;
		bool allow_precision_qualifiers = false;
		bool can_swizzle_scalar = false;
		bool force_gl_in_out_block = false;
		bool force_merged_mesh_block = false;
		bool can_return_array = true;
		bool allow_truncated_access_chain = false;
		bool supports_extensions = false;
		bool supports_empty_struct = false;
		bool array_is_value_type = true;
		bool array_is_value_type_in_buffer_blocks = true;
		bool comparison_image_samples_scalar = false;
		bool native_pointers = false;
		bool support_small_type_sampling_result = false;
		bool support_case_fallthrough = true;
		bool use_array_constructor = false;
		bool needs_row_major_load_workaround = false;
		bool support_pointer_to_pointer = false;
		bool support_precise_qualifier = false;
		bool support_64bit_switch = false;
		bool workgroup_size_is_hidden = false;
		bool requires_relaxed_precision_analysis = false;
		bool implicit_c_integer_promotion_rules = false;
	} backend;
// --- Top-level declaration emission ---
void emit_struct(SPIRType &type);
void emit_resources();
void emit_extension_workarounds(spv::ExecutionModel model);
void emit_subgroup_arithmetic_workaround(const std::string &func, spv::Op op, spv::GroupOperation group_op);
// polyfills is a bitmask of the Polyfill enum declared below;
// relaxed selects the variants tracked by required_polyfills_relaxed.
void emit_polyfills(uint32_t polyfills, bool relaxed);
// Buffer blocks (UBO/SSBO) in their various emission strategies.
void emit_buffer_block_native(const SPIRVariable &var);
void emit_buffer_reference_block(uint32_t type_id, bool forward_declaration);
void emit_buffer_block_legacy(const SPIRVariable &var);
void emit_buffer_block_flattened(const SPIRVariable &type);
// Implicitly declared built-in I/O blocks.
void fixup_implicit_builtin_block_names(spv::ExecutionModel model);
void emit_declared_builtin_block(spv::StorageClass storage, spv::ExecutionModel model);
bool should_force_emit_builtin_block(spv::StorageClass storage);
void emit_push_constant_block_vulkan(const SPIRVariable &var);
void emit_push_constant_block_glsl(const SPIRVariable &var);
void emit_interface_block(const SPIRVariable &type);
// Flattened I/O blocks: members are emitted as individual stage inputs/outputs.
void emit_flattened_io_block(const SPIRVariable &var, const char *qual);
void emit_flattened_io_block_struct(const std::string &basename, const SPIRType &type, const char *qual,
                                    const SmallVector<uint32_t> &indices);
void emit_flattened_io_block_member(const std::string &basename, const SPIRType &type, const char *qual,
                                    const SmallVector<uint32_t> &indices);
void emit_block_chain(SPIRBlock &block);
// --- Constants, control flow and expression forwarding ---
void emit_hoisted_temporaries(SmallVector<std::pair<TypeID, ID>> &temporaries);
void emit_constant(const SPIRConstant &constant);
// --- Opcode emission helpers ---
// The *_func_op helpers emit "op(arg0, ..., argN)" calls of the given arity;
// the *_cast variants additionally bitcast operands to input_type first.
void emit_mix_op(uint32_t result_type, uint32_t id, uint32_t left, uint32_t right, uint32_t lerp);
void emit_nminmax_op(uint32_t result_type, uint32_t id, uint32_t op0, uint32_t op1, GLSLstd450 op);
void emit_emulated_ahyper_op(uint32_t result_type, uint32_t result_id, uint32_t op0, GLSLstd450 op);
bool to_trivial_mix_op(const SPIRType &type, std::string &op, uint32_t left, uint32_t right, uint32_t lerp);
void emit_quaternary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2,
                             uint32_t op3, const char *op);
void emit_trinary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2,
                          const char *op);
void emit_binary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op);
void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op);
void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, const char *op);
void emit_unary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op,
                             SPIRType::BaseType input_type, SPIRType::BaseType expected_result_type);
void emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op,
                              SPIRType::BaseType input_type, bool skip_cast_if_equal_type);
void emit_binary_func_op_cast_clustered(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
                                        const char *op, SPIRType::BaseType input_type);
void emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2,
                               const char *op, SPIRType::BaseType input_type);
void emit_trinary_func_op_bitextract(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
                                     uint32_t op2, const char *op, SPIRType::BaseType expected_result_type,
                                     SPIRType::BaseType input_type0, SPIRType::BaseType input_type1,
                                     SPIRType::BaseType input_type2);
void emit_bitfield_insert_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2,
                             uint32_t op3, const char *op, SPIRType::BaseType offset_count_type);
void emit_unary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op);
// "Unrolled" variants emit the operation per component instead of vectorized.
void emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op);
void emit_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op);
void emit_unrolled_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op,
                             bool negate, SPIRType::BaseType expected_type);
void emit_binary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op,
                         SPIRType::BaseType input_type, bool skip_cast_if_equal_type, bool implicit_integer_promotion);
SPIRType binary_op_bitcast_helper(std::string &cast_op0, std::string &cast_op1, SPIRType::BaseType &input_type,
                                  uint32_t op0, uint32_t op1, bool skip_cast_if_equal_type);
virtual bool emit_complex_bitcast(uint32_t result_type, uint32_t id, uint32_t op0);
std::string to_ternary_expression(const SPIRType &result_type, uint32_t select, uint32_t true_value,
                                  uint32_t false_value);
void emit_unary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op);
void emit_unary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op);
virtual void emit_mesh_tasks(SPIRBlock &block);
// --- Expression forwarding state and access-chain generation ---
bool expression_is_forwarded(uint32_t id) const;
bool expression_suppresses_usage_tracking(uint32_t id) const;
bool expression_read_implies_multiple_reads(uint32_t id) const;
SPIRExpression &emit_op(uint32_t result_type, uint32_t result_id, const std::string &rhs, bool forward_rhs,
                        bool suppress_usage_tracking = false);
void access_chain_internal_append_index(std::string &expr, uint32_t base, const SPIRType *type,
                                        AccessChainFlags flags, bool &access_chain_is_arrayed, uint32_t index);
std::string access_chain_internal(uint32_t base, const uint32_t *indices, uint32_t count, AccessChainFlags flags,
                                  AccessChainMeta *meta);
// Only meaningful on backends with physical pointer support ala MSL.
// Relevant for PtrAccessChain / BDA.
virtual uint32_t get_physical_type_stride(const SPIRType &type) const;
spv::StorageClass get_expression_effective_storage_class(uint32_t ptr);
virtual bool access_chain_needs_stage_io_builtin_translation(uint32_t base);
virtual void check_physical_type_cast(std::string &expr, const SPIRType *type, uint32_t physical_type);
virtual bool prepare_access_chain_for_scalar_access(std::string &expr, const SPIRType &type,
                                                    spv::StorageClass storage, bool &is_packed);
std::string access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type,
                         AccessChainMeta *meta = nullptr, bool ptr_chain = false);
// Access chains into resources that were flattened
// (see flattened_buffer_blocks / flattened_structs).
std::string flattened_access_chain(uint32_t base, const uint32_t *indices, uint32_t count,
                                   const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride,
                                   uint32_t array_stride, bool need_transpose);
std::string flattened_access_chain_struct(uint32_t base, const uint32_t *indices, uint32_t count,
                                          const SPIRType &target_type, uint32_t offset);
std::string flattened_access_chain_matrix(uint32_t base, const uint32_t *indices, uint32_t count,
                                          const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride,
                                          bool need_transpose);
std::string flattened_access_chain_vector(uint32_t base, const uint32_t *indices, uint32_t count,
                                          const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride,
                                          bool need_transpose);
std::pair<std::string, uint32_t> flattened_access_chain_offset(const SPIRType &basetype, const uint32_t *indices,
                                                               uint32_t count, uint32_t offset,
                                                               uint32_t word_stride, bool *need_transpose = nullptr,
                                                               uint32_t *matrix_stride = nullptr,
                                                               uint32_t *array_stride = nullptr,
                                                               bool ptr_chain = false);
const char *index_to_swizzle(uint32_t index);
std::string remap_swizzle(const SPIRType &result_type, uint32_t input_components, const std::string &expr);
std::string declare_temporary(uint32_t type, uint32_t id);
void emit_uninitialized_temporary(uint32_t type, uint32_t id);
SPIRExpression &emit_uninitialized_temporary_expression(uint32_t type, uint32_t id);
void append_global_func_args(const SPIRFunction &func, uint32_t index, SmallVector<std::string> &arglist);
// --- Conversion of SPIR-V IDs to textual expressions ---
// register_expression_read controls whether the read is counted for usage
// tracking (see expression_usage_counts / track_expression_read).
std::string to_non_uniform_aware_expression(uint32_t id);
std::string to_expression(uint32_t id, bool register_expression_read = true);
std::string to_composite_constructor_expression(const SPIRType &parent_type, uint32_t id, bool block_like_type);
std::string to_rerolled_array_expression(const SPIRType &parent_type, const std::string &expr, const SPIRType &type);
std::string to_enclosed_expression(uint32_t id, bool register_expression_read = true);
std::string to_unpacked_expression(uint32_t id, bool register_expression_read = true);
std::string to_unpacked_row_major_matrix_expression(uint32_t id);
std::string to_enclosed_unpacked_expression(uint32_t id, bool register_expression_read = true);
std::string to_dereferenced_expression(uint32_t id, bool register_expression_read = true);
std::string to_pointer_expression(uint32_t id, bool register_expression_read = true);
std::string to_enclosed_pointer_expression(uint32_t id, bool register_expression_read = true);
std::string to_extract_component_expression(uint32_t id, uint32_t index);
std::string to_extract_constant_composite_expression(uint32_t result_type, const SPIRConstant &c,
                                                     const uint32_t *chain, uint32_t length);
// "Enclosed" expressions are wrapped in parentheses when precedence demands it.
static bool needs_enclose_expression(const std::string &expr);
std::string enclose_expression(const std::string &expr);
std::string dereference_expression(const SPIRType &expression_type, const std::string &expr);
std::string address_of_expression(const std::string &expr);
void strip_enclosed_expression(std::string &expr);
std::string to_member_name(const SPIRType &type, uint32_t index);
virtual std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain_is_resolved);
std::string to_multi_member_reference(const SPIRType &type, const SmallVector<uint32_t> &indices);
// --- Declarations, qualifiers and layout strings ---
std::string type_to_glsl_constructor(const SPIRType &type);
std::string argument_decl(const SPIRFunction::Parameter &arg);
virtual std::string to_qualifiers_glsl(uint32_t id);
void fixup_io_block_patch_primitive_qualifiers(const SPIRVariable &var);
void emit_output_variable_initializer(const SPIRVariable &var);
std::string to_precision_qualifiers_glsl(uint32_t id);
virtual const char *to_storage_qualifiers_glsl(const SPIRVariable &var);
std::string flags_to_qualifiers_glsl(const SPIRType &type, const Bitset &flags);
const char *format_to_glsl(spv::ImageFormat format);
virtual std::string layout_for_member(const SPIRType &type, uint32_t index);
virtual std::string to_interpolation_qualifiers(const Bitset &flags);
std::string layout_for_variable(const SPIRVariable &variable);
std::string to_combined_image_sampler(VariableID image_id, VariableID samp_id);
virtual bool skip_argument(uint32_t id) const;
virtual bool emit_array_copy(const char *expr, uint32_t lhs_id, uint32_t rhs_id,
                             spv::StorageClass lhs_storage, spv::StorageClass rhs_storage);
virtual void emit_block_hints(const SPIRBlock &block);
virtual std::string to_initializer_expression(const SPIRVariable &var);
virtual std::string to_zero_initialized_expression(uint32_t type_id);
bool type_can_zero_initialize(const SPIRType &type) const;
// --- Buffer packing rules ---
bool buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing,
                                uint32_t *failed_index = nullptr, uint32_t start_offset = 0,
                                uint32_t end_offset = ~(0u));
std::string buffer_to_packing_standard(const SPIRType &type,
                                       bool support_std430_without_scalar_layout,
                                       bool support_enhanced_layouts);
uint32_t type_to_packed_base_size(const SPIRType &type, BufferPackingStandard packing);
uint32_t type_to_packed_alignment(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
uint32_t type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
uint32_t type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
uint32_t type_to_location_count(const SPIRType &type) const;
// --- Bitcasts and composite construction ---
std::string bitcast_glsl(const SPIRType &result_type, uint32_t arg);
virtual std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type);
std::string bitcast_expression(SPIRType::BaseType target_type, uint32_t arg);
std::string bitcast_expression(const SPIRType &target_type, SPIRType::BaseType expr_type, const std::string &expr);
std::string build_composite_combiner(uint32_t result_type, const uint32_t *elems, uint32_t length);
bool remove_duplicate_swizzle(std::string &op);
bool remove_unity_swizzle(uint32_t base, std::string &op);
// Can modify flags to remove readonly/writeonly if image type
// and force recompile.
bool check_atomic_image(uint32_t id);
// Rename identifiers that collide with target-language keywords.
virtual void replace_illegal_names();
void replace_illegal_names(const std::unordered_set<std::string> &keywords);
virtual void emit_entry_point_declarations();
void replace_fragment_output(SPIRVariable &var);
void replace_fragment_outputs();
std::string legacy_tex_op(const std::string &op, const SPIRType &imgtype, uint32_t id);
// RelaxedPrecision propagation and analysis.
void forward_relaxed_precision(uint32_t dst_id, const uint32_t *args, uint32_t length);
void analyze_precision_requirements(uint32_t type_id, uint32_t dst_id, uint32_t *args, uint32_t length);
Options::Precision analyze_expression_precision(const uint32_t *args, uint32_t length) const;
// --- Compiler state ---
uint32_t indent = 0;
std::unordered_set<uint32_t> emitted_functions;
// Ensure that we declare phi-variable copies even if the original declaration isn't deferred.
std::unordered_set<uint32_t> flushed_phi_variables;
std::unordered_set<uint32_t> flattened_buffer_blocks;
std::unordered_map<uint32_t, bool> flattened_structs;
ShaderSubgroupSupportHelper shader_subgroup_supporter;
std::string load_flattened_struct(const std::string &basename, const SPIRType &type);
std::string to_flattened_struct_member(const std::string &basename, const SPIRType &type, uint32_t index);
void store_flattened_struct(uint32_t lhs_id, uint32_t value);
void store_flattened_struct(const std::string &basename, uint32_t rhs, const SPIRType &type,
                            const SmallVector<uint32_t> &indices);
std::string to_flattened_access_chain_expression(uint32_t id);
// Usage tracking. If a temporary is used more than once, use the temporary instead to
// avoid AST explosion when SPIRV is generated with pure SSA and doesn't write stuff to variables.
std::unordered_map<uint32_t, uint32_t> expression_usage_counts;
void track_expression_read(uint32_t id);
SmallVector<std::string> forced_extensions;
SmallVector<std::string> header_lines;
// Used when expressions emit extra opcodes with their own unique IDs,
// and we need to reuse the IDs across recompilation loops.
// Currently used by NMin/Max/Clamp implementations.
std::unordered_map<uint32_t, uint32_t> extra_sub_expressions;
SmallVector<TypeID> workaround_ubo_load_overload_types;
void request_workaround_wrapper_overload(TypeID id);
void rewrite_load_for_wrapped_row_major(std::string &expr, TypeID loaded_type, ID ptr);
uint32_t statement_count = 0;
  787. inline bool is_legacy() const
  788. {
  789. return (options.es && options.version < 300) || (!options.es && options.version < 130);
  790. }
  791. inline bool is_legacy_es() const
  792. {
  793. return options.es && options.version < 300;
  794. }
  795. inline bool is_legacy_desktop() const
  796. {
  797. return !options.es && options.version < 130;
  798. }
// Bitmask of helper-function polyfills that must be emitted into the shader
// (accumulated in required_polyfills / required_polyfills_relaxed and
// emitted by emit_polyfills()).
enum Polyfill : uint32_t
{
	// transpose() replacements, one flag per square matrix dimension.
	PolyfillTranspose2x2 = 1 << 0,
	PolyfillTranspose3x3 = 1 << 1,
	PolyfillTranspose4x4 = 1 << 2,
	// determinant() replacements.
	PolyfillDeterminant2x2 = 1 << 3,
	PolyfillDeterminant3x3 = 1 << 4,
	PolyfillDeterminant4x4 = 1 << 5,
	// inverse() replacements.
	PolyfillMatrixInverse2x2 = 1 << 6,
	PolyfillMatrixInverse3x3 = 1 << 7,
	PolyfillMatrixInverse4x4 = 1 << 8,
	// GLSL.std.450 NMin/NMax/NClamp replacements, one flag per bit width.
	PolyfillNMin16 = 1 << 9,
	PolyfillNMin32 = 1 << 10,
	PolyfillNMin64 = 1 << 11,
	PolyfillNMax16 = 1 << 12,
	PolyfillNMax32 = 1 << 13,
	PolyfillNMax64 = 1 << 14,
	PolyfillNClamp16 = 1 << 15,
	PolyfillNClamp32 = 1 << 16,
	PolyfillNClamp64 = 1 << 17,
};
// Polyfill bitmasks accumulated during compilation; the _relaxed set holds
// the variants requested with relaxed = true in require_polyfill().
uint32_t required_polyfills = 0;
uint32_t required_polyfills_relaxed = 0;
void require_polyfill(Polyfill polyfill, bool relaxed);
// Which extension flavor (KHR vs NV) is in use for ray tracing / barycentrics.
bool ray_tracing_is_khr = false;
bool barycentric_is_nv = false;
void ray_tracing_khr_fixup_locations();
bool args_will_forward(uint32_t id, const uint32_t *args, uint32_t num_args, bool pure);
void register_call_out_argument(uint32_t id);
void register_impure_function_call();
void register_control_dependent_expression(uint32_t expr);
// GL_EXT_shader_pixel_local_storage support.
std::vector<PlsRemap> pls_inputs;
std::vector<PlsRemap> pls_outputs;
std::string pls_decl(const PlsRemap &variable);
const char *to_pls_qualifiers_glsl(const SPIRVariable &variable);
void emit_pls();
void remap_pls_variables();
// GL_EXT_shader_framebuffer_fetch support.
std::vector<std::pair<uint32_t, uint32_t>> subpass_to_framebuffer_fetch_attachment;
std::vector<std::pair<uint32_t, bool>> inout_color_attachments;
bool location_is_framebuffer_fetch(uint32_t location) const;
bool location_is_non_coherent_framebuffer_fetch(uint32_t location) const;
bool subpass_input_is_framebuffer_fetch(uint32_t id) const;
void emit_inout_fragment_outputs_copy_to_subpass_inputs();
const SPIRVariable *find_subpass_input_by_attachment_index(uint32_t index) const;
const SPIRVariable *find_color_output_by_location(uint32_t location) const;
// A variant which takes two sets of names. The secondary is only used to verify there are no collisions,
// but the set is not updated when we have found a new name.
// Used primarily when adding block interface names.
void add_variable(std::unordered_set<std::string> &variables_primary,
                  const std::unordered_set<std::string> &variables_secondary, std::string &name);
void check_function_call_constraints(const uint32_t *args, uint32_t length);
void handle_invalid_expression(uint32_t id);
void force_temporary_and_recompile(uint32_t id);
void find_static_extensions();
// Mirror-precision temporaries used when an expression must exist in both
// precision contexts (see temporary_to_mirror_precision_alias).
uint32_t consume_temporary_in_precision_context(uint32_t type_id, uint32_t id, Options::Precision precision);
std::unordered_map<uint32_t, uint32_t> temporary_to_mirror_precision_alias;
std::unordered_set<uint32_t> composite_insert_overwritten;
std::unordered_set<uint32_t> block_composite_insert_overwrite;
// --- Loop emission and miscellaneous analysis ---
std::string emit_for_loop_initializers(const SPIRBlock &block);
void emit_while_loop_initializers(const SPIRBlock &block);
bool for_loop_initializers_are_same_type(const SPIRBlock &block);
bool optimize_read_modify_write(const SPIRType &type, const std::string &lhs, const std::string &rhs);
void fixup_image_load_store_access();
bool type_is_empty(const SPIRType &type);
bool can_use_io_location(spv::StorageClass storage, bool block);
const Instruction *get_next_instruction_in_block(const Instruction &instr);
static uint32_t mask_relevant_memory_semantics(uint32_t semantics);
// Constant-to-text conversion for the supported float widths.
std::string convert_half_to_string(const SPIRConstant &value, uint32_t col, uint32_t row);
std::string convert_float_to_string(const SPIRConstant &value, uint32_t col, uint32_t row);
std::string convert_double_to_string(const SPIRConstant &value, uint32_t col, uint32_t row);
std::string convert_separate_image_to_expression(uint32_t id);
// Builtins in GLSL are always specific signedness, but the SPIR-V can declare them
// as either unsigned or signed.
// Sometimes we will need to automatically perform casts on load and store to make this work.
virtual SPIRType::BaseType get_builtin_basetype(spv::BuiltIn builtin, SPIRType::BaseType default_type);
virtual void cast_to_variable_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type);
virtual void cast_from_variable_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type);
void unroll_array_from_complex_load(uint32_t target_id, uint32_t source_id, std::string &expr);
bool unroll_array_to_complex_store(uint32_t target_id, uint32_t source_id);
void convert_non_uniform_expression(std::string &expr, uint32_t ptr_id);
void handle_store_to_invariant_variable(uint32_t store_id, uint32_t value_id);
void disallow_forwarding_in_expression_chain(const SPIRExpression &expr);
bool expression_is_constant_null(uint32_t id) const;
bool expression_is_non_value_type_array(uint32_t ptr);
virtual void emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression);
uint32_t get_integer_width_for_instruction(const Instruction &instr) const;
uint32_t get_integer_width_for_glsl_instruction(GLSLstd450 op, const uint32_t *arguments, uint32_t length) const;
bool variable_is_lut(const SPIRVariable &var) const;
// Radix character used when emitting float literals; '.' by default.
char current_locale_radix_character = '.';
void fixup_type_alias();
void reorder_type_alias();
void fixup_anonymous_struct_names();
void fixup_anonymous_struct_names(std::unordered_set<uint32_t> &visited, const SPIRType &type);
static const char *vector_swizzle(int vecsize, int index);
// Stage-output masking: queries against masked_output_locations/builtins below.
bool is_stage_output_location_masked(uint32_t location, uint32_t component) const;
bool is_stage_output_builtin_masked(spv::BuiltIn builtin) const;
bool is_stage_output_variable_masked(const SPIRVariable &var) const;
bool is_stage_output_block_member_masked(const SPIRVariable &var, uint32_t index, bool strip_array) const;
bool is_per_primitive_variable(const SPIRVariable &var) const;
uint32_t get_accumulated_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array) const;
uint32_t get_declared_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array) const;
std::unordered_set<LocationComponentPair, InternalHasher> masked_output_locations;
std::unordered_set<uint32_t> masked_output_builtins;
// Optional custom float formatter; unset (nullptr) by default.
FloatFormatter *float_formatter = nullptr;
std::string format_float(float value) const;
std::string format_double(double value) const;
private:
void init();
// Helpers for tracking composite constants built up via OpCompositeInsert
// (see const_composite_insert_ids).
SmallVector<ConstantID> get_composite_constant_ids(ConstantID const_id);
void fill_composite_constant(SPIRConstant &constant, TypeID type_id, const SmallVector<ConstantID> &initializers);
void set_composite_constant(ConstantID const_id, TypeID type_id, const SmallVector<ConstantID> &initializers);
TypeID get_composite_member_type(TypeID type_id, uint32_t member_idx);
std::unordered_map<uint32_t, SmallVector<ConstantID>> const_composite_insert_ids;
};
  915. } // namespace SPIRV_CROSS_NAMESPACE
  916. #endif