spirv_msl.hpp 68 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438
  1. /*
  2. * Copyright 2016-2021 The Brenwill Workshop Ltd.
  3. * SPDX-License-Identifier: Apache-2.0 OR MIT
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. */
  17. /*
  18. * At your option, you may choose to accept this material under either:
  19. * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
  20. * 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
  21. */
  22. #ifndef SPIRV_CROSS_MSL_HPP
  23. #define SPIRV_CROSS_MSL_HPP
  24. #include "spirv_glsl.hpp"
  25. #include <map>
  26. #include <set>
  27. #include <stddef.h>
  28. #include <unordered_map>
  29. #include <unordered_set>
  30. namespace SPIRV_CROSS_NAMESPACE
  31. {
  32. using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
  33. // Indicates the format of a shader interface variable. Currently limited to specifying
  34. // if the input is an 8-bit unsigned integer, 16-bit unsigned integer, or
  35. // some other format.
  36. enum MSLShaderVariableFormat
  37. {
  38. MSL_SHADER_VARIABLE_FORMAT_OTHER = 0,
  39. MSL_SHADER_VARIABLE_FORMAT_UINT8 = 1,
  40. MSL_SHADER_VARIABLE_FORMAT_UINT16 = 2,
  41. MSL_SHADER_VARIABLE_FORMAT_ANY16 = 3,
  42. MSL_SHADER_VARIABLE_FORMAT_ANY32 = 4,
  43. // Deprecated aliases.
  44. MSL_VERTEX_FORMAT_OTHER = MSL_SHADER_VARIABLE_FORMAT_OTHER,
  45. MSL_VERTEX_FORMAT_UINT8 = MSL_SHADER_VARIABLE_FORMAT_UINT8,
  46. MSL_VERTEX_FORMAT_UINT16 = MSL_SHADER_VARIABLE_FORMAT_UINT16,
  47. MSL_SHADER_INPUT_FORMAT_OTHER = MSL_SHADER_VARIABLE_FORMAT_OTHER,
  48. MSL_SHADER_INPUT_FORMAT_UINT8 = MSL_SHADER_VARIABLE_FORMAT_UINT8,
  49. MSL_SHADER_INPUT_FORMAT_UINT16 = MSL_SHADER_VARIABLE_FORMAT_UINT16,
  50. MSL_SHADER_INPUT_FORMAT_ANY16 = MSL_SHADER_VARIABLE_FORMAT_ANY16,
  51. MSL_SHADER_INPUT_FORMAT_ANY32 = MSL_SHADER_VARIABLE_FORMAT_ANY32,
  52. MSL_SHADER_VARIABLE_FORMAT_INT_MAX = 0x7fffffff
  53. };
  54. // Indicates the rate at which a variable changes value, one of: per-vertex,
  55. // per-primitive, or per-patch.
  56. enum MSLShaderVariableRate
  57. {
  58. MSL_SHADER_VARIABLE_RATE_PER_VERTEX = 0,
  59. MSL_SHADER_VARIABLE_RATE_PER_PRIMITIVE = 1,
  60. MSL_SHADER_VARIABLE_RATE_PER_PATCH = 2,
  61. MSL_SHADER_VARIABLE_RATE_INT_MAX = 0x7fffffff,
  62. };
  63. // Defines MSL characteristics of a shader interface variable at a particular location.
  64. // After compilation, it is possible to query whether or not this location was used.
  65. // If vecsize is nonzero, it must be greater than or equal to the vecsize declared in the shader,
  66. // or behavior is undefined.
  67. struct MSLShaderInterfaceVariable
  68. {
  69. uint32_t location = 0;
  70. uint32_t component = 0;
  71. MSLShaderVariableFormat format = MSL_SHADER_VARIABLE_FORMAT_OTHER;
  72. BuiltIn builtin = BuiltInMax;
  73. uint32_t vecsize = 0;
  74. MSLShaderVariableRate rate = MSL_SHADER_VARIABLE_RATE_PER_VERTEX;
  75. };
  76. // Matches the binding index of a MSL resource for a binding within a descriptor set.
  77. // Taken together, the stage, desc_set and binding combine to form a reference to a resource
  78. // descriptor used in a particular shading stage. The count field indicates the number of
  79. // resources consumed by this binding, if the binding represents an array of resources.
  80. // If the resource array is a run-time-sized array, which are legal in GLSL or SPIR-V, this value
  81. // will be used to declare the array size in MSL, which does not support run-time-sized arrays.
  82. // If pad_argument_buffer_resources is enabled, the base_type and count values are used to
  83. // specify the base type and array size of the resource in the argument buffer, if that resource
  84. // is not defined and used by the shader. With pad_argument_buffer_resources enabled, this
  85. // information will be used to pad the argument buffer structure, in order to align that
  86. // structure consistently for all uses, across all shaders, of the descriptor set represented
  87. // by the arugment buffer. If pad_argument_buffer_resources is disabled, base_type does not
  88. // need to be populated, and if the resource is also not a run-time sized array, the count
  89. // field does not need to be populated.
  90. // If using MSL 2.0 argument buffers, the descriptor set is not marked as a discrete descriptor set,
  91. // and (for iOS only) the resource is not a storage image (sampled != 2), the binding reference we
  92. // remap to will become an [[id(N)]] attribute within the "descriptor set" argument buffer structure.
  93. // For resources which are bound in the "classic" MSL 1.0 way or discrete descriptors, the remap will
  94. // become a [[buffer(N)]], [[texture(N)]] or [[sampler(N)]] depending on the resource types used.
  95. struct MSLResourceBinding
  96. {
  97. ExecutionModel stage = ExecutionModelMax;
  98. SPIRType::BaseType basetype = SPIRType::Unknown;
  99. uint32_t desc_set = 0;
  100. uint32_t binding = 0;
  101. uint32_t count = 0;
  102. uint32_t msl_buffer = 0;
  103. uint32_t msl_texture = 0;
  104. uint32_t msl_sampler = 0;
  105. };
  106. enum MSLSamplerCoord
  107. {
  108. MSL_SAMPLER_COORD_NORMALIZED = 0,
  109. MSL_SAMPLER_COORD_PIXEL = 1,
  110. MSL_SAMPLER_INT_MAX = 0x7fffffff
  111. };
  112. enum MSLSamplerFilter
  113. {
  114. MSL_SAMPLER_FILTER_NEAREST = 0,
  115. MSL_SAMPLER_FILTER_LINEAR = 1,
  116. MSL_SAMPLER_FILTER_INT_MAX = 0x7fffffff
  117. };
  118. enum MSLSamplerMipFilter
  119. {
  120. MSL_SAMPLER_MIP_FILTER_NONE = 0,
  121. MSL_SAMPLER_MIP_FILTER_NEAREST = 1,
  122. MSL_SAMPLER_MIP_FILTER_LINEAR = 2,
  123. MSL_SAMPLER_MIP_FILTER_INT_MAX = 0x7fffffff
  124. };
  125. enum MSLSamplerAddress
  126. {
  127. MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO = 0,
  128. MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE = 1,
  129. MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER = 2,
  130. MSL_SAMPLER_ADDRESS_REPEAT = 3,
  131. MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT = 4,
  132. MSL_SAMPLER_ADDRESS_INT_MAX = 0x7fffffff
  133. };
  134. enum MSLSamplerCompareFunc
  135. {
  136. MSL_SAMPLER_COMPARE_FUNC_NEVER = 0,
  137. MSL_SAMPLER_COMPARE_FUNC_LESS = 1,
  138. MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL = 2,
  139. MSL_SAMPLER_COMPARE_FUNC_GREATER = 3,
  140. MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL = 4,
  141. MSL_SAMPLER_COMPARE_FUNC_EQUAL = 5,
  142. MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL = 6,
  143. MSL_SAMPLER_COMPARE_FUNC_ALWAYS = 7,
  144. MSL_SAMPLER_COMPARE_FUNC_INT_MAX = 0x7fffffff
  145. };
  146. enum MSLSamplerBorderColor
  147. {
  148. MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK = 0,
  149. MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK = 1,
  150. MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE = 2,
  151. MSL_SAMPLER_BORDER_COLOR_INT_MAX = 0x7fffffff
  152. };
  153. enum MSLFormatResolution
  154. {
  155. MSL_FORMAT_RESOLUTION_444 = 0,
  156. MSL_FORMAT_RESOLUTION_422,
  157. MSL_FORMAT_RESOLUTION_420,
  158. MSL_FORMAT_RESOLUTION_INT_MAX = 0x7fffffff
  159. };
  160. enum MSLChromaLocation
  161. {
  162. MSL_CHROMA_LOCATION_COSITED_EVEN = 0,
  163. MSL_CHROMA_LOCATION_MIDPOINT,
  164. MSL_CHROMA_LOCATION_INT_MAX = 0x7fffffff
  165. };
  166. enum MSLComponentSwizzle
  167. {
  168. MSL_COMPONENT_SWIZZLE_IDENTITY = 0,
  169. MSL_COMPONENT_SWIZZLE_ZERO,
  170. MSL_COMPONENT_SWIZZLE_ONE,
  171. MSL_COMPONENT_SWIZZLE_R,
  172. MSL_COMPONENT_SWIZZLE_G,
  173. MSL_COMPONENT_SWIZZLE_B,
  174. MSL_COMPONENT_SWIZZLE_A,
  175. MSL_COMPONENT_SWIZZLE_INT_MAX = 0x7fffffff
  176. };
  177. enum MSLSamplerYCbCrModelConversion
  178. {
  179. MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0,
  180. MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY,
  181. MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_709,
  182. MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_601,
  183. MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_2020,
  184. MSL_SAMPLER_YCBCR_MODEL_CONVERSION_INT_MAX = 0x7fffffff
  185. };
  186. enum MSLSamplerYCbCrRange
  187. {
  188. MSL_SAMPLER_YCBCR_RANGE_ITU_FULL = 0,
  189. MSL_SAMPLER_YCBCR_RANGE_ITU_NARROW,
  190. MSL_SAMPLER_YCBCR_RANGE_INT_MAX = 0x7fffffff
  191. };
  192. struct MSLConstexprSampler
  193. {
  194. MSLSamplerCoord coord = MSL_SAMPLER_COORD_NORMALIZED;
  195. MSLSamplerFilter min_filter = MSL_SAMPLER_FILTER_NEAREST;
  196. MSLSamplerFilter mag_filter = MSL_SAMPLER_FILTER_NEAREST;
  197. MSLSamplerMipFilter mip_filter = MSL_SAMPLER_MIP_FILTER_NONE;
  198. MSLSamplerAddress s_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
  199. MSLSamplerAddress t_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
  200. MSLSamplerAddress r_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
  201. MSLSamplerCompareFunc compare_func = MSL_SAMPLER_COMPARE_FUNC_NEVER;
  202. MSLSamplerBorderColor border_color = MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK;
  203. float lod_clamp_min = 0.0f;
  204. float lod_clamp_max = 1000.0f;
  205. int max_anisotropy = 1;
  206. // Sampler Y'CbCr conversion parameters
  207. uint32_t planes = 0;
  208. MSLFormatResolution resolution = MSL_FORMAT_RESOLUTION_444;
  209. MSLSamplerFilter chroma_filter = MSL_SAMPLER_FILTER_NEAREST;
  210. MSLChromaLocation x_chroma_offset = MSL_CHROMA_LOCATION_COSITED_EVEN;
  211. MSLChromaLocation y_chroma_offset = MSL_CHROMA_LOCATION_COSITED_EVEN;
  212. MSLComponentSwizzle swizzle[4]; // IDENTITY, IDENTITY, IDENTITY, IDENTITY
  213. MSLSamplerYCbCrModelConversion ycbcr_model = MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
  214. MSLSamplerYCbCrRange ycbcr_range = MSL_SAMPLER_YCBCR_RANGE_ITU_FULL;
  215. uint32_t bpc = 8;
  216. bool compare_enable = false;
  217. bool lod_clamp_enable = false;
  218. bool anisotropy_enable = false;
  219. bool ycbcr_conversion_enable = false;
  220. MSLConstexprSampler()
  221. {
  222. for (uint32_t i = 0; i < 4; i++)
  223. swizzle[i] = MSL_COMPONENT_SWIZZLE_IDENTITY;
  224. }
  225. bool swizzle_is_identity() const
  226. {
  227. return (swizzle[0] == MSL_COMPONENT_SWIZZLE_IDENTITY && swizzle[1] == MSL_COMPONENT_SWIZZLE_IDENTITY &&
  228. swizzle[2] == MSL_COMPONENT_SWIZZLE_IDENTITY && swizzle[3] == MSL_COMPONENT_SWIZZLE_IDENTITY);
  229. }
  230. bool swizzle_has_one_or_zero() const
  231. {
  232. return (swizzle[0] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[0] == MSL_COMPONENT_SWIZZLE_ONE ||
  233. swizzle[1] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[1] == MSL_COMPONENT_SWIZZLE_ONE ||
  234. swizzle[2] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[2] == MSL_COMPONENT_SWIZZLE_ONE ||
  235. swizzle[3] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[3] == MSL_COMPONENT_SWIZZLE_ONE);
  236. }
  237. };
  238. // Special constant used in a MSLResourceBinding desc_set
  239. // element to indicate the bindings for the push constants.
  240. // Kinda deprecated. Just use ResourceBindingPushConstant{DescriptorSet,Binding} directly.
  241. static const uint32_t kPushConstDescSet = ResourceBindingPushConstantDescriptorSet;
  242. // Special constant used in a MSLResourceBinding binding
  243. // element to indicate the bindings for the push constants.
  244. // Kinda deprecated. Just use ResourceBindingPushConstant{DescriptorSet,Binding} directly.
  245. static const uint32_t kPushConstBinding = ResourceBindingPushConstantBinding;
  246. // Special constant used in a MSLResourceBinding binding
  247. // element to indicate the buffer binding for swizzle buffers.
  248. static const uint32_t kSwizzleBufferBinding = ~(1u);
  249. // Special constant used in a MSLResourceBinding binding
  250. // element to indicate the buffer binding for buffer size buffers to support OpArrayLength.
  251. static const uint32_t kBufferSizeBufferBinding = ~(2u);
  252. // Special constant used in a MSLResourceBinding binding
  253. // element to indicate the buffer binding used for the argument buffer itself.
  254. // This buffer binding should be kept as small as possible as all automatic bindings for buffers
  255. // will start at max(kArgumentBufferBinding) + 1.
  256. static const uint32_t kArgumentBufferBinding = ~(3u);
  257. static const uint32_t kMaxArgumentBuffers = 8;
  258. // Decompiles SPIR-V to Metal Shading Language
  259. class CompilerMSL : public CompilerGLSL
  260. {
  261. public:
  262. // Options for compiling to Metal Shading Language
  263. struct Options
  264. {
  265. typedef enum
  266. {
  267. iOS = 0,
  268. macOS = 1
  269. } Platform;
  270. Platform platform = macOS;
  271. uint32_t msl_version = make_msl_version(1, 2);
  272. uint32_t texel_buffer_texture_width = 4096; // Width of 2D Metal textures used as 1D texel buffers
  273. uint32_t r32ui_linear_texture_alignment = 4;
  274. uint32_t r32ui_alignment_constant_id = 65535;
  275. uint32_t swizzle_buffer_index = 30;
  276. uint32_t indirect_params_buffer_index = 29;
  277. uint32_t shader_output_buffer_index = 28;
  278. uint32_t shader_patch_output_buffer_index = 27;
  279. uint32_t shader_tess_factor_buffer_index = 26;
  280. uint32_t buffer_size_buffer_index = 25;
  281. uint32_t view_mask_buffer_index = 24;
  282. uint32_t dynamic_offsets_buffer_index = 23;
  283. uint32_t shader_input_buffer_index = 22;
  284. uint32_t shader_index_buffer_index = 21;
  285. uint32_t shader_patch_input_buffer_index = 20;
  286. uint32_t shader_input_wg_index = 0;
  287. uint32_t device_index = 0;
  288. uint32_t enable_frag_output_mask = 0xffffffff;
  289. // Metal doesn't allow setting a fixed sample mask directly in the pipeline.
  290. // We can evade this restriction by ANDing the internal sample_mask output
  291. // of the shader with the additional fixed sample mask.
  292. uint32_t additional_fixed_sample_mask = 0xffffffff;
  293. bool enable_point_size_builtin = true;
  294. bool enable_point_size_default = false;
  295. float default_point_size = 1.0f;
  296. bool enable_frag_depth_builtin = true;
  297. bool enable_frag_stencil_ref_builtin = true;
  298. bool disable_rasterization = false;
  299. bool capture_output_to_buffer = false;
  300. bool swizzle_texture_samples = false;
  301. bool tess_domain_origin_lower_left = false;
  302. bool multiview = false;
  303. bool multiview_layered_rendering = true;
  304. bool view_index_from_device_index = false;
  305. bool dispatch_base = false;
  306. bool texture_1D_as_2D = false;
  307. // Enable use of Metal argument buffers.
  308. // MSL 2.0 must also be enabled.
  309. bool argument_buffers = false;
  310. // Defines Metal argument buffer tier levels.
  311. // Uses same values as Metal MTLArgumentBuffersTier enumeration.
  312. enum class ArgumentBuffersTier
  313. {
  314. Tier1 = 0,
  315. Tier2 = 1,
  316. };
  317. // When using Metal argument buffers, indicates the Metal argument buffer tier level supported by the Metal platform.
  318. // Ignored when Options::argument_buffers is disabled.
  319. // - Tier1 supports writable images on macOS, but not on iOS.
  320. // - Tier2 supports writable images on macOS and iOS, and higher resource count limits.
  321. // Tier capabilities based on recommendations from Apple engineering.
  322. ArgumentBuffersTier argument_buffers_tier = ArgumentBuffersTier::Tier1;
  323. // Enables specifick argument buffer format with extra information to track SSBO-length
  324. bool runtime_array_rich_descriptor = false;
  325. // Ensures vertex and instance indices start at zero. This reflects the behavior of HLSL with SV_VertexID and SV_InstanceID.
  326. bool enable_base_index_zero = false;
  327. // Fragment output in MSL must have at least as many components as the render pass.
  328. // Add support to explicit pad out components.
  329. bool pad_fragment_output_components = false;
  330. // Specifies whether the iOS target version supports the [[base_vertex]] and [[base_instance]] attributes.
  331. bool ios_support_base_vertex_instance = false;
  332. // Use Metal's native frame-buffer fetch API for subpass inputs.
  333. bool use_framebuffer_fetch_subpasses = false;
  334. // Enables use of "fma" intrinsic for invariant float math
  335. bool invariant_float_math = false;
  336. // Emulate texturecube_array with texture2d_array for iOS where this type is not available
  337. bool emulate_cube_array = false;
  338. // Allow user to enable decoration binding
  339. bool enable_decoration_binding = false;
  340. // Requires MSL 2.1, use the native support for texel buffers.
  341. bool texture_buffer_native = false;
  342. // Forces all resources which are part of an argument buffer to be considered active.
  343. // This ensures ABI compatibility between shaders where some resources might be unused,
  344. // and would otherwise declare a different IAB.
  345. bool force_active_argument_buffer_resources = false;
  346. // Aligns each resource in an argument buffer to its assigned index value, id(N),
  347. // by adding synthetic padding members in the argument buffer struct for any resources
  348. // in the argument buffer that are not defined and used by the shader. This allows
  349. // the shader to index into the correct argument in a descriptor set argument buffer
  350. // that is shared across shaders, where not all resources in the argument buffer are
  351. // defined in each shader. For this to work, an MSLResourceBinding must be provided for
  352. // all descriptors in any descriptor set held in an argument buffer in the shader, and
  353. // that MSLResourceBinding must have the basetype and count members populated correctly.
  354. // The implementation here assumes any inline blocks in the argument buffer is provided
  355. // in a Metal buffer, and doesn't take into consideration inline blocks that are
  356. // optionally embedded directly into the argument buffer via add_inline_uniform_block().
  357. bool pad_argument_buffer_resources = false;
  358. // Forces the use of plain arrays, which works around certain driver bugs on certain versions
  359. // of Intel Macbooks. See https://github.com/KhronosGroup/SPIRV-Cross/issues/1210.
  360. // May reduce performance in scenarios where arrays are copied around as value-types.
  361. bool force_native_arrays = false;
  362. // If a shader writes clip distance, also emit user varyings which
  363. // can be read in subsequent stages.
  364. bool enable_clip_distance_user_varying = true;
  365. // In a tessellation control shader, assume that more than one patch can be processed in a
  366. // single workgroup. This requires changes to the way the InvocationId and PrimitiveId
  367. // builtins are processed, but should result in more efficient usage of the GPU.
  368. bool multi_patch_workgroup = false;
  369. // Use storage buffers instead of vertex-style attributes for tessellation evaluation
  370. // input. This may require conversion of inputs in the generated post-tessellation
  371. // vertex shader, but allows the use of nested arrays.
  372. bool raw_buffer_tese_input = false;
  373. // If set, a vertex shader will be compiled as part of a tessellation pipeline.
  374. // It will be translated as a compute kernel, so it can use the global invocation ID
  375. // to index the output buffer.
  376. bool vertex_for_tessellation = false;
  377. // Assume that SubpassData images have multiple layers. Layered input attachments
  378. // are addressed relative to the Layer output from the vertex pipeline. This option
  379. // has no effect with multiview, since all input attachments are assumed to be layered
  380. // and will be addressed using the current ViewIndex.
  381. bool arrayed_subpass_input = false;
  382. // Whether to use SIMD-group or quadgroup functions to implement group non-uniform
  383. // operations. Some GPUs on iOS do not support the SIMD-group functions, only the
  384. // quadgroup functions.
  385. bool ios_use_simdgroup_functions = false;
  386. // If set, the subgroup size will be assumed to be one, and subgroup-related
  387. // builtins and operations will be emitted accordingly. This mode is intended to
  388. // be used by MoltenVK on hardware/software configurations which do not provide
  389. // sufficient support for subgroups.
  390. bool emulate_subgroups = false;
  391. // If nonzero, a fixed subgroup size to assume. Metal, similarly to VK_EXT_subgroup_size_control,
  392. // allows the SIMD-group size (aka thread execution width) to vary depending on
  393. // register usage and requirements. In certain circumstances--for example, a pipeline
  394. // in MoltenVK without VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT--
  395. // this is undesirable. This fixes the value of the SubgroupSize builtin, instead of
  396. // mapping it to the Metal builtin [[thread_execution_width]]. If the thread
  397. // execution width is reduced, the extra invocations will appear to be inactive.
  398. // If zero, the SubgroupSize will be allowed to vary, and the builtin will be mapped
  399. // to the Metal [[thread_execution_width]] builtin.
  400. uint32_t fixed_subgroup_size = 0;
  401. enum class IndexType
  402. {
  403. None = 0,
  404. UInt16 = 1,
  405. UInt32 = 2
  406. };
  407. // The type of index in the index buffer, if present. For a compute shader, Metal
  408. // requires specifying the indexing at pipeline creation, rather than at draw time
  409. // as with graphics pipelines. This means we must create three different pipelines,
  410. // for no indexing, 16-bit indices, and 32-bit indices. Each requires different
  411. // handling for the gl_VertexIndex builtin. We may as well, then, create three
  412. // different shaders for these three scenarios.
  413. IndexType vertex_index_type = IndexType::None;
  414. // If set, a dummy [[sample_id]] input is added to a fragment shader if none is present.
  415. // This will force the shader to run at sample rate, assuming Metal does not optimize
  416. // the extra threads away.
  417. bool force_sample_rate_shading = false;
  418. // If set, gl_HelperInvocation will be set manually whenever a fragment is discarded.
  419. // Some Metal devices have a bug where simd_is_helper_thread() does not return true
  420. // after a fragment has been discarded. This is a workaround that is only expected to be needed
  421. // until the bug is fixed in Metal; it is provided as an option to allow disabling it when that occurs.
  422. bool manual_helper_invocation_updates = true;
  423. // If set, extra checks will be emitted in fragment shaders to prevent writes
  424. // from discarded fragments. Some Metal devices have a bug where writes to storage resources
  425. // from discarded fragment threads continue to occur, despite the fragment being
  426. // discarded. This is a workaround that is only expected to be needed until the
  427. // bug is fixed in Metal; it is provided as an option so it can be enabled
  428. // only when the bug is present.
  429. bool check_discarded_frag_stores = false;
  430. // If set, Lod operands to OpImageSample*DrefExplicitLod for 1D and 2D array images
  431. // will be implemented using a gradient instead of passing the level operand directly.
  432. // Some Metal devices have a bug where the level() argument to depth2d_array<T>::sample_compare()
  433. // in a fragment shader is biased by some unknown amount, possibly dependent on the
  434. // partial derivatives of the texture coordinates. This is a workaround that is only
  435. // expected to be needed until the bug is fixed in Metal; it is provided as an option
  436. // so it can be enabled only when the bug is present.
  437. bool sample_dref_lod_array_as_grad = false;
  438. // MSL doesn't guarantee coherence between writes and subsequent reads of read_write textures.
  439. // This inserts fences before each read of a read_write texture to ensure coherency.
  440. // If you're sure you never rely on this, you can set this to false for a possible performance improvement.
  441. // Note: Only Apple's GPU compiler takes advantage of the lack of coherency, so make sure to test on Apple GPUs if you disable this.
  442. bool readwrite_texture_fences = true;
  443. // Metal 3.1 introduced a Metal regression bug which causes infinite recursion during
  444. // Metal's analysis of an entry point input structure that is itself recursive. Enabling
  445. // this option will replace the recursive input declaration with a alternate variable of
  446. // type void*, and then cast to the correct type at the top of the entry point function.
  447. // The bug has been reported to Apple, and will hopefully be fixed in future releases.
  448. bool replace_recursive_inputs = false;
  449. // If set, manual fixups of gradient vectors for cube texture lookups will be performed.
  450. // All released Apple Silicon GPUs to date behave incorrectly when sampling a cube texture
  451. // with explicit gradients. They will ignore one of the three partial derivatives based
// on the selected major axis, and expect the remaining derivatives to be partially
// transformed.
bool agx_manual_cube_grad_fixup = false;

// Metal will discard fragments with side effects under certain circumstances prematurely.
// Example: CTS test dEQP-VK.fragment_operations.early_fragment.discard_no_early_fragment_tests_depth
// Test will render a full screen quad with varying depth [0,1] for each fragment.
// Each fragment will do an operation with side effects, modify the depth value and
// discard the fragment. The test expects the fragment to be run due to:
// https://registry.khronos.org/vulkan/specs/1.0-extensions/html/vkspec.html#fragops-shader-depthreplacement
// which states that the fragment shader must be run due to replacing the depth in shader.
// However, Metal may prematurely discard fragments without executing them
// (believed to be a greedy optimization on their end), making the test fail.
// This option enforces fragment execution for such cases where the fragment has operations
// with side effects. Provided as an option hoping Metal will fix this issue in the future.
bool force_fragment_with_side_effects_execution = false;

// If set, adds a depth pass through statement to circumvent the following issue:
// When the same depth/stencil is used as input and depth/stencil attachment, we need to
// force Metal to perform the depth/stencil write after fragment execution. Otherwise,
// Metal will write to the depth attachment before fragment execution. This happens
// if the fragment does not modify the depth value.
bool input_attachment_is_ds_attachment = false;

// If BuiltInPosition is not written, automatically disable rasterization.
// The result can be queried with get_is_rasterization_disabled.
bool auto_disable_rasterization = false;

// Use Fast Math pragmas in MSL code, based on SPIR-V float controls and FP ExecutionModes.
// Requires MSL 3.2 or above, and has no effect with earlier MSL versions.
bool use_fast_math_pragmas = false;
// True if the selected target platform is iOS.
bool is_ios() const
{
	return platform == iOS;
}
// True if the selected target platform is macOS.
bool is_macos() const
{
	return platform == macOS;
}
  487. bool use_quadgroup_operation() const
  488. {
  489. return is_ios() && !ios_use_simdgroup_functions;
  490. }
// Selects the MSL language version to target, e.g. set_msl_version(2, 1) for MSL 2.1.
// Stored packed via make_msl_version for cheap comparisons.
void set_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0)
{
	msl_version = make_msl_version(major, minor, patch);
}
  495. bool supports_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0) const
  496. {
  497. return msl_version >= make_msl_version(major, minor, patch);
  498. }
  499. static uint32_t make_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0)
  500. {
  501. return (major * 10000) + (minor * 100) + patch;
  502. }
  503. };
// Returns the currently active MSL compile options.
const Options &get_msl_options() const
{
	return msl_options;
}
// Replaces the active MSL compile options wholesale.
void set_msl_options(const Options &opts)
{
	msl_options = opts;
}
  512. // Provide feedback to calling API to allow runtime to disable pipeline
  513. // rasterization if vertex shader requires rasterization to be disabled.
  514. bool get_is_rasterization_disabled() const
  515. {
  516. return is_rasterization_disabled && (get_entry_point().model == ExecutionModelVertex ||
  517. get_entry_point().model == ExecutionModelTessellationControl ||
  518. get_entry_point().model == ExecutionModelTessellationEvaluation);
  519. }
// Provide feedback to calling API to allow it to pass an auxiliary
// swizzle buffer if the shader needs it.
// Simply reflects the used_swizzle_buffer flag.
bool needs_swizzle_buffer() const
{
	return used_swizzle_buffer;
}
// Provide feedback to calling API to allow it to pass a buffer
// containing STORAGE_BUFFER buffer sizes to support OpArrayLength.
// True when at least one buffer was recorded in buffers_requiring_array_length.
bool needs_buffer_size_buffer() const
{
	return !buffers_requiring_array_length.empty();
}
// True if the given buffer variable was recorded as needing OpArrayLength support.
bool buffer_requires_array_length(VariableID id) const
{
	return buffers_requiring_array_length.count(id) != 0;
}
  536. // Provide feedback to calling API to allow it to pass a buffer
  537. // containing the view mask for the current multiview subpass.
  538. bool needs_view_mask_buffer() const
  539. {
  540. return msl_options.multiview && !msl_options.view_index_from_device_index;
  541. }
  542. // Provide feedback to calling API to allow it to pass a buffer
  543. // containing the dispatch base workgroup ID.
  544. bool needs_dispatch_base_buffer() const
  545. {
  546. return msl_options.dispatch_base && !msl_options.supports_msl_version(1, 2);
  547. }
  548. // Provide feedback to calling API to allow it to pass an output
  549. // buffer if the shader needs it.
  550. bool needs_output_buffer() const
  551. {
  552. return capture_output_to_buffer && stage_out_var_id != ID(0);
  553. }
// Provide feedback to calling API to allow it to pass a patch output
// buffer if the shader needs it.
// Requires capture-to-buffer mode and a non-zero patch stage-out variable.
bool needs_patch_output_buffer() const
{
	return capture_output_to_buffer && patch_stage_out_var_id != ID(0);
}
// Provide feedback to calling API to allow it to pass an input threadgroup
// buffer if the shader needs it.
// Requires capture-to-buffer mode and a non-zero stage-in variable.
bool needs_input_threadgroup_mem() const
{
	return capture_output_to_buffer && stage_in_var_id != ID(0);
}
// Constructors. The IR may be supplied as a raw SPIR-V binary, as a word
// pointer with explicit length, or as an already-parsed IR module (copied or moved).
explicit CompilerMSL(std::vector<uint32_t> spirv);
CompilerMSL(const uint32_t *ir, size_t word_count);
explicit CompilerMSL(const ParsedIR &ir);
explicit CompilerMSL(ParsedIR &&ir);

// input is a shader interface variable description used to fix up shader input variables.
// If shader inputs are provided, is_msl_shader_input_used() will return true after
// calling ::compile() if the location was used by the MSL code.
void add_msl_shader_input(const MSLShaderInterfaceVariable &input);

// output is a shader interface variable description used to fix up shader output variables.
// If shader outputs are provided, is_msl_shader_output_used() will return true after
// calling ::compile() if the location was used by the MSL code.
void add_msl_shader_output(const MSLShaderInterfaceVariable &output);

// resource is a resource binding to indicate the MSL buffer,
// texture or sampler index to use for a particular SPIR-V description set
// and binding. If resource bindings are provided,
// is_msl_resource_binding_used() will return true after calling ::compile() if
// the set/binding combination was used by the MSL code.
void add_msl_resource_binding(const MSLResourceBinding &resource);

// desc_set and binding are the SPIR-V descriptor set and binding of a buffer resource
// in this shader. index is the index within the dynamic offset buffer to use. This
// function marks that resource as using a dynamic offset (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
// or VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC). This function only has any effect if argument buffers
// are enabled. If so, the buffer will have its address adjusted at the beginning of the shader with
// an offset taken from the dynamic offset buffer.
void add_dynamic_buffer(uint32_t desc_set, uint32_t binding, uint32_t index);

// desc_set and binding are the SPIR-V descriptor set and binding of a buffer resource
// in this shader. This function marks that resource as an inline uniform block
// (VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT). This function only has any effect if argument buffers
// are enabled. If so, the buffer block will be directly embedded into the argument
// buffer, instead of being referenced indirectly via pointer.
void add_inline_uniform_block(uint32_t desc_set, uint32_t binding);

// When using MSL argument buffers, we can force "classic" MSL 1.0 binding schemes for certain descriptor sets.
// This corresponds to VK_KHR_push_descriptor in Vulkan.
void add_discrete_descriptor_set(uint32_t desc_set);

// If an argument buffer is large enough, it may need to be in the device storage space rather than
// constant. Opt-in to this behavior here on a per set basis.
void set_argument_buffer_device_address_space(uint32_t desc_set, bool device_storage);

// Query after compilation is done. This allows you to check if an input location was used by the shader.
bool is_msl_shader_input_used(uint32_t location);

// Query after compilation is done. This allows you to check if an output location was used by the shader.
bool is_msl_shader_output_used(uint32_t location);

// If not using add_msl_shader_input, it's possible
// that certain builtin attributes need to be automatically assigned locations.
// This is typical for tessellation builtin inputs such as tess levels, gl_Position, etc.
// This returns k_unknown_location if the location was explicitly assigned with
// add_msl_shader_input or the builtin is not used, otherwise returns N in [[attribute(N)]].
uint32_t get_automatic_builtin_input_location(BuiltIn builtin) const;

// If not using add_msl_shader_output, it's possible
// that certain builtin attributes need to be automatically assigned locations.
// This is typical for tessellation builtin outputs such as tess levels, gl_Position, etc.
// This returns k_unknown_location if the location was explicitly assigned with
// add_msl_shader_output or the builtin was not used, otherwise returns N in [[attribute(N)]].
uint32_t get_automatic_builtin_output_location(BuiltIn builtin) const;

// NOTE: Only resources which are remapped using add_msl_resource_binding will be reported here.
// Constexpr samplers are always assumed to be emitted.
// No specific MSLResourceBinding remapping is required for constexpr samplers as long as they are remapped
// by remap_constexpr_sampler(_by_binding).
bool is_msl_resource_binding_used(ExecutionModel model, uint32_t set, uint32_t binding) const;

// This must only be called after a successful call to CompilerMSL::compile().
// For a variable resource ID obtained through reflection API, report the automatically assigned resource index.
// If the descriptor set was part of an argument buffer, report the [[id(N)]],
// or [[buffer/texture/sampler]] binding for other resources.
// If the resource was a combined image sampler, report the image binding here,
// use the _secondary version of this call to query the sampler half of the resource.
// If no binding exists, uint32_t(-1) is returned.
uint32_t get_automatic_msl_resource_binding(uint32_t id) const;

// Same as get_automatic_msl_resource_binding, but should only be used for combined image samplers, in which case the
// sampler's binding is returned instead. For any other resource type, -1 is returned.
// Secondary bindings are also used for the auxiliary image atomic buffer.
uint32_t get_automatic_msl_resource_binding_secondary(uint32_t id) const;

// Same as get_automatic_msl_resource_binding, but should only be used for combined image samplers for multiplanar images,
// in which case the second plane's binding is returned instead. For any other resource type, -1 is returned.
uint32_t get_automatic_msl_resource_binding_tertiary(uint32_t id) const;

// Same as get_automatic_msl_resource_binding, but should only be used for combined image samplers for triplanar images,
// in which case the third plane's binding is returned instead. For any other resource type, -1 is returned.
uint32_t get_automatic_msl_resource_binding_quaternary(uint32_t id) const;

// Compiles the SPIR-V code into Metal Shading Language.
std::string compile() override;

// Remap a sampler with ID to a constexpr sampler.
// Older iOS targets must use constexpr samplers in certain cases (PCF),
// so a static sampler must be used.
// The sampler will not consume a binding, but be declared in the entry point as a constexpr sampler.
// This can be used on both combined image/samplers (sampler2D) or standalone samplers.
// The remapped sampler must not be an array of samplers.
// Prefer remap_constexpr_sampler_by_binding unless you're also doing reflection anyways.
void remap_constexpr_sampler(VariableID id, const MSLConstexprSampler &sampler);

// Same as remap_constexpr_sampler, except you provide set/binding, rather than variable ID.
// Remaps based on ID take priority over set/binding remaps.
void remap_constexpr_sampler_by_binding(uint32_t desc_set, uint32_t binding, const MSLConstexprSampler &sampler);

// If using CompilerMSL::Options::pad_fragment_output_components, override the number of components we expect
// to use for a particular location. The default is 4 if number of components is not overridden.
void set_fragment_output_components(uint32_t location, uint32_t components);

// Override/query the suffix used when naming combined image-samplers.
void set_combined_sampler_suffix(const char *suffix);
const char *get_combined_sampler_suffix() const;

// Information about specialization constants that are translated into MSL macros
// instead of using function constant.
// These must only be called after a successful call to CompilerMSL::compile().
bool specialization_constant_is_macro(uint32_t constant_id) const;

// Returns a mask of SPIR-V FP Fast Math Mode flags, that represents the set of flags that can be applied
// across all floating-point types. Each FPFastMathDefault execution mode operation identifies the flags
// for one floating-point type, and the value returned here is a bitwise-AND combination across all types.
// If incl_ops is enabled, the FPFastMathMode of any SPIR-V operations are also included in the bitwise-AND
// to determine the minimal fast-math that applies to all default execution modes and all operations.
// The returned value is also affected by execution modes SignedZeroInfNanPreserve and ContractionOff.
uint32_t get_fp_fast_math_flags(bool incl_ops) const;
protected:
// An enum of SPIR-V functions that are implemented in additional
// source code that is added to the shader if necessary.
enum SPVFuncImpl : uint8_t
{
	SPVFuncImplNone,
	SPVFuncImplMod,
	SPVFuncImplSMod,
	SPVFuncImplRadians,
	SPVFuncImplDegrees,
	SPVFuncImplFindILsb,
	SPVFuncImplFindSMsb,
	SPVFuncImplFindUMsb,
	SPVFuncImplSSign,
	SPVFuncImplArrayCopy,
	SPVFuncImplArrayCopyMultidim,
	SPVFuncImplTexelBufferCoords,
	SPVFuncImplImage2DAtomicCoords, // Emulate texture2D atomic operations
	SPVFuncImplGradientCube,
	SPVFuncImplFMul,
	SPVFuncImplFAdd,
	SPVFuncImplFSub,
	SPVFuncImplQuantizeToF16,
	SPVFuncImplCubemapTo2DArrayFace,
	SPVFuncImplUnsafeArray, // Allow Metal to use the array<T> template to make arrays a value type
	SPVFuncImplStorageMatrix, // Allow threadgroup construction of matrices
	SPVFuncImplInverse4x4,
	SPVFuncImplInverse3x3,
	SPVFuncImplInverse2x2,
	// It is very important that this come before *Swizzle, to ensure it's emitted before them.
	SPVFuncImplGetSwizzle,
	SPVFuncImplTextureSwizzle,
	SPVFuncImplGatherReturn,
	SPVFuncImplGatherCompareReturn,
	SPVFuncImplGatherSwizzle,
	SPVFuncImplGatherCompareSwizzle,
	SPVFuncImplGatherConstOffsets,
	SPVFuncImplGatherCompareConstOffsets,
	SPVFuncImplSubgroupBroadcast,
	SPVFuncImplSubgroupBroadcastFirst,
	SPVFuncImplSubgroupBallot,
	SPVFuncImplSubgroupBallotBitExtract,
	SPVFuncImplSubgroupBallotFindLSB,
	SPVFuncImplSubgroupBallotFindMSB,
	SPVFuncImplSubgroupBallotBitCount,
	SPVFuncImplSubgroupAllEqual,
	SPVFuncImplSubgroupShuffle,
	SPVFuncImplSubgroupShuffleXor,
	SPVFuncImplSubgroupShuffleUp,
	SPVFuncImplSubgroupShuffleDown,
	SPVFuncImplSubgroupRotate,
	// Clustered reductions: the typed variants (F/I/S/U, Bitwise/Logical) alias
	// a single generic enumerator, i.e. they share one emitted implementation.
	SPVFuncImplSubgroupClusteredAdd,
	SPVFuncImplSubgroupClusteredFAdd = SPVFuncImplSubgroupClusteredAdd,
	SPVFuncImplSubgroupClusteredIAdd = SPVFuncImplSubgroupClusteredAdd,
	SPVFuncImplSubgroupClusteredMul,
	SPVFuncImplSubgroupClusteredFMul = SPVFuncImplSubgroupClusteredMul,
	SPVFuncImplSubgroupClusteredIMul = SPVFuncImplSubgroupClusteredMul,
	SPVFuncImplSubgroupClusteredMin,
	SPVFuncImplSubgroupClusteredFMin = SPVFuncImplSubgroupClusteredMin,
	SPVFuncImplSubgroupClusteredSMin = SPVFuncImplSubgroupClusteredMin,
	SPVFuncImplSubgroupClusteredUMin = SPVFuncImplSubgroupClusteredMin,
	SPVFuncImplSubgroupClusteredMax,
	SPVFuncImplSubgroupClusteredFMax = SPVFuncImplSubgroupClusteredMax,
	SPVFuncImplSubgroupClusteredSMax = SPVFuncImplSubgroupClusteredMax,
	SPVFuncImplSubgroupClusteredUMax = SPVFuncImplSubgroupClusteredMax,
	SPVFuncImplSubgroupClusteredAnd,
	SPVFuncImplSubgroupClusteredBitwiseAnd = SPVFuncImplSubgroupClusteredAnd,
	SPVFuncImplSubgroupClusteredLogicalAnd = SPVFuncImplSubgroupClusteredAnd,
	SPVFuncImplSubgroupClusteredOr,
	SPVFuncImplSubgroupClusteredBitwiseOr = SPVFuncImplSubgroupClusteredOr,
	SPVFuncImplSubgroupClusteredLogicalOr = SPVFuncImplSubgroupClusteredOr,
	SPVFuncImplSubgroupClusteredXor,
	SPVFuncImplSubgroupClusteredBitwiseXor = SPVFuncImplSubgroupClusteredXor,
	SPVFuncImplSubgroupClusteredLogicalXor = SPVFuncImplSubgroupClusteredXor,
	SPVFuncImplQuadBroadcast,
	SPVFuncImplQuadSwap,
	SPVFuncImplReflectScalar,
	SPVFuncImplRefractScalar,
	SPVFuncImplFaceForwardScalar,
	// Ycbcr chroma reconstruction helpers for 2- and 3-plane formats.
	SPVFuncImplChromaReconstructNearest2Plane,
	SPVFuncImplChromaReconstructNearest3Plane,
	SPVFuncImplChromaReconstructLinear422CositedEven2Plane,
	SPVFuncImplChromaReconstructLinear422CositedEven3Plane,
	SPVFuncImplChromaReconstructLinear422Midpoint2Plane,
	SPVFuncImplChromaReconstructLinear422Midpoint3Plane,
	SPVFuncImplChromaReconstructLinear420XCositedEvenYCositedEven2Plane,
	SPVFuncImplChromaReconstructLinear420XCositedEvenYCositedEven3Plane,
	SPVFuncImplChromaReconstructLinear420XMidpointYCositedEven2Plane,
	SPVFuncImplChromaReconstructLinear420XMidpointYCositedEven3Plane,
	SPVFuncImplChromaReconstructLinear420XCositedEvenYMidpoint2Plane,
	SPVFuncImplChromaReconstructLinear420XCositedEvenYMidpoint3Plane,
	SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint2Plane,
	SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint3Plane,
	SPVFuncImplExpandITUFullRange,
	SPVFuncImplExpandITUNarrowRange,
	SPVFuncImplConvertYCbCrBT709,
	SPVFuncImplConvertYCbCrBT601,
	SPVFuncImplConvertYCbCrBT2020,
	SPVFuncImplDynamicImageSampler,
	SPVFuncImplRayQueryIntersectionParams,
	SPVFuncImplVariableDescriptor,
	SPVFuncImplVariableSizedDescriptor,
	SPVFuncImplVariableDescriptorArray,
	SPVFuncImplPaddedStd140,
	SPVFuncImplReduceAdd,
	SPVFuncImplImageFence,
	SPVFuncImplTextureCast,
	SPVFuncImplMulExtended,
	SPVFuncImplSetMeshOutputsEXT,
	SPVFuncImplAssume,
};
// NOTE(review): the two comments below do not obviously describe emit_texture_op;
// they may have drifted from the declarations they originally annotated — confirm against the .cpp.
// If the underlying resource has been used for comparison then duplicate loads of that resource must be too
// Use Metal's native frame-buffer fetch API for subpass inputs.
void emit_texture_op(const Instruction &i, bool sparse) override;

// Instruction / expression emission overrides and helpers.
void emit_binary_ptr_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op);
std::string to_ptr_expression(uint32_t id, bool register_expression_read = true);
void emit_binary_unord_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op);
void emit_instruction(const Instruction &instr) override;
void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args,
                  uint32_t count) override;
void emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t result_id, uint32_t op,
                                           const uint32_t *args, uint32_t count) override;
void emit_header() override;
void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override;
void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) override;
void emit_subgroup_op(const Instruction &i) override;
void emit_subgroup_cluster_op(uint32_t result_type, uint32_t result_id, uint32_t cluster_size, uint32_t op0,
                              const char *op);
void emit_subgroup_cluster_op_cast(uint32_t result_type, uint32_t result_id, uint32_t cluster_size, uint32_t op0,
                                   const char *op, SPIRType::BaseType input_type,
                                   SPIRType::BaseType expected_result_type);
std::string to_texture_op(const Instruction &i, bool sparse, bool *forward,
                          SmallVector<uint32_t> &inherited_expressions) override;
void emit_fixup() override;
std::string to_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index,
                             const std::string &qualifier = "");
void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index,
                        const std::string &qualifier = "", uint32_t base_offset = 0) override;
void emit_struct_padding_target(const SPIRType &type) override;
std::string type_to_glsl(const SPIRType &type, uint32_t id, bool member);
std::string type_to_glsl(const SPIRType &type, uint32_t id = 0) override;
void emit_block_hints(const SPIRBlock &block) override;

// Mesh-shader emission helpers.
void emit_mesh_entry_point();
void emit_mesh_outputs();
void emit_mesh_tasks(SPIRBlock &block) override;
void emit_workgroup_initialization(const SPIRVariable &var) override;

// Allow Metal to use the array<T> template to make arrays a value type
std::string type_to_array_glsl(const SPIRType &type, uint32_t variable_id) override;
std::string constant_op_expression(const SPIRConstantOp &cop) override;
bool variable_decl_is_remapped_storage(const SPIRVariable &variable, StorageClass storage) const override;

// GCC workaround of lambdas calling protected functions (for older GCC versions)
std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id = 0) override;
std::string image_type_glsl(const SPIRType &type, uint32_t id, bool member) override;
std::string sampler_type(const SPIRType &type, uint32_t id, bool member);
std::string builtin_to_glsl(BuiltIn builtin, StorageClass storage) override;
std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id) override;
std::string to_name(uint32_t id, bool allow_alias = true) const override;
std::string to_function_name(const TextureFunctionNameArguments &args) override;
std::string to_function_args(const TextureFunctionArguments &args, bool *p_forward) override;
std::string to_initializer_expression(const SPIRVariable &var) override;
std::string to_zero_initialized_expression(uint32_t type_id) override;
std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t physical_type_id,
                                   bool is_packed, bool row_major) override;

// Returns true for BuiltInSampleMask because gl_SampleMask[] is an array in SPIR-V, but [[sample_mask]] is a scalar in Metal.
bool builtin_translates_to_nonarray(BuiltIn builtin) const override;
std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override;
bool emit_complex_bitcast(uint32_t result_id, uint32_t id, uint32_t op0) override;
bool skip_argument(uint32_t id) const override;
std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain_is_resolved) override;
std::string to_qualifiers_glsl(uint32_t id) override;

// Name sanitation for MSL reserved keywords and illegal identifiers.
void replace_illegal_names() override;
void declare_constant_arrays();
void replace_illegal_entry_point_names();
void sync_entry_point_aliases_and_names();
static const std::unordered_set<std::string> &get_reserved_keyword_set();
static const std::unordered_set<std::string> &get_illegal_func_names();

// Constant arrays of non-primitive types (i.e. matrices) won't link properly into Metal libraries
void declare_complex_constant_arrays();
bool is_patch_block(const SPIRType &type);
bool is_non_native_row_major_matrix(uint32_t id) override;
bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index) override;
std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type, uint32_t physical_type_id,
                                     bool is_packed, bool relaxed) override;
bool is_tesc_shader() const;
bool is_tese_shader() const;
bool is_mesh_shader() const;

// Pre-compilation IR transforms.
void preprocess_op_codes();
void localize_global_variables();
void extract_global_variables_from_functions();
void mark_packable_structs();
void mark_as_packable(SPIRType &type);
void mark_as_workgroup_struct(SPIRType &type);

// Per-function sets of global variable IDs, presumably gathered by
// extract_global_variables_from_function — confirm against the .cpp.
std::unordered_map<uint32_t, std::set<uint32_t>> function_global_vars;
void extract_global_variables_from_function(uint32_t func_id, std::set<uint32_t> &added_arg_ids,
                                            std::unordered_set<uint32_t> &global_var_ids,
                                            std::unordered_set<uint32_t> &processed_func_ids);
uint32_t add_interface_block(StorageClass storage, bool patch = false);
uint32_t add_interface_block_pointer(uint32_t ib_var_id, StorageClass storage);
uint32_t add_meshlet_block(bool per_primitive);
// Metadata accumulated while building a shader interface block.
struct InterfaceBlockMeta
{
	// Per-location information for interface variables.
	struct LocationMeta
	{
		uint32_t base_type_id = 0;
		uint32_t num_components = 0;
		// Interpolation qualifiers recorded for this location.
		bool flat = false;
		bool noperspective = false;
		bool centroid = false;
		bool sample = false;
	};
	// Presumably keyed by location — confirm against usage in the .cpp.
	std::unordered_map<uint32_t, LocationMeta> location_meta;
	bool strip_array = false;
	bool allow_local_declaration = false;
};
  885. std::string to_tesc_invocation_id();
  886. void emit_local_masked_variable(const SPIRVariable &masked_var, bool strip_array);
  887. void add_variable_to_interface_block(StorageClass storage, const std::string &ib_var_ref, SPIRType &ib_type,
  888. SPIRVariable &var, InterfaceBlockMeta &meta);
  889. void add_composite_variable_to_interface_block(StorageClass storage, const std::string &ib_var_ref,
  890. SPIRType &ib_type, SPIRVariable &var, InterfaceBlockMeta &meta);
  891. void add_plain_variable_to_interface_block(StorageClass storage, const std::string &ib_var_ref,
  892. SPIRType &ib_type, SPIRVariable &var, InterfaceBlockMeta &meta);
  893. bool add_component_variable_to_interface_block(StorageClass storage, const std::string &ib_var_ref,
  894. SPIRVariable &var, const SPIRType &type,
  895. InterfaceBlockMeta &meta);
  896. void add_plain_member_variable_to_interface_block(StorageClass storage,
  897. const std::string &ib_var_ref, SPIRType &ib_type,
  898. SPIRVariable &var, SPIRType &var_type,
  899. uint32_t mbr_idx, InterfaceBlockMeta &meta,
  900. const std::string &mbr_name_qual,
  901. const std::string &var_chain_qual,
  902. uint32_t &location, uint32_t &var_mbr_idx);
  903. void add_composite_member_variable_to_interface_block(StorageClass storage,
  904. const std::string &ib_var_ref, SPIRType &ib_type,
  905. SPIRVariable &var, SPIRType &var_type,
  906. uint32_t mbr_idx, InterfaceBlockMeta &meta,
  907. const std::string &mbr_name_qual,
  908. const std::string &var_chain_qual,
  909. uint32_t &location, uint32_t &var_mbr_idx,
  910. const Bitset &interpolation_qual);
  911. void add_tess_level_input_to_interface_block(const std::string &ib_var_ref, SPIRType &ib_type, SPIRVariable &var);
  912. void add_tess_level_input(const std::string &base_ref, const std::string &mbr_name, SPIRVariable &var);
  913. void ensure_struct_members_valid_vecsizes(SPIRType &struct_type, uint32_t &location);
  914. void fix_up_interface_member_indices(StorageClass storage, uint32_t ib_type_id);
  915. void mark_location_as_used_by_shader(uint32_t location, const SPIRType &type,
  916. StorageClass storage, bool fallback = false);
  917. uint32_t ensure_correct_builtin_type(uint32_t type_id, BuiltIn builtin);
  918. uint32_t ensure_correct_input_type(uint32_t type_id, uint32_t location, uint32_t component,
  919. uint32_t num_components, bool strip_array);
  920. void emit_custom_templates();
  921. void emit_custom_functions();
  922. void emit_resources();
  923. void emit_specialization_constants_and_structs();
  924. void emit_interface_block(uint32_t ib_var_id);
  925. bool maybe_emit_array_assignment(uint32_t id_lhs, uint32_t id_rhs);
  926. bool is_var_runtime_size_array(const SPIRVariable &var) const;
  927. uint32_t get_resource_array_size(const SPIRType &type, uint32_t id) const;
  928. void fix_up_shader_inputs_outputs();
  929. bool entry_point_is_vertex() const;
  930. bool entry_point_returns_stage_output() const;
  931. bool entry_point_requires_const_device_buffers() const;
  932. std::string func_type_decl(SPIRType &type);
  933. std::string entry_point_args_classic(bool append_comma);
  934. std::string entry_point_args_argument_buffer(bool append_comma);
  935. std::string entry_point_arg_stage_in();
  936. void entry_point_args_builtin(std::string &args);
  937. void entry_point_args_discrete_descriptors(std::string &args);
  938. std::string append_member_name(const std::string &qualifier, const SPIRType &type, uint32_t index);
  939. std::string ensure_valid_name(std::string name, std::string pfx);
  940. std::string to_sampler_expression(uint32_t id);
  941. std::string to_swizzle_expression(uint32_t id);
  942. std::string to_buffer_size_expression(uint32_t id);
  943. bool is_sample_rate() const;
  944. bool is_intersection_query() const;
  945. bool is_direct_input_builtin(BuiltIn builtin);
  946. std::string builtin_qualifier(BuiltIn builtin);
  947. std::string builtin_type_decl(BuiltIn builtin, uint32_t id = 0);
  948. std::string built_in_func_arg(BuiltIn builtin, bool prefix_comma);
  949. std::string member_attribute_qualifier(const SPIRType &type, uint32_t index);
  950. std::string member_location_attribute_qualifier(const SPIRType &type, uint32_t index);
  951. std::string argument_decl(const SPIRFunction::Parameter &arg);
  952. const char *descriptor_address_space(uint32_t id, StorageClass storage, const char *plain_address_space) const;
  953. std::string round_fp_tex_coords(std::string tex_coords, bool coord_is_fp);
  954. uint32_t get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype, uint32_t plane = 0);
  955. uint32_t get_member_location(uint32_t type_id, uint32_t index, uint32_t *comp = nullptr) const;
  956. uint32_t get_or_allocate_builtin_input_member_location(BuiltIn builtin,
  957. uint32_t type_id, uint32_t index, uint32_t *comp = nullptr);
  958. uint32_t get_or_allocate_builtin_output_member_location(BuiltIn builtin,
  959. uint32_t type_id, uint32_t index, uint32_t *comp = nullptr);
  960. uint32_t get_physical_tess_level_array_size(BuiltIn builtin) const;
  961. uint32_t get_physical_type_stride(const SPIRType &type) const override;
  962. // MSL packing rules. These compute the effective packing rules as observed by the MSL compiler in the MSL output.
  963. // These values can change depending on various extended decorations which control packing rules.
  964. // We need to make these rules match up with SPIR-V declared rules.
  965. uint32_t get_declared_type_size_msl(const SPIRType &type, bool packed, bool row_major) const;
  966. uint32_t get_declared_type_array_stride_msl(const SPIRType &type, bool packed, bool row_major) const;
  967. uint32_t get_declared_type_matrix_stride_msl(const SPIRType &type, bool packed, bool row_major) const;
  968. uint32_t get_declared_type_alignment_msl(const SPIRType &type, bool packed, bool row_major) const;
  969. uint32_t get_declared_struct_member_size_msl(const SPIRType &struct_type, uint32_t index) const;
  970. uint32_t get_declared_struct_member_array_stride_msl(const SPIRType &struct_type, uint32_t index) const;
  971. uint32_t get_declared_struct_member_matrix_stride_msl(const SPIRType &struct_type, uint32_t index) const;
  972. uint32_t get_declared_struct_member_alignment_msl(const SPIRType &struct_type, uint32_t index) const;
  973. uint32_t get_declared_input_size_msl(const SPIRType &struct_type, uint32_t index) const;
  974. uint32_t get_declared_input_array_stride_msl(const SPIRType &struct_type, uint32_t index) const;
  975. uint32_t get_declared_input_matrix_stride_msl(const SPIRType &struct_type, uint32_t index) const;
  976. uint32_t get_declared_input_alignment_msl(const SPIRType &struct_type, uint32_t index) const;
  977. const SPIRType &get_physical_member_type(const SPIRType &struct_type, uint32_t index) const;
  978. SPIRType get_presumed_input_type(const SPIRType &struct_type, uint32_t index) const;
  979. uint32_t get_declared_struct_size_msl(const SPIRType &struct_type, bool ignore_alignment = false,
  980. bool ignore_padding = false) const;
  981. std::string to_component_argument(uint32_t id);
	// Struct layout enforcement: adjust packing/alignment of struct members so
	// the emitted MSL layout matches the SPIR-V declared offsets and strides.
	void align_struct(SPIRType &ib_type, std::unordered_set<uint32_t> &aligned_structs);
	void mark_scalar_layout_structs(const SPIRType &ib_type);
	void mark_struct_members_packed(const SPIRType &type);
	void ensure_member_packing_rules_msl(SPIRType &ib_type, uint32_t index);
	bool validate_member_packing_rules_msl(const SPIRType &type, uint32_t index) const;

	// Address-space resolution for variables, arguments and types.
	std::string get_variable_address_space(const SPIRVariable &argument);
	// Special case of get_variable_address_space which is only used for leaf functions.
	std::string get_leaf_argument_address_space(const SPIRVariable &argument);
	std::string get_type_address_space(const SPIRType &type, uint32_t id, bool argument = false);

	// Checks whether a set of decoration flags implies volatile/coherent access.
	bool decoration_flags_signal_volatile(const Bitset &flags) const;
	bool decoration_flags_signal_coherent(const Bitset &flags) const;
	const char *to_restrict(uint32_t id, bool space);
	// Accessors for the synthesized stage I/O struct types.
	SPIRType &get_stage_in_struct_type();
	SPIRType &get_stage_out_struct_type();
	SPIRType &get_patch_stage_in_struct_type();
	SPIRType &get_patch_stage_out_struct_type();
	std::string get_tess_factor_struct_name();
	SPIRType &get_uint_type();
	uint32_t get_uint_type_id();

	// Emits an atomic operation. op1/op2 are optional extra operands; the
	// op1_is_pointer/op1_is_literal flags control how op1 is interpreted.
	void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, const char *op, Op opcode,
	                         uint32_t mem_order_1, uint32_t mem_order_2, bool has_mem_order_2, uint32_t op0, uint32_t op1 = 0,
	                         bool op1_is_pointer = false, bool op1_is_literal = false, uint32_t op2 = 0);
	// Translates a SPIR-V memory-semantics mask to a memory-order string.
	const char *get_memory_order(uint32_t spv_mem_sem);

	// Queue up #pragma / typedef lines for emission into the shader preamble.
	void add_pragma_line(const std::string &line, bool recompile_on_unique);
	void add_typedef_line(const std::string &line);

	void emit_barrier(uint32_t id_exe_scope, uint32_t id_mem_scope, uint32_t id_mem_sem);
	bool emit_array_copy(const char *expr, uint32_t lhs_id, uint32_t rhs_id,
	                     StorageClass lhs_storage, StorageClass rhs_storage) override;
	void build_implicit_builtins();
	uint32_t build_constant_uint_array_pointer();
	void emit_entry_point_declarations() override;
	bool uses_explicit_early_fragment_test();
	// IDs of implicitly created builtin variables (0 = unset).
	uint32_t builtin_frag_coord_id = 0;
	uint32_t builtin_sample_id_id = 0;
	uint32_t builtin_sample_mask_id = 0;
	uint32_t builtin_helper_invocation_id = 0;
	uint32_t builtin_vertex_idx_id = 0;
	uint32_t builtin_base_vertex_id = 0;
	uint32_t builtin_instance_idx_id = 0;
	uint32_t builtin_base_instance_id = 0;
	uint32_t builtin_view_idx_id = 0;
	uint32_t builtin_layer_id = 0;
	uint32_t builtin_invocation_id_id = 0;
	uint32_t builtin_primitive_id_id = 0;
	uint32_t builtin_subgroup_invocation_id_id = 0;
	uint32_t builtin_subgroup_size_id = 0;
	uint32_t builtin_dispatch_base_id = 0;
	uint32_t builtin_stage_input_size_id = 0;
	uint32_t builtin_local_invocation_index_id = 0;
	uint32_t builtin_workgroup_size_id = 0;
	uint32_t builtin_mesh_primitive_indices_id = 0;
	uint32_t builtin_mesh_sizes_id = 0;
	uint32_t builtin_task_grid_id = 0;
	uint32_t builtin_frag_depth_id = 0;

	// IDs of compiler-synthesized helper buffers (0 = unset).
	uint32_t swizzle_buffer_id = 0;
	uint32_t buffer_size_buffer_id = 0;
	uint32_t view_mask_buffer_id = 0;
	uint32_t dynamic_offsets_buffer_id = 0;

	// Cached IDs of synthesized types (0 = unset).
	uint32_t uint_type_id = 0;
	uint32_t shared_uint_type_id = 0;
	uint32_t meshlet_type_id = 0;
	uint32_t argument_buffer_padding_buffer_type_id = 0;
	uint32_t argument_buffer_padding_image_type_id = 0;
	uint32_t argument_buffer_padding_sampler_type_id = 0;

	bool does_shader_write_sample_mask = false;
	bool frag_shader_needs_discard_checks = false;
	// Overridden load/store hooks; allow expressions to be rewritten when the
	// stored/loaded type differs from the declared variable type.
	void cast_to_variable_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type) override;
	void cast_from_variable_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type) override;
	void emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression) override;

	void analyze_sampled_image_usage();
	void analyze_workgroup_variables();

	// Access-chain fixups for stage I/O, packed/scalar layouts and interpolants.
	bool access_chain_needs_stage_io_builtin_translation(uint32_t base) override;
	bool prepare_access_chain_for_scalar_access(std::string &expr, const SPIRType &type, StorageClass storage,
	                                            bool &is_packed) override;
	void fix_up_interpolant_access_chain(const uint32_t *ops, uint32_t length);
	bool check_physical_type_cast(std::string &expr, const SPIRType *type, uint32_t physical_type) override;

	// Tessellation-specific access-chain and I/O handling.
	bool emit_tessellation_access_chain(const uint32_t *ops, uint32_t length);
	bool emit_tessellation_io_load(uint32_t result_type, uint32_t id, uint32_t ptr);
	bool is_out_of_bounds_tessellation_level(uint32_t id_lhs);

	void ensure_builtin(StorageClass storage, BuiltIn builtin);
	void mark_implicit_builtin(StorageClass storage, BuiltIn builtin, uint32_t id);

	std::string convert_to_f32(const std::string &expr, uint32_t components);
	Options msl_options;
	std::set<SPVFuncImpl> spv_function_implementations;

	// Must be ordered to ensure declarations are in a specific order.
	std::map<LocationComponentPair, MSLShaderInterfaceVariable> inputs_by_location;
	std::unordered_map<uint32_t, MSLShaderInterfaceVariable> inputs_by_builtin;
	std::map<LocationComponentPair, MSLShaderInterfaceVariable> outputs_by_location;
	std::unordered_map<uint32_t, MSLShaderInterfaceVariable> outputs_by_builtin;

	// Location tracking for stage inputs/outputs.
	std::unordered_set<uint32_t> location_inputs_in_use;
	std::unordered_set<uint32_t> location_inputs_in_use_fallback;
	std::unordered_set<uint32_t> location_outputs_in_use;
	std::unordered_set<uint32_t> location_outputs_in_use_fallback;
	std::unordered_map<uint32_t, uint32_t> fragment_output_components;
	std::unordered_map<uint32_t, uint32_t> builtin_to_automatic_input_location;
	std::unordered_map<uint32_t, uint32_t> builtin_to_automatic_output_location;

	std::vector<std::string> pragma_lines;
	std::vector<std::string> typedef_lines;
	SmallVector<uint32_t> vars_needing_early_declaration;
	std::unordered_set<uint32_t> constant_macro_ids;

	// Resource binding remapping state.
	// NOTE(review): the bool in the pair presumably marks the binding as
	// consumed — confirm against the implementation.
	std::unordered_map<StageSetBinding, std::pair<MSLResourceBinding, bool>, InternalHasher> resource_bindings;
	std::unordered_map<StageSetBinding, uint32_t, InternalHasher> resource_arg_buff_idx_to_binding_number;
	uint32_t next_metal_resource_index_buffer = 0;
	uint32_t next_metal_resource_index_texture = 0;
	uint32_t next_metal_resource_index_sampler = 0;
	// Intentionally uninitialized, works around MSVC 2013 bug.
	uint32_t next_metal_resource_ids[kMaxArgumentBuffers];

	// Variable IDs of the synthesized stage I/O variables (0 = unset).
	VariableID stage_in_var_id = 0;
	VariableID stage_out_var_id = 0;
	VariableID patch_stage_in_var_id = 0;
	VariableID patch_stage_out_var_id = 0;
	VariableID stage_in_ptr_var_id = 0;
	VariableID stage_out_ptr_var_id = 0;
	VariableID tess_level_inner_var_id = 0;
	VariableID tess_level_outer_var_id = 0;
	VariableID mesh_out_per_vertex = 0;
	VariableID mesh_out_per_primitive = 0;
	VariableID stage_out_masked_builtin_type_id = 0;
	// Handle HLSL-style 0-based vertex/instance index.
	// Tri-state so "not yet decided" can be distinguished from an explicit yes/no.
	enum class TriState
	{
		Neutral,
		No,
		Yes
	};
	TriState needs_base_vertex_arg = TriState::Neutral;
	TriState needs_base_instance_arg = TriState::Neutral;
	bool has_sampled_images = false;
	bool builtin_declaration = false; // Handle HLSL-style 0-based vertex/instance index.
	bool is_using_builtin_array = false; // Force the use of C style array declaration.
	bool using_builtin_array() const;

	// Assorted code-generation state flags.
	bool is_rasterization_disabled = false;
	bool has_descriptor_side_effects_buffer = false;
	bool capture_output_to_buffer = false;
	bool needs_swizzle_buffer_def = false;
	bool used_swizzle_buffer = false;
	bool added_builtin_tess_level = false;
	bool needs_local_invocation_index = false;
	bool needs_subgroup_invocation_id = false;
	bool needs_subgroup_size = false;
	bool needs_sample_id = false;
	bool needs_helper_invocation = false;
	bool needs_workgroup_zero_init = false;
	bool needs_point_size_output = false;
	bool writes_to_depth = false;
	bool writes_to_point_size = false;

	// Names and suffixes used when emitting synthesized stage I/O variables
	// and generated helper resources.
	std::string qual_pos_var_name;
	std::string stage_in_var_name = "in";
	std::string stage_out_var_name = "out";
	std::string patch_stage_in_var_name = "patchIn";
	std::string patch_stage_out_var_name = "patchOut";
	std::string sampler_name_suffix = "Smplr";
	std::string swizzle_name_suffix = "Swzl";
	std::string buffer_size_name_suffix = "BufferSize";
	std::string plane_name_suffix = "Plane";
	std::string input_wg_var_name = "gl_in";
	std::string input_buffer_var_name = "spvIn";
	std::string output_buffer_var_name = "spvOut";
	std::string patch_input_buffer_var_name = "spvPatchIn";
	std::string patch_output_buffer_var_name = "spvPatchOut";
	std::string tess_factor_buffer_var_name = "spvTessLevel";
	std::string index_buffer_var_name = "spvIndices";

	Op previous_instruction_opcode = OpNop;
	// Must be ordered since declaration is in a specific order.
	std::map<uint32_t, MSLConstexprSampler> constexpr_samplers_by_id;
	std::unordered_map<SetBindingPair, MSLConstexprSampler, InternalHasher> constexpr_samplers_by_binding;
	// Lookup into the two maps above; pointer return allows "not found".
	const MSLConstexprSampler *find_constexpr_sampler(uint32_t id) const;

	std::unordered_set<uint32_t> buffers_requiring_array_length;
	SmallVector<uint32_t> buffer_aliases_discrete;
	std::unordered_set<uint32_t> atomic_image_vars_emulated; // Emulate texture2D atomic operations
	std::unordered_set<uint32_t> pull_model_inputs;
	std::unordered_set<uint32_t> recursive_inputs;
	SmallVector<SPIRVariable *> entry_point_bindings;
	// Must be ordered since array is in a specific order.
	struct DynamicBuffer
	{
		uint32_t base_index; // Index into the dynamic-offset buffer.
		uint32_t var_id;     // SPIR-V variable ID of the buffer.
		std::string mbr_name;
	};
	std::map<SetBindingPair, DynamicBuffer> buffers_requiring_dynamic_offset;
	SmallVector<uint32_t> disabled_frag_outputs;

	std::unordered_set<SetBindingPair, InternalHasher> inline_uniform_blocks;

	// Argument-buffer state.
	// NOTE(review): the masks are presumably bitmasks keyed by descriptor set —
	// confirm against the implementation.
	uint32_t argument_buffer_ids[kMaxArgumentBuffers];
	uint32_t argument_buffer_discrete_mask = 0;
	uint32_t argument_buffer_device_storage_mask = 0;

	void emit_argument_buffer_aliased_descriptor(const SPIRVariable &aliased_var,
	                                             const SPIRVariable &base_var);
	void analyze_argument_buffers();
	bool descriptor_set_is_argument_buffer(uint32_t desc_set) const;
	const MSLResourceBinding &get_argument_buffer_resource(uint32_t desc_set, uint32_t arg_idx) const;

	// Helpers which insert padding members into an argument-buffer struct;
	// mbr_idx and arg_buff_index are advanced as members are added.
	void add_argument_buffer_padding_buffer_type(SPIRType &struct_type, uint32_t &mbr_idx, uint32_t &arg_buff_index, MSLResourceBinding &rez_bind);
	void add_argument_buffer_padding_image_type(SPIRType &struct_type, uint32_t &mbr_idx, uint32_t &arg_buff_index, MSLResourceBinding &rez_bind);
	void add_argument_buffer_padding_sampler_type(SPIRType &struct_type, uint32_t &mbr_idx, uint32_t &arg_buff_index, MSLResourceBinding &rez_bind);
	void add_argument_buffer_padding_type(uint32_t mbr_type_id, SPIRType &struct_type, uint32_t &mbr_idx, uint32_t &arg_buff_index, uint32_t count);

	uint32_t get_target_components_for_fragment_location(uint32_t location) const;
	uint32_t build_extended_vector_type(uint32_t type_id, uint32_t components,
	                                    SPIRType::BaseType basetype = SPIRType::Unknown);
	uint32_t build_msl_interpolant_type(uint32_t type_id, bool is_noperspective);

	// Suppression flags for compatibility pragmas emitted into the shader.
	bool suppress_missing_prototypes = false;
	bool suppress_incompatible_pointer_types_discard_qualifiers = false;
	bool suppress_sometimes_unitialized = false;

	void add_spv_func_and_recompile(SPVFuncImpl spv_func);

	void activate_argument_buffer_resources();

	bool type_is_msl_framebuffer_fetch(const SPIRType &type) const;
	bool is_supported_argument_buffer_type(const SPIRType &type) const;

	bool variable_storage_requires_stage_io(StorageClass storage) const;
  1189. bool needs_manual_helper_invocation_updates() const
  1190. {
  1191. return msl_options.manual_helper_invocation_updates && msl_options.supports_msl_version(2, 3);
  1192. }
  1193. bool needs_frag_discard_checks() const
  1194. {
  1195. return get_execution_model() == ExecutionModelFragment && msl_options.supports_msl_version(2, 3) &&
  1196. msl_options.check_discarded_frag_stores && frag_shader_needs_discard_checks;
  1197. }
  1198. bool has_additional_fixed_sample_mask() const { return msl_options.additional_fixed_sample_mask != 0xffffffff; }
	// String form of msl_options.additional_fixed_sample_mask for emission into MSL source.
	std::string additional_fixed_sample_mask_str() const;
	// OpcodeHandler that handles several MSL preprocessing operations.
	struct OpCodePreprocessor : OpcodeHandler
	{
		explicit OpCodePreprocessor(CompilerMSL &compiler_)
		    : OpcodeHandler(compiler_), self(compiler_)
		{
			// Request that result types be tracked during traversal.
			enable_result_types = true;
		}

		// OpcodeHandler entry point; invoked for each instruction scanned.
		bool handle(Op opcode, const uint32_t *args, uint32_t length) override;
		// Determines which SPVFuncImpl helper (if any) an instruction requires.
		CompilerMSL::SPVFuncImpl get_spv_func_impl(Op opcode, const uint32_t *args, uint32_t length);
		void check_resource_write(uint32_t var_id);

		CompilerMSL &self;
		std::unordered_map<uint32_t, uint32_t> image_pointers_emulated; // Emulate texture2D atomic operations

		// Facts gathered during the scan, consumed by the owning compiler.
		bool suppress_missing_prototypes = false;
		bool uses_atomics = false;
		bool uses_image_write = false;
		bool uses_buffer_write = false;
		bool uses_discard = false;
		bool needs_local_invocation_index = false;
		bool needs_subgroup_invocation_id = false;
		bool needs_subgroup_size = false;
		bool needs_sample_id = false;
		bool needs_helper_invocation = false;
	};
	// OpcodeHandler that scans for uses of sampled images
	struct SampledImageScanner : OpcodeHandler
	{
		explicit SampledImageScanner(CompilerMSL &compiler_)
		    : OpcodeHandler(compiler_), self(compiler_)
		{
		}

		CompilerMSL &self;
		// OpcodeHandler entry point; invoked for each instruction scanned.
		bool handle(Op opcode, const uint32_t *args, uint32_t) override;
	};
	// Sorts the members of a SPIRType and associated Meta info based on a settable sorting
	// aspect, which defines which aspect of the struct members will be used to sort them.
	// Regardless of the sorting aspect, built-in members always appear at the end of the struct.
	struct MemberSorter
	{
		enum SortAspect
		{
			LocationThenBuiltInType, // Sort by location, then by built-in type.
			Offset                   // Sort by declared member offset.
		};

		void sort();
		// Comparison predicate over member indices, per the active sort aspect.
		bool operator()(uint32_t mbr_idx1, uint32_t mbr_idx2);

		MemberSorter(SPIRType &t, Meta &m, SortAspect sa);

		SPIRType &type;
		Meta &meta;
		SortAspect sort_aspect;
	};
  1251. };
  1252. } // namespace SPIRV_CROSS_NAMESPACE
  1253. #endif