// internal_interface.hpp
/*
 * Copyright 2015-2017 ARM Limited
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  17. #ifndef SPIRV_CROSS_INTERNAL_INTERFACE_HPP
  18. #define SPIRV_CROSS_INTERNAL_INTERFACE_HPP
  19. // This file must only be included by the shader generated by spirv-cross!
  20. #ifndef GLM_FORCE_SWIZZLE
  21. #define GLM_FORCE_SWIZZLE
  22. #endif
  23. #ifndef GLM_FORCE_RADIANS
  24. #define GLM_FORCE_RADIANS
  25. #endif
#include <glm/glm.hpp>

#include "barrier.hpp"
#include "external_interface.h"
#include "image.hpp"
#include "sampler.hpp"
#include "thread_group.hpp"

#include <assert.h>
#include <atomic>
#include <stdint.h>
  34. namespace internal
  35. {
  36. // Adaptor helpers to adapt GLSL access chain syntax to C++.
  37. // Don't bother with arrays of arrays on uniforms ...
  38. // Would likely need horribly complex variadic template munging.
  39. template <typename T>
  40. struct Interface
  41. {
  42. enum
  43. {
  44. ArraySize = 1,
  45. Size = sizeof(T)
  46. };
  47. Interface()
  48. : ptr(0)
  49. {
  50. }
  51. T &get()
  52. {
  53. assert(ptr);
  54. return *ptr;
  55. }
  56. T *ptr;
  57. };
  58. // For array types, return a pointer instead.
  59. template <typename T, unsigned U>
  60. struct Interface<T[U]>
  61. {
  62. enum
  63. {
  64. ArraySize = U,
  65. Size = U * sizeof(T)
  66. };
  67. Interface()
  68. : ptr(0)
  69. {
  70. }
  71. T *get()
  72. {
  73. assert(ptr);
  74. return ptr;
  75. }
  76. T *ptr;
  77. };
  78. // For case when array size is 1, avoid double dereference.
  79. template <typename T>
  80. struct PointerInterface
  81. {
  82. enum
  83. {
  84. ArraySize = 1,
  85. Size = sizeof(T *)
  86. };
  87. enum
  88. {
  89. PreDereference = true
  90. };
  91. PointerInterface()
  92. : ptr(0)
  93. {
  94. }
  95. T &get()
  96. {
  97. assert(ptr);
  98. return *ptr;
  99. }
  100. T *ptr;
  101. };
  102. // Automatically converts a pointer down to reference to match GLSL syntax.
  103. template <typename T>
  104. struct DereferenceAdaptor
  105. {
  106. DereferenceAdaptor(T **ptr)
  107. : ptr(ptr)
  108. {
  109. }
  110. T &operator[](unsigned index) const
  111. {
  112. return *(ptr[index]);
  113. }
  114. T **ptr;
  115. };
  116. // We can't have a linear array of T* since T* can be an abstract type in case of samplers.
  117. // We also need a list of pointers since we can have run-time length SSBOs.
  118. template <typename T, unsigned U>
  119. struct PointerInterface<T[U]>
  120. {
  121. enum
  122. {
  123. ArraySize = U,
  124. Size = sizeof(T *) * U
  125. };
  126. enum
  127. {
  128. PreDereference = false
  129. };
  130. PointerInterface()
  131. : ptr(0)
  132. {
  133. }
  134. DereferenceAdaptor<T> get()
  135. {
  136. assert(ptr);
  137. return DereferenceAdaptor<T>(ptr);
  138. }
  139. T **ptr;
  140. };
// Resources can be more abstract and be unsized,
// so we need to have an array of pointers for those cases.
// Thin tag type over PointerInterface<T>; it exists so that
// spirv_cross_shader::register_resource can overload on resource bindings.
template <typename T>
struct Resource : PointerInterface<T>
{
};
// POD with no unknown sizes, so we can express these as flat arrays.
// The wrappers below are thin tag types over Interface<T>; they only exist
// so the register_* overloads in spirv_cross_shader can tell variable kinds apart.
template <typename T>
struct UniformConstant : Interface<T>
{
};

// Stage input variable, registered by location index.
template <typename T>
struct StageInput : Interface<T>
{
};

// Stage output variable, registered by location index.
template <typename T>
struct StageOutput : Interface<T>
{
};

// Push-constant block storage (one per shader).
template <typename T>
struct PushConstant : Interface<T>
{
};
  164. }
// Runtime state behind the C interface (spirv_cross_shader_t).
// Holds registration tables mapping locations/sets/bindings/builtins to the
// pointer members inside the generated shader's Interface<>/Resource<> objects,
// so the host can patch in its own storage before invoking the shader.
struct spirv_cross_shader
{
	// One registered slot: address of an Interface<T>::ptr member to patch,
	// plus the minimum byte size the host must supply for it.
	struct PPSize
	{
		PPSize()
		    : ptr(0)
		    , size(0)
		{
		}
		void **ptr;
		size_t size;
	};

	// Same as PPSize, but resource slots also remember whether set_resource()
	// should dereference the incoming pointer ahead of time
	// (PointerInterface<T>::PreDereference).
	struct PPSizeResource
	{
		PPSizeResource()
		    : ptr(0)
		    , size(0)
		    , pre_dereference(false)
		{
		}
		void **ptr;
		size_t size;
		bool pre_dereference;
	};

	PPSizeResource resources[SPIRV_CROSS_NUM_DESCRIPTOR_SETS][SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS];
	PPSize stage_inputs[SPIRV_CROSS_NUM_STAGE_INPUTS];
	PPSize stage_outputs[SPIRV_CROSS_NUM_STAGE_OUTPUTS];
	PPSize uniform_constants[SPIRV_CROSS_NUM_UNIFORM_CONSTANTS];
	PPSize push_constant;
	PPSize builtins[SPIRV_CROSS_NUM_BUILTINS];

	// Record where a builtin's backing pointer lives so set_builtin() can patch it.
	// Called once per builtin by the generated resource structs (e.g. *Resources::init).
	// NOTE(review): the cast below strips const from value.ptr's address even
	// though value is taken by const ref — callers must pass mutable objects.
	template <typename U>
	void register_builtin(spirv_cross_builtin builtin, const U &value)
	{
		assert(!builtins[builtin].ptr);

		builtins[builtin].ptr = (void **)&value.ptr;
		builtins[builtin].size = sizeof(*value.ptr) * U::ArraySize;
	}

	// Bind host storage to a previously registered builtin.
	// Size is validated only by assert (a no-op in release builds).
	void set_builtin(spirv_cross_builtin builtin, void *data, size_t size)
	{
		assert(builtins[builtin].ptr);
		assert(size >= builtins[builtin].size);

		*builtins[builtin].ptr = data;
	}

	// Record a resource binding (set/binding), including its pre-dereference mode.
	template <typename U>
	void register_resource(const internal::Resource<U> &value, unsigned set, unsigned binding)
	{
		assert(set < SPIRV_CROSS_NUM_DESCRIPTOR_SETS);
		assert(binding < SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS);
		assert(!resources[set][binding].ptr);

		resources[set][binding].ptr = (void **)&value.ptr;
		resources[set][binding].size = internal::Resource<U>::Size;
		resources[set][binding].pre_dereference = internal::Resource<U>::PreDereference;
	}

	// Record a stage input slot by location.
	template <typename U>
	void register_stage_input(const internal::StageInput<U> &value, unsigned location)
	{
		assert(location < SPIRV_CROSS_NUM_STAGE_INPUTS);
		assert(!stage_inputs[location].ptr);

		stage_inputs[location].ptr = (void **)&value.ptr;
		stage_inputs[location].size = internal::StageInput<U>::Size;
	}

	// Record a stage output slot by location.
	template <typename U>
	void register_stage_output(const internal::StageOutput<U> &value, unsigned location)
	{
		assert(location < SPIRV_CROSS_NUM_STAGE_OUTPUTS);
		assert(!stage_outputs[location].ptr);

		stage_outputs[location].ptr = (void **)&value.ptr;
		stage_outputs[location].size = internal::StageOutput<U>::Size;
	}

	// Record a uniform constant slot by location.
	template <typename U>
	void register_uniform_constant(const internal::UniformConstant<U> &value, unsigned location)
	{
		assert(location < SPIRV_CROSS_NUM_UNIFORM_CONSTANTS);
		assert(!uniform_constants[location].ptr);

		uniform_constants[location].ptr = (void **)&value.ptr;
		uniform_constants[location].size = internal::UniformConstant<U>::Size;
	}

	// Record the (single) push constant block.
	template <typename U>
	void register_push_constant(const internal::PushConstant<U> &value)
	{
		assert(!push_constant.ptr);

		push_constant.ptr = (void **)&value.ptr;
		push_constant.size = internal::PushConstant<U>::Size;
	}

	// Bind host storage to a registered stage input.
	void set_stage_input(unsigned location, void *data, size_t size)
	{
		assert(location < SPIRV_CROSS_NUM_STAGE_INPUTS);
		assert(stage_inputs[location].ptr);
		assert(size >= stage_inputs[location].size);

		*stage_inputs[location].ptr = data;
	}

	// Bind host storage to a registered stage output.
	void set_stage_output(unsigned location, void *data, size_t size)
	{
		assert(location < SPIRV_CROSS_NUM_STAGE_OUTPUTS);
		assert(stage_outputs[location].ptr);
		assert(size >= stage_outputs[location].size);

		*stage_outputs[location].ptr = data;
	}

	// Bind host storage to a registered uniform constant.
	void set_uniform_constant(unsigned location, void *data, size_t size)
	{
		assert(location < SPIRV_CROSS_NUM_UNIFORM_CONSTANTS);
		assert(uniform_constants[location].ptr);
		assert(size >= uniform_constants[location].size);

		*uniform_constants[location].ptr = data;
	}

	// Bind host storage to the push constant block.
	void set_push_constant(void *data, size_t size)
	{
		assert(push_constant.ptr);
		assert(size >= push_constant.size);

		*push_constant.ptr = data;
	}

	// Bind host storage to a registered resource. data is a pointer to the
	// host's pointer (or pointer array, for unsized/array resources).
	void set_resource(unsigned set, unsigned binding, void **data, size_t size)
	{
		assert(set < SPIRV_CROSS_NUM_DESCRIPTOR_SETS);
		assert(binding < SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS);
		assert(resources[set][binding].ptr);
		assert(size >= resources[set][binding].size);

		// We're using the regular PointerInterface, dereference ahead of time.
		if (resources[set][binding].pre_dereference)
			*resources[set][binding].ptr = *data;
		else
			*resources[set][binding].ptr = data;
	}
};
  289. namespace spirv_cross
  290. {
  291. template <typename T>
  292. struct BaseShader : spirv_cross_shader
  293. {
  294. void invoke()
  295. {
  296. static_cast<T *>(this)->main();
  297. }
  298. };
// Builtin resources wired into every fragment shader.
// NOTE(review): gl_FragCoord is declared StageOutput though GLSL treats it as
// a read-only input — the Interface storage is identical either way; confirm intent.
struct FragmentResources
{
	internal::StageOutput<glm::vec4> gl_FragCoord;
	void init(spirv_cross_shader &s)
	{
		s.register_builtin(SPIRV_CROSS_BUILTIN_FRAG_COORD, gl_FragCoord);
	}
// Macro, not a member: rewrites gl_FragCoord in generated code to route
// through the implementation's __res pointer.
#define gl_FragCoord __res->gl_FragCoord.get()
};
  308. template <typename T, typename Res>
  309. struct FragmentShader : BaseShader<FragmentShader<T, Res>>
  310. {
  311. inline void main()
  312. {
  313. impl.main();
  314. }
  315. FragmentShader()
  316. {
  317. resources.init(*this);
  318. impl.__res = &resources;
  319. }
  320. T impl;
  321. Res resources;
  322. };
// Builtin resources wired into every vertex shader.
struct VertexResources
{
	internal::StageOutput<glm::vec4> gl_Position;
	void init(spirv_cross_shader &s)
	{
		s.register_builtin(SPIRV_CROSS_BUILTIN_POSITION, gl_Position);
	}
// Macro, not a member: rewrites gl_Position in generated code to route
// through the implementation's __res pointer.
#define gl_Position __res->gl_Position.get()
};
  332. template <typename T, typename Res>
  333. struct VertexShader : BaseShader<VertexShader<T, Res>>
  334. {
  335. inline void main()
  336. {
  337. impl.main();
  338. }
  339. VertexShader()
  340. {
  341. resources.init(*this);
  342. impl.__res = &resources;
  343. }
  344. T impl;
  345. Res resources;
  346. };
// Tessellation evaluation shaders have no builtins wired up yet; init is a no-op.
struct TessEvaluationResources
{
	inline void init(spirv_cross_shader &)
	{
	}
};
  353. template <typename T, typename Res>
  354. struct TessEvaluationShader : BaseShader<TessEvaluationShader<T, Res>>
  355. {
  356. inline void main()
  357. {
  358. impl.main();
  359. }
  360. TessEvaluationShader()
  361. {
  362. resources.init(*this);
  363. impl.__res = &resources;
  364. }
  365. T impl;
  366. Res resources;
  367. };
// Tessellation control shaders have no builtins wired up yet; init is a no-op.
struct TessControlResources
{
	inline void init(spirv_cross_shader &)
	{
	}
};
  374. template <typename T, typename Res>
  375. struct TessControlShader : BaseShader<TessControlShader<T, Res>>
  376. {
  377. inline void main()
  378. {
  379. impl.main();
  380. }
  381. TessControlShader()
  382. {
  383. resources.init(*this);
  384. impl.__res = &resources;
  385. }
  386. T impl;
  387. Res resources;
  388. };
// Geometry shaders have no builtins wired up yet; init is a no-op.
struct GeometryResources
{
	inline void init(spirv_cross_shader &)
	{
	}
};
  395. template <typename T, typename Res>
  396. struct GeometryShader : BaseShader<GeometryShader<T, Res>>
  397. {
  398. inline void main()
  399. {
  400. impl.main();
  401. }
  402. GeometryShader()
  403. {
  404. resources.init(*this);
  405. impl.__res = &resources;
  406. }
  407. T impl;
  408. Res resources;
  409. };
// Builtin resources shared by all invocations in a compute work group.
// Members carry a trailing "__" so their names stay distinct from the
// GLSL-compatibility macros defined below.
struct ComputeResources
{
	internal::StageInput<glm::uvec3> gl_WorkGroupID__;
	internal::StageInput<glm::uvec3> gl_NumWorkGroups__;
	void init(spirv_cross_shader &s)
	{
		s.register_builtin(SPIRV_CROSS_BUILTIN_WORK_GROUP_ID, gl_WorkGroupID__);
		s.register_builtin(SPIRV_CROSS_BUILTIN_NUM_WORK_GROUPS, gl_NumWorkGroups__);
	}
// Macros, not members: route the GLSL builtin names through __res.
#define gl_WorkGroupID __res->gl_WorkGroupID__.get()
#define gl_NumWorkGroups __res->gl_NumWorkGroups__.get()
	// One barrier shared by the whole work group; generated barrier() calls wait on it.
	Barrier barrier__;
#define barrier() __res->barrier__.wait()
};
// Per-invocation builtins; each invocation object in ComputeShader::impl
// gets its own copy, filled in by the ComputeShader constructor and main().
struct ComputePrivateResources
{
	uint32_t gl_LocalInvocationIndex__;
#define gl_LocalInvocationIndex __priv_res.gl_LocalInvocationIndex__
	glm::uvec3 gl_LocalInvocationID__;
#define gl_LocalInvocationID __priv_res.gl_LocalInvocationID__
	glm::uvec3 gl_GlobalInvocationID__;
#define gl_GlobalInvocationID __priv_res.gl_GlobalInvocationID__
};
  433. template <typename T, typename Res, unsigned WorkGroupX, unsigned WorkGroupY, unsigned WorkGroupZ>
  434. struct ComputeShader : BaseShader<ComputeShader<T, Res, WorkGroupX, WorkGroupY, WorkGroupZ>>
  435. {
  436. inline void main()
  437. {
  438. resources.barrier__.reset_counter();
  439. for (unsigned z = 0; z < WorkGroupZ; z++)
  440. for (unsigned y = 0; y < WorkGroupY; y++)
  441. for (unsigned x = 0; x < WorkGroupX; x++)
  442. impl[z][y][x].__priv_res.gl_GlobalInvocationID__ =
  443. glm::uvec3(WorkGroupX, WorkGroupY, WorkGroupZ) * resources.gl_WorkGroupID__.get() +
  444. glm::uvec3(x, y, z);
  445. group.run();
  446. group.wait();
  447. }
  448. ComputeShader()
  449. : group(&impl[0][0][0])
  450. {
  451. resources.init(*this);
  452. resources.barrier__.set_release_divisor(WorkGroupX * WorkGroupY * WorkGroupZ);
  453. unsigned i = 0;
  454. for (unsigned z = 0; z < WorkGroupZ; z++)
  455. {
  456. for (unsigned y = 0; y < WorkGroupY; y++)
  457. {
  458. for (unsigned x = 0; x < WorkGroupX; x++)
  459. {
  460. impl[z][y][x].__priv_res.gl_LocalInvocationID__ = glm::uvec3(x, y, z);
  461. impl[z][y][x].__priv_res.gl_LocalInvocationIndex__ = i++;
  462. impl[z][y][x].__res = &resources;
  463. }
  464. }
  465. }
  466. }
  467. T impl[WorkGroupZ][WorkGroupY][WorkGroupX];
  468. ThreadGroup<T, WorkGroupX * WorkGroupY * WorkGroupZ> group;
  469. Res resources;
  470. };
// GLSL memory barrier builtins. Both currently map to the same full
// barrier provided by Barrier::memoryBarrier().
inline void memoryBarrierShared()
{
	Barrier::memoryBarrier();
}
inline void memoryBarrier()
{
	Barrier::memoryBarrier();
}
// TODO: Rest of the barriers.
  480. // Atomics
  481. template <typename T>
  482. inline T atomicAdd(T &v, T a)
  483. {
  484. static_assert(sizeof(std::atomic<T>) == sizeof(T), "Cannot cast properly to std::atomic<T>.");
  485. // We need explicit memory barriers in GLSL to enfore any ordering.
  486. // FIXME: Can we really cast this? There is no other way I think ...
  487. return std::atomic_fetch_add_explicit(reinterpret_cast<std::atomic<T> *>(&v), a, std::memory_order_relaxed);
  488. }
  489. }
// C ABI entry points (declared in external_interface.h). Each is a thin
// forwarding wrapper over the corresponding spirv_cross_shader method;
// all size checks happen inside those methods via assert.
void spirv_cross_set_stage_input(spirv_cross_shader_t *shader, unsigned location, void *data, size_t size)
{
	shader->set_stage_input(location, data, size);
}

void spirv_cross_set_stage_output(spirv_cross_shader_t *shader, unsigned location, void *data, size_t size)
{
	shader->set_stage_output(location, data, size);
}

void spirv_cross_set_uniform_constant(spirv_cross_shader_t *shader, unsigned location, void *data, size_t size)
{
	shader->set_uniform_constant(location, data, size);
}

void spirv_cross_set_resource(spirv_cross_shader_t *shader, unsigned set, unsigned binding, void **data, size_t size)
{
	shader->set_resource(set, binding, data, size);
}

void spirv_cross_set_push_constant(spirv_cross_shader_t *shader, void *data, size_t size)
{
	shader->set_push_constant(data, size);
}

void spirv_cross_set_builtin(spirv_cross_shader_t *shader, spirv_cross_builtin builtin, void *data, size_t size)
{
	shader->set_builtin(builtin, data, size);
}
  514. #endif