  1. // Copyright (C) 2009-present, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/Gr/Vulkan/VkShaderProgram.h>
  6. #include <AnKi/Gr/Vulkan/VkShader.h>
  7. #include <AnKi/Gr/Vulkan/VkGrManager.h>
  8. #include <AnKi/Gr/Vulkan/VkGraphicsState.h>
  9. #include <AnKi/Gr/BackendCommon/Functions.h>
  10. #include <AnKi/Gr/Vulkan/VkBuffer.h>
  11. #include <AnKi/ShaderCompiler/Dxc.h>
  12. #include <ThirdParty/SpirvCross/spirv.hpp>
  13. namespace anki {
  14. /// Used to avoid keeping alive many shader modules that are essentially the same code. Mainly used to save memory because graphics ShaderPrograms
  15. /// need to keep alive the shader modules for later when the pipeline is created.
  16. class ShaderModuleFactory : public MakeSingletonSimple<ShaderModuleFactory>
  17. {
  18. public:
  19. ~ShaderModuleFactory()
  20. {
  21. ANKI_ASSERT(m_entries.getSize() == 0 && "Forgot to release shader modules");
  22. }
  23. /// @note Thread-safe
  24. VkShaderModule getOrCreateShaderModule(ConstWeakArray<U32> spirv)
  25. {
  26. const U64 hash = computeHash(spirv.getBegin(), spirv.getSizeInBytes());
  27. LockGuard lock(m_mtx);
  28. Entry* entry = nullptr;
  29. for(Entry& e : m_entries)
  30. {
  31. if(e.m_hash == hash)
  32. {
  33. entry = &e;
  34. break;
  35. }
  36. }
  37. if(entry)
  38. {
  39. ++entry->m_refcount;
  40. return entry->m_module;
  41. }
  42. else
  43. {
  44. VkShaderModuleCreateInfo ci = {};
  45. ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
  46. ci.codeSize = spirv.getSizeInBytes();
  47. ci.pCode = spirv.getBegin();
  48. Entry entry;
  49. ANKI_VK_CHECKF(vkCreateShaderModule(getVkDevice(), &ci, nullptr, &entry.m_module));
  50. entry.m_hash = hash;
  51. m_entries.emplaceBack(entry);
  52. return entry.m_module;
  53. }
  54. }
  55. /// @note Thread-safe
  56. void releaseShaderModule(VkShaderModule smodule)
  57. {
  58. LockGuard lock(m_mtx);
  59. U32 idx = kMaxU32;
  60. for(U32 i = 0; i < m_entries.getSize(); ++i)
  61. {
  62. if(m_entries[i].m_module == smodule)
  63. {
  64. idx = i;
  65. break;
  66. }
  67. }
  68. ANKI_ASSERT(idx != kMaxU32);
  69. ANKI_ASSERT(m_entries[idx].m_refcount > 0);
  70. --m_entries[idx].m_refcount;
  71. if(m_entries[idx].m_refcount == 0)
  72. {
  73. vkDestroyShaderModule(getVkDevice(), m_entries[idx].m_module, nullptr);
  74. m_entries.erase(m_entries.getBegin() + idx);
  75. }
  76. }
  77. private:
  78. class Entry
  79. {
  80. public:
  81. U64 m_hash = 0;
  82. VkShaderModule m_module = 0;
  83. U32 m_refcount = 1;
  84. };
  85. GrDynamicArray<Entry> m_entries;
  86. Mutex m_mtx;
  87. };
  88. ShaderProgram* ShaderProgram::newInstance(const ShaderProgramInitInfo& init)
  89. {
  90. ShaderProgramImpl* impl = anki::newInstance<ShaderProgramImpl>(GrMemoryPool::getSingleton(), init.getName());
  91. const Error err = impl->init(init);
  92. if(err)
  93. {
  94. deleteInstance(GrMemoryPool::getSingleton(), impl);
  95. impl = nullptr;
  96. }
  97. return impl;
  98. }
  99. ConstWeakArray<U8> ShaderProgram::getShaderGroupHandles() const
  100. {
  101. return static_cast<const ShaderProgramImpl&>(*this).getShaderGroupHandlesInternal();
  102. }
  103. Buffer& ShaderProgram::getShaderGroupHandlesGpuBuffer() const
  104. {
  105. return static_cast<const ShaderProgramImpl&>(*this).getShaderGroupHandlesGpuBufferInternal();
  106. }
  107. ShaderProgramImpl::~ShaderProgramImpl()
  108. {
  109. const Bool graphicsProg = !!(m_shaderTypes & ShaderTypeBit::kAllGraphics);
  110. if(graphicsProg)
  111. {
  112. for(const VkPipelineShaderStageCreateInfo& ci : m_graphics.m_shaderCreateInfos)
  113. {
  114. if(ci.module != 0)
  115. {
  116. ShaderModuleFactory::getSingleton().releaseShaderModule(ci.module);
  117. }
  118. }
  119. }
  120. if(m_graphics.m_pplineFactory)
  121. {
  122. deleteInstance(GrMemoryPool::getSingleton(), m_graphics.m_pplineFactory);
  123. }
  124. if(m_compute.m_ppline)
  125. {
  126. vkDestroyPipeline(getVkDevice(), m_compute.m_ppline, nullptr);
  127. }
  128. if(m_rt.m_ppline)
  129. {
  130. vkDestroyPipeline(getVkDevice(), m_rt.m_ppline, nullptr);
  131. }
  132. }
/// Initialize the program: gather shader refs, link reflection, rewrite SPIR-V bindings, create shader modules, the
/// pipeline layout and (for compute/RT) the pipeline itself. Returns a non-kNone Error on failure.
Error ShaderProgramImpl::init(const ShaderProgramInitInfo& inf)
{
	ANKI_ASSERT(inf.isValid());

	// Create the shader references
	//
	GrHashMap<U64, U32> shaderUuidToMShadersIdx; // Shader UUID to m_shaders idx
	if(inf.m_computeShader)
	{
		// Compute program: exactly one shader
		m_shaders.emplaceBack(inf.m_computeShader);
	}
	else if(inf.m_graphicsShaders[ShaderType::kFragment])
	{
		// Graphics program: keep every stage that is present
		for(Shader* s : inf.m_graphicsShaders)
		{
			if(s)
			{
				m_shaders.emplaceBack(s);
			}
		}
	}
	else
	{
		// Ray tracing
		m_shaders.resizeStorage(inf.m_rayTracingShaders.m_rayGenShaders.getSize() + inf.m_rayTracingShaders.m_missShaders.getSize()
								+ 1); // Plus at least one hit shader

		for(Shader* s : inf.m_rayTracingShaders.m_rayGenShaders)
		{
			m_shaders.emplaceBack(s);
		}

		for(Shader* s : inf.m_rayTracingShaders.m_missShaders)
		{
			m_shaders.emplaceBack(s);
		}

		m_rt.m_missShaderCount = inf.m_rayTracingShaders.m_missShaders.getSize();

		// Hit shaders may be shared between hit groups: deduplicate them by shader UUID and remember where each one
		// landed in m_shaders (the index doubles as the VkPipelineShaderStageCreateInfo index later)
		for(const RayTracingHitGroup& group : inf.m_rayTracingShaders.m_hitGroups)
		{
			if(group.m_anyHitShader)
			{
				auto it = shaderUuidToMShadersIdx.find(group.m_anyHitShader->getUuid());
				if(it == shaderUuidToMShadersIdx.getEnd())
				{
					shaderUuidToMShadersIdx.emplace(group.m_anyHitShader->getUuid(), m_shaders.getSize());
					m_shaders.emplaceBack(group.m_anyHitShader);
				}
			}

			if(group.m_closestHitShader)
			{
				auto it = shaderUuidToMShadersIdx.find(group.m_closestHitShader->getUuid());
				if(it == shaderUuidToMShadersIdx.getEnd())
				{
					shaderUuidToMShadersIdx.emplace(group.m_closestHitShader->getUuid(), m_shaders.getSize());
					m_shaders.emplaceBack(group.m_closestHitShader);
				}
			}
		}
	}

	ANKI_ASSERT(m_shaders.getSize() > 0);

	// Link reflection
	//
	Bool firstLink = true;
	for(ShaderPtr& shader : m_shaders)
	{
		m_shaderTypes |= ShaderTypeBit(1 << shader->getShaderType());

		// Merge every shader's reflection into m_refl, validating after each step
		const ShaderImpl& simpl = static_cast<const ShaderImpl&>(*shader);
		if(firstLink)
		{
			m_refl = simpl.m_reflection;
			firstLink = false;
		}
		else
		{
			ANKI_CHECK(ShaderReflection::linkShaderReflection(m_refl, simpl.m_reflection, m_refl));
		}

		m_refl.validate();
	}

	// Rewrite SPIR-V to fix the bindings
	//
	GrDynamicArray<GrDynamicArray<U32>> rewrittenSpirvs;
	rewriteSpirv(m_refl.m_descriptor, rewrittenSpirvs);

	// Create the shader modules
	//
	const Bool graphicsProg = !!(m_shaderTypes & ShaderTypeBit::kAllGraphics);
	GrDynamicArray<VkShaderModule> shaderModules;
	shaderModules.resize(m_shaders.getSize());
	for(U32 ishader = 0; ishader < shaderModules.getSize(); ++ishader)
	{
		if(graphicsProg)
		{
			// Graphics prog, need to keep the modules alive for later (factory deduplicates and refcounts them)
			shaderModules[ishader] = ShaderModuleFactory::getSingleton().getOrCreateShaderModule(rewrittenSpirvs[ishader]);
		}
		else
		{
			// Compute/RT: modules are short-lived, destroyed at the end of this function
			VkShaderModuleCreateInfo ci = {};
			ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
			ci.codeSize = rewrittenSpirvs[ishader].getSizeInBytes();
			ci.pCode = rewrittenSpirvs[ishader].getBegin();
			ANKI_VK_CHECK(vkCreateShaderModule(getVkDevice(), &ci, nullptr, &shaderModules[ishader]));
		}
	}

	// Create the ppline layout
	//
	ANKI_CHECK(PipelineLayoutFactory2::getSingleton().getOrCreatePipelineLayout(m_refl.m_descriptor, m_pplineLayout));

	// Init the create infos (graphics pipelines are built lazily later, so stash the stage infos now)
	//
	if(graphicsProg)
	{
		for(U32 ishader = 0; ishader < m_shaders.getSize(); ++ishader)
		{
			const ShaderImpl& shaderImpl = static_cast<const ShaderImpl&>(*m_shaders[ishader]);

			VkPipelineShaderStageCreateInfo& createInf = m_graphics.m_shaderCreateInfos[m_graphics.m_shaderCreateInfoCount++];
			createInf = {};
			createInf.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
			createInf.stage = VkShaderStageFlagBits(convertShaderTypeBit(ShaderTypeBit(1 << shaderImpl.getShaderType())));
			createInf.pName = "main";
			createInf.module = shaderModules[ishader];
		}
	}

	// Create the factory
	//
	if(graphicsProg)
	{
		m_graphics.m_pplineFactory = anki::newInstance<GraphicsPipelineFactory>(GrMemoryPool::getSingleton());
	}

	// Create the pipeline if compute
	//
	if(!!(m_shaderTypes & ShaderTypeBit::kCompute))
	{
		VkComputePipelineCreateInfo ci = {};

		// Capture pipeline stats when the extension is available (used by printPipelineShaderInfo below)
		if(!!(getGrManagerImpl().getExtensions() & VulkanExtensions::kKHR_pipeline_executable_properties))
		{
			ci.flags |= VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR;
		}

		ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
		ci.layout = m_pplineLayout->getHandle();
		ci.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
		ci.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
		ci.stage.pName = "main";
		ci.stage.module = shaderModules[0]; // Compute programs have exactly one shader (see top of function)

		ANKI_TRACE_SCOPED_EVENT(VkPipelineCreate);
		ANKI_VK_CHECK(vkCreateComputePipelines(getVkDevice(), PipelineCache::getSingleton().m_cacheHandle, 1, &ci, nullptr, &m_compute.m_ppline));

		getGrManagerImpl().printPipelineShaderInfo(m_compute.m_ppline, getName());
	}

	// Create the RT pipeline
	//
	if(!!(m_shaderTypes & ShaderTypeBit::kAllRayTracing))
	{
		// Create shaders
		GrDynamicArray<VkPipelineShaderStageCreateInfo> stages;
		stages.resize(m_shaders.getSize());
		for(U32 i = 0; i < stages.getSize(); ++i)
		{
			const ShaderImpl& impl = static_cast<const ShaderImpl&>(*m_shaders[i]);

			VkPipelineShaderStageCreateInfo& stage = stages[i];
			stage = {};
			stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
			stage.stage = VkShaderStageFlagBits(convertShaderTypeBit(ShaderTypeBit(1 << impl.getShaderType())));
			stage.pName = "main";
			stage.module = shaderModules[i];
		}

		// Create groups. All group members default to VK_SHADER_UNUSED_KHR and are filled in selectively below
		VkRayTracingShaderGroupCreateInfoKHR defaultGroup = {};
		defaultGroup.sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR;
		defaultGroup.generalShader = VK_SHADER_UNUSED_KHR;
		defaultGroup.closestHitShader = VK_SHADER_UNUSED_KHR;
		defaultGroup.anyHitShader = VK_SHADER_UNUSED_KHR;
		defaultGroup.intersectionShader = VK_SHADER_UNUSED_KHR;

		U32 groupCount = inf.m_rayTracingShaders.m_rayGenShaders.getSize() + inf.m_rayTracingShaders.m_missShaders.getSize()
						 + inf.m_rayTracingShaders.m_hitGroups.getSize();
		GrDynamicArray<VkRayTracingShaderGroupCreateInfoKHR> groups;
		groups.resize(groupCount, defaultGroup);

		// 1st group is the ray gen.
		// NOTE(review): generalShader = groupCount relies on m_shaders holding raygen shaders first, then miss
		// shaders, in the same order (established at the top of this function) — keep those orderings in sync
		groupCount = 0;
		for(U32 i = 0; i < inf.m_rayTracingShaders.m_rayGenShaders.getSize(); ++i)
		{
			groups[groupCount].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
			groups[groupCount].generalShader = groupCount;
			++groupCount;
		}

		// Miss
		for(U32 i = 0; i < inf.m_rayTracingShaders.m_missShaders.getSize(); ++i)
		{
			groups[groupCount].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
			groups[groupCount].generalShader = groupCount;
			++groupCount;
		}

		// The rest of the groups are hit. Hit shader stage indices come from the dedup map built earlier
		for(U32 i = 0; i < inf.m_rayTracingShaders.m_hitGroups.getSize(); ++i)
		{
			groups[groupCount].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR;
			if(inf.m_rayTracingShaders.m_hitGroups[i].m_anyHitShader)
			{
				groups[groupCount].anyHitShader = *shaderUuidToMShadersIdx.find(inf.m_rayTracingShaders.m_hitGroups[i].m_anyHitShader->getUuid());
			}

			if(inf.m_rayTracingShaders.m_hitGroups[i].m_closestHitShader)
			{
				groups[groupCount].closestHitShader =
					*shaderUuidToMShadersIdx.find(inf.m_rayTracingShaders.m_hitGroups[i].m_closestHitShader->getUuid());
			}
			++groupCount;
		}

		ANKI_ASSERT(groupCount == groups.getSize());

		VkRayTracingPipelineCreateInfoKHR ci = {};
		ci.sType = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR;
		ci.stageCount = stages.getSize();
		ci.pStages = &stages[0];
		ci.groupCount = groups.getSize();
		ci.pGroups = &groups[0];
		ci.maxPipelineRayRecursionDepth = inf.m_rayTracingShaders.m_maxRecursionDepth;
		ci.layout = m_pplineLayout->getHandle();

		{
			ANKI_TRACE_SCOPED_EVENT(VkPipelineCreate);
			ANKI_VK_CHECK(vkCreateRayTracingPipelinesKHR(getVkDevice(), VK_NULL_HANDLE, PipelineCache::getSingleton().m_cacheHandle, 1, &ci, nullptr,
														 &m_rt.m_ppline));
		}

		// Get RT handles (shader-group handles for building shader binding tables)
		const U32 handleArraySize = getGrManagerImpl().getPhysicalDeviceRayTracingProperties().shaderGroupHandleSize * groupCount;
		m_rt.m_allHandles.resize(handleArraySize, 0_U8);
		ANKI_VK_CHECK(vkGetRayTracingShaderGroupHandlesKHR(getVkDevice(), m_rt.m_ppline, 0, groupCount, handleArraySize, &m_rt.m_allHandles[0]));

		// Upload RT handles to a GPU buffer as well (also kept in CPU memory in m_allHandles)
		BufferInitInfo buffInit("RT handles");
		buffInit.m_size = m_rt.m_allHandles.getSizeInBytes();
		buffInit.m_mapAccess = BufferMapAccessBit::kWrite;
		buffInit.m_usage = BufferUsageBit::kAllCompute & BufferUsageBit::kAllRead;
		m_rt.m_allHandlesBuff = getGrManagerImpl().newBuffer(buffInit);
		void* mapped = m_rt.m_allHandlesBuff->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite);
		memcpy(mapped, m_rt.m_allHandles.getBegin(), m_rt.m_allHandles.getSizeInBytes());
		m_rt.m_allHandlesBuff->unmap();
	}

	// Get shader sizes and a few other things
	//
	for(const ShaderPtr& s : m_shaders)
	{
		if(!s.isCreated())
		{
			continue;
		}

		const ShaderType type = s->getShaderType();
		const U32 size = s->getShaderBinarySize();

		m_shaderBinarySizes[type] = size;
	}

	// Non graphics programs have created their pipeline, destroy the shader modules
	//
	if(!graphicsProg)
	{
		for(VkShaderModule smodule : shaderModules)
		{
			vkDestroyShaderModule(getVkDevice(), smodule, nullptr);
		}
	}

	return Error::kNone;
}
/// Rewrite a copy of every shader's SPIR-V so that the DXC-shifted register bindings become compact per-set Vulkan
/// bindings, and remap the bindless descriptor set (if present) to a free set index. Mutates @a refl with the chosen
/// m_vkBinding values and m_vkBindlessDescriptorSet, and fills @a rewrittenSpirvs (one blob per entry of m_shaders).
void ShaderProgramImpl::rewriteSpirv(ShaderReflectionDescriptorRelated& refl, GrDynamicArray<GrDynamicArray<U32>>& rewrittenSpirvs)
{
	// Find a binding for the bindless DS: pick the first register space that has no regular bindings
	if(refl.m_hasVkBindlessDescriptorSet)
	{
		for(U8 iset = 0; iset < kMaxRegisterSpaces; ++iset)
		{
			if(refl.m_bindingCounts[iset] == 0)
			{
				refl.m_vkBindlessDescriptorSet = iset;
				break;
			}
		}
	}

	// Re-write all SPIRVs and compute the new bindings
	rewrittenSpirvs.resize(m_shaders.getSize());
	Bool hasBindless = false;
	Array<U16, kMaxRegisterSpaces> vkBindingCount = {}; // Next free Vulkan binding per set, shared across all shaders
	for(U32 ishader = 0; ishader < m_shaders.getSize(); ++ishader)
	{
		// Work on a copy so the original shader binary stays untouched
		ConstWeakArray<U32> inSpirv = static_cast<const ShaderImpl&>(*m_shaders[ishader]).m_spirvBin;

		GrDynamicArray<U32>& outSpv = rewrittenSpirvs[ishader];
		outSpv.resize(inSpirv.getSize());
		memcpy(outSpv.getBegin(), inSpirv.getBegin(), inSpirv.getSizeInBytes());

		// For OpDecorate: instructions[1] is the decoration kind and instructions[2] its operand (binding/set number)
		visitSpirv(WeakArray<U32>(outSpv), [&](U32 cmd, WeakArray<U32> instructions) {
			if(cmd == spv::OpDecorate && instructions[1] == spv::DecorationBinding
			   && instructions[2] >= kDxcVkBindingShifts[0][HlslResourceType::kFirst]
			   && instructions[2] < kDxcVkBindingShifts[kMaxRegisterSpaces - 1][HlslResourceType::kCount - 1])
			{
				const U32 binding = instructions[2];

				// Look at the binding and derive a few things. See the DXC compilation on what they mean
				U32 set = kMaxRegisterSpaces;
				HlslResourceType hlslResourceType = HlslResourceType::kCount;
				for(set = 0; set < kMaxRegisterSpaces; ++set)
				{
					for(HlslResourceType hlslResourceType_ : EnumIterable<HlslResourceType>())
					{
						// Each (space, resource type) pair owns a contiguous 1000-wide binding range; presumably no
						// shader uses >=1000 registers of one type — the shifts in Dxc.h should guarantee this
						if(binding >= kDxcVkBindingShifts[set][hlslResourceType_] && binding < kDxcVkBindingShifts[set][hlslResourceType_] + 1000)
						{
							hlslResourceType = hlslResourceType_;
							break;
						}
					}

					if(hlslResourceType != HlslResourceType::kCount)
					{
						break;
					}
				}
				ANKI_ASSERT(set < kMaxRegisterSpaces);
				ANKI_ASSERT(hlslResourceType < HlslResourceType::kCount);
				const U32 registerBindingPoint = binding - kDxcVkBindingShifts[set][hlslResourceType];

				// Find the binding in the linked reflection (match by register point AND resource type)
				U32 foundBindingIdx = kMaxU32;
				for(U32 i = 0; i < refl.m_bindingCounts[set]; ++i)
				{
					const ShaderReflectionBinding& x = refl.m_bindings[set][i];
					if(x.m_registerBindingPoint == registerBindingPoint && hlslResourceType == descriptorTypeToHlslResourceType(x.m_type))
					{
						ANKI_ASSERT(foundBindingIdx == kMaxU32); // Expect a unique match
						foundBindingIdx = i;
					}
				}

				// Rewrite it
				ANKI_ASSERT(foundBindingIdx != kMaxU32);
				if(refl.m_bindings[set][foundBindingIdx].m_vkBinding != kMaxU16)
				{
					// Binding was set in another shader, just rewrite the SPIR-V
					instructions[2] = refl.m_bindings[set][foundBindingIdx].m_vkBinding;
				}
				else
				{
					// Binding is new: allocate the next free Vulkan binding for this set
					refl.m_bindings[set][foundBindingIdx].m_vkBinding = vkBindingCount[set];
					instructions[2] = vkBindingCount[set];
					++vkBindingCount[set];
				}
			}
			else if(cmd == spv::OpDecorate && instructions[1] == spv::DecorationDescriptorSet && instructions[2] == kDxcVkBindlessRegisterSpace)
			{
				// Bindless set, rewrite its set to the index chosen at the top of this function
				instructions[2] = refl.m_vkBindlessDescriptorSet;
				hasBindless = true;
			}
		});
	}

	// Reflection and SPIR-V must agree on whether a bindless set exists
	ANKI_ASSERT(hasBindless == refl.m_hasVkBindlessDescriptorSet);
}
  472. } // end namespace anki