RenderGraph.cpp

  1. // Copyright (C) 2009-present, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/Gr/RenderGraph.h>
  6. #include <AnKi/Gr/GrManager.h>
  7. #include <AnKi/Gr/Texture.h>
  8. #include <AnKi/Gr/Sampler.h>
  9. #include <AnKi/Gr/CommandBuffer.h>
  10. #include <AnKi/Util/Tracer.h>
  11. #include <AnKi/Util/BitSet.h>
  12. #include <AnKi/Util/File.h>
  13. #include <AnKi/Util/StringList.h>
  14. #include <AnKi/Util/HighRezTimer.h>
  15. #include <AnKi/Core/Common.h>
  16. namespace anki {
  17. #define ANKI_DBG_RENDER_GRAPH 0
  18. static inline U32 getTextureSurfOrVolCount(const TextureInternalPtr& tex)
  19. {
  20. return tex->getMipmapCount() * tex->getLayerCount() * (textureTypeIsCube(tex->getTextureType()) ? 6 : 1);
  21. }
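// Illustrative note (not part of the original source): one usage slot is tracked per surface or volume.
// A sketch of the arithmetic above, assuming a cube texture with 4 mips and 2 layers:
//   mips * layers * faces = 4 * 2 * 6 = 48
// i.e. 48 independently tracked subresources, so a barrier can target a single face of a single mip/layer.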
  22. /// Contains some extra things for render targets.
  23. class RenderGraph::RT
  24. {
  25. public:
  26. DynamicArray<TextureUsageBit, MemoryPoolPtrWrapper<StackMemoryPool>> m_surfOrVolUsages;
  27. DynamicArray<U16, MemoryPoolPtrWrapper<StackMemoryPool>> m_lastBatchThatTransitionedIt;
  28. TextureInternalPtr m_texture; ///< Hold a reference.
  29. Bool m_imported;
  30. RT(StackMemoryPool* pool)
  31. : m_surfOrVolUsages(pool)
  32. , m_lastBatchThatTransitionedIt(pool)
  33. {
  34. }
  35. };
  36. /// Same as RT but for buffers.
  37. class RenderGraph::BufferRange
  38. {
  39. public:
  40. BufferUsageBit m_usage;
  41. BufferInternalPtr m_buffer; ///< Hold a reference.
  42. PtrSize m_offset;
  43. PtrSize m_range;
  44. };
  45. class RenderGraph::AS
  46. {
  47. public:
  48. AccelerationStructureUsageBit m_usage;
  49. AccelerationStructurePtr m_as; ///< Hold a reference.
  50. };
  51. /// Pipeline barrier.
  52. class RenderGraph::TextureBarrier
  53. {
  54. public:
  55. U32 m_idx;
  56. TextureUsageBit m_usageBefore;
  57. TextureUsageBit m_usageAfter;
  58. TextureSubresourceDesc m_subresource;
  59. TextureBarrier(U32 rtIdx, TextureUsageBit usageBefore, TextureUsageBit usageAfter, const TextureSubresourceDesc& sub)
  60. : m_idx(rtIdx)
  61. , m_usageBefore(usageBefore)
  62. , m_usageAfter(usageAfter)
  63. , m_subresource(sub)
  64. {
  65. }
  66. };
  67. /// Pipeline barrier.
  68. class RenderGraph::BufferBarrier
  69. {
  70. public:
  71. U32 m_idx;
  72. BufferUsageBit m_usageBefore;
  73. BufferUsageBit m_usageAfter;
  74. BufferBarrier(U32 buffIdx, BufferUsageBit usageBefore, BufferUsageBit usageAfter)
  75. : m_idx(buffIdx)
  76. , m_usageBefore(usageBefore)
  77. , m_usageAfter(usageAfter)
  78. {
  79. }
  80. };
  81. /// Pipeline barrier.
  82. class RenderGraph::ASBarrier
  83. {
  84. public:
  85. U32 m_idx;
  86. AccelerationStructureUsageBit m_usageBefore;
  87. AccelerationStructureUsageBit m_usageAfter;
  88. ASBarrier(U32 asIdx, AccelerationStructureUsageBit usageBefore, AccelerationStructureUsageBit usageAfter)
  89. : m_idx(asIdx)
  90. , m_usageBefore(usageBefore)
  91. , m_usageAfter(usageAfter)
  92. {
  93. }
  94. };
  95. /// Contains some extra things the RenderPassBase cannot hold.
  96. class RenderGraph::Pass
  97. {
  98. public:
  99. // WARNING!!!!!: Whatever you put here needs manual destruction in RenderGraph::reset()
  100. DynamicArray<U32, MemoryPoolPtrWrapper<StackMemoryPool>> m_dependsOn;
  101. DynamicArray<RenderPassDependency::TextureInfo, MemoryPoolPtrWrapper<StackMemoryPool>> m_consumedTextures;
  102. Function<void(RenderPassWorkContext&), MemoryPoolPtrWrapper<StackMemoryPool>> m_callback;
  103. class
  104. {
  105. public:
  106. Array<RenderTarget, kMaxColorRenderTargets> m_colorRts;
  107. RenderTarget m_dsRt;
  108. TextureView m_vrsRt;
  109. U8 m_colorRtCount = 0;
  110. U8 m_vrsTexelSizeX = 0;
  111. U8 m_vrsTexelSizeY = 0;
  112. Bool m_hasRenderpass = false;
  113. Array<TextureInternalPtr, kMaxColorRenderTargets + 2> m_refs;
  114. } m_beginRenderpassInfo;
  115. BaseString<MemoryPoolPtrWrapper<StackMemoryPool>> m_name;
  116. U32 m_batchIdx ANKI_DEBUG_CODE(= kMaxU32);
  117. Bool m_writesToSwapchain = false;
  118. Pass(StackMemoryPool* pool)
  119. : m_dependsOn(pool)
  120. , m_consumedTextures(pool)
  121. , m_name(pool)
  122. {
  123. }
  124. };
  125. /// A batch of render passes. These passes can run in parallel.
  126. /// @warning It's POD. Destructor won't be called.
  127. class RenderGraph::Batch
  128. {
  129. public:
  130. DynamicArray<U32, MemoryPoolPtrWrapper<StackMemoryPool>> m_passIndices;
  131. DynamicArray<TextureBarrier, MemoryPoolPtrWrapper<StackMemoryPool>> m_textureBarriersBefore;
  132. DynamicArray<BufferBarrier, MemoryPoolPtrWrapper<StackMemoryPool>> m_bufferBarriersBefore;
  133. DynamicArray<ASBarrier, MemoryPoolPtrWrapper<StackMemoryPool>> m_asBarriersBefore;
  134. Batch(StackMemoryPool* pool)
  135. : m_passIndices(pool)
  136. , m_textureBarriersBefore(pool)
  137. , m_bufferBarriersBefore(pool)
  138. , m_asBarriersBefore(pool)
  139. {
  140. }
  141. Batch(Batch&& b)
  142. {
  143. *this = std::move(b);
  144. }
  145. Batch& operator=(Batch&& b)
  146. {
  147. m_passIndices = std::move(b.m_passIndices);
  148. m_textureBarriersBefore = std::move(b.m_textureBarriersBefore);
  149. m_bufferBarriersBefore = std::move(b.m_bufferBarriersBefore);
  150. m_asBarriersBefore = std::move(b.m_asBarriersBefore);
  151. return *this;
  152. }
  153. };
  154. /// The RenderGraph build context.
  155. class RenderGraph::BakeContext
  156. {
  157. public:
  158. DynamicArray<Pass, MemoryPoolPtrWrapper<StackMemoryPool>> m_passes;
  159. BitSet<kMaxRenderGraphPasses, U64> m_passIsInBatch{false};
  160. DynamicArray<Batch, MemoryPoolPtrWrapper<StackMemoryPool>> m_batches;
  161. DynamicArray<RT, MemoryPoolPtrWrapper<StackMemoryPool>> m_rts;
  162. DynamicArray<BufferRange, MemoryPoolPtrWrapper<StackMemoryPool>> m_buffers;
  163. DynamicArray<AS, MemoryPoolPtrWrapper<StackMemoryPool>> m_as;
  164. Bool m_gatherStatistics = false;
  165. BakeContext(StackMemoryPool* pool)
  166. : m_passes(pool)
  167. , m_batches(pool)
  168. , m_rts(pool)
  169. , m_buffers(pool)
  170. , m_as(pool)
  171. {
  172. }
  173. };
  174. RenderGraph::RenderGraph(CString name)
  175. : GrObject(kClassType, name)
  176. {
  177. }
  178. RenderGraph::~RenderGraph()
  179. {
  180. ANKI_ASSERT(m_ctx == nullptr);
  181. }
  182. RenderGraph* RenderGraph::newInstance()
  183. {
  184. return anki::newInstance<RenderGraph>(GrMemoryPool::getSingleton(), "N/A");
  185. }
  186. void RenderGraph::reset()
  187. {
  188. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphReset);
  189. if(!m_ctx)
  190. {
  191. return;
  192. }
  193. if((m_version % kPeriodicCleanupEvery) == 0)
  194. {
  195. // Do cleanup
  196. periodicCleanup();
  197. }
  198. // Extract the final usage of the imported RTs and clean all RTs
  199. for(RT& rt : m_ctx->m_rts)
  200. {
  201. if(rt.m_imported)
  202. {
  203. const U32 surfOrVolumeCount = getTextureSurfOrVolCount(rt.m_texture);
  204. // Create a new hash because our hash map dislikes concurrent keys.
  205. const U64 uuid = rt.m_texture->getUuid();
  206. const U64 hash = computeHash(&uuid, sizeof(uuid));
  207. auto it = m_importedRenderTargets.find(hash);
  208. if(it != m_importedRenderTargets.getEnd())
  209. {
  210. // Found
  211. ANKI_ASSERT(it->m_surfOrVolLastUsages.getSize() == surfOrVolumeCount);
  212. ANKI_ASSERT(rt.m_surfOrVolUsages.getSize() == surfOrVolumeCount);
  213. }
  214. else
  215. {
  216. // Not found, create
  217. it = m_importedRenderTargets.emplace(hash);
  218. it->m_surfOrVolLastUsages.resize(surfOrVolumeCount);
  219. }
  220. // Update the usage
  221. for(U32 surfOrVolIdx = 0; surfOrVolIdx < surfOrVolumeCount; ++surfOrVolIdx)
  222. {
  223. it->m_surfOrVolLastUsages[surfOrVolIdx] = rt.m_surfOrVolUsages[surfOrVolIdx];
  224. }
  225. }
  226. rt.m_texture.reset(nullptr);
  227. }
  228. for(BufferRange& buff : m_ctx->m_buffers)
  229. {
  230. buff.m_buffer.reset(nullptr);
  231. }
  232. for(AS& as : m_ctx->m_as)
  233. {
  234. as.m_as.reset(nullptr);
  235. }
  236. for(auto& it : m_renderTargetCache)
  237. {
  238. it.m_texturesInUse = 0;
  239. }
  240. for(Pass& p : m_ctx->m_passes)
  241. {
  242. p.m_beginRenderpassInfo.m_refs.fill(TextureInternalPtr(nullptr));
  243. p.m_callback.destroy();
  244. p.m_name.destroy();
  245. }
  246. m_ctx = nullptr;
  247. ++m_version;
  248. }
  249. TextureInternalPtr RenderGraph::getOrCreateRenderTarget(const TextureInitInfo& initInf, U64 hash)
  250. {
  251. ANKI_ASSERT(hash);
  252. // Find a cache entry
  253. RenderTargetCacheEntry* entry = nullptr;
  254. auto it = m_renderTargetCache.find(hash);
  255. if(it == m_renderTargetCache.getEnd()) [[unlikely]]
  256. {
  257. // Didn't find the entry, create a new one
  258. auto it2 = m_renderTargetCache.emplace(hash);
  259. entry = &(*it2);
  260. }
  261. else
  262. {
  263. entry = &(*it);
  264. }
  265. ANKI_ASSERT(entry);
  266. // Create or pop one tex from the cache
  267. TextureInternalPtr tex;
  268. const Bool createNewTex = entry->m_textures.getSize() == entry->m_texturesInUse;
  269. if(!createNewTex)
  270. {
  271. // Pop
  272. tex = entry->m_textures[entry->m_texturesInUse++];
  273. }
  274. else
  275. {
  276. // Create it
  277. tex = GrManager::getSingleton().newTexture(initInf);
  278. ANKI_ASSERT(entry->m_texturesInUse == entry->m_textures.getSize());
  279. entry->m_textures.resize(entry->m_textures.getSize() + 1);
  280. entry->m_textures[entry->m_textures.getSize() - 1] = tex;
  281. ++entry->m_texturesInUse;
  282. }
  283. return tex;
  284. }
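// Usage sketch of the cache above (an assumption about the typical call pattern, not taken from this file):
// two non-imported render targets requested in the same frame with the same TextureInitInfo hash get two
// distinct textures, because m_texturesInUse catches up with the cached count and a new texture is created.
// On the next frame, after reset() zeroes m_texturesInUse, the same hashes pop the cached textures instead
// of allocating. The hash itself is the TextureInitInfo hash combined with the usage derived from the
// pass dependencies (see newContext()).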
  285. Bool RenderGraph::passADependsOnB(const RenderPassBase& a, const RenderPassBase& b)
  286. {
  287. // Render targets
  288. {
  289. // Compute the 3 types of dependencies
  290. const BitSet<kMaxRenderGraphRenderTargets, U64> aReadBWrite = a.m_readRtMask & b.m_writeRtMask;
  291. const BitSet<kMaxRenderGraphRenderTargets, U64> aWriteBRead = a.m_writeRtMask & b.m_readRtMask;
  292. const BitSet<kMaxRenderGraphRenderTargets, U64> aWriteBWrite = a.m_writeRtMask & b.m_writeRtMask;
  293. const BitSet<kMaxRenderGraphRenderTargets, U64> fullDep = aReadBWrite | aWriteBRead | aWriteBWrite;
  294. if(fullDep.getAnySet())
  295. {
  296. // There might be an overlap
  297. for(const RenderPassDependency& aDep : a.m_rtDeps)
  298. {
  299. if(!fullDep.get(aDep.m_texture.m_handle.m_idx))
  300. {
  301. continue;
  302. }
  303. for(const RenderPassDependency& bDep : b.m_rtDeps)
  304. {
  305. if(aDep.m_texture.m_handle != bDep.m_texture.m_handle)
  306. {
  307. continue;
  308. }
  309. if(!((aDep.m_texture.m_usage | bDep.m_texture.m_usage) & TextureUsageBit::kAllWrite))
  310. {
  311. // Don't care about read to read deps
  312. continue;
  313. }
  314. if(aDep.m_texture.m_subresource.overlapsWith(bDep.m_texture.m_subresource))
  315. {
  316. return true;
  317. }
  318. }
  319. }
  320. }
  321. }
  322. // Buffers
  323. if(a.m_readBuffMask || a.m_writeBuffMask)
  324. {
  325. const BitSet<kMaxRenderGraphBuffers, U64> aReadBWrite = a.m_readBuffMask & b.m_writeBuffMask;
  326. const BitSet<kMaxRenderGraphBuffers, U64> aWriteBRead = a.m_writeBuffMask & b.m_readBuffMask;
  327. const BitSet<kMaxRenderGraphBuffers, U64> aWriteBWrite = a.m_writeBuffMask & b.m_writeBuffMask;
  328. const BitSet<kMaxRenderGraphBuffers, U64> fullDep = aReadBWrite | aWriteBRead | aWriteBWrite;
  329. if(fullDep.getAnySet())
  330. {
  331. // There might be an overlap
  332. for(const RenderPassDependency& aDep : a.m_buffDeps)
  333. {
  334. if(!fullDep.get(aDep.m_buffer.m_handle.m_idx))
  335. {
  336. continue;
  337. }
  338. for(const RenderPassDependency& bDep : b.m_buffDeps)
  339. {
  340. if(aDep.m_buffer.m_handle != bDep.m_buffer.m_handle)
  341. {
  342. continue;
  343. }
  344. if(!((aDep.m_buffer.m_usage | bDep.m_buffer.m_usage) & BufferUsageBit::kAllWrite))
  345. {
  346. // Don't care about read to read deps
  347. continue;
  348. }
  349. // TODO: Take into account the ranges
  350. return true;
  351. }
  352. }
  353. }
  354. }
  355. // AS
  356. if(a.m_readAsMask || a.m_writeAsMask)
  357. {
  358. const BitSet<kMaxRenderGraphAccelerationStructures, U32> aReadBWrite = a.m_readAsMask & b.m_writeAsMask;
  359. const BitSet<kMaxRenderGraphAccelerationStructures, U32> aWriteBRead = a.m_writeAsMask & b.m_readAsMask;
  360. const BitSet<kMaxRenderGraphAccelerationStructures, U32> aWriteBWrite = a.m_writeAsMask & b.m_writeAsMask;
  361. const BitSet<kMaxRenderGraphAccelerationStructures, U32> fullDep = aReadBWrite | aWriteBRead | aWriteBWrite;
  362. if(fullDep)
  363. {
  364. for(const RenderPassDependency& aDep : a.m_asDeps)
  365. {
  366. if(!fullDep.get(aDep.m_as.m_handle.m_idx))
  367. {
  368. continue;
  369. }
  370. for(const RenderPassDependency& bDep : b.m_asDeps)
  371. {
  372. if(aDep.m_as.m_handle != bDep.m_as.m_handle)
  373. {
  374. continue;
  375. }
  376. if(!((aDep.m_as.m_usage | bDep.m_as.m_usage) & AccelerationStructureUsageBit::kAllWrite))
  377. {
  378. // Don't care about read to read deps
  379. continue;
  380. }
  381. return true;
  382. }
  383. }
  384. }
  385. }
  386. return false;
  387. }
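// Illustrative examples of the dependency rules above (a sketch, not from the original source):
// - A writes RT0 mip 0, B samples RT0 mip 0  -> dependency (write/read on overlapping subresources).
// - A samples RT0 mip 0, B samples RT0 mip 0 -> no dependency (read-to-read is ignored).
// - A writes RT0 mip 0, B writes RT0 mip 1   -> no dependency (the subresources don't overlap).
// Buffer ranges are not compared yet (see the TODO above), so any write involving the same buffer handle
// counts as a dependency.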
  388. Bool RenderGraph::passHasUnmetDependencies(const BakeContext& ctx, U32 passIdx)
  389. {
  390. Bool depends = false;
  391. if(ctx.m_batches.getSize() > 0)
  392. {
  393. // Check if the deps of passIdx are all in a batch
  394. for(const U32 depPassIdx : ctx.m_passes[passIdx].m_dependsOn)
  395. {
  396. if(!ctx.m_passIsInBatch.get(depPassIdx))
  397. {
  398. // Dependency pass is not in a batch
  399. depends = true;
  400. break;
  401. }
  402. }
  403. }
  404. else
  405. {
  406. // First batch, check if passIdx depends on any pass
  407. depends = ctx.m_passes[passIdx].m_dependsOn.getSize() != 0;
  408. }
  409. return depends;
  410. }
  411. RenderGraph::BakeContext* RenderGraph::newContext(const RenderGraphBuilder& descr, StackMemoryPool& pool)
  412. {
  413. ANKI_TRACE_FUNCTION();
  414. // Allocate
  415. BakeContext* ctx = anki::newInstance<BakeContext>(pool, &pool);
  416. // Init the resources
  417. ctx->m_rts.resizeStorage(descr.m_renderTargets.getSize());
  418. for(U32 rtIdx = 0; rtIdx < descr.m_renderTargets.getSize(); ++rtIdx)
  419. {
  420. RT& outRt = *ctx->m_rts.emplaceBack(&pool);
  421. const RenderGraphBuilder::RT& inRt = descr.m_renderTargets[rtIdx];
  422. const Bool imported = inRt.m_importedTex.isCreated();
  423. if(imported)
  424. {
  425. // It's imported
  426. outRt.m_texture = inRt.m_importedTex;
  427. }
  428. else
  429. {
  430. // Need to create a new texture
  431. // Create a new TextureInitInfo with the derived usage
  432. TextureInitInfo initInf = inRt.m_initInfo;
  433. initInf.m_usage = inRt.m_usageDerivedByDeps;
  434. ANKI_ASSERT(initInf.m_usage != TextureUsageBit::kNone && "Probably not referenced by any pass");
  435. // Create the new hash
  436. const U64 hash = appendHash(&initInf.m_usage, sizeof(initInf.m_usage), inRt.m_hash);
  437. // Get or create the texture
  438. outRt.m_texture = getOrCreateRenderTarget(initInf, hash);
  439. }
  440. // Init the usage
  441. const U32 surfOrVolumeCount = getTextureSurfOrVolCount(outRt.m_texture);
  442. outRt.m_surfOrVolUsages.resize(surfOrVolumeCount, TextureUsageBit::kNone);
  443. if(imported && inRt.m_importedAndUndefinedUsage)
  444. {
  445. // Get the usage from previous frames
  446. // Create a new hash because our hash map dislikes concurrent keys.
  447. const U64 uuid = outRt.m_texture->getUuid();
  448. const U64 hash = computeHash(&uuid, sizeof(uuid));
  449. auto it = m_importedRenderTargets.find(hash);
  450. ANKI_ASSERT(it != m_importedRenderTargets.getEnd() && "Can't find the imported RT");
  451. ANKI_ASSERT(it->m_surfOrVolLastUsages.getSize() == surfOrVolumeCount);
  452. for(U32 surfOrVolIdx = 0; surfOrVolIdx < surfOrVolumeCount; ++surfOrVolIdx)
  453. {
  454. outRt.m_surfOrVolUsages[surfOrVolIdx] = it->m_surfOrVolLastUsages[surfOrVolIdx];
  455. }
  456. }
  457. else if(imported)
  458. {
  459. // Set the usage that was given by the user
  460. for(U32 surfOrVolIdx = 0; surfOrVolIdx < surfOrVolumeCount; ++surfOrVolIdx)
  461. {
  462. outRt.m_surfOrVolUsages[surfOrVolIdx] = inRt.m_importedLastKnownUsage;
  463. }
  464. }
  465. outRt.m_lastBatchThatTransitionedIt.resize(surfOrVolumeCount, kMaxU16);
  466. outRt.m_imported = imported;
  467. }
  468. // Buffers
  469. ctx->m_buffers.resize(descr.m_buffers.getSize());
  470. for(U32 buffIdx = 0; buffIdx < ctx->m_buffers.getSize(); ++buffIdx)
  471. {
  472. ctx->m_buffers[buffIdx].m_usage = descr.m_buffers[buffIdx].m_usage;
  473. ANKI_ASSERT(descr.m_buffers[buffIdx].m_importedBuff.isCreated());
  474. ctx->m_buffers[buffIdx].m_buffer = descr.m_buffers[buffIdx].m_importedBuff;
  475. ctx->m_buffers[buffIdx].m_offset = descr.m_buffers[buffIdx].m_offset;
  476. ctx->m_buffers[buffIdx].m_range = descr.m_buffers[buffIdx].m_range;
  477. }
  478. // AS
  479. ctx->m_as.resize(descr.m_as.getSize());
  480. for(U32 i = 0; i < descr.m_as.getSize(); ++i)
  481. {
  482. ctx->m_as[i].m_usage = descr.m_as[i].m_usage;
  483. ctx->m_as[i].m_as = descr.m_as[i].m_importedAs;
  484. ANKI_ASSERT(ctx->m_as[i].m_as.isCreated());
  485. }
  486. ctx->m_gatherStatistics = descr.m_gatherStatistics;
  487. return ctx;
  488. }
  489. void RenderGraph::initRenderPassesAndSetDeps(const RenderGraphBuilder& descr)
  490. {
  491. ANKI_TRACE_FUNCTION();
  492. BakeContext& ctx = *m_ctx;
  493. const U32 passCount = descr.m_passes.getSize();
  494. ANKI_ASSERT(passCount > 0);
  495. ctx.m_passes.resizeStorage(passCount);
  496. for(U32 passIdx = 0; passIdx < passCount; ++passIdx)
  497. {
  498. const RenderPassBase& inPass = *descr.m_passes[passIdx];
  499. Pass& outPass = *ctx.m_passes.emplaceBack(ctx.m_as.getMemoryPool().m_pool);
  500. outPass.m_callback = inPass.m_callback;
  501. outPass.m_name = inPass.m_name;
  502. outPass.m_writesToSwapchain = inPass.m_writesToSwapchain;
  503. // Create consumer info
  504. outPass.m_consumedTextures.resize(inPass.m_rtDeps.getSize());
  505. for(U32 depIdx = 0; depIdx < inPass.m_rtDeps.getSize(); ++depIdx)
  506. {
  507. const RenderPassDependency& inDep = inPass.m_rtDeps[depIdx];
  508. ANKI_ASSERT(inDep.m_type == RenderPassDependency::Type::kTexture);
  509. RenderPassDependency::TextureInfo& inf = outPass.m_consumedTextures[depIdx];
  510. ANKI_ASSERT(sizeof(inf) == sizeof(inDep.m_texture));
  511. memcpy(&inf, &inDep.m_texture, sizeof(inf));
  512. }
  513. // Set dependencies by checking all previous subpasses.
  514. U32 prevPassIdx = passIdx;
  515. while(prevPassIdx--)
  516. {
  517. const RenderPassBase& prevPass = *descr.m_passes[prevPassIdx];
  518. if(passADependsOnB(inPass, prevPass))
  519. {
  520. outPass.m_dependsOn.emplaceBack(prevPassIdx);
  521. }
  522. }
  523. }
  524. }
  525. void RenderGraph::initBatches()
  526. {
  527. ANKI_TRACE_FUNCTION();
  528. ANKI_ASSERT(m_ctx);
  529. U passesAssignedToBatchCount = 0;
  530. const U passCount = m_ctx->m_passes.getSize();
  531. ANKI_ASSERT(passCount > 0);
  532. while(passesAssignedToBatchCount < passCount)
  533. {
  534. Batch batch(m_ctx->m_as.getMemoryPool().m_pool);
  535. for(U32 i = 0; i < passCount; ++i)
  536. {
  537. if(!m_ctx->m_passIsInBatch.get(i) && !passHasUnmetDependencies(*m_ctx, i))
  538. {
  539. // Add to the batch
  540. ++passesAssignedToBatchCount;
  541. batch.m_passIndices.emplaceBack(i);
  542. }
  543. }
  544. // Mark batch's passes done
  545. for(U32 passIdx : batch.m_passIndices)
  546. {
  547. m_ctx->m_passIsInBatch.set(passIdx);
  548. m_ctx->m_passes[passIdx].m_batchIdx = m_ctx->m_batches.getSize();
  549. }
  550. m_ctx->m_batches.emplaceBack(std::move(batch));
  551. }
  552. }
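// Sketch of the batching loop above (illustrative, not from the original source): given passes P0, P1, P2
// where only P2 depends on P0,
//   iteration 1: P0 and P1 have no unmet dependencies        -> batch 0 = {P0, P1}
//   iteration 2: P2's dependency P0 is already in a batch    -> batch 1 = {P2}
// Every pass lands in the earliest batch whose predecessors are all batched already, so a single set of
// barriers placed before each batch covers all of its passes.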
  553. void RenderGraph::initGraphicsPasses(const RenderGraphBuilder& descr)
  554. {
  555. ANKI_TRACE_FUNCTION();
  556. BakeContext& ctx = *m_ctx;
  557. const U32 passCount = descr.m_passes.getSize();
  558. ANKI_ASSERT(passCount > 0);
  559. for(U32 passIdx = 0; passIdx < passCount; ++passIdx)
  560. {
  561. const RenderPassBase& baseInPass = *descr.m_passes[passIdx];
  562. Pass& outPass = ctx.m_passes[passIdx];
  563. // Fill the begin-renderpass info of the graphics passes
  564. if(baseInPass.m_type == RenderPassBase::Type::kGraphics)
  565. {
  566. const GraphicsRenderPass& inPass = static_cast<const GraphicsRenderPass&>(baseInPass);
  567. if(inPass.m_hasRenderpass)
  568. {
  569. outPass.m_beginRenderpassInfo.m_hasRenderpass = true;
  570. outPass.m_beginRenderpassInfo.m_colorRtCount = inPass.m_colorRtCount;
  571. // Init the usage bits
  572. for(U32 i = 0; i < inPass.m_colorRtCount; ++i)
  573. {
  574. const GraphicsRenderPassTargetDesc& inAttachment = inPass.m_rts[i];
  575. RenderTarget& outAttachment = outPass.m_beginRenderpassInfo.m_colorRts[i];
  576. getCrntUsage(inAttachment.m_handle, outPass.m_batchIdx, inAttachment.m_subresource, outAttachment.m_usage);
  577. outAttachment.m_textureView = TextureView(m_ctx->m_rts[inAttachment.m_handle.m_idx].m_texture.get(), inAttachment.m_subresource);
  578. outPass.m_beginRenderpassInfo.m_refs[i] = m_ctx->m_rts[inAttachment.m_handle.m_idx].m_texture;
  579. outAttachment.m_loadOperation = inAttachment.m_loadOperation;
  580. outAttachment.m_storeOperation = inAttachment.m_storeOperation;
  581. outAttachment.m_clearValue = inAttachment.m_clearValue;
  582. }
  583. if(!!inPass.m_rts[kMaxColorRenderTargets].m_subresource.m_depthStencilAspect)
  584. {
  585. const GraphicsRenderPassTargetDesc& inAttachment = inPass.m_rts[kMaxColorRenderTargets];
  586. RenderTarget& outAttachment = outPass.m_beginRenderpassInfo.m_dsRt;
  587. getCrntUsage(inAttachment.m_handle, outPass.m_batchIdx, inAttachment.m_subresource, outAttachment.m_usage);
  588. outAttachment.m_textureView = TextureView(m_ctx->m_rts[inAttachment.m_handle.m_idx].m_texture.get(), inAttachment.m_subresource);
  589. outPass.m_beginRenderpassInfo.m_refs[kMaxColorRenderTargets] = m_ctx->m_rts[inAttachment.m_handle.m_idx].m_texture;
  590. outAttachment.m_loadOperation = inAttachment.m_loadOperation;
  591. outAttachment.m_storeOperation = inAttachment.m_storeOperation;
  592. outAttachment.m_stencilLoadOperation = inAttachment.m_stencilLoadOperation;
  593. outAttachment.m_stencilStoreOperation = inAttachment.m_stencilStoreOperation;
  594. outAttachment.m_clearValue = inAttachment.m_clearValue;
  595. }
  596. if(inPass.m_vrsRtTexelSizeX > 0)
  597. {
  598. const GraphicsRenderPassTargetDesc& inAttachment = inPass.m_rts[kMaxColorRenderTargets + 1];
  599. outPass.m_beginRenderpassInfo.m_vrsRt =
  600. TextureView(m_ctx->m_rts[inAttachment.m_handle.m_idx].m_texture.get(), inAttachment.m_subresource);
  601. outPass.m_beginRenderpassInfo.m_refs[kMaxColorRenderTargets + 1] = m_ctx->m_rts[inAttachment.m_handle.m_idx].m_texture;
  602. outPass.m_beginRenderpassInfo.m_vrsTexelSizeX = inPass.m_vrsRtTexelSizeX;
  603. outPass.m_beginRenderpassInfo.m_vrsTexelSizeY = inPass.m_vrsRtTexelSizeY;
  604. }
  605. }
  606. }
  607. }
  608. }
  609. template<typename TFunc>
  610. void RenderGraph::iterateSurfsOrVolumes(const Texture& tex, const TextureSubresourceDesc& subresource, TFunc func)
  611. {
  612. subresource.validate(tex);
  613. const U32 faceCount = textureTypeIsCube(tex.getTextureType()) ? 6 : 1;
  614. if(subresource.m_allSurfacesOrVolumes)
  615. {
  616. for(U32 mip = 0; mip < tex.getMipmapCount(); ++mip)
  617. {
  618. for(U32 layer = 0; layer < tex.getLayerCount(); ++layer)
  619. {
  620. for(U32 face = 0; face < faceCount; ++face)
  621. {
  622. // Compute surf or vol idx
  623. const U32 idx = (faceCount * tex.getLayerCount()) * mip + faceCount * layer + face;
  624. if(!func(idx, TextureSubresourceDesc::surface(mip, face, layer, subresource.m_depthStencilAspect)))
  625. {
  626. return;
  627. }
  628. }
  629. }
  630. }
  631. }
  632. else
  633. {
  634. const U32 idx = (faceCount * tex.getLayerCount()) * subresource.m_mipmap + faceCount * subresource.m_layer + subresource.m_face;
  635. func(idx, subresource);
  636. }
  637. }
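// Index arithmetic used above, with a worked example (illustrative; assumes a cube texture with 2 layers,
// so faceCount = 6):
//   idx = (faceCount * layerCount) * mip + faceCount * layer + face
//   mip = 1, layer = 0, face = 3  ->  (6 * 2) * 1 + 6 * 0 + 3 = 15
// This flattening matches getTextureSurfOrVolCount(), so the same index addresses RT::m_surfOrVolUsages
// and RT::m_lastBatchThatTransitionedIt.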
  638. void RenderGraph::setTextureBarrier(Batch& batch, const RenderPassDependency& dep)
  639. {
  640. ANKI_ASSERT(dep.m_type == RenderPassDependency::Type::kTexture);
  641. BakeContext& ctx = *m_ctx;
  642. const U32 batchIdx = U32(&batch - &ctx.m_batches[0]);
  643. const U32 rtIdx = dep.m_texture.m_handle.m_idx;
  644. const TextureUsageBit depUsage = dep.m_texture.m_usage;
  645. RT& rt = ctx.m_rts[rtIdx];
  646. iterateSurfsOrVolumes(*rt.m_texture, dep.m_texture.m_subresource, [&](U32 surfOrVolIdx, const TextureSubresourceDesc& subresource) {
  647. TextureUsageBit& crntUsage = rt.m_surfOrVolUsages[surfOrVolIdx];
  648. const Bool skipBarrier = crntUsage == depUsage && !(crntUsage & TextureUsageBit::kAllWrite);
  649. if(!skipBarrier)
  650. {
  651. // Check if we can merge barriers
  652. if(rt.m_lastBatchThatTransitionedIt[surfOrVolIdx] == batchIdx)
  653. {
  654. // Will merge the barriers
  655. crntUsage |= depUsage;
  656. [[maybe_unused]] Bool found = false;
  657. for(TextureBarrier& b : batch.m_textureBarriersBefore)
  658. {
  659. if(b.m_idx == rtIdx && b.m_subresource == subresource)
  660. {
  661. b.m_usageAfter |= depUsage;
  662. found = true;
  663. break;
  664. }
  665. }
  666. ANKI_ASSERT(found);
  667. }
  668. else
  669. {
  670. // Create a new barrier for this surface
  671. batch.m_textureBarriersBefore.emplaceBack(rtIdx, crntUsage, depUsage, subresource);
  672. crntUsage = depUsage;
  673. rt.m_lastBatchThatTransitionedIt[surfOrVolIdx] = U16(batchIdx);
  674. }
  675. }
  676. return true;
  677. });
  678. }
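// Barrier-merging sketch (illustrative): if two passes of the same batch touch the same subresource, the
// second dependency does not emit a new TextureBarrier; it ORs its usage into the barrier the first
// dependency created (m_lastBatchThatTransitionedIt remembers which batch performed the transition).
// When a later batch touches that subresource it gets a fresh barrier whose usageBefore is the merged usage.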
  679. void RenderGraph::setBatchBarriers(const RenderGraphBuilder& descr)
  680. {
  681. ANKI_TRACE_FUNCTION();
  682. BakeContext& ctx = *m_ctx;
  683. // For all batches
  684. for(Batch& batch : ctx.m_batches)
  685. {
  686. BitSet<kMaxRenderGraphBuffers, U64> buffHasBarrierMask(false);
  687. BitSet<kMaxRenderGraphAccelerationStructures, U32> asHasBarrierMask(false);
  688. // For all passes of that batch
  689. for(U32 passIdx : batch.m_passIndices)
  690. {
  691. const RenderPassBase& pass = *descr.m_passes[passIdx];
  692. // Do textures
  693. for(const RenderPassDependency& dep : pass.m_rtDeps)
  694. {
  695. setTextureBarrier(batch, dep);
  696. }
  697. // Do buffers
  698. for(const RenderPassDependency& dep : pass.m_buffDeps)
  699. {
  700. const U32 buffIdx = dep.m_buffer.m_handle.m_idx;
  701. const BufferUsageBit depUsage = dep.m_buffer.m_usage;
  702. BufferUsageBit& crntUsage = ctx.m_buffers[buffIdx].m_usage;
  703. const Bool skipBarrier = crntUsage == depUsage && !(crntUsage & BufferUsageBit::kAllWrite);
  704. if(skipBarrier)
  705. {
  706. continue;
  707. }
  708. const Bool buffHasBarrier = buffHasBarrierMask.get(buffIdx);
  709. if(!buffHasBarrier)
  710. {
  711. // Buff hasn't had a barrier in this batch, add a new barrier
  712. batch.m_bufferBarriersBefore.emplaceBack(buffIdx, crntUsage, depUsage);
  713. crntUsage = depUsage;
  714. buffHasBarrierMask.set(buffIdx);
  715. }
  716. else
  717. {
  718. // Buff already in a barrier, merge the 2 barriers
  719. BufferBarrier* barrierToMergeTo = nullptr;
  720. for(BufferBarrier& b : batch.m_bufferBarriersBefore)
  721. {
  722. if(b.m_idx == buffIdx)
  723. {
  724. barrierToMergeTo = &b;
  725. break;
  726. }
  727. }
  728. ANKI_ASSERT(barrierToMergeTo);
  729. ANKI_ASSERT(!!barrierToMergeTo->m_usageAfter);
  730. barrierToMergeTo->m_usageAfter |= depUsage;
  731. crntUsage = barrierToMergeTo->m_usageAfter;
  732. }
  733. }
  734. // Do AS
  735. for(const RenderPassDependency& dep : pass.m_asDeps)
  736. {
  737. const U32 asIdx = dep.m_as.m_handle.m_idx;
  738. const AccelerationStructureUsageBit depUsage = dep.m_as.m_usage;
  739. AccelerationStructureUsageBit& crntUsage = ctx.m_as[asIdx].m_usage;
  740. const Bool skipBarrier = crntUsage == depUsage && !(crntUsage & AccelerationStructureUsageBit::kAllWrite);
  741. if(skipBarrier)
  742. {
  743. continue;
  744. }
  745. const Bool asHasBarrierInThisBatch = asHasBarrierMask.get(asIdx);
  746. if(!asHasBarrierInThisBatch)
  747. {
  748. // AS doesn't have a barrier in this batch, create a new one
  749. batch.m_asBarriersBefore.emplaceBack(asIdx, crntUsage, depUsage);
  750. crntUsage = depUsage;
  751. asHasBarrierMask.set(asIdx);
  752. }
  753. else
  754. {
  755. // AS already has a barrier, merge the 2 barriers
  756. ASBarrier* barrierToMergeTo = nullptr;
  757. for(ASBarrier& other : batch.m_asBarriersBefore)
  758. {
  759. if(other.m_idx == asIdx)
  760. {
  761. barrierToMergeTo = &other;
  762. break;
  763. }
  764. }
  765. ANKI_ASSERT(barrierToMergeTo);
  766. ANKI_ASSERT(!!barrierToMergeTo->m_usageAfter);
  767. barrierToMergeTo->m_usageAfter |= depUsage;
  768. crntUsage = barrierToMergeTo->m_usageAfter;
  769. }
  770. }
  771. } // For all passes
  772. ANKI_ASSERT(batch.m_bufferBarriersBefore.getSize() || batch.m_textureBarriersBefore.getSize() || batch.m_asBarriersBefore.getSize());
  773. #if ANKI_DBG_RENDER_GRAPH
  774. // Sort the barriers to ease the dumped graph
  775. std::sort(batch.m_textureBarriersBefore.getBegin(), batch.m_textureBarriersBefore.getEnd(),
  776. [&](const TextureBarrier& a, const TextureBarrier& b) {
  777. const U aidx = a.m_idx;
  778. const U bidx = b.m_idx;
  779. if(aidx == bidx)
  780. {
  781. if(a.m_surface.m_level != b.m_surface.m_level)
  782. {
  783. return a.m_surface.m_level < b.m_surface.m_level;
  784. }
  785. else if(a.m_surface.m_face != b.m_surface.m_face)
  786. {
  787. return a.m_surface.m_face < b.m_surface.m_face;
  788. }
  789. else if(a.m_surface.m_layer != b.m_surface.m_layer)
  790. {
  791. return a.m_surface.m_layer < b.m_surface.m_layer;
  792. }
  793. else
  794. {
  795. return false;
  796. }
  797. }
  798. else
  799. {
  800. return aidx < bidx;
  801. }
  802. });
  803. std::sort(batch.m_bufferBarriersBefore.getBegin(), batch.m_bufferBarriersBefore.getEnd(),
  804. [&](const BufferBarrier& a, const BufferBarrier& b) {
  805. return a.m_idx < b.m_idx;
  806. });
  807. std::sort(batch.m_asBarriersBefore.getBegin(), batch.m_asBarriersBefore.getEnd(), [&](const ASBarrier& a, const ASBarrier& b) {
  808. return a.m_idx < b.m_idx;
  809. });
  810. #endif
  811. } // For all batches
  812. }
  813. void RenderGraph::minimizeSubchannelSwitches()
  814. {
  815. BakeContext& ctx = *m_ctx;
  816. Bool computeFirst = true;
  817. for(Batch& batch : ctx.m_batches)
  818. {
  819. U32 graphicsPasses = 0;
  820. U32 computePasses = 0;
  821. std::sort(batch.m_passIndices.getBegin(), batch.m_passIndices.getEnd(), [&](U32 a, U32 b) {
  822. const Bool aIsCompute = !ctx.m_passes[a].m_beginRenderpassInfo.m_hasRenderpass;
  823. const Bool bIsCompute = !ctx.m_passes[b].m_beginRenderpassInfo.m_hasRenderpass;
  824. graphicsPasses += !aIsCompute + !bIsCompute;
  825. computePasses += aIsCompute + bIsCompute;
  826. if(computeFirst)
  827. {
  828. return !aIsCompute < !bIsCompute;
  829. }
  830. else
  831. {
  832. return aIsCompute < bIsCompute;
  833. }
  834. });
  835. if(graphicsPasses && !computePasses)
  836. {
  837. // Only graphics passes in this batch, start next batch from graphics
  838. computeFirst = false;
  839. }
  840. else if(computePasses && !graphicsPasses)
  841. {
  842. // Only compute passes in this batch, start next batch from compute
  843. computeFirst = true;
  844. }
  845. else
  846. {
  847. // Mixed batch: if it ends in compute, start the next batch in compute; if it ends in graphics, start the next one in graphics
  848. computeFirst = !computeFirst;
  849. }
  850. }
  851. }
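// Ordering sketch (illustrative): with computeFirst == true a mixed batch is sorted as
// [compute..., graphics...], and the next batch then starts with graphics, so the queue does not switch
// between compute and graphics work at the batch boundary. The flag flips or sticks per batch purely to
// reduce those switches; compileNewGraph() only enables this path on NVidia GPUs.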
  852. void RenderGraph::sortBatchPasses()
  853. {
  854. BakeContext& ctx = *m_ctx;
  855. for(Batch& batch : ctx.m_batches)
  856. {
  857. std::sort(batch.m_passIndices.getBegin(), batch.m_passIndices.getEnd(), [&](U32 a, U32 b) {
  858. const Bool aIsCompute = !ctx.m_passes[a].m_beginRenderpassInfo.m_hasRenderpass;
  859. const Bool bIsCompute = !ctx.m_passes[b].m_beginRenderpassInfo.m_hasRenderpass;
  860. return aIsCompute < bIsCompute;
  861. });
  862. }
  863. }
  864. void RenderGraph::compileNewGraph(const RenderGraphBuilder& descr, StackMemoryPool& pool)
  865. {
  866. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphCompile);
  867. // Init the context
  868. BakeContext& ctx = *newContext(descr, pool);
  869. m_ctx = &ctx;
  870. // Init the passes and find the dependencies between passes
  871. initRenderPassesAndSetDeps(descr);
  872. // Walk the graph and create pass batches
  873. initBatches();
  874. // Now that we know which batch every pass belongs to, init the graphics passes
  875. initGraphicsPasses(descr);
  876. // Create barriers between batches
  877. setBatchBarriers(descr);
  878. // Sort passes in batches
  879. if(GrManager::getSingleton().getDeviceCapabilities().m_gpuVendor == GpuVendor::kNvidia)
  880. {
  881. minimizeSubchannelSwitches();
  882. }
  883. else
  884. {
  885. sortBatchPasses();
  886. }
  887. #if ANKI_DBG_RENDER_GRAPH
  888. if(dumpDependencyDotFile(descr, ctx, "./"))
  889. {
  890. ANKI_LOGF("Won't recover on debug code");
  891. }
  892. #endif
  893. }
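// Typical per-frame flow around compileNewGraph() (a sketch; how the RenderGraphBuilder is filled is an
// assumption, only the RenderGraph calls below appear in this file):
//
//   RenderGraphBuilder descr(...);                   // describe render targets, buffers, AS and passes
//   rgraph->compileNewGraph(descr, frameStackPool);  // bake passes, batches and barriers
//   rgraph->recordAndSubmitCommandBuffers(&fence);   // record on the job threads and submit
//   rgraph->getStatistics(stats);                    // optional, if descr.m_gatherStatistics was set
//   rgraph->reset();                                 // drop references before building the next frame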
  894. Texture& RenderGraph::getTexture(RenderTargetHandle handle) const
  895. {
  896. ANKI_ASSERT(m_ctx->m_rts[handle.m_idx].m_texture.isCreated());
  897. return *m_ctx->m_rts[handle.m_idx].m_texture;
  898. }
  899. void RenderGraph::getCachedBuffer(BufferHandle handle, Buffer*& buff, PtrSize& offset, PtrSize& range) const
  900. {
  901. const BufferRange& record = m_ctx->m_buffers[handle.m_idx];
  902. buff = record.m_buffer.get();
  903. offset = record.m_offset;
  904. range = record.m_range;
  905. }
  906. AccelerationStructure* RenderGraph::getAs(AccelerationStructureHandle handle) const
  907. {
  908. ANKI_ASSERT(m_ctx->m_as[handle.m_idx].m_as.isCreated());
  909. return m_ctx->m_as[handle.m_idx].m_as.get();
  910. }
  911. void RenderGraph::recordAndSubmitCommandBuffers(FencePtr* optionalFence)
  912. {
  913. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphRecordAndSubmit);
  914. ANKI_ASSERT(m_ctx);
  915. const U32 batchGroupCount = min(CoreThreadJobManager::getSingleton().getThreadCount(), m_ctx->m_batches.getSize());
  916. StackMemoryPool* pool = m_ctx->m_rts.getMemoryPool().m_pool;
  917. DynamicArray<CommandBufferPtr, MemoryPoolPtrWrapper<StackMemoryPool>> cmdbs(pool);
  918. cmdbs.resize(batchGroupCount);
  919. SpinLock cmdbsMtx;
  920. Atomic<U32> firstGroupThatWroteToSwapchain(kMaxU32);
  921. for(U32 group = 0; group < batchGroupCount; ++group)
  922. {
  923. U32 start, end;
  924. splitThreadedProblem(group, batchGroupCount, m_ctx->m_batches.getSize(), start, end);
  925. if(start == end)
  926. {
  927. continue;
  928. }
  929. CoreThreadJobManager::getSingleton().dispatchTask(
  930. [this, start, end, pool, &cmdbs, &cmdbsMtx, group, batchGroupCount, &firstGroupThatWroteToSwapchain]([[maybe_unused]] U32 tid) {
  931. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphTask);
  932. Array<Char, 32> name;
  933. snprintf(name.getBegin(), name.getSize(), "RenderGraph cmdb %u-%u", start, end);
  934. CommandBufferInitInfo cmdbInit(name.getBegin());
  935. cmdbInit.m_flags = CommandBufferFlag::kGeneralWork;
  936. CommandBufferPtr cmdb = GrManager::getSingleton().newCommandBuffer(cmdbInit);
  937. // Write timestamp
  938. const Bool setPreQuery = m_ctx->m_gatherStatistics && group == 0;
  939. const Bool setPostQuery = m_ctx->m_gatherStatistics && group == batchGroupCount - 1;
  940. TimestampQueryInternalPtr preQuery, postQuery;
  941. if(setPreQuery)
  942. {
  943. preQuery = GrManager::getSingleton().newTimestampQuery();
  944. cmdb->writeTimestamp(preQuery.get());
  945. }
  946. if(setPostQuery)
  947. {
  948. postQuery = GrManager::getSingleton().newTimestampQuery();
  949. }
  950. // Bookkeeping
  951. {
  952. LockGuard lock(cmdbsMtx);
  953. cmdbs[group] = cmdb;
  954. if(preQuery.isCreated())
  955. {
  956. m_statistics.m_timestamps[m_statistics.m_nextTimestamp][0] = preQuery;
  957. }
  958. if(postQuery.isCreated())
  959. {
  960. m_statistics.m_timestamps[m_statistics.m_nextTimestamp][1] = postQuery;
  961. m_statistics.m_cpuStartTimes[m_statistics.m_nextTimestamp] = HighRezTimer::getCurrentTime();
  962. }
  963. }
  964. RenderPassWorkContext ctx;
  965. ctx.m_rgraph = this;
  966. for(U32 i = start; i < end; ++i)
  967. {
  968. const Batch& batch = m_ctx->m_batches[i];
  969. // Set the barriers
  970. DynamicArray<TextureBarrierInfo, MemoryPoolPtrWrapper<StackMemoryPool>> texBarriers(pool);
  971. texBarriers.resizeStorage(batch.m_textureBarriersBefore.getSize());
  972. for(const TextureBarrier& barrier : batch.m_textureBarriersBefore)
  973. {
  974. const Texture& tex = *m_ctx->m_rts[barrier.m_idx].m_texture;
  975. TextureBarrierInfo& inf = *texBarriers.emplaceBack();
  976. inf.m_previousUsage = barrier.m_usageBefore;
  977. inf.m_nextUsage = barrier.m_usageAfter;
  978. inf.m_textureView = TextureView(&tex, barrier.m_subresource);
  979. }
  980. DynamicArray<BufferBarrierInfo, MemoryPoolPtrWrapper<StackMemoryPool>> buffBarriers(pool);
  981. buffBarriers.resizeStorage(batch.m_bufferBarriersBefore.getSize());
  982. for(const BufferBarrier& barrier : batch.m_bufferBarriersBefore)
  983. {
  984. BufferBarrierInfo& inf = *buffBarriers.emplaceBack();
  985. inf.m_previousUsage = barrier.m_usageBefore;
  986. inf.m_nextUsage = barrier.m_usageAfter;
  987. inf.m_bufferView = BufferView(m_ctx->m_buffers[barrier.m_idx].m_buffer.get(), m_ctx->m_buffers[barrier.m_idx].m_offset,
  988. m_ctx->m_buffers[barrier.m_idx].m_range);
  989. }
  990. // Sort them for the command buffer to merge as many as possible
  991. std::sort(buffBarriers.getBegin(), buffBarriers.getEnd(), [](const BufferBarrierInfo& a, const BufferBarrierInfo& b) {
  992. return a.m_bufferView.getBuffer().getUuid() < b.m_bufferView.getBuffer().getUuid();
  993. });
  994. DynamicArray<AccelerationStructureBarrierInfo, MemoryPoolPtrWrapper<StackMemoryPool>> asBarriers(pool);
  995. for(const ASBarrier& barrier : batch.m_asBarriersBefore)
  996. {
  997. AccelerationStructureBarrierInfo& inf = *asBarriers.emplaceBack();
  998. inf.m_previousUsage = barrier.m_usageBefore;
  999. inf.m_nextUsage = barrier.m_usageAfter;
  1000. inf.m_as = m_ctx->m_as[barrier.m_idx].m_as.get();
  1001. }
  1002. cmdb->pushDebugMarker("Barrier", Vec3(1.0f, 0.0f, 0.0f));
  1003. cmdb->setPipelineBarrier(texBarriers, buffBarriers, asBarriers);
  1004. cmdb->popDebugMarker();
  1005. ctx.m_commandBuffer = cmdb.get();
  1006. ctx.m_batchIdx = i;
  1007. // Call the passes
  1008. for(U32 passIdx : batch.m_passIndices)
  1009. {
  1010. Pass& pass = m_ctx->m_passes[passIdx];
  1011. if(pass.m_writesToSwapchain)
  1012. {
  1013. firstGroupThatWroteToSwapchain.min(group);
  1014. }
  1015. const Vec3 passColor = (pass.m_beginRenderpassInfo.m_hasRenderpass) ? Vec3(0.0f, 1.0f, 0.0f) : Vec3(1.0f, 1.0f, 0.0f);
  1016. cmdb->pushDebugMarker(pass.m_name, passColor);
  1017. if(pass.m_beginRenderpassInfo.m_hasRenderpass)
  1018. {
  1019. cmdb->beginRenderPass({pass.m_beginRenderpassInfo.m_colorRts.getBegin(), U32(pass.m_beginRenderpassInfo.m_colorRtCount)},
  1020. pass.m_beginRenderpassInfo.m_dsRt.m_textureView.isValid() ? &pass.m_beginRenderpassInfo.m_dsRt
  1021. : nullptr,
  1022. pass.m_beginRenderpassInfo.m_vrsRt, pass.m_beginRenderpassInfo.m_vrsTexelSizeX,
  1023. pass.m_beginRenderpassInfo.m_vrsTexelSizeY);
  1024. }
  1025. {
  1026. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphCallback);
  1027. ctx.m_passIdx = passIdx;
  1028. pass.m_callback(ctx);
  1029. }
  1030. if(pass.m_beginRenderpassInfo.m_hasRenderpass)
  1031. {
  1032. cmdb->endRenderPass();
  1033. }
  1034. cmdb->popDebugMarker();
  1035. }
  1036. } // end for batches
  1037. if(setPostQuery)
  1038. {
  1039. // Write a timestamp before the last flush
  1040. cmdb->writeTimestamp(postQuery.get());
  1041. }
  1042. cmdb->endRecording();
  1043. });
  1044. }
  1045. CoreThreadJobManager::getSingleton().waitForAllTasksToFinish();
  1046. // Submit
  1047. DynamicArray<CommandBuffer*, MemoryPoolPtrWrapper<StackMemoryPool>> pCmdbs(pool);
  1048. pCmdbs.resize(cmdbs.getSize());
  1049. for(U32 i = 0; i < cmdbs.getSize(); ++i)
  1050. {
  1051. pCmdbs[i] = cmdbs[i].get();
  1052. }
  1053. const U32 firstGroupThatWroteToSwapchain2 = firstGroupThatWroteToSwapchain.getNonAtomically();
  1054. if(firstGroupThatWroteToSwapchain2 == 0 || firstGroupThatWroteToSwapchain2 == kMaxU32)
  1055. {
  1056. GrManager::getSingleton().submit(WeakArray(pCmdbs), {}, optionalFence);
  1057. }
  1058. else
  1059. {
  1060. // 2 submits. The 1st contains all the batches that don't write to swapchain
  1061. GrManager::getSingleton().submit(WeakArray(pCmdbs).subrange(0, firstGroupThatWroteToSwapchain2), {}, nullptr);
  1062. GrManager::getSingleton().submit(
  1063. WeakArray(pCmdbs).subrange(firstGroupThatWroteToSwapchain2, batchGroupCount - firstGroupThatWroteToSwapchain2), {}, optionalFence);
  1064. }
  1065. }
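// Threading sketch (illustrative): with 8 batches and 4 job threads, splitThreadedProblem() gives each
// group 2 consecutive batches, and each group records its own command buffer, so a batch's barriers and
// passes always land in a single command buffer. The split into two submits is keyed on the first group
// that writes to the swapchain, presumably so the work that does not touch the swapchain can reach the
// GPU earlier; that rationale is an assumption, the code above only performs the split.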
  1066. void RenderGraph::getCrntUsage(RenderTargetHandle handle, U32 batchIdx, const TextureSubresourceDesc& subresource, TextureUsageBit& usage) const
  1067. {
  1068. usage = TextureUsageBit::kNone;
  1069. const Batch& batch = m_ctx->m_batches[batchIdx];
  1070. for(U32 passIdx : batch.m_passIndices)
  1071. {
  1072. for(const RenderPassDependency::TextureInfo& consumer : m_ctx->m_passes[passIdx].m_consumedTextures)
  1073. {
  1074. if(consumer.m_handle == handle && subresource.overlapsWith(consumer.m_subresource))
  1075. {
  1076. usage |= consumer.m_usage;
  1077. break;
  1078. }
  1079. }
  1080. }
  1081. }
  1082. void RenderGraph::periodicCleanup()
  1083. {
  1084. U32 rtsCleanedCount = 0;
  1085. for(RenderTargetCacheEntry& entry : m_renderTargetCache)
  1086. {
  1087. if(entry.m_texturesInUse < entry.m_textures.getSize())
  1088. {
  1089. // Should clean up
  1090. rtsCleanedCount += entry.m_textures.getSize() - entry.m_texturesInUse;
  1091. // New array
  1092. GrDynamicArray<TextureInternalPtr> newArray;
  1093. if(entry.m_texturesInUse > 0)
  1094. {
  1095. newArray.resize(entry.m_texturesInUse);
  1096. }
  1097. // Populate the new array
  1098. for(U32 i = 0; i < newArray.getSize(); ++i)
  1099. {
  1100. newArray[i] = std::move(entry.m_textures[i]);
  1101. }
  1102. // Destroy the old array and the rest of the textures
  1103. entry.m_textures.destroy();
  1104. // Move new array
  1105. entry.m_textures = std::move(newArray);
  1106. }
  1107. }
  1108. if(rtsCleanedCount > 0)
  1109. {
  1110. ANKI_GR_LOGI("Cleaned %u render targets", rtsCleanedCount);
  1111. }
  1112. }
  1113. void RenderGraph::getStatistics(RenderGraphStatistics& statistics)
  1114. {
  1115. m_statistics.m_nextTimestamp = (m_statistics.m_nextTimestamp + 1) % kMaxBufferedTimestamps;
  1116. const U32 oldFrame = m_statistics.m_nextTimestamp;
  1117. if(m_statistics.m_timestamps[oldFrame][0].isCreated() && m_statistics.m_timestamps[oldFrame][1].isCreated())
  1118. {
  1119. Second start, end;
  1120. [[maybe_unused]] TimestampQueryResult res = m_statistics.m_timestamps[oldFrame][0]->getResult(start);
  1121. ANKI_ASSERT(res == TimestampQueryResult::kAvailable);
  1122. m_statistics.m_timestamps[oldFrame][0].reset(nullptr);
  1123. res = m_statistics.m_timestamps[oldFrame][1]->getResult(end);
  1124. ANKI_ASSERT(res == TimestampQueryResult::kAvailable);
  1125. m_statistics.m_timestamps[oldFrame][1].reset(nullptr);
  1126. const Second diff = end - start;
  1127. statistics.m_gpuTime = diff;
  1128. statistics.m_cpuStartTime = m_statistics.m_cpuStartTimes[oldFrame];
  1129. }
  1130. else
  1131. {
  1132. statistics.m_gpuTime = -1.0;
  1133. statistics.m_cpuStartTime = -1.0;
  1134. }
  1135. }
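// Timing sketch (illustrative): the two timestamps written in recordAndSubmitCommandBuffers() bracket the
// whole frame's GPU work. Because the ring has kMaxBufferedTimestamps slots and this function reads the
// oldest one, the time reported here belongs to a frame submitted a few frames earlier; -1.0 means no
// result is available yet.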
  1136. #if ANKI_DBG_RENDER_GRAPH
  1137. StringRaii RenderGraph::textureUsageToStr(StackMemoryPool& pool, TextureUsageBit usage)
  1138. {
  1139. if(!usage)
  1140. {
  1141. return StringRaii(&pool, "None");
  1142. }
  1143. StringListRaii slist(&pool);
  1144. # define ANKI_TEX_USAGE(u) \
  1145. if(!!(usage & TextureUsageBit::u)) \
  1146. { \
  1147. slist.pushBackSprintf("%s", #u); \
  1148. }
  1149. ANKI_TEX_USAGE(kSampledGeometry);
  1150. ANKI_TEX_USAGE(kSampledFragment);
  1151. ANKI_TEX_USAGE(kSampledCompute);
  1152. ANKI_TEX_USAGE(kSampledTraceRays);
  1153. ANKI_TEX_USAGE(kUavGeometryRead);
  1154. ANKI_TEX_USAGE(kUavGeometryWrite);
  1155. ANKI_TEX_USAGE(kUavFragmentRead);
  1156. ANKI_TEX_USAGE(kUavFragmentWrite);
  1157. ANKI_TEX_USAGE(kUavComputeRead);
  1158. ANKI_TEX_USAGE(kUavComputeWrite);
  1159. ANKI_TEX_USAGE(kUavTraceRaysRead);
  1160. ANKI_TEX_USAGE(kUavTraceRaysWrite);
  1161. ANKI_TEX_USAGE(kFramebufferRead);
  1162. ANKI_TEX_USAGE(kFramebufferWrite);
  1163. ANKI_TEX_USAGE(kTransferDestination);
  1164. ANKI_TEX_USAGE(kGenerateMipmaps);
  1165. ANKI_TEX_USAGE(kPresent);
  1166. ANKI_TEX_USAGE(kFramebufferShadingRate);
  1167. if(!usage)
  1168. {
  1169. slist.pushBackSprintf("?");
  1170. }
  1171. # undef ANKI_TEX_USAGE
  1172. ANKI_ASSERT(!slist.isEmpty());
  1173. StringRaii str(&pool);
  1174. slist.join(" | ", str);
  1175. return str;
  1176. }
  1177. StringRaii RenderGraph::bufferUsageToStr(StackMemoryPool& pool, BufferUsageBit usage)
  1178. {
  1179. StringListRaii slist(&pool);
  1180. # define ANKI_BUFF_USAGE(u) \
  1181. if(!!(usage & BufferUsageBit::u)) \
  1182. { \
  1183. slist.pushBackSprintf("%s", #u); \
  1184. }
  1185. ANKI_BUFF_USAGE(kConstantGeometry);
  1186. ANKI_BUFF_USAGE(kConstantPixel);
  1187. ANKI_BUFF_USAGE(kConstantCompute);
  1188. ANKI_BUFF_USAGE(kConstantDispatchRays);
  1189. ANKI_BUFF_USAGE(kStorageGeometryRead);
  1190. ANKI_BUFF_USAGE(kStorageGeometryWrite);
  1191. ANKI_BUFF_USAGE(kStorageFragmentRead);
  1192. ANKI_BUFF_USAGE(kStorageFragmentWrite);
  1193. ANKI_BUFF_USAGE(kStorageComputeRead);
  1194. ANKI_BUFF_USAGE(kStorageComputeWrite);
  1195. ANKI_BUFF_USAGE(kStorageTraceRaysRead);
  1196. ANKI_BUFF_USAGE(kStorageTraceRaysWrite);
  1197. ANKI_BUFF_USAGE(kTextureGeometryRead);
  1198. ANKI_BUFF_USAGE(kTextureGeometryWrite);
  1199. ANKI_BUFF_USAGE(kTextureFragmentRead);
  1200. ANKI_BUFF_USAGE(kTextureFragmentWrite);
  1201. ANKI_BUFF_USAGE(kTextureComputeRead);
  1202. ANKI_BUFF_USAGE(kTextureComputeWrite);
  1203. ANKI_BUFF_USAGE(kTextureTraceRaysRead);
  1204. ANKI_BUFF_USAGE(kTextureTraceRaysWrite);
  1205. ANKI_BUFF_USAGE(kIndex);
  1206. ANKI_BUFF_USAGE(kVertex);
  1207. ANKI_BUFF_USAGE(kIndirectCompute);
  1208. ANKI_BUFF_USAGE(kIndirectDraw);
  1209. ANKI_BUFF_USAGE(kIndirectDispatchRays);
  1210. ANKI_BUFF_USAGE(kTransferSource);
  1211. ANKI_BUFF_USAGE(kTransferDestination);
  1212. ANKI_BUFF_USAGE(kAccelerationStructureBuild);
  1213. if(!usage)
  1214. {
  1215. slist.pushBackSprintf("NONE");
  1216. }
  1217. # undef ANKI_BUFF_USAGE
  1218. ANKI_ASSERT(!slist.isEmpty());
  1219. StringRaii str(&pool);
  1220. slist.join(" | ", str);
  1221. return str;
  1222. }
  1223. StringRaii RenderGraph::asUsageToStr(StackMemoryPool& pool, AccelerationStructureUsageBit usage)
  1224. {
  1225. StringListRaii slist(&pool);
  1226. # define ANKI_AS_USAGE(u) \
  1227. if(!!(usage & AccelerationStructureUsageBit::u)) \
  1228. { \
  1229. slist.pushBackSprintf("%s", #u); \
  1230. }
  1231. ANKI_AS_USAGE(kBuild);
  1232. ANKI_AS_USAGE(kAttach);
  1233. ANKI_AS_USAGE(kGeometryRead);
  1234. ANKI_AS_USAGE(kFragmentRead);
  1235. ANKI_AS_USAGE(kComputeRead);
  1236. ANKI_AS_USAGE(kTraceRaysRead);
  1237. if(!usage)
  1238. {
  1239. slist.pushBackSprintf("NONE");
  1240. }
  1241. # undef ANKI_AS_USAGE
  1242. ANKI_ASSERT(!slist.isEmpty());
  1243. StringRaii str(&pool);
  1244. slist.join(" | ", str);
  1245. return str;
  1246. }
  1247. Error RenderGraph::dumpDependencyDotFile(const RenderGraphBuilder& descr, const BakeContext& ctx, CString path) const
  1248. {
  1249. ANKI_GR_LOGW("Running with debug code");
  1250. static constexpr Array<const char*, 5> COLORS = {"red", "green", "blue", "magenta", "cyan"};
  1251. StackMemoryPool& pool = *ctx.m_pool;
  1252. StringListRaii slist(&pool);
  1253. slist.pushBackSprintf("digraph {\n");
  1254. slist.pushBackSprintf("\t//splines = ortho;\nconcentrate = true;\n");
  1255. for(U32 batchIdx = 0; batchIdx < ctx.m_batches.getSize(); ++batchIdx)
  1256. {
  1257. // Set same rank
  1258. slist.pushBackSprintf("\t{rank=\"same\";");
  1259. for(U32 passIdx : ctx.m_batches[batchIdx].m_passIndices)
  1260. {
  1261. slist.pushBackSprintf("\"%s\";", descr.m_passes[passIdx]->m_name.cstr());
  1262. }
  1263. slist.pushBackSprintf("}\n");
  1264. // Print passes
  1265. for(U32 passIdx : ctx.m_batches[batchIdx].m_passIndices)
  1266. {
  1267. CString passName = descr.m_passes[passIdx]->m_name.toCString();
  1268. slist.pushBackSprintf("\t\"%s\"[color=%s,style=%s,shape=box];\n", passName.cstr(), COLORS[batchIdx % COLORS.getSize()],
  1269. (descr.m_passes[passIdx]->m_type == RenderPassBase::Type::kGraphics) ? "bold" : "dashed");
  1270. for(U32 depIdx : ctx.m_passes[passIdx].m_dependsOn)
  1271. {
  1272. slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", descr.m_passes[depIdx]->m_name.cstr(), passName.cstr());
  1273. }
  1274. if(ctx.m_passes[passIdx].m_dependsOn.getSize() == 0)
  1275. {
  1276. slist.pushBackSprintf("\tNONE->\"%s\";\n", descr.m_passes[passIdx]->m_name.cstr());
  1277. }
  1278. }
  1279. }
  1280. # if 0
  1281. // Color the resources
  1282. slist.pushBackSprintf("subgraph cluster_0 {\n");
  1283. for(U rtIdx = 0; rtIdx < descr.m_renderTargets.getSize(); ++rtIdx)
  1284. {
  1285. slist.pushBackSprintf(
  1286. "\t\"%s\"[color=%s];\n", &descr.m_renderTargets[rtIdx].m_name[0], COLORS[rtIdx % COLORS.getSize()]);
  1287. }
  1288. slist.pushBackSprintf("}\n");
  1289. # endif
  1290. // Barriers
  1291. // slist.pushBackSprintf("subgraph cluster_1 {\n");
  1292. StringRaii prevBubble(&pool);
  1293. prevBubble.create("START");
  1294. for(U32 batchIdx = 0; batchIdx < ctx.m_batches.getSize(); ++batchIdx)
  1295. {
  1296. const Batch& batch = ctx.m_batches[batchIdx];
  1297. StringRaii batchName(&pool);
  1298. batchName.sprintf("batch%u", batchIdx);
  1299. for(U32 barrierIdx = 0; barrierIdx < batch.m_textureBarriersBefore.getSize(); ++barrierIdx)
  1300. {
  1301. const TextureBarrier& barrier = batch.m_textureBarriersBefore[barrierIdx];
  1302. StringRaii barrierLabel(&pool);
  1303. barrierLabel.sprintf("<b>%s</b> (mip,dp,f,l)=(%u,%u,%u,%u)<br/>%s <b>to</b> %s", &descr.m_renderTargets[barrier.m_idx].m_name[0],
  1304. barrier.m_surface.m_level, barrier.m_surface.m_depth, barrier.m_surface.m_face, barrier.m_surface.m_layer,
  1305. textureUsageToStr(pool, barrier.m_usageBefore).cstr(), textureUsageToStr(pool, barrier.m_usageAfter).cstr());
  1306. StringRaii barrierName(&pool);
  1307. barrierName.sprintf("%s tex barrier%u", batchName.cstr(), barrierIdx);
  1308. slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(), COLORS[batchIdx % COLORS.getSize()],
  1309. barrierLabel.cstr());
  1310. slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());
  1311. prevBubble = barrierName;
  1312. }
  1313. for(U32 barrierIdx = 0; barrierIdx < batch.m_bufferBarriersBefore.getSize(); ++barrierIdx)
  1314. {
  1315. const BufferBarrier& barrier = batch.m_bufferBarriersBefore[barrierIdx];
  1316. StringRaii barrierLabel(&pool);
  1317. barrierLabel.sprintf("<b>%s</b><br/>%s <b>to</b> %s", &descr.m_buffers[barrier.m_idx].m_name[0],
  1318. bufferUsageToStr(pool, barrier.m_usageBefore).cstr(), bufferUsageToStr(pool, barrier.m_usageAfter).cstr());
  1319. StringRaii barrierName(&pool);
  1320. barrierName.sprintf("%s buff barrier%u", batchName.cstr(), barrierIdx);
  1321. slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(), COLORS[batchIdx % COLORS.getSize()],
  1322. barrierLabel.cstr());
  1323. slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());
  1324. prevBubble = barrierName;
  1325. }
  1326. for(U32 barrierIdx = 0; barrierIdx < batch.m_asBarriersBefore.getSize(); ++barrierIdx)
  1327. {
  1328. const ASBarrier& barrier = batch.m_asBarriersBefore[barrierIdx];
  1329. StringRaii barrierLabel(&pool);
  1330. barrierLabel.sprintf("<b>%s</b><br/>%s <b>to</b> %s", descr.m_as[barrier.m_idx].m_name.getBegin(),
  1331. asUsageToStr(pool, barrier.m_usageBefore).cstr(), asUsageToStr(pool, barrier.m_usageAfter).cstr());
  1332. StringRaii barrierName(&pool);
  1333. barrierName.sprintf("%s AS barrier%u", batchName.cstr(), barrierIdx);
  1334. slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(), COLORS[batchIdx % COLORS.getSize()],
  1335. barrierLabel.cstr());
  1336. slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());
  1337. prevBubble = barrierName;
  1338. }
  1339. for(U32 passIdx : batch.m_passIndices)
  1340. {
  1341. const RenderPassBase& pass = *descr.m_passes[passIdx];
  1342. StringRaii passName(&pool);
  1343. passName.sprintf("%s pass", pass.m_name.cstr());
  1344. slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold];\n", passName.cstr(), COLORS[batchIdx % COLORS.getSize()]);
  1345. slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), passName.cstr());
  1346. prevBubble = passName;
  1347. }
  1348. }
  1349. // slist.pushBackSprintf("}\n");
  1350. slist.pushBackSprintf("}");
  1351. File file;
  1352. ANKI_CHECK(file.open(StringRaii(&pool).sprintf("%s/rgraph_%05u.dot", &path[0], m_version).toCString(), FileOpenFlag::kWrite));
  1353. for(const String& s : slist)
  1354. {
  1355. ANKI_CHECK(file.writeTextf("%s", &s[0]));
  1356. }
  1357. return Error::kNone;
  1358. }
  1359. #endif
  1360. } // end namespace anki