// Copyright (C) 2009-2023, Panagiotis Christopoulos Charitos and contributors.
// All rights reserved.
// Code licensed under the BSD License.
// http://www.anki3d.org/LICENSE

#include <AnKi/Gr/RenderGraph.h>
#include <AnKi/Gr/GrManager.h>
#include <AnKi/Gr/Texture.h>
#include <AnKi/Gr/Sampler.h>
#include <AnKi/Gr/CommandBuffer.h>
#include <AnKi/Util/Tracer.h>
#include <AnKi/Util/BitSet.h>
#include <AnKi/Util/File.h>
#include <AnKi/Util/StringList.h>
#include <AnKi/Util/HighRezTimer.h>
#include <AnKi/Core/Common.h>

namespace anki {

#define ANKI_DBG_RENDER_GRAPH 0

static inline U32 getTextureSurfOrVolCount(const TexturePtr& tex)
{
    return tex->getMipmapCount() * tex->getLayerCount() * (textureTypeIsCube(tex->getTextureType()) ? 6 : 1);
}

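// Illustrative example of the formula above: a cube texture with 4 mipmaps and 2 layers yields
// 4 * 2 * 6 = 48 individually tracked surfaces, while a non-cube 2D texture with 4 mipmaps and
// 1 layer yields 4.
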
/// Contains some extra things for render targets.
class RenderGraph::RT
{
public:
    DynamicArray<TextureUsageBit, MemoryPoolPtrWrapper<StackMemoryPool>> m_surfOrVolUsages;
    DynamicArray<U16, MemoryPoolPtrWrapper<StackMemoryPool>> m_lastBatchThatTransitionedIt;
    TexturePtr m_texture; ///< Hold a reference.
    Bool m_imported;

    RT(StackMemoryPool* pool)
        : m_surfOrVolUsages(pool)
        , m_lastBatchThatTransitionedIt(pool)
    {
    }
};

/// Same as RT but for buffers.
class RenderGraph::BufferRange
{
public:
    BufferUsageBit m_usage;
    BufferPtr m_buffer; ///< Hold a reference.
    PtrSize m_offset;
    PtrSize m_range;
};

class RenderGraph::AS
{
public:
    AccelerationStructureUsageBit m_usage;
    AccelerationStructurePtr m_as; ///< Hold a reference.
};

/// Pipeline barrier.
class RenderGraph::TextureBarrier
{
public:
    U32 m_idx;
    TextureUsageBit m_usageBefore;
    TextureUsageBit m_usageAfter;
    TextureSurfaceDescriptor m_surface;
    DepthStencilAspectBit m_dsAspect;

    TextureBarrier(U32 rtIdx, TextureUsageBit usageBefore, TextureUsageBit usageAfter, const TextureSurfaceDescriptor& surf, DepthStencilAspectBit dsAspect)
        : m_idx(rtIdx)
        , m_usageBefore(usageBefore)
        , m_usageAfter(usageAfter)
        , m_surface(surf)
        , m_dsAspect(dsAspect)
    {
    }
};

/// Pipeline barrier.
class RenderGraph::BufferBarrier
{
public:
    U32 m_idx;
    BufferUsageBit m_usageBefore;
    BufferUsageBit m_usageAfter;

    BufferBarrier(U32 buffIdx, BufferUsageBit usageBefore, BufferUsageBit usageAfter)
        : m_idx(buffIdx)
        , m_usageBefore(usageBefore)
        , m_usageAfter(usageAfter)
    {
    }
};

/// Pipeline barrier.
class RenderGraph::ASBarrier
{
public:
    U32 m_idx;
    AccelerationStructureUsageBit m_usageBefore;
    AccelerationStructureUsageBit m_usageAfter;

    ASBarrier(U32 asIdx, AccelerationStructureUsageBit usageBefore, AccelerationStructureUsageBit usageAfter)
        : m_idx(asIdx)
        , m_usageBefore(usageBefore)
        , m_usageAfter(usageAfter)
    {
    }
};

/// Contains some extra things the RenderPassBase cannot hold.
class RenderGraph::Pass
{
public:
    // WARNING!!!!!: Whatever you put here needs manual destruction in RenderGraph::reset()
    DynamicArray<U32, MemoryPoolPtrWrapper<StackMemoryPool>> m_dependsOn;
    DynamicArray<RenderPassDependency::TextureInfo, MemoryPoolPtrWrapper<StackMemoryPool>> m_consumedTextures;
    Function<void(RenderPassWorkContext&), MemoryPoolPtrWrapper<StackMemoryPool>> m_callback;

    class
    {
    public:
        Array<RenderTarget, kMaxColorRenderTargets> m_colorRts;
        RenderTarget m_dsRt;
        TextureView* m_vrsRt = nullptr;
        Array<U32, 4> m_renderArea = {};
        U8 m_colorRtCount = 0;
        U8 m_vrsTexelSizeX = 0;
        U8 m_vrsTexelSizeY = 0;
        Array<TextureViewPtr, kMaxColorRenderTargets + 2> m_refs;

        Bool hasRenderpass() const
        {
            return m_renderArea[3] != 0;
        }
    } m_beginRenderpassInfo;

    BaseString<MemoryPoolPtrWrapper<StackMemoryPool>> m_name;
    U32 m_batchIdx ANKI_DEBUG_CODE(= kMaxU32);
    Bool m_drawsToPresentable = false;

    Pass(StackMemoryPool* pool)
        : m_dependsOn(pool)
        , m_consumedTextures(pool)
        , m_name(pool)
    {
    }
};

/// A batch of render passes. These passes can run in parallel.
/// @warning It's POD. Destructor won't be called.
class RenderGraph::Batch
{
public:
    DynamicArray<U32, MemoryPoolPtrWrapper<StackMemoryPool>> m_passIndices;
    DynamicArray<TextureBarrier, MemoryPoolPtrWrapper<StackMemoryPool>> m_textureBarriersBefore;
    DynamicArray<BufferBarrier, MemoryPoolPtrWrapper<StackMemoryPool>> m_bufferBarriersBefore;
    DynamicArray<ASBarrier, MemoryPoolPtrWrapper<StackMemoryPool>> m_asBarriersBefore;

    Batch(StackMemoryPool* pool)
        : m_passIndices(pool)
        , m_textureBarriersBefore(pool)
        , m_bufferBarriersBefore(pool)
        , m_asBarriersBefore(pool)
    {
    }

    Batch(Batch&& b)
    {
        *this = std::move(b);
    }

    Batch& operator=(Batch&& b)
    {
        m_passIndices = std::move(b.m_passIndices);
        m_textureBarriersBefore = std::move(b.m_textureBarriersBefore);
        m_bufferBarriersBefore = std::move(b.m_bufferBarriersBefore);
        m_asBarriersBefore = std::move(b.m_asBarriersBefore);
        return *this;
    }
};

/// The RenderGraph build context.
class RenderGraph::BakeContext
{
public:
    DynamicArray<Pass, MemoryPoolPtrWrapper<StackMemoryPool>> m_passes;
    BitSet<kMaxRenderGraphPasses, U64> m_passIsInBatch{false};
    DynamicArray<Batch, MemoryPoolPtrWrapper<StackMemoryPool>> m_batches;
    DynamicArray<RT, MemoryPoolPtrWrapper<StackMemoryPool>> m_rts;
    DynamicArray<BufferRange, MemoryPoolPtrWrapper<StackMemoryPool>> m_buffers;
    DynamicArray<AS, MemoryPoolPtrWrapper<StackMemoryPool>> m_as;
    Bool m_gatherStatistics = false;

    BakeContext(StackMemoryPool* pool)
        : m_passes(pool)
        , m_batches(pool)
        , m_rts(pool)
        , m_buffers(pool)
        , m_as(pool)
    {
    }
};

RenderGraph::RenderGraph(CString name)
    : GrObject(kClassType, name)
{
}

RenderGraph::~RenderGraph()
{
    ANKI_ASSERT(m_ctx == nullptr);
}

RenderGraph* RenderGraph::newInstance()
{
    return anki::newInstance<RenderGraph>(GrMemoryPool::getSingleton(), "N/A");
}

void RenderGraph::reset()
{
    ANKI_TRACE_SCOPED_EVENT(GrRenderGraphReset);

    if(!m_ctx)
    {
        return;
    }

    if((m_version % kPeriodicCleanupEvery) == 0)
    {
        // Do cleanup
        periodicCleanup();
    }

    // Extract the final usage of the imported RTs and clean all RTs
    for(RT& rt : m_ctx->m_rts)
    {
        if(rt.m_imported)
        {
            const U32 surfOrVolumeCount = getTextureSurfOrVolCount(rt.m_texture);

            // Create a new hash because our hash map dislikes concurrent keys.
            const U64 uuid = rt.m_texture->getUuid();
            const U64 hash = computeHash(&uuid, sizeof(uuid));

            auto it = m_importedRenderTargets.find(hash);
            if(it != m_importedRenderTargets.getEnd())
            {
                // Found
                ANKI_ASSERT(it->m_surfOrVolLastUsages.getSize() == surfOrVolumeCount);
                ANKI_ASSERT(rt.m_surfOrVolUsages.getSize() == surfOrVolumeCount);
            }
            else
            {
                // Not found, create
                it = m_importedRenderTargets.emplace(hash);
                it->m_surfOrVolLastUsages.resize(surfOrVolumeCount);
            }

            // Update the usage
            for(U32 surfOrVolIdx = 0; surfOrVolIdx < surfOrVolumeCount; ++surfOrVolIdx)
            {
                it->m_surfOrVolLastUsages[surfOrVolIdx] = rt.m_surfOrVolUsages[surfOrVolIdx];
            }
        }

        rt.m_texture.reset(nullptr);
    }

    for(BufferRange& buff : m_ctx->m_buffers)
    {
        buff.m_buffer.reset(nullptr);
    }

    for(AS& as : m_ctx->m_as)
    {
        as.m_as.reset(nullptr);
    }

    for(auto& it : m_renderTargetCache)
    {
        it.m_texturesInUse = 0;
    }

    for(Pass& p : m_ctx->m_passes)
    {
        p.m_beginRenderpassInfo.m_refs.fill(TextureViewPtr(nullptr));
        p.m_callback.destroy();
        p.m_name.destroy();
    }

    m_ctx = nullptr;
    ++m_version;
}

TexturePtr RenderGraph::getOrCreateRenderTarget(const TextureInitInfo& initInf, U64 hash)
{
    ANKI_ASSERT(hash);

    // Find a cache entry
    RenderTargetCacheEntry* entry = nullptr;
    auto it = m_renderTargetCache.find(hash);
    if(it == m_renderTargetCache.getEnd()) [[unlikely]]
    {
        // Didn't find the entry, create a new one
        auto it2 = m_renderTargetCache.emplace(hash);
        entry = &(*it2);
    }
    else
    {
        entry = &(*it);
    }
    ANKI_ASSERT(entry);

    // Create or pop one tex from the cache
    TexturePtr tex;
    const Bool createNewTex = entry->m_textures.getSize() == entry->m_texturesInUse;
    if(!createNewTex)
    {
        // Pop
        tex = entry->m_textures[entry->m_texturesInUse++];
    }
    else
    {
        // Create it
        tex = GrManager::getSingleton().newTexture(initInf);

        ANKI_ASSERT(entry->m_texturesInUse == entry->m_textures.getSize());
        entry->m_textures.resize(entry->m_textures.getSize() + 1);
        entry->m_textures[entry->m_textures.getSize() - 1] = tex;
        ++entry->m_texturesInUse;
    }

    return tex;
}

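// Note on the cache's recycling behavior: within one compiled graph every call with the same hash
// hands out the next unused texture of that entry (or creates one), and reset() later zeroes
// m_texturesInUse so the same textures are reused by the next graph instead of being recreated.
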
Bool RenderGraph::overlappingTextureSubresource(const TextureSubresourceInfo& suba, const TextureSubresourceInfo& subb)
{
#define ANKI_OVERLAPPING(first, count) ((suba.first < subb.first + subb.count) && (subb.first < suba.first + suba.count))
    const Bool overlappingFaces = ANKI_OVERLAPPING(m_firstFace, m_faceCount);
    const Bool overlappingMips = ANKI_OVERLAPPING(m_firstMipmap, m_mipmapCount);
    const Bool overlappingLayers = ANKI_OVERLAPPING(m_firstLayer, m_layerCount);
#undef ANKI_OVERLAPPING

    return overlappingFaces && overlappingLayers && overlappingMips;
}

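// ANKI_OVERLAPPING above is the usual 1-D half-open interval test: [a, a + an) and [b, b + bn)
// intersect iff a < b + bn and b < a + an. For example mip ranges [0, 2) and [1, 3) overlap while
// [0, 2) and [2, 4) do not. Two subresources overlap only if all three axes (faces, mips, layers)
// overlap.
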
Bool RenderGraph::passADependsOnB(const RenderPassDescriptionBase& a, const RenderPassDescriptionBase& b)
{
    // Render targets
    {
        // Compute the 3 types of dependencies
        const BitSet<kMaxRenderGraphRenderTargets, U64> aReadBWrite = a.m_readRtMask & b.m_writeRtMask;
        const BitSet<kMaxRenderGraphRenderTargets, U64> aWriteBRead = a.m_writeRtMask & b.m_readRtMask;
        const BitSet<kMaxRenderGraphRenderTargets, U64> aWriteBWrite = a.m_writeRtMask & b.m_writeRtMask;

        const BitSet<kMaxRenderGraphRenderTargets, U64> fullDep = aReadBWrite | aWriteBRead | aWriteBWrite;

        if(fullDep.getAnySet())
        {
            // There might be an overlap
            for(const RenderPassDependency& aDep : a.m_rtDeps)
            {
                if(!fullDep.get(aDep.m_texture.m_handle.m_idx))
                {
                    continue;
                }

                for(const RenderPassDependency& bDep : b.m_rtDeps)
                {
                    if(aDep.m_texture.m_handle != bDep.m_texture.m_handle)
                    {
                        continue;
                    }

                    if(!((aDep.m_texture.m_usage | bDep.m_texture.m_usage) & TextureUsageBit::kAllWrite))
                    {
                        // Don't care about read to read deps
                        continue;
                    }

                    if(overlappingTextureSubresource(aDep.m_texture.m_subresource, bDep.m_texture.m_subresource))
                    {
                        return true;
                    }
                }
            }
        }
    }

    // Buffers
    if(a.m_readBuffMask || a.m_writeBuffMask)
    {
        const BitSet<kMaxRenderGraphBuffers, U64> aReadBWrite = a.m_readBuffMask & b.m_writeBuffMask;
        const BitSet<kMaxRenderGraphBuffers, U64> aWriteBRead = a.m_writeBuffMask & b.m_readBuffMask;
        const BitSet<kMaxRenderGraphBuffers, U64> aWriteBWrite = a.m_writeBuffMask & b.m_writeBuffMask;

        const BitSet<kMaxRenderGraphBuffers, U64> fullDep = aReadBWrite | aWriteBRead | aWriteBWrite;

        if(fullDep.getAnySet())
        {
            // There might be an overlap
            for(const RenderPassDependency& aDep : a.m_buffDeps)
            {
                if(!fullDep.get(aDep.m_buffer.m_handle.m_idx))
                {
                    continue;
                }

                for(const RenderPassDependency& bDep : b.m_buffDeps)
                {
                    if(aDep.m_buffer.m_handle != bDep.m_buffer.m_handle)
                    {
                        continue;
                    }

                    if(!((aDep.m_buffer.m_usage | bDep.m_buffer.m_usage) & BufferUsageBit::kAllWrite))
                    {
                        // Don't care about read to read deps
                        continue;
                    }

                    // TODO: Take into account the ranges
                    return true;
                }
            }
        }
    }

    // AS
    if(a.m_readAsMask || a.m_writeAsMask)
    {
        const BitSet<kMaxRenderGraphAccelerationStructures, U32> aReadBWrite = a.m_readAsMask & b.m_writeAsMask;
        const BitSet<kMaxRenderGraphAccelerationStructures, U32> aWriteBRead = a.m_writeAsMask & b.m_readAsMask;
        const BitSet<kMaxRenderGraphAccelerationStructures, U32> aWriteBWrite = a.m_writeAsMask & b.m_writeAsMask;

        const BitSet<kMaxRenderGraphAccelerationStructures, U32> fullDep = aReadBWrite | aWriteBRead | aWriteBWrite;

        if(fullDep)
        {
            for(const RenderPassDependency& aDep : a.m_asDeps)
            {
                if(!fullDep.get(aDep.m_as.m_handle.m_idx))
                {
                    continue;
                }

                for(const RenderPassDependency& bDep : b.m_asDeps)
                {
                    if(aDep.m_as.m_handle != bDep.m_as.m_handle)
                    {
                        continue;
                    }

                    if(!((aDep.m_as.m_usage | bDep.m_as.m_usage) & AccelerationStructureUsageBit::kAllWrite))
                    {
                        // Don't care about read to read deps
                        continue;
                    }

                    return true;
                }
            }
        }
    }

    return false;
}

Bool RenderGraph::passHasUnmetDependencies(const BakeContext& ctx, U32 passIdx)
{
    Bool depends = false;

    if(ctx.m_batches.getSize() > 0)
    {
        // Check if the deps of passIdx are all in a batch
        for(const U32 depPassIdx : ctx.m_passes[passIdx].m_dependsOn)
        {
            if(!ctx.m_passIsInBatch.get(depPassIdx))
            {
                // Dependency pass is not in a batch
                depends = true;
                break;
            }
        }
    }
    else
    {
        // First batch, check if passIdx depends on any pass
        depends = ctx.m_passes[passIdx].m_dependsOn.getSize() != 0;
    }

    return depends;
}

RenderGraph::BakeContext* RenderGraph::newContext(const RenderGraphDescription& descr, StackMemoryPool& pool)
{
    // Allocate
    BakeContext* ctx = anki::newInstance<BakeContext>(pool, &pool);

    // Init the resources
    ctx->m_rts.resizeStorage(descr.m_renderTargets.getSize());
    for(U32 rtIdx = 0; rtIdx < descr.m_renderTargets.getSize(); ++rtIdx)
    {
        RT& outRt = *ctx->m_rts.emplaceBack(&pool);
        const RenderGraphDescription::RT& inRt = descr.m_renderTargets[rtIdx];

        const Bool imported = inRt.m_importedTex.isCreated();
        if(imported)
        {
            // It's imported
            outRt.m_texture = inRt.m_importedTex;
        }
        else
        {
            // Need to create new

            // Create a new TextureInitInfo with the derived usage
            TextureInitInfo initInf = inRt.m_initInfo;
            initInf.m_usage = inRt.m_usageDerivedByDeps;
            ANKI_ASSERT(initInf.m_usage != TextureUsageBit::kNone && "Probably not referenced by any pass");

            // Create the new hash
            const U64 hash = appendHash(&initInf.m_usage, sizeof(initInf.m_usage), inRt.m_hash);

            // Get or create the texture
            outRt.m_texture = getOrCreateRenderTarget(initInf, hash);
        }

        // Init the usage
        const U32 surfOrVolumeCount = getTextureSurfOrVolCount(outRt.m_texture);
        outRt.m_surfOrVolUsages.resize(surfOrVolumeCount, TextureUsageBit::kNone);
        if(imported && inRt.m_importedAndUndefinedUsage)
        {
            // Get the usage from previous frames

            // Create a new hash because our hash map dislikes concurrent keys.
            const U64 uuid = outRt.m_texture->getUuid();
            const U64 hash = computeHash(&uuid, sizeof(uuid));

            auto it = m_importedRenderTargets.find(hash);
            ANKI_ASSERT(it != m_importedRenderTargets.getEnd() && "Can't find the imported RT");

            ANKI_ASSERT(it->m_surfOrVolLastUsages.getSize() == surfOrVolumeCount);
            for(U32 surfOrVolIdx = 0; surfOrVolIdx < surfOrVolumeCount; ++surfOrVolIdx)
            {
                outRt.m_surfOrVolUsages[surfOrVolIdx] = it->m_surfOrVolLastUsages[surfOrVolIdx];
            }
        }
        else if(imported)
        {
            // Set the usage that was given by the user
            for(U32 surfOrVolIdx = 0; surfOrVolIdx < surfOrVolumeCount; ++surfOrVolIdx)
            {
                outRt.m_surfOrVolUsages[surfOrVolIdx] = inRt.m_importedLastKnownUsage;
            }
        }

        outRt.m_lastBatchThatTransitionedIt.resize(surfOrVolumeCount, kMaxU16);
        outRt.m_imported = imported;
    }

    // Buffers
    ctx->m_buffers.resize(descr.m_buffers.getSize());
    for(U32 buffIdx = 0; buffIdx < ctx->m_buffers.getSize(); ++buffIdx)
    {
        ctx->m_buffers[buffIdx].m_usage = descr.m_buffers[buffIdx].m_usage;
        ANKI_ASSERT(descr.m_buffers[buffIdx].m_importedBuff.isCreated());
        ctx->m_buffers[buffIdx].m_buffer = descr.m_buffers[buffIdx].m_importedBuff;
        ctx->m_buffers[buffIdx].m_offset = descr.m_buffers[buffIdx].m_offset;
        ctx->m_buffers[buffIdx].m_range = descr.m_buffers[buffIdx].m_range;
    }

    // AS
    ctx->m_as.resize(descr.m_as.getSize());
    for(U32 i = 0; i < descr.m_as.getSize(); ++i)
    {
        ctx->m_as[i].m_usage = descr.m_as[i].m_usage;
        ctx->m_as[i].m_as = descr.m_as[i].m_importedAs;
        ANKI_ASSERT(ctx->m_as[i].m_as.isCreated());
    }

    ctx->m_gatherStatistics = descr.m_gatherStatistics;

    return ctx;
}

void RenderGraph::initRenderPassesAndSetDeps(const RenderGraphDescription& descr)
{
    BakeContext& ctx = *m_ctx;
    const U32 passCount = descr.m_passes.getSize();
    ANKI_ASSERT(passCount > 0);

    ctx.m_passes.resizeStorage(passCount);
    for(U32 passIdx = 0; passIdx < passCount; ++passIdx)
    {
        const RenderPassDescriptionBase& inPass = *descr.m_passes[passIdx];
        Pass& outPass = *ctx.m_passes.emplaceBack(ctx.m_as.getMemoryPool().m_pool);

        outPass.m_callback = inPass.m_callback;
        outPass.m_name = inPass.m_name;

        // Create consumer info
        outPass.m_consumedTextures.resize(inPass.m_rtDeps.getSize());
        for(U32 depIdx = 0; depIdx < inPass.m_rtDeps.getSize(); ++depIdx)
        {
            const RenderPassDependency& inDep = inPass.m_rtDeps[depIdx];
            ANKI_ASSERT(inDep.m_type == RenderPassDependency::Type::kTexture);

            RenderPassDependency::TextureInfo& inf = outPass.m_consumedTextures[depIdx];

            ANKI_ASSERT(sizeof(inf) == sizeof(inDep.m_texture));
            memcpy(&inf, &inDep.m_texture, sizeof(inf));
        }

        // Set dependencies by checking all previous passes.
        U32 prevPassIdx = passIdx;
        while(prevPassIdx--)
        {
            const RenderPassDescriptionBase& prevPass = *descr.m_passes[prevPassIdx];
            if(passADependsOnB(inPass, prevPass))
            {
                outPass.m_dependsOn.emplaceBack(prevPassIdx);
            }
        }
    }
}

void RenderGraph::initBatches()
{
    ANKI_ASSERT(m_ctx);

    U passesAssignedToBatchCount = 0;
    const U passCount = m_ctx->m_passes.getSize();
    ANKI_ASSERT(passCount > 0);
    while(passesAssignedToBatchCount < passCount)
    {
        Batch batch(m_ctx->m_as.getMemoryPool().m_pool);

        for(U32 i = 0; i < passCount; ++i)
        {
            if(!m_ctx->m_passIsInBatch.get(i) && !passHasUnmetDependencies(*m_ctx, i))
            {
                // Add to the batch
                ++passesAssignedToBatchCount;
                batch.m_passIndices.emplaceBack(i);
            }
        }

        // Mark batch's passes done
        for(U32 passIdx : batch.m_passIndices)
        {
            m_ctx->m_passIsInBatch.set(passIdx);
            m_ctx->m_passes[passIdx].m_batchIdx = m_ctx->m_batches.getSize();
        }

        m_ctx->m_batches.emplaceBack(std::move(batch));
    }
}

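// In effect the loop above is a greedy topological leveling: every iteration forms one batch out of
// all not-yet-batched passes whose dependencies already live in earlier batches, so the passes inside
// a single batch have no dependencies on each other and only need barriers at the batch boundary.
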
void RenderGraph::initGraphicsPasses(const RenderGraphDescription& descr)
{
    BakeContext& ctx = *m_ctx;
    const U32 passCount = descr.m_passes.getSize();
    ANKI_ASSERT(passCount > 0);

    for(U32 passIdx = 0; passIdx < passCount; ++passIdx)
    {
        const RenderPassDescriptionBase& baseInPass = *descr.m_passes[passIdx];
        Pass& outPass = ctx.m_passes[passIdx];

        // Create command buffers and framebuffer
        if(baseInPass.m_type == RenderPassDescriptionBase::Type::kGraphics)
        {
            const GraphicsRenderPassDescription& inPass = static_cast<const GraphicsRenderPassDescription&>(baseInPass);
            if(inPass.hasRenderpass())
            {
                outPass.m_beginRenderpassInfo.m_renderArea = inPass.m_rpassRenderArea;
                outPass.m_beginRenderpassInfo.m_colorRtCount = inPass.m_colorRtCount;

                // Init the usage bits
                for(U32 i = 0; i < inPass.m_colorRtCount; ++i)
                {
                    const RenderTargetInfo& inAttachment = inPass.m_rts[i];
                    RenderTarget& outAttachment = outPass.m_beginRenderpassInfo.m_colorRts[i];

                    getCrntUsage(inAttachment.m_handle, outPass.m_batchIdx, TextureSubresourceInfo(inAttachment.m_surface), outAttachment.m_usage);

                    const TextureViewInitInfo viewInit(m_ctx->m_rts[inAttachment.m_handle.m_idx].m_texture.get(),
                                                       TextureSubresourceInfo(inAttachment.m_surface), "RenderGraph");
                    TextureViewPtr view = GrManager::getSingleton().newTextureView(viewInit);
                    outAttachment.m_view = view.get();
                    outPass.m_beginRenderpassInfo.m_refs[i] = view;

                    outAttachment.m_loadOperation = inAttachment.m_loadOperation;
                    outAttachment.m_storeOperation = inAttachment.m_storeOperation;
                    outAttachment.m_clearValue = inAttachment.m_clearValue;
                }

                if(!!inPass.m_rts[kMaxColorRenderTargets].m_aspect)
                {
                    const RenderTargetInfo& inAttachment = inPass.m_rts[kMaxColorRenderTargets];
                    RenderTarget& outAttachment = outPass.m_beginRenderpassInfo.m_dsRt;

                    const TextureSubresourceInfo subresource = TextureSubresourceInfo(inAttachment.m_surface, inAttachment.m_aspect);

                    getCrntUsage(inAttachment.m_handle, outPass.m_batchIdx, subresource, outAttachment.m_usage);

                    const TextureViewInitInfo viewInit(m_ctx->m_rts[inAttachment.m_handle.m_idx].m_texture.get(),
                                                       TextureSubresourceInfo(inAttachment.m_surface, inAttachment.m_aspect), "RenderGraph");
                    TextureViewPtr view = GrManager::getSingleton().newTextureView(viewInit);
                    outAttachment.m_view = view.get();
                    outPass.m_beginRenderpassInfo.m_refs[kMaxColorRenderTargets] = view;

                    outAttachment.m_loadOperation = inAttachment.m_loadOperation;
                    outAttachment.m_storeOperation = inAttachment.m_storeOperation;
                    outAttachment.m_stencilLoadOperation = inAttachment.m_stencilLoadOperation;
                    outAttachment.m_stencilStoreOperation = inAttachment.m_stencilStoreOperation;
                    outAttachment.m_clearValue = inAttachment.m_clearValue;
                    outAttachment.m_aspect = inAttachment.m_aspect;
                }

                if(inPass.m_vrsRtTexelSizeX > 0)
                {
                    const RenderTargetInfo& inAttachment = inPass.m_rts[kMaxColorRenderTargets + 1];

                    const TextureViewInitInfo viewInit(m_ctx->m_rts[inAttachment.m_handle.m_idx].m_texture.get(), inAttachment.m_surface,
                                                       "RenderGraph SRI");
                    TextureViewPtr view = GrManager::getSingleton().newTextureView(viewInit);
                    outPass.m_beginRenderpassInfo.m_vrsRt = view.get();
                    outPass.m_beginRenderpassInfo.m_refs[kMaxColorRenderTargets + 1] = view;

                    outPass.m_beginRenderpassInfo.m_vrsTexelSizeX = inPass.m_vrsRtTexelSizeX;
                    outPass.m_beginRenderpassInfo.m_vrsTexelSizeY = inPass.m_vrsRtTexelSizeY;
                }
            }
        }
    }
}

template<typename TFunc>
void RenderGraph::iterateSurfsOrVolumes(const Texture& tex, const TextureSubresourceInfo& subresource, TFunc func)
{
    for(U32 mip = subresource.m_firstMipmap; mip < subresource.m_firstMipmap + subresource.m_mipmapCount; ++mip)
    {
        for(U32 layer = subresource.m_firstLayer; layer < subresource.m_firstLayer + subresource.m_layerCount; ++layer)
        {
            for(U32 face = subresource.m_firstFace; face < U32(subresource.m_firstFace + subresource.m_faceCount); ++face)
            {
                // Compute surf or vol idx
                const U32 faceCount = textureTypeIsCube(tex.getTextureType()) ? 6 : 1;
                const U32 idx = (faceCount * tex.getLayerCount()) * mip + faceCount * layer + face;
                const TextureSurfaceDescriptor surf(mip, face, layer);
                if(!func(idx, surf))
                {
                    return;
                }
            }
        }
    }
}

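// The flattened index mirrors getTextureSurfOrVolCount(): idx = mip * (layerCount * faceCount)
// + layer * faceCount + face. As a worked example, on a cube texture (faceCount = 6) with 2 layers,
// mip 1 / layer 0 / face 3 maps to 1 * 12 + 0 * 6 + 3 = 15.
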
void RenderGraph::setTextureBarrier(Batch& batch, const RenderPassDependency& dep)
{
    ANKI_ASSERT(dep.m_type == RenderPassDependency::Type::kTexture);

    BakeContext& ctx = *m_ctx;
    const U32 batchIdx = U32(&batch - &ctx.m_batches[0]);
    const U32 rtIdx = dep.m_texture.m_handle.m_idx;
    const TextureUsageBit depUsage = dep.m_texture.m_usage;
    RT& rt = ctx.m_rts[rtIdx];

    iterateSurfsOrVolumes(*rt.m_texture, dep.m_texture.m_subresource, [&](U32 surfOrVolIdx, const TextureSurfaceDescriptor& surf) {
        TextureUsageBit& crntUsage = rt.m_surfOrVolUsages[surfOrVolIdx];

        const Bool skipBarrier = crntUsage == depUsage && !(crntUsage & TextureUsageBit::kAllWrite);
        if(!skipBarrier)
        {
            // Check if we can merge barriers
            if(rt.m_lastBatchThatTransitionedIt[surfOrVolIdx] == batchIdx)
            {
                // Will merge the barriers
                crntUsage |= depUsage;

                [[maybe_unused]] Bool found = false;
                for(TextureBarrier& b : batch.m_textureBarriersBefore)
                {
                    if(b.m_idx == rtIdx && b.m_surface == surf)
                    {
                        b.m_usageAfter |= depUsage;
                        found = true;
                        break;
                    }
                }

                ANKI_ASSERT(found);
            }
            else
            {
                // Create a new barrier for this surface
                batch.m_textureBarriersBefore.emplaceBack(rtIdx, crntUsage, depUsage, surf, dep.m_texture.m_subresource.m_depthStencilAspect);

                crntUsage = depUsage;
                rt.m_lastBatchThatTransitionedIt[surfOrVolIdx] = U16(batchIdx);
            }
        }

        return true;
    });
}

void RenderGraph::setBatchBarriers(const RenderGraphDescription& descr)
{
    BakeContext& ctx = *m_ctx;

    // For all batches
    for(Batch& batch : ctx.m_batches)
    {
        BitSet<kMaxRenderGraphBuffers, U64> buffHasBarrierMask(false);
        BitSet<kMaxRenderGraphAccelerationStructures, U32> asHasBarrierMask(false);

        // For all passes of that batch
        for(U32 passIdx : batch.m_passIndices)
        {
            const RenderPassDescriptionBase& pass = *descr.m_passes[passIdx];

            // Do textures
            for(const RenderPassDependency& dep : pass.m_rtDeps)
            {
                setTextureBarrier(batch, dep);
            }

            // Do buffers
            for(const RenderPassDependency& dep : pass.m_buffDeps)
            {
                const U32 buffIdx = dep.m_buffer.m_handle.m_idx;
                const BufferUsageBit depUsage = dep.m_buffer.m_usage;
                BufferUsageBit& crntUsage = ctx.m_buffers[buffIdx].m_usage;

                const Bool skipBarrier = crntUsage == depUsage && !(crntUsage & BufferUsageBit::kAllWrite);
                if(skipBarrier)
                {
                    continue;
                }

                const Bool buffHasBarrier = buffHasBarrierMask.get(buffIdx);

                if(!buffHasBarrier)
                {
                    // Buff hasn't had a barrier in this batch, add a new barrier
                    batch.m_bufferBarriersBefore.emplaceBack(buffIdx, crntUsage, depUsage);

                    crntUsage = depUsage;
                    buffHasBarrierMask.set(buffIdx);
                }
                else
                {
                    // Buff already in a barrier, merge the 2 barriers
                    BufferBarrier* barrierToMergeTo = nullptr;
                    for(BufferBarrier& b : batch.m_bufferBarriersBefore)
                    {
                        if(b.m_idx == buffIdx)
                        {
                            barrierToMergeTo = &b;
                            break;
                        }
                    }

                    ANKI_ASSERT(barrierToMergeTo);
                    ANKI_ASSERT(!!barrierToMergeTo->m_usageAfter);
                    barrierToMergeTo->m_usageAfter |= depUsage;
                    crntUsage = barrierToMergeTo->m_usageAfter;
                }
            }

            // Do AS
            for(const RenderPassDependency& dep : pass.m_asDeps)
            {
                const U32 asIdx = dep.m_as.m_handle.m_idx;
                const AccelerationStructureUsageBit depUsage = dep.m_as.m_usage;
                AccelerationStructureUsageBit& crntUsage = ctx.m_as[asIdx].m_usage;

                const Bool skipBarrier = crntUsage == depUsage && !(crntUsage & AccelerationStructureUsageBit::kAllWrite);
                if(skipBarrier)
                {
                    continue;
                }

                const Bool asHasBarrierInThisBatch = asHasBarrierMask.get(asIdx);

                if(!asHasBarrierInThisBatch)
                {
                    // AS doesn't have a barrier in this batch, create a new one
                    batch.m_asBarriersBefore.emplaceBack(asIdx, crntUsage, depUsage);

                    crntUsage = depUsage;
                    asHasBarrierMask.set(asIdx);
                }
                else
                {
                    // AS already has a barrier, merge the 2 barriers
                    ASBarrier* barrierToMergeTo = nullptr;
                    for(ASBarrier& other : batch.m_asBarriersBefore)
                    {
                        if(other.m_idx == asIdx)
                        {
                            barrierToMergeTo = &other;
                            break;
                        }
                    }

                    ANKI_ASSERT(barrierToMergeTo);
                    ANKI_ASSERT(!!barrierToMergeTo->m_usageAfter);
                    barrierToMergeTo->m_usageAfter |= depUsage;
                    crntUsage = barrierToMergeTo->m_usageAfter;
                }
            }
        } // For all passes

        ANKI_ASSERT(batch.m_bufferBarriersBefore.getSize() || batch.m_textureBarriersBefore.getSize() || batch.m_asBarriersBefore.getSize());

#if ANKI_DBG_RENDER_GRAPH
        // Sort the barriers to ease the dumped graph
        std::sort(batch.m_textureBarriersBefore.getBegin(), batch.m_textureBarriersBefore.getEnd(),
                  [&](const TextureBarrier& a, const TextureBarrier& b) {
                      const U aidx = a.m_idx;
                      const U bidx = b.m_idx;

                      if(aidx == bidx)
                      {
                          if(a.m_surface.m_level != b.m_surface.m_level)
                          {
                              return a.m_surface.m_level < b.m_surface.m_level;
                          }
                          else if(a.m_surface.m_face != b.m_surface.m_face)
                          {
                              return a.m_surface.m_face < b.m_surface.m_face;
                          }
                          else if(a.m_surface.m_layer != b.m_surface.m_layer)
                          {
                              return a.m_surface.m_layer < b.m_surface.m_layer;
                          }
                          else
                          {
                              return false;
                          }
                      }
                      else
                      {
                          return aidx < bidx;
                      }
                  });

        std::sort(batch.m_bufferBarriersBefore.getBegin(), batch.m_bufferBarriersBefore.getEnd(),
                  [&](const BufferBarrier& a, const BufferBarrier& b) {
                      return a.m_idx < b.m_idx;
                  });

        std::sort(batch.m_asBarriersBefore.getBegin(), batch.m_asBarriersBefore.getEnd(), [&](const ASBarrier& a, const ASBarrier& b) {
            return a.m_idx < b.m_idx;
        });
#endif
    } // For all batches
}

void RenderGraph::minimizeSubchannelSwitches()
{
    BakeContext& ctx = *m_ctx;

    Bool computeFirst = true;
    for(Batch& batch : ctx.m_batches)
    {
        U32 graphicsPasses = 0;
        U32 computePasses = 0;

        std::sort(batch.m_passIndices.getBegin(), batch.m_passIndices.getEnd(), [&](U32 a, U32 b) {
            const Bool aIsCompute = !ctx.m_passes[a].m_beginRenderpassInfo.hasRenderpass();
            const Bool bIsCompute = !ctx.m_passes[b].m_beginRenderpassInfo.hasRenderpass();

            graphicsPasses += !aIsCompute + !bIsCompute;
            computePasses += aIsCompute + bIsCompute;

            if(computeFirst)
            {
                return !aIsCompute < !bIsCompute;
            }
            else
            {
                return aIsCompute < bIsCompute;
            }
        });

        if(graphicsPasses && !computePasses)
        {
            // Only graphics passes in this batch, start the next batch with graphics
            computeFirst = false;
        }
        else if(computePasses && !graphicsPasses)
        {
            // Only compute passes in this batch, start the next batch with compute
            computeFirst = true;
        }
        else
        {
            // Mixed batch: whichever kind of work started this batch, the other kind ends it, so start the next batch with the kind this one
            // ended with
            computeFirst = !computeFirst;
        }
    }
}

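// Judging by the function name and its NVIDIA-only call site in compileNewGraph(), the intent here is
// presumably to cut down on switches between the graphics and compute hardware subchannels: passes of
// the same kind get grouped within a batch, and each batch starts with the kind of work the previous
// batch ended with.
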
void RenderGraph::sortBatchPasses()
{
    BakeContext& ctx = *m_ctx;

    for(Batch& batch : ctx.m_batches)
    {
        std::sort(batch.m_passIndices.getBegin(), batch.m_passIndices.getEnd(), [&](U32 a, U32 b) {
            const Bool aIsCompute = !ctx.m_passes[a].m_beginRenderpassInfo.hasRenderpass();
            const Bool bIsCompute = !ctx.m_passes[b].m_beginRenderpassInfo.hasRenderpass();
            return aIsCompute < bIsCompute;
        });
    }
}

void RenderGraph::compileNewGraph(const RenderGraphDescription& descr, StackMemoryPool& pool)
{
    ANKI_TRACE_SCOPED_EVENT(GrRenderGraphCompile);

    // Init the context
    BakeContext& ctx = *newContext(descr, pool);
    m_ctx = &ctx;

    // Init the passes and find the dependencies between passes
    initRenderPassesAndSetDeps(descr);

    // Walk the graph and create pass batches
    initBatches();

    // Now that we know which batch every pass belongs to, init the graphics passes
    initGraphicsPasses(descr);

    // Create barriers between batches
    setBatchBarriers(descr);

    // Sort passes in batches
    if(GrManager::getSingleton().getDeviceCapabilities().m_gpuVendor == GpuVendor::kNvidia)
    {
        minimizeSubchannelSwitches();
    }
    else
    {
        sortBatchPasses();
    }

#if ANKI_DBG_RENDER_GRAPH
    if(dumpDependencyDotFile(descr, ctx, "./"))
    {
        ANKI_LOGF("Won't recover on debug code");
    }
#endif
}

Texture& RenderGraph::getTexture(RenderTargetHandle handle) const
{
    ANKI_ASSERT(m_ctx->m_rts[handle.m_idx].m_texture.isCreated());
    return *m_ctx->m_rts[handle.m_idx].m_texture;
}

void RenderGraph::getCachedBuffer(BufferHandle handle, Buffer*& buff, PtrSize& offset, PtrSize& range) const
{
    const BufferRange& record = m_ctx->m_buffers[handle.m_idx];
    buff = record.m_buffer.get();
    offset = record.m_offset;
    range = record.m_range;
}

AccelerationStructure* RenderGraph::getAs(AccelerationStructureHandle handle) const
{
    ANKI_ASSERT(m_ctx->m_as[handle.m_idx].m_as.isCreated());
    return m_ctx->m_as[handle.m_idx].m_as.get();
}

void RenderGraph::recordAndSubmitCommandBuffers(FencePtr* optionalFence)
{
    ANKI_TRACE_SCOPED_EVENT(GrRenderGraphRecordAndSubmit);
    ANKI_ASSERT(m_ctx);

    const U32 batchGroupCount = min(CoreThreadJobManager::getSingleton().getThreadCount(), m_ctx->m_batches.getSize());

    StackMemoryPool* pool = m_ctx->m_rts.getMemoryPool().m_pool;

    DynamicArray<CommandBufferPtr, MemoryPoolPtrWrapper<StackMemoryPool>> cmdbs(pool);
    cmdbs.resize(batchGroupCount);
    SpinLock cmdbsMtx;

    for(U32 group = 0; group < batchGroupCount; ++group)
    {
        U32 start, end;
        splitThreadedProblem(group, batchGroupCount, m_ctx->m_batches.getSize(), start, end);
        if(start == end)
        {
            continue;
        }

        CoreThreadJobManager::getSingleton().dispatchTask(
            [this, start, end, pool, &cmdbs, &cmdbsMtx, group, batchGroupCount]([[maybe_unused]] U32 tid) {
                ANKI_TRACE_SCOPED_EVENT(GrRenderGraphTask);

                CommandBufferInitInfo cmdbInit("RenderGraph cmdb");
                cmdbInit.m_flags = CommandBufferFlag::kGeneralWork;
                CommandBufferPtr cmdb = GrManager::getSingleton().newCommandBuffer(cmdbInit);

                // Write timestamp
                const Bool setPreQuery = m_ctx->m_gatherStatistics && group == 0;
                const Bool setPostQuery = m_ctx->m_gatherStatistics && group == batchGroupCount - 1;
                TimestampQueryPtr preQuery, postQuery;
                if(setPreQuery)
                {
                    preQuery = GrManager::getSingleton().newTimestampQuery();
                    cmdb->writeTimestamp(preQuery.get());
                }

                if(setPostQuery)
                {
                    postQuery = GrManager::getSingleton().newTimestampQuery();
                }

                // Bookkeeping
                {
                    LockGuard lock(cmdbsMtx);
                    cmdbs[group] = cmdb;

                    if(preQuery.isCreated())
                    {
                        m_statistics.m_nextTimestamp = (m_statistics.m_nextTimestamp + 1) % kMaxBufferedTimestamps;
                        m_statistics.m_timestamps[m_statistics.m_nextTimestamp * 2] = preQuery;
                    }

                    if(postQuery.isCreated())
                    {
                        m_statistics.m_timestamps[m_statistics.m_nextTimestamp * 2 + 1] = postQuery;
                        m_statistics.m_cpuStartTimes[m_statistics.m_nextTimestamp] = HighRezTimer::getCurrentTime();
                    }
                }

                RenderPassWorkContext ctx;
                ctx.m_rgraph = this;

                for(U32 i = start; i < end; ++i)
                {
                    const Batch& batch = m_ctx->m_batches[i];

                    // Set the barriers
                    DynamicArray<TextureBarrierInfo, MemoryPoolPtrWrapper<StackMemoryPool>> texBarriers(pool);
                    texBarriers.resizeStorage(batch.m_textureBarriersBefore.getSize());
                    for(const TextureBarrier& barrier : batch.m_textureBarriersBefore)
                    {
                        TextureBarrierInfo& inf = *texBarriers.emplaceBack();
                        inf.m_previousUsage = barrier.m_usageBefore;
                        inf.m_nextUsage = barrier.m_usageAfter;
                        inf.m_subresource = barrier.m_surface;
                        inf.m_subresource.m_depthStencilAspect = barrier.m_dsAspect;
                        inf.m_texture = m_ctx->m_rts[barrier.m_idx].m_texture.get();
                    }

                    DynamicArray<BufferBarrierInfo, MemoryPoolPtrWrapper<StackMemoryPool>> buffBarriers(pool);
                    buffBarriers.resizeStorage(batch.m_bufferBarriersBefore.getSize());
                    for(const BufferBarrier& barrier : batch.m_bufferBarriersBefore)
                    {
                        BufferBarrierInfo& inf = *buffBarriers.emplaceBack();
                        inf.m_previousUsage = barrier.m_usageBefore;
                        inf.m_nextUsage = barrier.m_usageAfter;
                        inf.m_bufferView = BufferView(m_ctx->m_buffers[barrier.m_idx].m_buffer.get(), m_ctx->m_buffers[barrier.m_idx].m_offset,
                                                      m_ctx->m_buffers[barrier.m_idx].m_range);
                    }

                    DynamicArray<AccelerationStructureBarrierInfo, MemoryPoolPtrWrapper<StackMemoryPool>> asBarriers(pool);
                    for(const ASBarrier& barrier : batch.m_asBarriersBefore)
                    {
                        AccelerationStructureBarrierInfo& inf = *asBarriers.emplaceBack();
                        inf.m_previousUsage = barrier.m_usageBefore;
                        inf.m_nextUsage = barrier.m_usageAfter;
                        inf.m_as = m_ctx->m_as[barrier.m_idx].m_as.get();
                    }

                    cmdb->pushDebugMarker("Barrier", Vec3(1.0f, 0.0f, 0.0f));
                    cmdb->setPipelineBarrier(texBarriers, buffBarriers, asBarriers);
                    cmdb->popDebugMarker();

                    ctx.m_commandBuffer = cmdb.get();
                    ctx.m_batchIdx = i;

                    // Call the passes
                    for(U32 passIdx : batch.m_passIndices)
                    {
                        Pass& pass = m_ctx->m_passes[passIdx];

                        const Vec3 passColor = (pass.m_beginRenderpassInfo.hasRenderpass()) ? Vec3(0.0f, 1.0f, 0.0f) : Vec3(1.0f, 1.0f, 0.0f);
                        cmdb->pushDebugMarker(pass.m_name, passColor);

                        if(pass.m_beginRenderpassInfo.hasRenderpass())
                        {
                            cmdb->beginRenderPass({pass.m_beginRenderpassInfo.m_colorRts.getBegin(), U32(pass.m_beginRenderpassInfo.m_colorRtCount)},
                                                  pass.m_beginRenderpassInfo.m_dsRt.m_view ? &pass.m_beginRenderpassInfo.m_dsRt : nullptr,
                                                  pass.m_beginRenderpassInfo.m_renderArea[0], pass.m_beginRenderpassInfo.m_renderArea[1],
                                                  pass.m_beginRenderpassInfo.m_renderArea[2], pass.m_beginRenderpassInfo.m_renderArea[3],
                                                  pass.m_beginRenderpassInfo.m_vrsRt, pass.m_beginRenderpassInfo.m_vrsTexelSizeX,
                                                  pass.m_beginRenderpassInfo.m_vrsTexelSizeY);
                        }

                        {
                            ANKI_TRACE_SCOPED_EVENT(GrRenderGraphCallback);
                            ctx.m_passIdx = passIdx;
                            pass.m_callback(ctx);
                        }

                        if(pass.m_beginRenderpassInfo.hasRenderpass())
                        {
                            cmdb->endRenderPass();
                        }

                        cmdb->popDebugMarker();
                    }
                } // end for batches

                if(setPostQuery)
                {
                    // Write a timestamp before the last flush
                    cmdb->writeTimestamp(postQuery.get());
                }

                cmdb->endRecording();
            });
    }

    CoreThreadJobManager::getSingleton().waitForAllTasksToFinish();

    // Submit
    if(cmdbs.getSize() == 1) [[unlikely]]
    {
        GrManager::getSingleton().submit(cmdbs[0].get(), {}, optionalFence);
    }
    else
    {
        // 2 submits. The 1st contains all the batches minus the last. The last batch goes alone, given that it most likely writes to the
        // swapchain
        DynamicArray<CommandBuffer*, MemoryPoolPtrWrapper<StackMemoryPool>> pCmdbs(pool);
        pCmdbs.resize(cmdbs.getSize() - 1);
        for(U32 i = 0; i < cmdbs.getSize() - 1; ++i)
        {
            pCmdbs[i] = cmdbs[i].get();
        }

        GrManager::getSingleton().submit(WeakArray(pCmdbs), {}, nullptr);
        GrManager::getSingleton().submit(cmdbs.getBack().get(), {}, optionalFence);
    }
}

void RenderGraph::getCrntUsage(RenderTargetHandle handle, U32 batchIdx, const TextureSubresourceInfo& subresource, TextureUsageBit& usage) const
{
    usage = TextureUsageBit::kNone;
    const Batch& batch = m_ctx->m_batches[batchIdx];

    for(U32 passIdx : batch.m_passIndices)
    {
        for(const RenderPassDependency::TextureInfo& consumer : m_ctx->m_passes[passIdx].m_consumedTextures)
        {
            if(consumer.m_handle == handle && overlappingTextureSubresource(subresource, consumer.m_subresource))
            {
                usage |= consumer.m_usage;
                break;
            }
        }
    }
}

void RenderGraph::periodicCleanup()
{
    U32 rtsCleanedCount = 0;
    for(RenderTargetCacheEntry& entry : m_renderTargetCache)
    {
        if(entry.m_texturesInUse < entry.m_textures.getSize())
        {
            // Should cleanup
            rtsCleanedCount += entry.m_textures.getSize() - entry.m_texturesInUse;

            // New array
            GrDynamicArray<TexturePtr> newArray;
            if(entry.m_texturesInUse > 0)
            {
                newArray.resize(entry.m_texturesInUse);
            }

            // Populate the new array
            for(U32 i = 0; i < newArray.getSize(); ++i)
            {
                newArray[i] = std::move(entry.m_textures[i]);
            }

            // Destroy the old array and the rest of the textures
            entry.m_textures.destroy();

            // Move new array
            entry.m_textures = std::move(newArray);
        }
    }

    if(rtsCleanedCount > 0)
    {
        ANKI_GR_LOGI("Cleaned %u render targets", rtsCleanedCount);
    }
}

void RenderGraph::getStatistics(RenderGraphStatistics& statistics) const
{
    const U32 oldFrame = (m_statistics.m_nextTimestamp + 1) % kMaxBufferedTimestamps;

    if(m_statistics.m_timestamps[oldFrame * 2] && m_statistics.m_timestamps[oldFrame * 2 + 1])
    {
        Second start, end;
        [[maybe_unused]] TimestampQueryResult res = m_statistics.m_timestamps[oldFrame * 2]->getResult(start);
        ANKI_ASSERT(res == TimestampQueryResult::kAvailable);

        res = m_statistics.m_timestamps[oldFrame * 2 + 1]->getResult(end);
        ANKI_ASSERT(res == TimestampQueryResult::kAvailable);

        const Second diff = end - start;
        statistics.m_gpuTime = diff;
        statistics.m_cpuStartTime = m_statistics.m_cpuStartTimes[oldFrame];
    }
    else
    {
        statistics.m_gpuTime = -1.0;
        statistics.m_cpuStartTime = -1.0;
    }
}

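// The timestamps live in a small ring of kMaxBufferedTimestamps slots, two queries per slot (the pre
// and post timestamps written in recordAndSubmitCommandBuffers()). Reading the slot right after
// m_nextTimestamp picks the oldest buffered pair, which presumably gives the GPU enough frames of
// latency for the query results to be available without stalling.
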
#if ANKI_DBG_RENDER_GRAPH
StringRaii RenderGraph::textureUsageToStr(StackMemoryPool& pool, TextureUsageBit usage)
{
    if(!usage)
    {
        return StringRaii(&pool, "None");
    }

    StringListRaii slist(&pool);

# define ANKI_TEX_USAGE(u) \
    if(!!(usage & TextureUsageBit::u)) \
    { \
        slist.pushBackSprintf("%s", #u); \
    }

    ANKI_TEX_USAGE(kSampledGeometry);
    ANKI_TEX_USAGE(kSampledFragment);
    ANKI_TEX_USAGE(kSampledCompute);
    ANKI_TEX_USAGE(kSampledTraceRays);
    ANKI_TEX_USAGE(kUavGeometryRead);
    ANKI_TEX_USAGE(kUavGeometryWrite);
    ANKI_TEX_USAGE(kUavFragmentRead);
    ANKI_TEX_USAGE(kUavFragmentWrite);
    ANKI_TEX_USAGE(kUavComputeRead);
    ANKI_TEX_USAGE(kUavComputeWrite);
    ANKI_TEX_USAGE(kUavTraceRaysRead);
    ANKI_TEX_USAGE(kUavTraceRaysWrite);
    ANKI_TEX_USAGE(kFramebufferRead);
    ANKI_TEX_USAGE(kFramebufferWrite);
    ANKI_TEX_USAGE(kTransferDestination);
    ANKI_TEX_USAGE(kGenerateMipmaps);
    ANKI_TEX_USAGE(kPresent);
    ANKI_TEX_USAGE(kFramebufferShadingRate);

    if(!usage)
    {
        slist.pushBackSprintf("?");
    }

# undef ANKI_TEX_USAGE

    ANKI_ASSERT(!slist.isEmpty());
    StringRaii str(&pool);
    slist.join(" | ", str);
    return str;
}

StringRaii RenderGraph::bufferUsageToStr(StackMemoryPool& pool, BufferUsageBit usage)
{
    StringListRaii slist(&pool);

# define ANKI_BUFF_USAGE(u) \
    if(!!(usage & BufferUsageBit::u)) \
    { \
        slist.pushBackSprintf("%s", #u); \
    }

    ANKI_BUFF_USAGE(kConstantGeometry);
    ANKI_BUFF_USAGE(kConstantFragment);
    ANKI_BUFF_USAGE(kConstantCompute);
    ANKI_BUFF_USAGE(kConstantTraceRays);
    ANKI_BUFF_USAGE(kStorageGeometryRead);
    ANKI_BUFF_USAGE(kStorageGeometryWrite);
    ANKI_BUFF_USAGE(kStorageFragmentRead);
    ANKI_BUFF_USAGE(kStorageFragmentWrite);
    ANKI_BUFF_USAGE(kStorageComputeRead);
    ANKI_BUFF_USAGE(kStorageComputeWrite);
    ANKI_BUFF_USAGE(kStorageTraceRaysRead);
    ANKI_BUFF_USAGE(kStorageTraceRaysWrite);
    ANKI_BUFF_USAGE(kTextureGeometryRead);
    ANKI_BUFF_USAGE(kTextureGeometryWrite);
    ANKI_BUFF_USAGE(kTextureFragmentRead);
    ANKI_BUFF_USAGE(kTextureFragmentWrite);
    ANKI_BUFF_USAGE(kTextureComputeRead);
    ANKI_BUFF_USAGE(kTextureComputeWrite);
    ANKI_BUFF_USAGE(kTextureTraceRaysRead);
    ANKI_BUFF_USAGE(kTextureTraceRaysWrite);
    ANKI_BUFF_USAGE(kIndex);
    ANKI_BUFF_USAGE(kVertex);
    ANKI_BUFF_USAGE(kIndirectCompute);
    ANKI_BUFF_USAGE(kIndirectDraw);
    ANKI_BUFF_USAGE(kIndirectTraceRays);
    ANKI_BUFF_USAGE(kTransferSource);
    ANKI_BUFF_USAGE(kTransferDestination);
    ANKI_BUFF_USAGE(kAccelerationStructureBuild);

    if(!usage)
    {
        slist.pushBackSprintf("NONE");
    }

# undef ANKI_BUFF_USAGE

    ANKI_ASSERT(!slist.isEmpty());
    StringRaii str(&pool);
    slist.join(" | ", str);
    return str;
}

StringRaii RenderGraph::asUsageToStr(StackMemoryPool& pool, AccelerationStructureUsageBit usage)
{
    StringListRaii slist(&pool);

# define ANKI_AS_USAGE(u) \
    if(!!(usage & AccelerationStructureUsageBit::u)) \
    { \
        slist.pushBackSprintf("%s", #u); \
    }

    ANKI_AS_USAGE(kBuild);
    ANKI_AS_USAGE(kAttach);
    ANKI_AS_USAGE(kGeometryRead);
    ANKI_AS_USAGE(kFragmentRead);
    ANKI_AS_USAGE(kComputeRead);
    ANKI_AS_USAGE(kTraceRaysRead);

    if(!usage)
    {
        slist.pushBackSprintf("NONE");
    }

# undef ANKI_AS_USAGE

    ANKI_ASSERT(!slist.isEmpty());
    StringRaii str(&pool);
    slist.join(" | ", str);
    return str;
}

Error RenderGraph::dumpDependencyDotFile(const RenderGraphDescription& descr, const BakeContext& ctx, CString path) const
{
    ANKI_GR_LOGW("Running with debug code");

    static constexpr Array<const char*, 5> COLORS = {"red", "green", "blue", "magenta", "cyan"};
    StackMemoryPool& pool = *ctx.m_pool;
    StringListRaii slist(&pool);

    slist.pushBackSprintf("digraph {\n");
    slist.pushBackSprintf("\t//splines = ortho;\nconcentrate = true;\n");

    for(U32 batchIdx = 0; batchIdx < ctx.m_batches.getSize(); ++batchIdx)
    {
        // Set same rank
        slist.pushBackSprintf("\t{rank=\"same\";");
        for(U32 passIdx : ctx.m_batches[batchIdx].m_passIndices)
        {
            slist.pushBackSprintf("\"%s\";", descr.m_passes[passIdx]->m_name.cstr());
        }
        slist.pushBackSprintf("}\n");

        // Print passes
        for(U32 passIdx : ctx.m_batches[batchIdx].m_passIndices)
        {
            CString passName = descr.m_passes[passIdx]->m_name.toCString();

            slist.pushBackSprintf("\t\"%s\"[color=%s,style=%s,shape=box];\n", passName.cstr(), COLORS[batchIdx % COLORS.getSize()],
                                  (descr.m_passes[passIdx]->m_type == RenderPassDescriptionBase::Type::kGraphics) ? "bold" : "dashed");

            for(U32 depIdx : ctx.m_passes[passIdx].m_dependsOn)
            {
                slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", descr.m_passes[depIdx]->m_name.cstr(), passName.cstr());
            }

            if(ctx.m_passes[passIdx].m_dependsOn.getSize() == 0)
            {
                slist.pushBackSprintf("\tNONE->\"%s\";\n", descr.m_passes[passIdx]->m_name.cstr());
            }
        }
    }

# if 0
    // Color the resources
    slist.pushBackSprintf("subgraph cluster_0 {\n");
    for(U rtIdx = 0; rtIdx < descr.m_renderTargets.getSize(); ++rtIdx)
    {
        slist.pushBackSprintf("\t\"%s\"[color=%s];\n", &descr.m_renderTargets[rtIdx].m_name[0], COLORS[rtIdx % COLORS.getSize()]);
    }
    slist.pushBackSprintf("}\n");
# endif

    // Barriers
    // slist.pushBackSprintf("subgraph cluster_1 {\n");
    StringRaii prevBubble(&pool);
    prevBubble.create("START");
    for(U32 batchIdx = 0; batchIdx < ctx.m_batches.getSize(); ++batchIdx)
    {
        const Batch& batch = ctx.m_batches[batchIdx];

        StringRaii batchName(&pool);
        batchName.sprintf("batch%u", batchIdx);

        for(U32 barrierIdx = 0; barrierIdx < batch.m_textureBarriersBefore.getSize(); ++barrierIdx)
        {
            const TextureBarrier& barrier = batch.m_textureBarriersBefore[barrierIdx];
            StringRaii barrierLabel(&pool);
            barrierLabel.sprintf("<b>%s</b> (mip,dp,f,l)=(%u,%u,%u,%u)<br/>%s <b>to</b> %s", &descr.m_renderTargets[barrier.m_idx].m_name[0],
                                 barrier.m_surface.m_level, barrier.m_surface.m_depth, barrier.m_surface.m_face, barrier.m_surface.m_layer,
                                 textureUsageToStr(pool, barrier.m_usageBefore).cstr(), textureUsageToStr(pool, barrier.m_usageAfter).cstr());

            StringRaii barrierName(&pool);
            barrierName.sprintf("%s tex barrier%u", batchName.cstr(), barrierIdx);

            slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(), COLORS[batchIdx % COLORS.getSize()],
                                  barrierLabel.cstr());
            slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());

            prevBubble = barrierName;
        }

        for(U32 barrierIdx = 0; barrierIdx < batch.m_bufferBarriersBefore.getSize(); ++barrierIdx)
        {
            const BufferBarrier& barrier = batch.m_bufferBarriersBefore[barrierIdx];
            StringRaii barrierLabel(&pool);
            barrierLabel.sprintf("<b>%s</b><br/>%s <b>to</b> %s", &descr.m_buffers[barrier.m_idx].m_name[0],
                                 bufferUsageToStr(pool, barrier.m_usageBefore).cstr(), bufferUsageToStr(pool, barrier.m_usageAfter).cstr());

            StringRaii barrierName(&pool);
            barrierName.sprintf("%s buff barrier%u", batchName.cstr(), barrierIdx);

            slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(), COLORS[batchIdx % COLORS.getSize()],
                                  barrierLabel.cstr());
            slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());

            prevBubble = barrierName;
        }

        for(U32 barrierIdx = 0; barrierIdx < batch.m_asBarriersBefore.getSize(); ++barrierIdx)
        {
            const ASBarrier& barrier = batch.m_asBarriersBefore[barrierIdx];
            StringRaii barrierLabel(&pool);
            barrierLabel.sprintf("<b>%s</b><br/>%s <b>to</b> %s", descr.m_as[barrier.m_idx].m_name.getBegin(),
                                 asUsageToStr(pool, barrier.m_usageBefore).cstr(), asUsageToStr(pool, barrier.m_usageAfter).cstr());

            StringRaii barrierName(&pool);
            barrierName.sprintf("%s AS barrier%u", batchName.cstr(), barrierIdx);

            slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(), COLORS[batchIdx % COLORS.getSize()],
                                  barrierLabel.cstr());
            slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());

            prevBubble = barrierName;
        }

        for(U32 passIdx : batch.m_passIndices)
        {
            const RenderPassDescriptionBase& pass = *descr.m_passes[passIdx];
            StringRaii passName(&pool);
            passName.sprintf("%s pass", pass.m_name.cstr());

            slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold];\n", passName.cstr(), COLORS[batchIdx % COLORS.getSize()]);
            slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), passName.cstr());

            prevBubble = passName;
        }
    }

    // slist.pushBackSprintf("}\n");
    slist.pushBackSprintf("}");

    File file;
    ANKI_CHECK(file.open(StringRaii(&pool).sprintf("%s/rgraph_%05u.dot", &path[0], m_version).toCString(), FileOpenFlag::kWrite));
    for(const String& s : slist)
    {
        ANKI_CHECK(file.writeTextf("%s", &s[0]));
    }

    return Error::kNone;
}
#endif

} // end namespace anki