RenderGraph.cpp

  1. // Copyright (C) 2009-2023, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/Gr/RenderGraph.h>
  6. #include <AnKi/Gr/GrManager.h>
  7. #include <AnKi/Gr/Texture.h>
  8. #include <AnKi/Gr/Sampler.h>
  9. #include <AnKi/Gr/Framebuffer.h>
  10. #include <AnKi/Gr/CommandBuffer.h>
  11. #include <AnKi/Util/Tracer.h>
  12. #include <AnKi/Util/BitSet.h>
  13. #include <AnKi/Util/File.h>
  14. #include <AnKi/Util/StringList.h>
  15. #include <AnKi/Util/HighRezTimer.h>
  16. #include <AnKi/Core/Common.h>
  17. namespace anki {
  18. #define ANKI_DBG_RENDER_GRAPH 0
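/// Count the addressable surfaces or volumes of a texture: one per (mip, layer, face) combination, where cube and
/// cube-array textures contribute 6 faces per layer and every other type contributes 1.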
  19. static inline U32 getTextureSurfOrVolCount(const TexturePtr& tex)
  20. {
  21. return tex->getMipmapCount() * tex->getLayerCount() * (textureTypeIsCube(tex->getTextureType()) ? 6 : 1);
  22. }
  23. /// Contains some extra things for render targets.
  24. class RenderGraph::RT
  25. {
  26. public:
  27. DynamicArray<TextureUsageBit, MemoryPoolPtrWrapper<StackMemoryPool>> m_surfOrVolUsages;
  28. DynamicArray<U16, MemoryPoolPtrWrapper<StackMemoryPool>> m_lastBatchThatTransitionedIt;
  29. TexturePtr m_texture; ///< Hold a reference.
  30. Bool m_imported;
  31. RT(StackMemoryPool* pool)
  32. : m_surfOrVolUsages(pool)
  33. , m_lastBatchThatTransitionedIt(pool)
  34. {
  35. }
  36. };
  37. /// Same as RT but for buffers.
  38. class RenderGraph::BufferRange
  39. {
  40. public:
  41. BufferUsageBit m_usage;
  42. BufferPtr m_buffer; ///< Hold a reference.
  43. PtrSize m_offset;
  44. PtrSize m_range;
  45. };
  46. class RenderGraph::AS
  47. {
  48. public:
  49. AccelerationStructureUsageBit m_usage;
  50. AccelerationStructurePtr m_as; ///< Hold a reference.
  51. };
  52. /// Pipeline barrier.
  53. class RenderGraph::TextureBarrier
  54. {
  55. public:
  56. U32 m_idx;
  57. TextureUsageBit m_usageBefore;
  58. TextureUsageBit m_usageAfter;
  59. TextureSurfaceInfo m_surface;
  60. DepthStencilAspectBit m_dsAspect;
  61. TextureBarrier(U32 rtIdx, TextureUsageBit usageBefore, TextureUsageBit usageAfter, const TextureSurfaceInfo& surf, DepthStencilAspectBit dsAspect)
  62. : m_idx(rtIdx)
  63. , m_usageBefore(usageBefore)
  64. , m_usageAfter(usageAfter)
  65. , m_surface(surf)
  66. , m_dsAspect(dsAspect)
  67. {
  68. }
  69. };
  70. /// Pipeline barrier.
  71. class RenderGraph::BufferBarrier
  72. {
  73. public:
  74. U32 m_idx;
  75. BufferUsageBit m_usageBefore;
  76. BufferUsageBit m_usageAfter;
  77. BufferBarrier(U32 buffIdx, BufferUsageBit usageBefore, BufferUsageBit usageAfter)
  78. : m_idx(buffIdx)
  79. , m_usageBefore(usageBefore)
  80. , m_usageAfter(usageAfter)
  81. {
  82. }
  83. };
  84. /// Pipeline barrier.
  85. class RenderGraph::ASBarrier
  86. {
  87. public:
  88. U32 m_idx;
  89. AccelerationStructureUsageBit m_usageBefore;
  90. AccelerationStructureUsageBit m_usageAfter;
  91. ASBarrier(U32 asIdx, AccelerationStructureUsageBit usageBefore, AccelerationStructureUsageBit usageAfter)
  92. : m_idx(asIdx)
  93. , m_usageBefore(usageBefore)
  94. , m_usageAfter(usageAfter)
  95. {
  96. }
  97. };
  98. /// Contains some extra things the RenderPassBase cannot hold.
  99. class RenderGraph::Pass
  100. {
  101. public:
  102. // WARNING!!!!!: Whatever you put here needs manual destruction in RenderGraph::reset()
  103. DynamicArray<U32, MemoryPoolPtrWrapper<StackMemoryPool>> m_dependsOn;
  104. DynamicArray<RenderPassDependency::TextureInfo, MemoryPoolPtrWrapper<StackMemoryPool>> m_consumedTextures;
  105. Function<void(RenderPassWorkContext&), MemoryPoolPtrWrapper<StackMemoryPool>> m_callback;
  106. DynamicArray<CommandBufferPtr, MemoryPoolPtrWrapper<StackMemoryPool>> m_secondLevelCmdbs;
  107. CommandBufferInitInfo m_secondLevelCmdbInitInfo;
  108. Array<U32, 4> m_fbRenderArea;
109. Array<TextureUsageBit, kMaxColorRenderTargets> m_colorUsages = {}; ///< For beginRenderPass
110. TextureUsageBit m_dsUsage = TextureUsageBit::kNone; ///< For beginRenderPass
  111. FramebufferPtr m_framebuffer;
  112. BaseString<MemoryPoolPtrWrapper<StackMemoryPool>> m_name;
  113. U32 m_batchIdx ANKI_DEBUG_CODE(= kMaxU32);
  114. Bool m_drawsToPresentable = false;
  115. Pass(StackMemoryPool* pool)
  116. : m_dependsOn(pool)
  117. , m_consumedTextures(pool)
  118. , m_secondLevelCmdbs(pool)
  119. , m_name(pool)
  120. {
  121. }
  122. };
  123. /// A batch of render passes. These passes can run in parallel.
124. /// @warning Treated as POD: the destructor won't be called.
  125. class RenderGraph::Batch
  126. {
  127. public:
  128. DynamicArray<U32, MemoryPoolPtrWrapper<StackMemoryPool>> m_passIndices;
  129. DynamicArray<TextureBarrier, MemoryPoolPtrWrapper<StackMemoryPool>> m_textureBarriersBefore;
  130. DynamicArray<BufferBarrier, MemoryPoolPtrWrapper<StackMemoryPool>> m_bufferBarriersBefore;
  131. DynamicArray<ASBarrier, MemoryPoolPtrWrapper<StackMemoryPool>> m_asBarriersBefore;
  132. CommandBuffer* m_cmdb; ///< Someone else holds the ref already so have a ptr here.
  133. Batch(StackMemoryPool* pool)
  134. : m_passIndices(pool)
  135. , m_textureBarriersBefore(pool)
  136. , m_bufferBarriersBefore(pool)
  137. , m_asBarriersBefore(pool)
  138. {
  139. }
  140. Batch(Batch&& b)
  141. {
  142. *this = std::move(b);
  143. }
  144. Batch& operator=(Batch&& b)
  145. {
  146. m_passIndices = std::move(b.m_passIndices);
  147. m_textureBarriersBefore = std::move(b.m_textureBarriersBefore);
  148. m_bufferBarriersBefore = std::move(b.m_bufferBarriersBefore);
  149. m_asBarriersBefore = std::move(b.m_asBarriersBefore);
  150. m_cmdb = b.m_cmdb;
  151. b.m_cmdb = nullptr;
  152. return *this;
  153. }
  154. };
  155. /// The RenderGraph build context.
  156. class RenderGraph::BakeContext
  157. {
  158. public:
  159. DynamicArray<Pass, MemoryPoolPtrWrapper<StackMemoryPool>> m_passes;
  160. BitSet<kMaxRenderGraphPasses, U64> m_passIsInBatch{false};
  161. DynamicArray<Batch, MemoryPoolPtrWrapper<StackMemoryPool>> m_batches;
  162. DynamicArray<RT, MemoryPoolPtrWrapper<StackMemoryPool>> m_rts;
  163. DynamicArray<BufferRange, MemoryPoolPtrWrapper<StackMemoryPool>> m_buffers;
  164. DynamicArray<AS, MemoryPoolPtrWrapper<StackMemoryPool>> m_as;
  165. DynamicArray<CommandBufferPtr, MemoryPoolPtrWrapper<StackMemoryPool>> m_graphicsCmdbs;
  166. Bool m_gatherStatistics = false;
  167. BakeContext(StackMemoryPool* pool)
  168. : m_passes(pool)
  169. , m_batches(pool)
  170. , m_rts(pool)
  171. , m_buffers(pool)
  172. , m_as(pool)
  173. , m_graphicsCmdbs(pool)
  174. {
  175. }
  176. };
  177. void FramebufferDescription::bake()
  178. {
  179. m_hash = 0;
  180. ANKI_ASSERT(m_colorAttachmentCount > 0 || !!m_depthStencilAttachment.m_aspect);
181. // First the color attachments
  182. if(m_colorAttachmentCount)
  183. {
  184. ANKI_BEGIN_PACKED_STRUCT
  185. struct ColorAttachment
  186. {
  187. TextureSurfaceInfo m_surf;
  188. U32 m_loadOp;
  189. U32 m_storeOp;
  190. Array<U32, 4> m_clearColor;
  191. };
  192. ANKI_END_PACKED_STRUCT
  193. static_assert(sizeof(ColorAttachment) == 4 * (4 + 1 + 1 + 4), "Wrong size");
  194. Array<ColorAttachment, kMaxColorRenderTargets> colorAttachments;
  195. for(U i = 0; i < m_colorAttachmentCount; ++i)
  196. {
  197. const FramebufferDescriptionAttachment& inAtt = m_colorAttachments[i];
  198. colorAttachments[i].m_surf = inAtt.m_surface;
  199. colorAttachments[i].m_loadOp = static_cast<U32>(inAtt.m_loadOperation);
  200. colorAttachments[i].m_storeOp = static_cast<U32>(inAtt.m_storeOperation);
  201. memcpy(&colorAttachments[i].m_clearColor[0], &inAtt.m_clearValue.m_coloru[0], sizeof(U32) * 4);
  202. }
  203. m_hash = computeHash(&colorAttachments[0], sizeof(ColorAttachment) * m_colorAttachmentCount);
  204. }
  205. // DS attachment
  206. if(!!m_depthStencilAttachment.m_aspect)
  207. {
  208. ANKI_BEGIN_PACKED_STRUCT
  209. class DSAttachment
  210. {
  211. public:
  212. TextureSurfaceInfo m_surf;
  213. U32 m_loadOp;
  214. U32 m_storeOp;
  215. U32 m_stencilLoadOp;
  216. U32 m_stencilStoreOp;
  217. U32 m_aspect;
  218. F32 m_depthClear;
  219. I32 m_stencilClear;
  220. } outAtt;
  221. ANKI_END_PACKED_STRUCT
  222. const FramebufferDescriptionAttachment& inAtt = m_depthStencilAttachment;
  223. const Bool hasDepth = !!(inAtt.m_aspect & DepthStencilAspectBit::kDepth);
  224. const Bool hasStencil = !!(inAtt.m_aspect & DepthStencilAspectBit::kStencil);
  225. outAtt.m_surf = inAtt.m_surface;
  226. outAtt.m_loadOp = (hasDepth) ? static_cast<U32>(inAtt.m_loadOperation) : 0;
  227. outAtt.m_storeOp = (hasDepth) ? static_cast<U32>(inAtt.m_storeOperation) : 0;
  228. outAtt.m_stencilLoadOp = (hasStencil) ? static_cast<U32>(inAtt.m_stencilLoadOperation) : 0;
  229. outAtt.m_stencilStoreOp = (hasStencil) ? static_cast<U32>(inAtt.m_stencilStoreOperation) : 0;
  230. outAtt.m_aspect = static_cast<U32>(inAtt.m_aspect);
  231. outAtt.m_depthClear = (hasDepth) ? inAtt.m_clearValue.m_depthStencil.m_depth : 0.0f;
  232. outAtt.m_stencilClear = (hasStencil) ? inAtt.m_clearValue.m_depthStencil.m_stencil : 0;
  233. m_hash = (m_hash != 0) ? appendHash(&outAtt, sizeof(outAtt), m_hash) : computeHash(&outAtt, sizeof(outAtt));
  234. }
  235. // SRI
  236. if(m_shadingRateAttachmentTexelWidth > 0 && m_shadingRateAttachmentTexelHeight > 0)
  237. {
  238. ANKI_BEGIN_PACKED_STRUCT
  239. class SriToHash
  240. {
  241. public:
  242. U32 m_sriTexelWidth;
  243. U32 m_sriTexelHeight;
  244. TextureSurfaceInfo m_surface;
  245. } sriToHash;
  246. ANKI_END_PACKED_STRUCT
  247. sriToHash.m_sriTexelWidth = m_shadingRateAttachmentTexelWidth;
  248. sriToHash.m_sriTexelHeight = m_shadingRateAttachmentTexelHeight;
  249. sriToHash.m_surface = m_shadingRateAttachmentSurface;
  250. m_hash = (m_hash != 0) ? appendHash(&sriToHash, sizeof(sriToHash), m_hash) : computeHash(&sriToHash, sizeof(sriToHash));
  251. }
  252. ANKI_ASSERT(m_hash != 0 && m_hash != 1);
  253. }
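// A minimal usage sketch of the hashing above (hypothetical caller code; the attachment fields are elided and would be
// filled in the same way the loops above read them):
//
//   FramebufferDescription fbDescr;
//   fbDescr.m_colorAttachmentCount = 1;
//   // ... fill fbDescr.m_colorAttachments[0]: m_surface, m_loadOperation, m_storeOperation, m_clearValue ...
//   fbDescr.bake();
//   // fbDescr.m_hash now identifies this attachment layout and keys the framebuffer cache in getOrCreateFramebuffer()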
  254. RenderGraph::RenderGraph(CString name)
  255. : GrObject(kClassType, name)
  256. {
  257. }
  258. RenderGraph::~RenderGraph()
  259. {
  260. ANKI_ASSERT(m_ctx == nullptr);
  261. }
  262. RenderGraph* RenderGraph::newInstance()
  263. {
  264. return anki::newInstance<RenderGraph>(GrMemoryPool::getSingleton(), "N/A");
  265. }
  266. void RenderGraph::reset()
  267. {
  268. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphReset);
  269. if(!m_ctx)
  270. {
  271. return;
  272. }
  273. if((m_version % kPeriodicCleanupEvery) == 0)
  274. {
  275. // Do cleanup
  276. periodicCleanup();
  277. }
  278. // Extract the final usage of the imported RTs and clean all RTs
  279. for(RT& rt : m_ctx->m_rts)
  280. {
  281. if(rt.m_imported)
  282. {
  283. const U32 surfOrVolumeCount = getTextureSurfOrVolCount(rt.m_texture);
284. // Create a new hash because our hash map dislikes concurrent keys.
  285. const U64 uuid = rt.m_texture->getUuid();
  286. const U64 hash = computeHash(&uuid, sizeof(uuid));
  287. auto it = m_importedRenderTargets.find(hash);
  288. if(it != m_importedRenderTargets.getEnd())
  289. {
  290. // Found
  291. ANKI_ASSERT(it->m_surfOrVolLastUsages.getSize() == surfOrVolumeCount);
  292. ANKI_ASSERT(rt.m_surfOrVolUsages.getSize() == surfOrVolumeCount);
  293. }
  294. else
  295. {
  296. // Not found, create
  297. it = m_importedRenderTargets.emplace(hash);
  298. it->m_surfOrVolLastUsages.resize(surfOrVolumeCount);
  299. }
  300. // Update the usage
  301. for(U32 surfOrVolIdx = 0; surfOrVolIdx < surfOrVolumeCount; ++surfOrVolIdx)
  302. {
  303. it->m_surfOrVolLastUsages[surfOrVolIdx] = rt.m_surfOrVolUsages[surfOrVolIdx];
  304. }
  305. }
  306. rt.m_texture.reset(nullptr);
  307. }
  308. for(BufferRange& buff : m_ctx->m_buffers)
  309. {
  310. buff.m_buffer.reset(nullptr);
  311. }
  312. for(AS& as : m_ctx->m_as)
  313. {
  314. as.m_as.reset(nullptr);
  315. }
  316. for(auto& it : m_renderTargetCache)
  317. {
  318. it.m_texturesInUse = 0;
  319. }
  320. for(Pass& p : m_ctx->m_passes)
  321. {
  322. p.m_framebuffer.reset(nullptr);
  323. p.m_secondLevelCmdbs.destroy();
  324. p.m_callback.destroy();
  325. p.m_name.destroy();
  326. }
  327. m_ctx->m_graphicsCmdbs.destroy();
  328. m_ctx = nullptr;
  329. ++m_version;
  330. }
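/// Non-imported render targets are recycled through m_renderTargetCache: the TextureInitInfo hash (combined with the
/// usage derived from the graph's dependencies, see newContext()) selects a cache entry, and m_texturesInUse counts how
/// many of that entry's textures the current graph has already handed out.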
  331. TexturePtr RenderGraph::getOrCreateRenderTarget(const TextureInitInfo& initInf, U64 hash)
  332. {
  333. ANKI_ASSERT(hash);
  334. // Find a cache entry
  335. RenderTargetCacheEntry* entry = nullptr;
  336. auto it = m_renderTargetCache.find(hash);
  337. if(it == m_renderTargetCache.getEnd()) [[unlikely]]
  338. {
339. // Didn't find the entry, create a new one
  340. auto it2 = m_renderTargetCache.emplace(hash);
  341. entry = &(*it2);
  342. }
  343. else
  344. {
  345. entry = &(*it);
  346. }
  347. ANKI_ASSERT(entry);
  348. // Create or pop one tex from the cache
  349. TexturePtr tex;
  350. const Bool createNewTex = entry->m_textures.getSize() == entry->m_texturesInUse;
  351. if(!createNewTex)
  352. {
  353. // Pop
  354. tex = entry->m_textures[entry->m_texturesInUse++];
  355. }
  356. else
  357. {
  358. // Create it
  359. tex = GrManager::getSingleton().newTexture(initInf);
  360. ANKI_ASSERT(entry->m_texturesInUse == entry->m_textures.getSize());
  361. entry->m_textures.resize(entry->m_textures.getSize() + 1);
  362. entry->m_textures[entry->m_textures.getSize() - 1] = tex;
  363. ++entry->m_texturesInUse;
  364. }
  365. return tex;
  366. }
  367. FramebufferPtr RenderGraph::getOrCreateFramebuffer(const FramebufferDescription& fbDescr, const RenderTargetHandle* rtHandles,
  368. Bool& drawsToPresentable)
  369. {
  370. ANKI_ASSERT(rtHandles);
  371. U64 hash = fbDescr.m_hash;
  372. ANKI_ASSERT(hash > 0);
  373. drawsToPresentable = false;
  374. // Create a hash that includes the render targets
  375. Array<U64, kMaxColorRenderTargets + 2> uuids;
  376. U count = 0;
  377. for(U i = 0; i < fbDescr.m_colorAttachmentCount; ++i)
  378. {
  379. uuids[count++] = m_ctx->m_rts[rtHandles[i].m_idx].m_texture->getUuid();
  380. if(!!(m_ctx->m_rts[rtHandles[i].m_idx].m_texture->getTextureUsage() & TextureUsageBit::kPresent))
  381. {
  382. drawsToPresentable = true;
  383. }
  384. }
  385. if(!!fbDescr.m_depthStencilAttachment.m_aspect)
  386. {
  387. uuids[count++] = m_ctx->m_rts[rtHandles[kMaxColorRenderTargets].m_idx].m_texture->getUuid();
  388. }
  389. if(fbDescr.m_shadingRateAttachmentTexelWidth > 0)
  390. {
  391. uuids[count++] = m_ctx->m_rts[rtHandles[kMaxColorRenderTargets + 1].m_idx].m_texture->getUuid();
  392. }
  393. hash = appendHash(&uuids[0], sizeof(U64) * count, hash);
  394. FramebufferPtr fb;
  395. auto it = m_fbCache.find(hash);
  396. if(it != m_fbCache.getEnd())
  397. {
  398. fb = *it;
  399. }
  400. else
  401. {
  402. // Create a complete fb init info
  403. FramebufferInitInfo fbInit("RenderGraph FB");
  404. fbInit.m_colorAttachmentCount = fbDescr.m_colorAttachmentCount;
  405. for(U i = 0; i < fbInit.m_colorAttachmentCount; ++i)
  406. {
  407. FramebufferAttachmentInfo& outAtt = fbInit.m_colorAttachments[i];
  408. const FramebufferDescriptionAttachment& inAtt = fbDescr.m_colorAttachments[i];
  409. outAtt.m_clearValue = inAtt.m_clearValue;
  410. outAtt.m_loadOperation = inAtt.m_loadOperation;
  411. outAtt.m_storeOperation = inAtt.m_storeOperation;
  412. // Create texture view
  413. const TextureViewInitInfo viewInit(m_ctx->m_rts[rtHandles[i].m_idx].m_texture.get(), TextureSubresourceInfo(inAtt.m_surface),
  414. "RenderGraph");
  415. TextureViewPtr view = GrManager::getSingleton().newTextureView(viewInit);
  416. outAtt.m_textureView = std::move(view);
  417. }
  418. if(!!fbDescr.m_depthStencilAttachment.m_aspect)
  419. {
  420. FramebufferAttachmentInfo& outAtt = fbInit.m_depthStencilAttachment;
  421. const FramebufferDescriptionAttachment& inAtt = fbDescr.m_depthStencilAttachment;
  422. outAtt.m_clearValue = inAtt.m_clearValue;
  423. outAtt.m_loadOperation = inAtt.m_loadOperation;
  424. outAtt.m_storeOperation = inAtt.m_storeOperation;
  425. outAtt.m_stencilLoadOperation = inAtt.m_stencilLoadOperation;
  426. outAtt.m_stencilStoreOperation = inAtt.m_stencilStoreOperation;
  427. // Create texture view
  428. const TextureViewInitInfo viewInit(m_ctx->m_rts[rtHandles[kMaxColorRenderTargets].m_idx].m_texture.get(),
  429. TextureSubresourceInfo(inAtt.m_surface, inAtt.m_aspect), "RenderGraph");
  430. TextureViewPtr view = GrManager::getSingleton().newTextureView(viewInit);
  431. outAtt.m_textureView = std::move(view);
  432. }
  433. if(fbDescr.m_shadingRateAttachmentTexelWidth > 0)
  434. {
  435. const TextureViewInitInfo viewInit(m_ctx->m_rts[rtHandles[kMaxColorRenderTargets + 1].m_idx].m_texture.get(),
  436. fbDescr.m_shadingRateAttachmentSurface, "RenderGraph SRI");
  437. TextureViewPtr view = GrManager::getSingleton().newTextureView(viewInit);
  438. fbInit.m_shadingRateImage.m_texelWidth = fbDescr.m_shadingRateAttachmentTexelWidth;
  439. fbInit.m_shadingRateImage.m_texelHeight = fbDescr.m_shadingRateAttachmentTexelHeight;
  440. fbInit.m_shadingRateImage.m_textureView = std::move(view);
  441. }
  442. // Create
  443. fb = GrManager::getSingleton().newFramebuffer(fbInit);
  444. m_fbCache.emplace(hash, fb);
  445. }
  446. return fb;
  447. }
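/// Two subresources overlap iff their face, mipmap and layer ranges all overlap, each range being the half-open
/// interval [first, first + count).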
  448. Bool RenderGraph::overlappingTextureSubresource(const TextureSubresourceInfo& suba, const TextureSubresourceInfo& subb)
  449. {
  450. #define ANKI_OVERLAPPING(first, count) ((suba.first < subb.first + subb.count) && (subb.first < suba.first + suba.count))
  451. const Bool overlappingFaces = ANKI_OVERLAPPING(m_firstFace, m_faceCount);
  452. const Bool overlappingMips = ANKI_OVERLAPPING(m_firstMipmap, m_mipmapCount);
  453. const Bool overlappingLayers = ANKI_OVERLAPPING(m_firstLayer, m_layerCount);
  454. #undef ANKI_OVERLAPPING
  455. return overlappingFaces && overlappingLayers && overlappingMips;
  456. }
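/// Pass a depends on pass b when both touch the same render target, buffer or acceleration structure and at least one
/// of the two accesses is a write (read-after-write, write-after-read or write-after-write). Read-to-read pairs never
/// create a dependency, and for textures the subresources must also overlap.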
  457. Bool RenderGraph::passADependsOnB(const RenderPassDescriptionBase& a, const RenderPassDescriptionBase& b)
  458. {
  459. // Render targets
  460. {
  461. // Compute the 3 types of dependencies
  462. const BitSet<kMaxRenderGraphRenderTargets, U64> aReadBWrite = a.m_readRtMask & b.m_writeRtMask;
  463. const BitSet<kMaxRenderGraphRenderTargets, U64> aWriteBRead = a.m_writeRtMask & b.m_readRtMask;
  464. const BitSet<kMaxRenderGraphRenderTargets, U64> aWriteBWrite = a.m_writeRtMask & b.m_writeRtMask;
  465. const BitSet<kMaxRenderGraphRenderTargets, U64> fullDep = aReadBWrite | aWriteBRead | aWriteBWrite;
  466. if(fullDep.getAnySet())
  467. {
  468. // There might be an overlap
  469. for(const RenderPassDependency& aDep : a.m_rtDeps)
  470. {
  471. if(!fullDep.get(aDep.m_texture.m_handle.m_idx))
  472. {
  473. continue;
  474. }
  475. for(const RenderPassDependency& bDep : b.m_rtDeps)
  476. {
  477. if(aDep.m_texture.m_handle != bDep.m_texture.m_handle)
  478. {
  479. continue;
  480. }
  481. if(!((aDep.m_texture.m_usage | bDep.m_texture.m_usage) & TextureUsageBit::kAllWrite))
  482. {
  483. // Don't care about read to read deps
  484. continue;
  485. }
  486. if(overlappingTextureSubresource(aDep.m_texture.m_subresource, bDep.m_texture.m_subresource))
  487. {
  488. return true;
  489. }
  490. }
  491. }
  492. }
  493. }
  494. // Buffers
  495. if(a.m_readBuffMask || a.m_writeBuffMask)
  496. {
  497. const BitSet<kMaxRenderGraphBuffers, U64> aReadBWrite = a.m_readBuffMask & b.m_writeBuffMask;
  498. const BitSet<kMaxRenderGraphBuffers, U64> aWriteBRead = a.m_writeBuffMask & b.m_readBuffMask;
  499. const BitSet<kMaxRenderGraphBuffers, U64> aWriteBWrite = a.m_writeBuffMask & b.m_writeBuffMask;
  500. const BitSet<kMaxRenderGraphBuffers, U64> fullDep = aReadBWrite | aWriteBRead | aWriteBWrite;
  501. if(fullDep.getAnySet())
  502. {
  503. // There might be an overlap
  504. for(const RenderPassDependency& aDep : a.m_buffDeps)
  505. {
  506. if(!fullDep.get(aDep.m_buffer.m_handle.m_idx))
  507. {
  508. continue;
  509. }
  510. for(const RenderPassDependency& bDep : b.m_buffDeps)
  511. {
  512. if(aDep.m_buffer.m_handle != bDep.m_buffer.m_handle)
  513. {
  514. continue;
  515. }
  516. if(!((aDep.m_buffer.m_usage | bDep.m_buffer.m_usage) & BufferUsageBit::kAllWrite))
  517. {
  518. // Don't care about read to read deps
  519. continue;
  520. }
  521. // TODO: Take into account the ranges
  522. return true;
  523. }
  524. }
  525. }
  526. }
  527. // AS
  528. if(a.m_readAsMask || a.m_writeAsMask)
  529. {
  530. const BitSet<kMaxRenderGraphAccelerationStructures, U32> aReadBWrite = a.m_readAsMask & b.m_writeAsMask;
  531. const BitSet<kMaxRenderGraphAccelerationStructures, U32> aWriteBRead = a.m_writeAsMask & b.m_readAsMask;
  532. const BitSet<kMaxRenderGraphAccelerationStructures, U32> aWriteBWrite = a.m_writeAsMask & b.m_writeAsMask;
  533. const BitSet<kMaxRenderGraphAccelerationStructures, U32> fullDep = aReadBWrite | aWriteBRead | aWriteBWrite;
  534. if(fullDep)
  535. {
  536. for(const RenderPassDependency& aDep : a.m_asDeps)
  537. {
  538. if(!fullDep.get(aDep.m_as.m_handle.m_idx))
  539. {
  540. continue;
  541. }
  542. for(const RenderPassDependency& bDep : b.m_asDeps)
  543. {
  544. if(aDep.m_as.m_handle != bDep.m_as.m_handle)
  545. {
  546. continue;
  547. }
  548. if(!((aDep.m_as.m_usage | bDep.m_as.m_usage) & AccelerationStructureUsageBit::kAllWrite))
  549. {
  550. // Don't care about read to read deps
  551. continue;
  552. }
  553. return true;
  554. }
  555. }
  556. }
  557. }
  558. return false;
  559. }
  560. Bool RenderGraph::passHasUnmetDependencies(const BakeContext& ctx, U32 passIdx)
  561. {
  562. Bool depends = false;
  563. if(ctx.m_batches.getSize() > 0)
  564. {
  565. // Check if the deps of passIdx are all in a batch
  566. for(const U32 depPassIdx : ctx.m_passes[passIdx].m_dependsOn)
  567. {
  568. if(!ctx.m_passIsInBatch.get(depPassIdx))
  569. {
  570. // Dependency pass is not in a batch
  571. depends = true;
  572. break;
  573. }
  574. }
  575. }
  576. else
  577. {
  578. // First batch, check if passIdx depends on any pass
  579. depends = ctx.m_passes[passIdx].m_dependsOn.getSize() != 0;
  580. }
  581. return depends;
  582. }
  583. RenderGraph::BakeContext* RenderGraph::newContext(const RenderGraphDescription& descr, StackMemoryPool& pool)
  584. {
  585. // Allocate
  586. BakeContext* ctx = anki::newInstance<BakeContext>(pool, &pool);
  587. // Init the resources
  588. ctx->m_rts.resizeStorage(descr.m_renderTargets.getSize());
  589. for(U32 rtIdx = 0; rtIdx < descr.m_renderTargets.getSize(); ++rtIdx)
  590. {
  591. RT& outRt = *ctx->m_rts.emplaceBack(&pool);
  592. const RenderGraphDescription::RT& inRt = descr.m_renderTargets[rtIdx];
  593. const Bool imported = inRt.m_importedTex.isCreated();
  594. if(imported)
  595. {
  596. // It's imported
  597. outRt.m_texture = inRt.m_importedTex;
  598. }
  599. else
  600. {
  601. // Need to create new
  602. // Create a new TextureInitInfo with the derived usage
  603. TextureInitInfo initInf = inRt.m_initInfo;
  604. initInf.m_usage = inRt.m_usageDerivedByDeps;
  605. ANKI_ASSERT(initInf.m_usage != TextureUsageBit::kNone && "Probably not referenced by any pass");
  606. // Create the new hash
  607. const U64 hash = appendHash(&initInf.m_usage, sizeof(initInf.m_usage), inRt.m_hash);
  608. // Get or create the texture
  609. outRt.m_texture = getOrCreateRenderTarget(initInf, hash);
  610. }
  611. // Init the usage
  612. const U32 surfOrVolumeCount = getTextureSurfOrVolCount(outRt.m_texture);
  613. outRt.m_surfOrVolUsages.resize(surfOrVolumeCount, TextureUsageBit::kNone);
  614. if(imported && inRt.m_importedAndUndefinedUsage)
  615. {
  616. // Get the usage from previous frames
617. // Create a new hash because our hash map dislikes concurrent keys.
  618. const U64 uuid = outRt.m_texture->getUuid();
  619. const U64 hash = computeHash(&uuid, sizeof(uuid));
  620. auto it = m_importedRenderTargets.find(hash);
  621. ANKI_ASSERT(it != m_importedRenderTargets.getEnd() && "Can't find the imported RT");
  622. ANKI_ASSERT(it->m_surfOrVolLastUsages.getSize() == surfOrVolumeCount);
  623. for(U32 surfOrVolIdx = 0; surfOrVolIdx < surfOrVolumeCount; ++surfOrVolIdx)
  624. {
  625. outRt.m_surfOrVolUsages[surfOrVolIdx] = it->m_surfOrVolLastUsages[surfOrVolIdx];
  626. }
  627. }
  628. else if(imported)
  629. {
  630. // Set the usage that was given by the user
  631. for(U32 surfOrVolIdx = 0; surfOrVolIdx < surfOrVolumeCount; ++surfOrVolIdx)
  632. {
  633. outRt.m_surfOrVolUsages[surfOrVolIdx] = inRt.m_importedLastKnownUsage;
  634. }
  635. }
  636. outRt.m_lastBatchThatTransitionedIt.resize(surfOrVolumeCount, kMaxU16);
  637. outRt.m_imported = imported;
  638. }
  639. // Buffers
  640. ctx->m_buffers.resize(descr.m_buffers.getSize());
  641. for(U32 buffIdx = 0; buffIdx < ctx->m_buffers.getSize(); ++buffIdx)
  642. {
  643. ctx->m_buffers[buffIdx].m_usage = descr.m_buffers[buffIdx].m_usage;
  644. ANKI_ASSERT(descr.m_buffers[buffIdx].m_importedBuff.isCreated());
  645. ctx->m_buffers[buffIdx].m_buffer = descr.m_buffers[buffIdx].m_importedBuff;
  646. ctx->m_buffers[buffIdx].m_offset = descr.m_buffers[buffIdx].m_offset;
  647. ctx->m_buffers[buffIdx].m_range = descr.m_buffers[buffIdx].m_range;
  648. }
  649. // AS
  650. ctx->m_as.resize(descr.m_as.getSize());
  651. for(U32 i = 0; i < descr.m_as.getSize(); ++i)
  652. {
  653. ctx->m_as[i].m_usage = descr.m_as[i].m_usage;
  654. ctx->m_as[i].m_as = descr.m_as[i].m_importedAs;
  655. ANKI_ASSERT(ctx->m_as[i].m_as.isCreated());
  656. }
  657. ctx->m_gatherStatistics = descr.m_gatherStatistics;
  658. return ctx;
  659. }
  660. void RenderGraph::initRenderPassesAndSetDeps(const RenderGraphDescription& descr)
  661. {
  662. BakeContext& ctx = *m_ctx;
  663. const U32 passCount = descr.m_passes.getSize();
  664. ANKI_ASSERT(passCount > 0);
  665. ctx.m_passes.resizeStorage(passCount);
  666. for(U32 passIdx = 0; passIdx < passCount; ++passIdx)
  667. {
  668. const RenderPassDescriptionBase& inPass = *descr.m_passes[passIdx];
  669. Pass& outPass = *ctx.m_passes.emplaceBack(ctx.m_as.getMemoryPool().m_pool);
  670. outPass.m_callback = inPass.m_callback;
  671. outPass.m_name = inPass.m_name;
  672. // Create consumer info
  673. outPass.m_consumedTextures.resize(inPass.m_rtDeps.getSize());
  674. for(U32 depIdx = 0; depIdx < inPass.m_rtDeps.getSize(); ++depIdx)
  675. {
  676. const RenderPassDependency& inDep = inPass.m_rtDeps[depIdx];
  677. ANKI_ASSERT(inDep.m_type == RenderPassDependency::Type::kTexture);
  678. RenderPassDependency::TextureInfo& inf = outPass.m_consumedTextures[depIdx];
  679. ANKI_ASSERT(sizeof(inf) == sizeof(inDep.m_texture));
  680. memcpy(&inf, &inDep.m_texture, sizeof(inf));
  681. }
  682. // Create command buffers and framebuffer
  683. if(inPass.m_type == RenderPassDescriptionBase::Type::kGraphics)
  684. {
  685. const GraphicsRenderPassDescription& graphicsPass = static_cast<const GraphicsRenderPassDescription&>(inPass);
  686. if(graphicsPass.hasFramebuffer())
  687. {
  688. Bool drawsToPresentable;
  689. outPass.m_framebuffer = getOrCreateFramebuffer(graphicsPass.m_fbDescr, &graphicsPass.m_rtHandles[0], drawsToPresentable);
  690. outPass.m_fbRenderArea = graphicsPass.m_fbRenderArea;
  691. outPass.m_drawsToPresentable = drawsToPresentable;
  692. }
  693. else
  694. {
  695. ANKI_ASSERT(inPass.m_secondLevelCmdbsCount == 0 && "Can't have second level cmdbs");
  696. }
  697. }
  698. else
  699. {
  700. ANKI_ASSERT(inPass.m_secondLevelCmdbsCount == 0 && "Can't have second level cmdbs");
  701. }
702. // Set dependencies by checking all previous passes.
  703. U32 prevPassIdx = passIdx;
  704. while(prevPassIdx--)
  705. {
  706. const RenderPassDescriptionBase& prevPass = *descr.m_passes[prevPassIdx];
  707. if(passADependsOnB(inPass, prevPass))
  708. {
  709. outPass.m_dependsOn.emplaceBack(prevPassIdx);
  710. }
  711. }
  712. }
  713. }
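/// Batching is a greedy pass over the dependency graph: each iteration collects every pass whose dependencies already
/// sit in earlier batches and places them in a new batch. Passes within one batch are therefore independent of each
/// other and can share the barriers recorded before the batch.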
  714. void RenderGraph::initBatches()
  715. {
  716. ANKI_ASSERT(m_ctx);
  717. U passesAssignedToBatchCount = 0;
  718. const U passCount = m_ctx->m_passes.getSize();
  719. ANKI_ASSERT(passCount > 0);
  720. Bool setTimestamp = m_ctx->m_gatherStatistics;
  721. while(passesAssignedToBatchCount < passCount)
  722. {
  723. Batch batch(m_ctx->m_as.getMemoryPool().m_pool);
  724. Bool drawsToPresentable = false;
  725. for(U32 i = 0; i < passCount; ++i)
  726. {
  727. if(!m_ctx->m_passIsInBatch.get(i) && !passHasUnmetDependencies(*m_ctx, i))
  728. {
  729. // Add to the batch
  730. ++passesAssignedToBatchCount;
  731. batch.m_passIndices.emplaceBack(i);
732. // Will the batch draw to the swapchain?
  733. drawsToPresentable = drawsToPresentable || m_ctx->m_passes[i].m_drawsToPresentable;
  734. }
  735. }
  736. // Get or create cmdb for the batch.
737. // Create a new cmdb if the batch writes to the swapchain. This lets Vulkan tie the swapchain image-acquire
738. // dependency to that command buffer instead of to one single big cmdb.
  739. if(m_ctx->m_graphicsCmdbs.isEmpty() || drawsToPresentable)
  740. {
  741. CommandBufferInitInfo cmdbInit;
  742. cmdbInit.m_flags = CommandBufferFlag::kGeneralWork;
  743. CommandBufferPtr cmdb = GrManager::getSingleton().newCommandBuffer(cmdbInit);
  744. m_ctx->m_graphicsCmdbs.emplaceBack(cmdb);
  745. batch.m_cmdb = cmdb.get();
  746. // Maybe write a timestamp
  747. if(setTimestamp) [[unlikely]]
  748. {
  749. setTimestamp = false;
  750. TimestampQueryPtr query = GrManager::getSingleton().newTimestampQuery();
  751. TimestampQuery* pQuery = query.get();
  752. cmdb->resetTimestampQueries({&pQuery, 1});
  753. cmdb->writeTimestamp(query.get());
  754. m_statistics.m_nextTimestamp = (m_statistics.m_nextTimestamp + 1) % kMaxBufferedTimestamps;
  755. m_statistics.m_timestamps[m_statistics.m_nextTimestamp * 2] = query;
  756. }
  757. }
  758. else
  759. {
  760. batch.m_cmdb = m_ctx->m_graphicsCmdbs.getBack().get();
  761. }
  762. // Mark batch's passes done
  763. for(U32 passIdx : batch.m_passIndices)
  764. {
  765. m_ctx->m_passIsInBatch.set(passIdx);
  766. m_ctx->m_passes[passIdx].m_batchIdx = m_ctx->m_batches.getSize();
  767. }
  768. m_ctx->m_batches.emplaceBack(std::move(batch));
  769. }
  770. }
  771. void RenderGraph::initGraphicsPasses(const RenderGraphDescription& descr)
  772. {
  773. BakeContext& ctx = *m_ctx;
  774. const U32 passCount = descr.m_passes.getSize();
  775. ANKI_ASSERT(passCount > 0);
  776. for(U32 passIdx = 0; passIdx < passCount; ++passIdx)
  777. {
  778. const RenderPassDescriptionBase& inPass = *descr.m_passes[passIdx];
  779. Pass& outPass = ctx.m_passes[passIdx];
  780. // Create command buffers and framebuffer
  781. if(inPass.m_type == RenderPassDescriptionBase::Type::kGraphics)
  782. {
  783. const GraphicsRenderPassDescription& graphicsPass = static_cast<const GraphicsRenderPassDescription&>(inPass);
  784. if(graphicsPass.hasFramebuffer())
  785. {
  786. // Init the usage bits
  787. TextureUsageBit usage;
  788. for(U i = 0; i < graphicsPass.m_fbDescr.m_colorAttachmentCount; ++i)
  789. {
  790. getCrntUsage(graphicsPass.m_rtHandles[i], outPass.m_batchIdx,
  791. TextureSubresourceInfo(graphicsPass.m_fbDescr.m_colorAttachments[i].m_surface), usage);
  792. outPass.m_colorUsages[i] = usage;
  793. }
  794. if(!!graphicsPass.m_fbDescr.m_depthStencilAttachment.m_aspect)
  795. {
  796. TextureSubresourceInfo subresource = TextureSubresourceInfo(graphicsPass.m_fbDescr.m_depthStencilAttachment.m_surface,
  797. graphicsPass.m_fbDescr.m_depthStencilAttachment.m_aspect);
  798. getCrntUsage(graphicsPass.m_rtHandles[kMaxColorRenderTargets], outPass.m_batchIdx, subresource, usage);
  799. outPass.m_dsUsage = usage;
  800. }
  801. // Do some pre-work for the second level command buffers
  802. if(inPass.m_secondLevelCmdbsCount)
  803. {
  804. outPass.m_secondLevelCmdbs.resize(inPass.m_secondLevelCmdbsCount);
  805. CommandBufferInitInfo& cmdbInit = outPass.m_secondLevelCmdbInitInfo;
  806. cmdbInit.m_flags = CommandBufferFlag::kGeneralWork | CommandBufferFlag::kSecondLevel;
  807. cmdbInit.m_framebuffer = outPass.m_framebuffer.get();
  808. cmdbInit.m_colorAttachmentUsages = outPass.m_colorUsages;
  809. cmdbInit.m_depthStencilAttachmentUsage = outPass.m_dsUsage;
  810. }
  811. }
  812. else
  813. {
  814. ANKI_ASSERT(inPass.m_secondLevelCmdbsCount == 0 && "Can't have second level cmdbs");
  815. }
  816. }
  817. else
  818. {
  819. ANKI_ASSERT(inPass.m_secondLevelCmdbsCount == 0 && "Can't have second level cmdbs");
  820. }
  821. }
  822. }
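/// Visit every (mip, layer, face) surface covered by the subresource. The linear index matches the layout assumed by
/// getTextureSurfOrVolCount(): idx = mip * layerCount * faceCount + layer * faceCount + face. Returning false from the
/// callback stops the iteration early.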
  823. template<typename TFunc>
  824. void RenderGraph::iterateSurfsOrVolumes(const Texture& tex, const TextureSubresourceInfo& subresource, TFunc func)
  825. {
  826. for(U32 mip = subresource.m_firstMipmap; mip < subresource.m_firstMipmap + subresource.m_mipmapCount; ++mip)
  827. {
  828. for(U32 layer = subresource.m_firstLayer; layer < subresource.m_firstLayer + subresource.m_layerCount; ++layer)
  829. {
  830. for(U32 face = subresource.m_firstFace; face < U32(subresource.m_firstFace + subresource.m_faceCount); ++face)
  831. {
  832. // Compute surf or vol idx
  833. const U32 faceCount = textureTypeIsCube(tex.getTextureType()) ? 6 : 1;
  834. const U32 idx = (faceCount * tex.getLayerCount()) * mip + faceCount * layer + face;
  835. const TextureSurfaceInfo surf(mip, 0, face, layer);
  836. if(!func(idx, surf))
  837. {
  838. return;
  839. }
  840. }
  841. }
  842. }
  843. }
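/// Record the transition a texture surface needs before the given batch. The barrier is skipped when the usage doesn't
/// change and is read-only; if the surface was already transitioned for this same batch, the new usage is OR-ed into
/// the existing barrier instead of adding a second one.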
  844. void RenderGraph::setTextureBarrier(Batch& batch, const RenderPassDependency& dep)
  845. {
  846. ANKI_ASSERT(dep.m_type == RenderPassDependency::Type::kTexture);
  847. BakeContext& ctx = *m_ctx;
  848. const U32 batchIdx = U32(&batch - &ctx.m_batches[0]);
  849. const U32 rtIdx = dep.m_texture.m_handle.m_idx;
  850. const TextureUsageBit depUsage = dep.m_texture.m_usage;
  851. RT& rt = ctx.m_rts[rtIdx];
  852. iterateSurfsOrVolumes(*rt.m_texture, dep.m_texture.m_subresource, [&](U32 surfOrVolIdx, const TextureSurfaceInfo& surf) {
  853. TextureUsageBit& crntUsage = rt.m_surfOrVolUsages[surfOrVolIdx];
  854. const Bool skipBarrier = crntUsage == depUsage && !(crntUsage & TextureUsageBit::kAllWrite);
  855. if(!skipBarrier)
  856. {
  857. // Check if we can merge barriers
  858. if(rt.m_lastBatchThatTransitionedIt[surfOrVolIdx] == batchIdx)
  859. {
  860. // Will merge the barriers
  861. crntUsage |= depUsage;
  862. [[maybe_unused]] Bool found = false;
  863. for(TextureBarrier& b : batch.m_textureBarriersBefore)
  864. {
  865. if(b.m_idx == rtIdx && b.m_surface == surf)
  866. {
  867. b.m_usageAfter |= depUsage;
  868. found = true;
  869. break;
  870. }
  871. }
  872. ANKI_ASSERT(found);
  873. }
  874. else
  875. {
  876. // Create a new barrier for this surface
  877. batch.m_textureBarriersBefore.emplaceBack(rtIdx, crntUsage, depUsage, surf, dep.m_texture.m_subresource.m_depthStencilAspect);
  878. crntUsage = depUsage;
  879. rt.m_lastBatchThatTransitionedIt[surfOrVolIdx] = U16(batchIdx);
  880. }
  881. }
  882. return true;
  883. });
  884. }
  885. void RenderGraph::setBatchBarriers(const RenderGraphDescription& descr)
  886. {
  887. BakeContext& ctx = *m_ctx;
  888. // For all batches
  889. for(Batch& batch : ctx.m_batches)
  890. {
  891. BitSet<kMaxRenderGraphBuffers, U64> buffHasBarrierMask(false);
  892. BitSet<kMaxRenderGraphAccelerationStructures, U32> asHasBarrierMask(false);
  893. // For all passes of that batch
  894. for(U32 passIdx : batch.m_passIndices)
  895. {
  896. const RenderPassDescriptionBase& pass = *descr.m_passes[passIdx];
  897. // Do textures
  898. for(const RenderPassDependency& dep : pass.m_rtDeps)
  899. {
  900. setTextureBarrier(batch, dep);
  901. }
  902. // Do buffers
  903. for(const RenderPassDependency& dep : pass.m_buffDeps)
  904. {
  905. const U32 buffIdx = dep.m_buffer.m_handle.m_idx;
  906. const BufferUsageBit depUsage = dep.m_buffer.m_usage;
  907. BufferUsageBit& crntUsage = ctx.m_buffers[buffIdx].m_usage;
  908. const Bool skipBarrier = crntUsage == depUsage && !(crntUsage & BufferUsageBit::kAllWrite);
  909. if(skipBarrier)
  910. {
  911. continue;
  912. }
  913. const Bool buffHasBarrier = buffHasBarrierMask.get(buffIdx);
  914. if(!buffHasBarrier)
  915. {
  916. // Buff hasn't had a barrier in this batch, add a new barrier
  917. batch.m_bufferBarriersBefore.emplaceBack(buffIdx, crntUsage, depUsage);
  918. crntUsage = depUsage;
  919. buffHasBarrierMask.set(buffIdx);
  920. }
  921. else
  922. {
  923. // Buff already in a barrier, merge the 2 barriers
  924. BufferBarrier* barrierToMergeTo = nullptr;
  925. for(BufferBarrier& b : batch.m_bufferBarriersBefore)
  926. {
  927. if(b.m_idx == buffIdx)
  928. {
  929. barrierToMergeTo = &b;
  930. break;
  931. }
  932. }
  933. ANKI_ASSERT(barrierToMergeTo);
  934. ANKI_ASSERT(!!barrierToMergeTo->m_usageAfter);
  935. barrierToMergeTo->m_usageAfter |= depUsage;
  936. crntUsage = barrierToMergeTo->m_usageAfter;
  937. }
  938. }
  939. // Do AS
  940. for(const RenderPassDependency& dep : pass.m_asDeps)
  941. {
  942. const U32 asIdx = dep.m_as.m_handle.m_idx;
  943. const AccelerationStructureUsageBit depUsage = dep.m_as.m_usage;
  944. AccelerationStructureUsageBit& crntUsage = ctx.m_as[asIdx].m_usage;
  945. const Bool skipBarrier = crntUsage == depUsage && !(crntUsage & AccelerationStructureUsageBit::kAllWrite);
  946. if(skipBarrier)
  947. {
  948. continue;
  949. }
  950. const Bool asHasBarrierInThisBatch = asHasBarrierMask.get(asIdx);
  951. if(!asHasBarrierInThisBatch)
  952. {
  953. // AS doesn't have a barrier in this batch, create a new one
  954. batch.m_asBarriersBefore.emplaceBack(asIdx, crntUsage, depUsage);
  955. crntUsage = depUsage;
  956. asHasBarrierMask.set(asIdx);
  957. }
  958. else
  959. {
  960. // AS already has a barrier, merge the 2 barriers
  961. ASBarrier* barrierToMergeTo = nullptr;
  962. for(ASBarrier& other : batch.m_asBarriersBefore)
  963. {
  964. if(other.m_idx == asIdx)
  965. {
  966. barrierToMergeTo = &other;
  967. break;
  968. }
  969. }
  970. ANKI_ASSERT(barrierToMergeTo);
  971. ANKI_ASSERT(!!barrierToMergeTo->m_usageAfter);
  972. barrierToMergeTo->m_usageAfter |= depUsage;
  973. crntUsage = barrierToMergeTo->m_usageAfter;
  974. }
  975. }
  976. } // For all passes
  977. ANKI_ASSERT(batch.m_bufferBarriersBefore.getSize() || batch.m_textureBarriersBefore.getSize() || batch.m_asBarriersBefore.getSize());
  978. #if ANKI_DBG_RENDER_GRAPH
979. // Sort the barriers to make the dumped graph easier to read
  980. std::sort(batch.m_textureBarriersBefore.getBegin(), batch.m_textureBarriersBefore.getEnd(),
  981. [&](const TextureBarrier& a, const TextureBarrier& b) {
  982. const U aidx = a.m_idx;
  983. const U bidx = b.m_idx;
  984. if(aidx == bidx)
  985. {
  986. if(a.m_surface.m_level != b.m_surface.m_level)
  987. {
  988. return a.m_surface.m_level < b.m_surface.m_level;
  989. }
  990. else if(a.m_surface.m_face != b.m_surface.m_face)
  991. {
  992. return a.m_surface.m_face < b.m_surface.m_face;
  993. }
  994. else if(a.m_surface.m_layer != b.m_surface.m_layer)
  995. {
  996. return a.m_surface.m_layer < b.m_surface.m_layer;
  997. }
  998. else
  999. {
  1000. return false;
  1001. }
  1002. }
  1003. else
  1004. {
  1005. return aidx < bidx;
  1006. }
  1007. });
  1008. std::sort(batch.m_bufferBarriersBefore.getBegin(), batch.m_bufferBarriersBefore.getEnd(),
  1009. [&](const BufferBarrier& a, const BufferBarrier& b) {
  1010. return a.m_idx < b.m_idx;
  1011. });
  1012. std::sort(batch.m_asBarriersBefore.getBegin(), batch.m_asBarriersBefore.getEnd(), [&](const ASBarrier& a, const ASBarrier& b) {
  1013. return a.m_idx < b.m_idx;
  1014. });
  1015. #endif
  1016. } // For all batches
  1017. }
  1018. void RenderGraph::compileNewGraph(const RenderGraphDescription& descr, StackMemoryPool& pool)
  1019. {
  1020. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphCompile);
  1021. // Init the context
  1022. BakeContext& ctx = *newContext(descr, pool);
  1023. m_ctx = &ctx;
  1024. // Init the passes and find the dependencies between passes
  1025. initRenderPassesAndSetDeps(descr);
  1026. // Walk the graph and create pass batches
  1027. initBatches();
1028. // Now that we know which batch every pass belongs to, init the graphics passes
  1029. initGraphicsPasses(descr);
  1030. // Create barriers between batches
  1031. setBatchBarriers(descr);
  1032. #if ANKI_DBG_RENDER_GRAPH
  1033. if(dumpDependencyDotFile(descr, ctx, "./"))
  1034. {
  1035. ANKI_LOGF("Won't recover on debug code");
  1036. }
  1037. #endif
  1038. }
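// Presumed per-frame flow for a user of this class (a sketch derived from the methods in this file, not code copied
// from it; the variable names are hypothetical):
//
//   rgraph->compileNewGraph(descr, framePool); // build passes, batches and barriers
//   rgraph->runSecondLevel();                  // record any second-level command buffers in job threads
//   rgraph->run();                             // record barriers and passes into the graphics cmdbs
//   rgraph->flush(&fence);                     // submit, optionally returning a fence
//   rgraph->reset();                           // drop references and remember the imported RTs' last usage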
  1039. Texture& RenderGraph::getTexture(RenderTargetHandle handle) const
  1040. {
  1041. ANKI_ASSERT(m_ctx->m_rts[handle.m_idx].m_texture.isCreated());
  1042. return *m_ctx->m_rts[handle.m_idx].m_texture;
  1043. }
  1044. void RenderGraph::getCachedBuffer(BufferHandle handle, Buffer*& buff, PtrSize& offset, PtrSize& range) const
  1045. {
  1046. const BufferRange& record = m_ctx->m_buffers[handle.m_idx];
  1047. buff = record.m_buffer.get();
  1048. offset = record.m_offset;
  1049. range = record.m_range;
  1050. }
  1051. AccelerationStructure* RenderGraph::getAs(AccelerationStructureHandle handle) const
  1052. {
  1053. ANKI_ASSERT(m_ctx->m_as[handle.m_idx].m_as.isCreated());
  1054. return m_ctx->m_as[handle.m_idx].m_as.get();
  1055. }
  1056. void RenderGraph::runSecondLevel()
  1057. {
  1058. ANKI_TRACE_SCOPED_EVENT(GrRenderGraph2ndLevel);
  1059. ANKI_ASSERT(m_ctx);
  1060. StackMemoryPool& pool = *m_ctx->m_rts.getMemoryPool().m_pool;
  1061. // Gather the tasks
  1062. for(Pass& pass : m_ctx->m_passes)
  1063. {
  1064. for(U32 cmdIdx = 0; cmdIdx < pass.m_secondLevelCmdbs.getSize(); ++cmdIdx)
  1065. {
  1066. RenderPassWorkContext* ctx = anki::newInstance<RenderPassWorkContext>(pool);
  1067. ctx->m_rgraph = this;
  1068. ctx->m_currentSecondLevelCommandBufferIndex = cmdIdx;
  1069. ctx->m_secondLevelCommandBufferCount = pass.m_secondLevelCmdbs.getSize();
  1070. ctx->m_passIdx = U32(&pass - &m_ctx->m_passes[0]);
  1071. ctx->m_batchIdx = pass.m_batchIdx;
  1072. CoreThreadJobManager::getSingleton().dispatchTask([ctx]([[maybe_unused]] U32 tid) {
  1073. ANKI_TRACE_SCOPED_EVENT(GrExecuteSecondaryCmdb);
  1074. // Create the command buffer in the thread
  1075. Pass& pass = ctx->m_rgraph->m_ctx->m_passes[ctx->m_passIdx];
  1076. ANKI_ASSERT(!pass.m_secondLevelCmdbs[ctx->m_currentSecondLevelCommandBufferIndex].isCreated());
  1077. pass.m_secondLevelCmdbs[ctx->m_currentSecondLevelCommandBufferIndex] =
  1078. GrManager::getSingleton().newCommandBuffer(pass.m_secondLevelCmdbInitInfo);
  1079. ctx->m_commandBuffer = pass.m_secondLevelCmdbs[ctx->m_currentSecondLevelCommandBufferIndex].get();
  1080. {
  1081. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphCallback);
  1082. pass.m_callback(*ctx);
  1083. }
  1084. ctx->m_commandBuffer->flush();
  1085. });
  1086. }
  1087. }
  1088. CoreThreadJobManager::getSingleton().waitForAllTasksToFinish();
  1089. }
  1090. void RenderGraph::run() const
  1091. {
  1092. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphRun);
  1093. ANKI_ASSERT(m_ctx);
  1094. StackMemoryPool* pool = m_ctx->m_rts.getMemoryPool().m_pool;
  1095. RenderPassWorkContext ctx;
  1096. ctx.m_rgraph = this;
  1097. ctx.m_currentSecondLevelCommandBufferIndex = 0;
  1098. ctx.m_secondLevelCommandBufferCount = 0;
  1099. for(const Batch& batch : m_ctx->m_batches)
  1100. {
  1101. ctx.m_commandBuffer = batch.m_cmdb;
  1102. CommandBuffer& cmdb = *ctx.m_commandBuffer;
  1103. // Set the barriers
  1104. DynamicArray<TextureBarrierInfo, MemoryPoolPtrWrapper<StackMemoryPool>> texBarriers(pool);
  1105. texBarriers.resizeStorage(batch.m_textureBarriersBefore.getSize());
  1106. for(const TextureBarrier& barrier : batch.m_textureBarriersBefore)
  1107. {
  1108. TextureBarrierInfo& inf = *texBarriers.emplaceBack();
  1109. inf.m_previousUsage = barrier.m_usageBefore;
  1110. inf.m_nextUsage = barrier.m_usageAfter;
  1111. inf.m_subresource = barrier.m_surface;
  1112. inf.m_subresource.m_depthStencilAspect = barrier.m_dsAspect;
  1113. inf.m_texture = m_ctx->m_rts[barrier.m_idx].m_texture.get();
  1114. }
  1115. DynamicArray<BufferBarrierInfo, MemoryPoolPtrWrapper<StackMemoryPool>> buffBarriers(pool);
  1116. buffBarriers.resizeStorage(batch.m_bufferBarriersBefore.getSize());
  1117. for(const BufferBarrier& barrier : batch.m_bufferBarriersBefore)
  1118. {
  1119. BufferBarrierInfo& inf = *buffBarriers.emplaceBack();
  1120. inf.m_previousUsage = barrier.m_usageBefore;
  1121. inf.m_nextUsage = barrier.m_usageAfter;
  1122. inf.m_offset = m_ctx->m_buffers[barrier.m_idx].m_offset;
  1123. inf.m_range = m_ctx->m_buffers[barrier.m_idx].m_range;
  1124. inf.m_buffer = m_ctx->m_buffers[barrier.m_idx].m_buffer.get();
  1125. }
  1126. DynamicArray<AccelerationStructureBarrierInfo, MemoryPoolPtrWrapper<StackMemoryPool>> asBarriers(pool);
  1127. for(const ASBarrier& barrier : batch.m_asBarriersBefore)
  1128. {
  1129. AccelerationStructureBarrierInfo& inf = *asBarriers.emplaceBack();
  1130. inf.m_previousUsage = barrier.m_usageBefore;
  1131. inf.m_nextUsage = barrier.m_usageAfter;
  1132. inf.m_as = m_ctx->m_as[barrier.m_idx].m_as.get();
  1133. }
  1134. cmdb.pushDebugMarker("Barrier", Vec3(1.0f, 0.0f, 0.0f));
  1135. cmdb.setPipelineBarrier(texBarriers, buffBarriers, asBarriers);
  1136. cmdb.popDebugMarker();
  1137. // Call the passes
  1138. for(U32 passIdx : batch.m_passIndices)
  1139. {
  1140. const Pass& pass = m_ctx->m_passes[passIdx];
  1141. Vec3 passColor;
  1142. if(pass.m_framebuffer)
  1143. {
  1144. cmdb.beginRenderPass(pass.m_framebuffer.get(), pass.m_colorUsages, pass.m_dsUsage, pass.m_fbRenderArea[0], pass.m_fbRenderArea[1],
  1145. pass.m_fbRenderArea[2], pass.m_fbRenderArea[3]);
  1146. passColor = Vec3(0.0f, 1.0f, 0.0f);
  1147. }
  1148. else
  1149. {
  1150. passColor = Vec3(1.0f, 1.0f, 0.0f);
  1151. }
  1152. cmdb.pushDebugMarker(pass.m_name, passColor);
  1153. const U32 size = pass.m_secondLevelCmdbs.getSize();
  1154. if(size == 0)
  1155. {
  1156. ctx.m_passIdx = passIdx;
  1157. ctx.m_batchIdx = pass.m_batchIdx;
  1158. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphCallback);
  1159. pass.m_callback(ctx);
  1160. }
  1161. else
  1162. {
  1163. DynamicArray<CommandBuffer*, MemoryPoolPtrWrapper<StackMemoryPool>> cmdbs(pool);
  1164. cmdbs.resizeStorage(size);
  1165. for(const CommandBufferPtr& cmdb2nd : pass.m_secondLevelCmdbs)
  1166. {
  1167. cmdbs.emplaceBack(cmdb2nd.get());
  1168. }
  1169. cmdb.pushSecondLevelCommandBuffers(cmdbs);
  1170. }
  1171. if(pass.m_framebuffer)
  1172. {
  1173. cmdb.endRenderPass();
  1174. }
  1175. cmdb.popDebugMarker();
  1176. }
  1177. }
  1178. }
  1179. void RenderGraph::flush(FencePtr* optionalFence)
  1180. {
  1181. ANKI_TRACE_SCOPED_EVENT(GrRenderGraphFlush);
  1182. for(U32 i = 0; i < m_ctx->m_graphicsCmdbs.getSize(); ++i)
  1183. {
  1184. if(m_ctx->m_gatherStatistics && i == m_ctx->m_graphicsCmdbs.getSize() - 1) [[unlikely]]
  1185. {
  1186. // Write a timestamp before the last flush
  1187. TimestampQueryPtr query = GrManager::getSingleton().newTimestampQuery();
  1188. TimestampQuery* pQuery = query.get();
  1189. m_ctx->m_graphicsCmdbs[i]->resetTimestampQueries({&pQuery, 1});
  1190. m_ctx->m_graphicsCmdbs[i]->writeTimestamp(pQuery);
  1191. m_statistics.m_timestamps[m_statistics.m_nextTimestamp * 2 + 1] = query;
  1192. m_statistics.m_cpuStartTimes[m_statistics.m_nextTimestamp] = HighRezTimer::getCurrentTime();
  1193. }
  1194. // Flush
  1195. m_ctx->m_graphicsCmdbs[i]->flush({}, (i == m_ctx->m_graphicsCmdbs.getSize() - 1) ? optionalFence : nullptr);
  1196. }
  1197. }
  1198. void RenderGraph::getCrntUsage(RenderTargetHandle handle, U32 batchIdx, const TextureSubresourceInfo& subresource, TextureUsageBit& usage) const
  1199. {
  1200. usage = TextureUsageBit::kNone;
  1201. const Batch& batch = m_ctx->m_batches[batchIdx];
  1202. for(U32 passIdx : batch.m_passIndices)
  1203. {
  1204. for(const RenderPassDependency::TextureInfo& consumer : m_ctx->m_passes[passIdx].m_consumedTextures)
  1205. {
  1206. if(consumer.m_handle == handle && overlappingTextureSubresource(subresource, consumer.m_subresource))
  1207. {
  1208. usage |= consumer.m_usage;
  1209. break;
  1210. }
  1211. }
  1212. }
  1213. }
  1214. void RenderGraph::periodicCleanup()
  1215. {
  1216. U32 rtsCleanedCount = 0;
  1217. for(RenderTargetCacheEntry& entry : m_renderTargetCache)
  1218. {
  1219. if(entry.m_texturesInUse < entry.m_textures.getSize())
  1220. {
  1221. // Should cleanup
  1222. rtsCleanedCount += entry.m_textures.getSize() - entry.m_texturesInUse;
  1223. // New array
  1224. GrDynamicArray<TexturePtr> newArray;
  1225. if(entry.m_texturesInUse > 0)
  1226. {
  1227. newArray.resize(entry.m_texturesInUse);
  1228. }
  1229. // Populate the new array
  1230. for(U32 i = 0; i < newArray.getSize(); ++i)
  1231. {
  1232. newArray[i] = std::move(entry.m_textures[i]);
  1233. }
  1234. // Destroy the old array and the rest of the textures
  1235. entry.m_textures.destroy();
  1236. // Move new array
  1237. entry.m_textures = std::move(newArray);
  1238. }
  1239. }
  1240. if(rtsCleanedCount > 0)
  1241. {
  1242. ANKI_GR_LOGI("Cleaned %u render targets", rtsCleanedCount);
  1243. }
  1244. }
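/// The begin/end timestamp queries are buffered in pairs per frame; getStatistics() reads back the oldest buffered
/// pair, so the reported GPU time lags a few frames but never stalls on queries that are still in flight.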
  1245. void RenderGraph::getStatistics(RenderGraphStatistics& statistics) const
  1246. {
  1247. const U32 oldFrame = (m_statistics.m_nextTimestamp + 1) % kMaxBufferedTimestamps;
  1248. if(m_statistics.m_timestamps[oldFrame * 2] && m_statistics.m_timestamps[oldFrame * 2 + 1])
  1249. {
  1250. Second start, end;
  1251. [[maybe_unused]] TimestampQueryResult res = m_statistics.m_timestamps[oldFrame * 2]->getResult(start);
  1252. ANKI_ASSERT(res == TimestampQueryResult::kAvailable);
  1253. res = m_statistics.m_timestamps[oldFrame * 2 + 1]->getResult(end);
  1254. ANKI_ASSERT(res == TimestampQueryResult::kAvailable);
  1255. const Second diff = end - start;
  1256. statistics.m_gpuTime = diff;
  1257. statistics.m_cpuStartTime = m_statistics.m_cpuStartTimes[oldFrame];
  1258. }
  1259. else
  1260. {
  1261. statistics.m_gpuTime = -1.0;
  1262. statistics.m_cpuStartTime = -1.0;
  1263. }
  1264. }
  1265. #if ANKI_DBG_RENDER_GRAPH
StringRaii RenderGraph::textureUsageToStr(StackMemoryPool& pool, TextureUsageBit usage)
{
	if(!usage)
	{
		return StringRaii(&pool, "None");
	}

	StringListRaii slist(&pool);

#	define ANKI_TEX_USAGE(u) \
		if(!!(usage & TextureUsageBit::u)) \
		{ \
			slist.pushBackSprintf("%s", #u); \
		}

	ANKI_TEX_USAGE(kSampledGeometry);
	ANKI_TEX_USAGE(kSampledFragment);
	ANKI_TEX_USAGE(kSampledCompute);
	ANKI_TEX_USAGE(kSampledTraceRays);
	ANKI_TEX_USAGE(kUavGeometryRead);
	ANKI_TEX_USAGE(kUavGeometryWrite);
	ANKI_TEX_USAGE(kUavFragmentRead);
	ANKI_TEX_USAGE(kUavFragmentWrite);
	ANKI_TEX_USAGE(kUavComputeRead);
	ANKI_TEX_USAGE(kUavComputeWrite);
	ANKI_TEX_USAGE(kUavTraceRaysRead);
	ANKI_TEX_USAGE(kUavTraceRaysWrite);
	ANKI_TEX_USAGE(kFramebufferRead);
	ANKI_TEX_USAGE(kFramebufferWrite);
	ANKI_TEX_USAGE(kTransferDestination);
	ANKI_TEX_USAGE(kGenerateMipmaps);
	ANKI_TEX_USAGE(kPresent);
	ANKI_TEX_USAGE(kFramebufferShadingRate);

	if(!usage)
	{
		slist.pushBackSprintf("?");
	}

#	undef ANKI_TEX_USAGE

	ANKI_ASSERT(!slist.isEmpty());
	StringRaii str(&pool);
	slist.join(" | ", str);
	return str;
}
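
// Debug-only helper: convert a BufferUsageBit mask to a string, following the same pattern as textureUsageToStr().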
StringRaii RenderGraph::bufferUsageToStr(StackMemoryPool& pool, BufferUsageBit usage)
{
	StringListRaii slist(&pool);

#	define ANKI_BUFF_USAGE(u) \
		if(!!(usage & BufferUsageBit::u)) \
		{ \
			slist.pushBackSprintf("%s", #u); \
		}

	ANKI_BUFF_USAGE(kConstantGeometry);
	ANKI_BUFF_USAGE(kConstantFragment);
	ANKI_BUFF_USAGE(kConstantCompute);
	ANKI_BUFF_USAGE(kConstantTraceRays);
	ANKI_BUFF_USAGE(kStorageGeometryRead);
	ANKI_BUFF_USAGE(kStorageGeometryWrite);
	ANKI_BUFF_USAGE(kStorageFragmentRead);
	ANKI_BUFF_USAGE(kStorageFragmentWrite);
	ANKI_BUFF_USAGE(kStorageComputeRead);
	ANKI_BUFF_USAGE(kStorageComputeWrite);
	ANKI_BUFF_USAGE(kStorageTraceRaysRead);
	ANKI_BUFF_USAGE(kStorageTraceRaysWrite);
	ANKI_BUFF_USAGE(kTextureGeometryRead);
	ANKI_BUFF_USAGE(kTextureGeometryWrite);
	ANKI_BUFF_USAGE(kTextureFragmentRead);
	ANKI_BUFF_USAGE(kTextureFragmentWrite);
	ANKI_BUFF_USAGE(kTextureComputeRead);
	ANKI_BUFF_USAGE(kTextureComputeWrite);
	ANKI_BUFF_USAGE(kTextureTraceRaysRead);
	ANKI_BUFF_USAGE(kTextureTraceRaysWrite);
	ANKI_BUFF_USAGE(kIndex);
	ANKI_BUFF_USAGE(kVertex);
	ANKI_BUFF_USAGE(kIndirectCompute);
	ANKI_BUFF_USAGE(kIndirectDraw);
	ANKI_BUFF_USAGE(kIndirectTraceRays);
	ANKI_BUFF_USAGE(kTransferSource);
	ANKI_BUFF_USAGE(kTransferDestination);
	ANKI_BUFF_USAGE(kAccelerationStructureBuild);

	if(!usage)
	{
		slist.pushBackSprintf("NONE");
	}

#	undef ANKI_BUFF_USAGE

	ANKI_ASSERT(!slist.isEmpty());
	StringRaii str(&pool);
	slist.join(" | ", str);
	return str;
}
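
// Debug-only helper: convert an AccelerationStructureUsageBit mask to a string.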
StringRaii RenderGraph::asUsageToStr(StackMemoryPool& pool, AccelerationStructureUsageBit usage)
{
	StringListRaii slist(&pool);

#	define ANKI_AS_USAGE(u) \
		if(!!(usage & AccelerationStructureUsageBit::u)) \
		{ \
			slist.pushBackSprintf("%s", #u); \
		}

	ANKI_AS_USAGE(kBuild);
	ANKI_AS_USAGE(kAttach);
	ANKI_AS_USAGE(kGeometryRead);
	ANKI_AS_USAGE(kFragmentRead);
	ANKI_AS_USAGE(kComputeRead);
	ANKI_AS_USAGE(kTraceRaysRead);

	if(!usage)
	{
		slist.pushBackSprintf("NONE");
	}

#	undef ANKI_AS_USAGE

	ANKI_ASSERT(!slist.isEmpty());
	StringRaii str(&pool);
	slist.join(" | ", str);
	return str;
}
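
// Dump the baked graph as a graphviz .dot file: every pass becomes a box colored by its batch (bold for graphics
// passes, dashed for the rest), pass dependencies become edges, and the texture/buffer/AS barriers of every batch
// are chained together with the passes in submission order. The file is written to "<path>/rgraph_<version>.dot".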
Error RenderGraph::dumpDependencyDotFile(const RenderGraphDescription& descr, const BakeContext& ctx, CString path) const
{
	ANKI_GR_LOGW("Running with debug code");

	static constexpr Array<const char*, 5> COLORS = {"red", "green", "blue", "magenta", "cyan"};
	StackMemoryPool& pool = *ctx.m_pool;
	StringListRaii slist(&pool);

	slist.pushBackSprintf("digraph {\n");
	slist.pushBackSprintf("\t//splines = ortho;\nconcentrate = true;\n");

	for(U32 batchIdx = 0; batchIdx < ctx.m_batches.getSize(); ++batchIdx)
	{
		// Set same rank
		slist.pushBackSprintf("\t{rank=\"same\";");
		for(U32 passIdx : ctx.m_batches[batchIdx].m_passIndices)
		{
			slist.pushBackSprintf("\"%s\";", descr.m_passes[passIdx]->m_name.cstr());
		}
		slist.pushBackSprintf("}\n");

		// Print passes
		for(U32 passIdx : ctx.m_batches[batchIdx].m_passIndices)
		{
			CString passName = descr.m_passes[passIdx]->m_name.toCString();

			slist.pushBackSprintf("\t\"%s\"[color=%s,style=%s,shape=box];\n", passName.cstr(), COLORS[batchIdx % COLORS.getSize()],
								  (descr.m_passes[passIdx]->m_type == RenderPassDescriptionBase::Type::kGraphics) ? "bold" : "dashed");

			for(U32 depIdx : ctx.m_passes[passIdx].m_dependsOn)
			{
				slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", descr.m_passes[depIdx]->m_name.cstr(), passName.cstr());
			}

			if(ctx.m_passes[passIdx].m_dependsOn.getSize() == 0)
			{
				slist.pushBackSprintf("\tNONE->\"%s\";\n", descr.m_passes[passIdx]->m_name.cstr());
			}
		}
	}

#	if 0
	// Color the resources
	slist.pushBackSprintf("subgraph cluster_0 {\n");
	for(U rtIdx = 0; rtIdx < descr.m_renderTargets.getSize(); ++rtIdx)
	{
		slist.pushBackSprintf("\t\"%s\"[color=%s];\n", &descr.m_renderTargets[rtIdx].m_name[0], COLORS[rtIdx % COLORS.getSize()]);
	}
	slist.pushBackSprintf("}\n");
#	endif

	// Barriers
	// slist.pushBackSprintf("subgraph cluster_1 {\n");
	StringRaii prevBubble(&pool);
	prevBubble.create("START");
	for(U32 batchIdx = 0; batchIdx < ctx.m_batches.getSize(); ++batchIdx)
	{
		const Batch& batch = ctx.m_batches[batchIdx];
		StringRaii batchName(&pool);
		batchName.sprintf("batch%u", batchIdx);

		for(U32 barrierIdx = 0; barrierIdx < batch.m_textureBarriersBefore.getSize(); ++barrierIdx)
		{
			const TextureBarrier& barrier = batch.m_textureBarriersBefore[barrierIdx];
			StringRaii barrierLabel(&pool);
			barrierLabel.sprintf("<b>%s</b> (mip,dp,f,l)=(%u,%u,%u,%u)<br/>%s <b>to</b> %s", &descr.m_renderTargets[barrier.m_idx].m_name[0],
								 barrier.m_surface.m_level, barrier.m_surface.m_depth, barrier.m_surface.m_face, barrier.m_surface.m_layer,
								 textureUsageToStr(pool, barrier.m_usageBefore).cstr(), textureUsageToStr(pool, barrier.m_usageAfter).cstr());

			StringRaii barrierName(&pool);
			barrierName.sprintf("%s tex barrier%u", batchName.cstr(), barrierIdx);

			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(),
								  COLORS[batchIdx % COLORS.getSize()], barrierLabel.cstr());
			slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());

			prevBubble = barrierName;
		}

		for(U32 barrierIdx = 0; barrierIdx < batch.m_bufferBarriersBefore.getSize(); ++barrierIdx)
		{
			const BufferBarrier& barrier = batch.m_bufferBarriersBefore[barrierIdx];
			StringRaii barrierLabel(&pool);
			barrierLabel.sprintf("<b>%s</b><br/>%s <b>to</b> %s", &descr.m_buffers[barrier.m_idx].m_name[0],
								 bufferUsageToStr(pool, barrier.m_usageBefore).cstr(), bufferUsageToStr(pool, barrier.m_usageAfter).cstr());

			StringRaii barrierName(&pool);
			barrierName.sprintf("%s buff barrier%u", batchName.cstr(), barrierIdx);

			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(),
								  COLORS[batchIdx % COLORS.getSize()], barrierLabel.cstr());
			slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());

			prevBubble = barrierName;
		}

		for(U32 barrierIdx = 0; barrierIdx < batch.m_asBarriersBefore.getSize(); ++barrierIdx)
		{
			const ASBarrier& barrier = batch.m_asBarriersBefore[barrierIdx];
			StringRaii barrierLabel(&pool);
			barrierLabel.sprintf("<b>%s</b><br/>%s <b>to</b> %s", descr.m_as[barrier.m_idx].m_name.getBegin(),
								 asUsageToStr(pool, barrier.m_usageBefore).cstr(), asUsageToStr(pool, barrier.m_usageAfter).cstr());

			StringRaii barrierName(&pool);
			barrierName.sprintf("%s AS barrier%u", batchName.cstr(), barrierIdx);

			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(),
								  COLORS[batchIdx % COLORS.getSize()], barrierLabel.cstr());
			slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());

			prevBubble = barrierName;
		}

		for(U32 passIdx : batch.m_passIndices)
		{
			const RenderPassDescriptionBase& pass = *descr.m_passes[passIdx];
			StringRaii passName(&pool);
			passName.sprintf("%s pass", pass.m_name.cstr());

			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold];\n", passName.cstr(), COLORS[batchIdx % COLORS.getSize()]);
			slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), passName.cstr());

			prevBubble = passName;
		}
	}
	// slist.pushBackSprintf("}\n");

	slist.pushBackSprintf("}");

	File file;
	ANKI_CHECK(file.open(StringRaii(&pool).sprintf("%s/rgraph_%05u.dot", &path[0], m_version).toCString(), FileOpenFlag::kWrite));
	for(const String& s : slist)
	{
		ANKI_CHECK(file.writeTextf("%s", &s[0]));
	}

	return Error::kNone;
}

#endif

} // end namespace anki