// MeshResource.cpp
  1. // Copyright (C) 2009-2022, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/Resource/MeshResource.h>
  6. #include <AnKi/Resource/ResourceManager.h>
  7. #include <AnKi/Resource/MeshBinaryLoader.h>
  8. #include <AnKi/Resource/AsyncLoader.h>
  9. #include <AnKi/Core/GpuMemoryPools.h>
  10. #include <AnKi/Util/Functions.h>
  11. #include <AnKi/Util/Filesystem.h>
  12. namespace anki {
/// Bundles everything a single mesh load needs: the resource being populated
/// (held by ref-counted pointer so it outlives an async load) and the binary
/// loader that parses the mesh file.
class MeshResource::LoadContext
{
public:
	MeshResourcePtr m_mesh; ///< Keeps the mesh alive for the duration of the load.
	MeshBinaryLoader m_loader; ///< Parses the mesh binary format.

	/// @param mesh The resource to load into.
	/// @param pool Memory pool the binary loader will allocate from.
	LoadContext(MeshResource* mesh, BaseMemoryPool* pool)
		: m_mesh(mesh)
		, m_loader(&mesh->getManager(), pool)
	{
	}
};
/// Mesh upload async task. Wraps a LoadContext so the GPU-upload half of the
/// load (MeshResource::loadAsync) can run on the async loader thread.
class MeshResource::LoadTask : public AsyncLoaderTask
{
public:
	MeshResource::LoadContext m_ctx; ///< The mesh + loader pair this task operates on.

	/// Allocates the context's loader memory from the async loader's pool.
	LoadTask(MeshResource* mesh)
		: m_ctx(mesh, &mesh->getManager().getAsyncLoader().getMemoryPool())
	{
	}

	/// Task entry point: performs the upload. The task context argument is unused.
	Error operator()([[maybe_unused]] AsyncLoaderTaskContext& ctx) final
	{
		return m_ctx.m_mesh->loadAsync(m_ctx.m_loader);
	}

	/// The pool this task (and its loader) allocates from.
	BaseMemoryPool& getMemoryPool() const
	{
		return m_ctx.m_mesh->getManager().getAsyncLoader().getMemoryPool();
	}
};
/// Construct an empty mesh resource. All actual data is populated by load().
MeshResource::MeshResource(ResourceManager* manager)
	: ResourceObject(manager)
{
}
  46. MeshResource::~MeshResource()
  47. {
  48. m_subMeshes.destroy(getMemoryPool());
  49. for(Lod& lod : m_lods)
  50. {
  51. if(lod.m_unifiedGeometryIndexBufferOffset != kMaxPtrSize)
  52. {
  53. const U32 alignment = getIndexSize(m_indexType);
  54. const PtrSize size = lod.m_indexCount * PtrSize(alignment);
  55. getManager().getUnifiedGeometryMemoryPool().free(size, alignment, lod.m_unifiedGeometryIndexBufferOffset);
  56. }
  57. for(VertexStreamId stream : EnumIterable(VertexStreamId::kMeshRelatedFirst, VertexStreamId::kMeshRelatedCount))
  58. {
  59. if(lod.m_unifiedGeometryVertBufferOffsets[stream] != kMaxPtrSize)
  60. {
  61. const U32 alignment = getFormatInfo(kMeshRelatedVertexStreamFormats[stream]).m_texelSize;
  62. const PtrSize size = PtrSize(alignment) * lod.m_vertexCount;
  63. getManager().getUnifiedGeometryMemoryPool().free(size, alignment,
  64. lod.m_unifiedGeometryVertBufferOffsets[stream]);
  65. }
  66. }
  67. }
  68. m_lods.destroy(getMemoryPool());
  69. }
  70. Error MeshResource::load(const ResourceFilename& filename, Bool async)
  71. {
  72. UniquePtr<LoadTask> task;
  73. LoadContext* ctx;
  74. LoadContext localCtx(this, &getTempMemoryPool());
  75. StringRaii basename(&getTempMemoryPool());
  76. getFilepathFilename(filename, basename);
  77. const Bool rayTracingEnabled = getManager().getGrManager().getDeviceCapabilities().m_rayTracingEnabled;
  78. BufferPtr unifiedGeometryBuffer = getManager().getUnifiedGeometryMemoryPool().getVertexBuffer();
  79. if(async)
  80. {
  81. task.reset(getManager().getAsyncLoader().newTask<LoadTask>(this));
  82. ctx = &task->m_ctx;
  83. }
  84. else
  85. {
  86. task.reset(nullptr);
  87. ctx = &localCtx;
  88. }
  89. // Open file
  90. MeshBinaryLoader& loader = ctx->m_loader;
  91. ANKI_CHECK(loader.load(filename));
  92. const MeshBinaryHeader& header = loader.getHeader();
  93. // Misc
  94. m_indexType = header.m_indexType;
  95. m_aabb.setMin(header.m_aabbMin);
  96. m_aabb.setMax(header.m_aabbMax);
  97. m_positionsScale = header.m_vertexAttributes[VertexStreamId::kPosition].m_scale[0];
  98. m_positionsTranslation = Vec3(&header.m_vertexAttributes[VertexStreamId::kPosition].m_translation[0]);
  99. // Submeshes
  100. m_subMeshes.create(getMemoryPool(), header.m_subMeshCount);
  101. for(U32 i = 0; i < m_subMeshes.getSize(); ++i)
  102. {
  103. m_subMeshes[i].m_firstIndices = loader.getSubMeshes()[i].m_firstIndices;
  104. m_subMeshes[i].m_indexCounts = loader.getSubMeshes()[i].m_indexCounts;
  105. m_subMeshes[i].m_aabb.setMin(loader.getSubMeshes()[i].m_aabbMin);
  106. m_subMeshes[i].m_aabb.setMax(loader.getSubMeshes()[i].m_aabbMax);
  107. }
  108. // LODs
  109. m_lods.create(getMemoryPool(), header.m_lodCount);
  110. for(I32 l = I32(header.m_lodCount - 1); l >= 0; --l)
  111. {
  112. Lod& lod = m_lods[l];
  113. // Index stuff
  114. lod.m_indexCount = header.m_totalIndexCounts[l];
  115. ANKI_ASSERT((lod.m_indexCount % 3) == 0 && "Expecting triangles");
  116. const PtrSize indexBufferSize = PtrSize(lod.m_indexCount) * getIndexSize(m_indexType);
  117. ANKI_CHECK(getManager().getUnifiedGeometryMemoryPool().allocate(indexBufferSize, getIndexSize(m_indexType),
  118. lod.m_unifiedGeometryIndexBufferOffset));
  119. // Vertex stuff
  120. lod.m_vertexCount = header.m_totalVertexCounts[l];
  121. for(VertexStreamId stream : EnumIterable(VertexStreamId::kMeshRelatedFirst, VertexStreamId::kMeshRelatedCount))
  122. {
  123. if(header.m_vertexAttributes[stream].m_format == Format::kNone)
  124. {
  125. lod.m_unifiedGeometryVertBufferOffsets[stream] = kMaxPtrSize;
  126. continue;
  127. }
  128. m_presentVertStreams |= VertexStreamMask(1 << stream);
  129. const U32 texelSize = getFormatInfo(kMeshRelatedVertexStreamFormats[stream]).m_texelSize;
  130. const PtrSize vertexBufferSize = PtrSize(lod.m_vertexCount) * texelSize;
  131. const U32 alignment = 4;
  132. ANKI_CHECK(getManager().getUnifiedGeometryMemoryPool().allocate(
  133. vertexBufferSize, alignment, lod.m_unifiedGeometryVertBufferOffsets[stream]));
  134. }
  135. // BLAS
  136. if(rayTracingEnabled)
  137. {
  138. AccelerationStructureInitInfo inf(
  139. StringRaii(&getTempMemoryPool()).sprintf("%s_%s", "Blas", basename.cstr()));
  140. inf.m_type = AccelerationStructureType::kBottomLevel;
  141. inf.m_bottomLevel.m_indexBuffer = unifiedGeometryBuffer;
  142. inf.m_bottomLevel.m_indexBufferOffset = lod.m_unifiedGeometryIndexBufferOffset;
  143. inf.m_bottomLevel.m_indexCount = lod.m_indexCount;
  144. inf.m_bottomLevel.m_indexType = m_indexType;
  145. inf.m_bottomLevel.m_positionBuffer = unifiedGeometryBuffer;
  146. inf.m_bottomLevel.m_positionBufferOffset =
  147. lod.m_unifiedGeometryVertBufferOffsets[VertexStreamId::kPosition];
  148. inf.m_bottomLevel.m_positionStride =
  149. getFormatInfo(kMeshRelatedVertexStreamFormats[VertexStreamId::kPosition]).m_texelSize;
  150. inf.m_bottomLevel.m_positionsFormat = kMeshRelatedVertexStreamFormats[VertexStreamId::kPosition];
  151. inf.m_bottomLevel.m_positionCount = lod.m_vertexCount;
  152. lod.m_blas = getManager().getGrManager().newAccelerationStructure(inf);
  153. }
  154. }
  155. // Clear the buffers
  156. if(async)
  157. {
  158. CommandBufferInitInfo cmdbinit("MeshResourceClear");
  159. cmdbinit.m_flags = CommandBufferFlag::kSmallBatch | CommandBufferFlag::kGeneralWork;
  160. CommandBufferPtr cmdb = getManager().getGrManager().newCommandBuffer(cmdbinit);
  161. for(const Lod& lod : m_lods)
  162. {
  163. cmdb->fillBuffer(unifiedGeometryBuffer, lod.m_unifiedGeometryIndexBufferOffset,
  164. PtrSize(lod.m_indexCount) * getIndexSize(m_indexType), 0);
  165. for(VertexStreamId stream :
  166. EnumIterable(VertexStreamId::kMeshRelatedFirst, VertexStreamId::kMeshRelatedCount))
  167. {
  168. if(header.m_vertexAttributes[stream].m_format != Format::kNone)
  169. {
  170. cmdb->fillBuffer(unifiedGeometryBuffer, lod.m_unifiedGeometryVertBufferOffsets[stream],
  171. PtrSize(lod.m_vertexCount)
  172. * getFormatInfo(kMeshRelatedVertexStreamFormats[stream]).m_texelSize,
  173. 0);
  174. }
  175. }
  176. }
  177. const BufferBarrierInfo barrier = {unifiedGeometryBuffer.get(), BufferUsageBit::kTransferDestination,
  178. BufferUsageBit::kVertex, 0, kMaxPtrSize};
  179. cmdb->setPipelineBarrier({}, {&barrier, 1}, {});
  180. cmdb->flush();
  181. }
  182. // Submit the loading task
  183. if(async)
  184. {
  185. getManager().getAsyncLoader().submitTask(task.get());
  186. LoadTask* pTask;
  187. task.moveAndReset(pTask);
  188. }
  189. else
  190. {
  191. ANKI_CHECK(loadAsync(loader));
  192. }
  193. return Error::kNone;
  194. }
/// Upload the mesh data to the GPU: stage index and vertex buffers through the
/// transfer allocator, copy them into the unified geometry buffer, build the
/// per-LOD BLASes when ray tracing is on, and release the staging memory
/// against the submission fence. Runs on the async loader thread for async
/// loads, or inline from load() for sync loads.
Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
{
	GrManager& gr = getManager().getGrManager();
	TransferGpuAllocator& transferAlloc = getManager().getTransferGpuAllocator();
	// Worst case: per LOD one index-buffer handle plus one handle per vertex stream.
	Array<TransferGpuAllocatorHandle, kMaxLodCount*(U32(VertexStreamId::kMeshRelatedCount) + 1)> handles;
	U32 handleCount = 0;

	BufferPtr unifiedGeometryBuffer = getManager().getUnifiedGeometryMemoryPool().getVertexBuffer();
	// All of the buffer's usage bits except transfer-destination. NOTE(review): assumes
	// kTransferDestination is always set in the buffer's usage, so XOR acts as bit-clear — confirm.
	const BufferUsageBit unifiedGeometryBufferNonTransferUsage =
		unifiedGeometryBuffer->getBufferUsage() ^ BufferUsageBit::kTransferDestination;

	CommandBufferInitInfo cmdbinit;
	cmdbinit.m_flags = CommandBufferFlag::kSmallBatch | CommandBufferFlag::kGeneralWork;
	CommandBufferPtr cmdb = gr.newCommandBuffer(cmdbinit);

	// Set transfer to transfer barrier because of the clear that happened while sync loading
	const BufferBarrierInfo barrier = {unifiedGeometryBuffer.get(), unifiedGeometryBufferNonTransferUsage,
									   BufferUsageBit::kTransferDestination, 0, kMaxPtrSize};
	cmdb->setPipelineBarrier({}, {&barrier, 1}, {});

	// Upload index and vertex buffers
	for(U32 lodIdx = 0; lodIdx < m_lods.getSize(); ++lodIdx)
	{
		const Lod& lod = m_lods[lodIdx];

		// Upload index buffer
		{
			TransferGpuAllocatorHandle& handle = handles[handleCount++];
			const PtrSize indexBufferSize = PtrSize(lod.m_indexCount) * getIndexSize(m_indexType);
			ANKI_CHECK(transferAlloc.allocate(indexBufferSize, handle));
			void* data = handle.getMappedMemory();
			ANKI_ASSERT(data);

			// Write the parsed indices into the mapped staging memory, then record the GPU copy.
			ANKI_CHECK(loader.storeIndexBuffer(lodIdx, data, indexBufferSize));
			cmdb->copyBufferToBuffer(handle.getBuffer(), handle.getOffset(), unifiedGeometryBuffer,
									 lod.m_unifiedGeometryIndexBufferOffset, handle.getRange());
		}

		// Upload vert buffers
		for(VertexStreamId stream : EnumIterable(VertexStreamId::kMeshRelatedFirst, VertexStreamId::kMeshRelatedCount))
		{
			// Skip streams the mesh file doesn't contain (flagged during load()).
			if(!(m_presentVertStreams & VertexStreamMask(1 << stream)))
			{
				continue;
			}

			TransferGpuAllocatorHandle& handle = handles[handleCount++];
			const PtrSize vertexBufferSize =
				PtrSize(lod.m_vertexCount) * getFormatInfo(kMeshRelatedVertexStreamFormats[stream]).m_texelSize;
			ANKI_CHECK(transferAlloc.allocate(vertexBufferSize, handle));
			U8* data = static_cast<U8*>(handle.getMappedMemory());
			ANKI_ASSERT(data);

			// Load to staging
			ANKI_CHECK(loader.storeVertexBuffer(lodIdx, U32(stream), data, vertexBufferSize));

			// Copy
			cmdb->copyBufferToBuffer(handle.getBuffer(), handle.getOffset(), unifiedGeometryBuffer,
									 lod.m_unifiedGeometryVertBufferOffsets[stream], handle.getRange());
		}
	}

	if(gr.getDeviceCapabilities().m_rayTracingEnabled)
	{
		// Build BLASes

		// Set the barriers: geometry copies must be visible before the builds,
		// and each BLAS must transition into the build state.
		BufferBarrierInfo bufferBarrier;
		bufferBarrier.m_buffer = unifiedGeometryBuffer.get();
		bufferBarrier.m_offset = 0;
		bufferBarrier.m_size = kMaxPtrSize;
		bufferBarrier.m_previousUsage = BufferUsageBit::kTransferDestination;
		bufferBarrier.m_nextUsage = unifiedGeometryBufferNonTransferUsage;

		Array<AccelerationStructureBarrierInfo, kMaxLodCount> asBarriers;
		for(U32 lodIdx = 0; lodIdx < m_lods.getSize(); ++lodIdx)
		{
			asBarriers[lodIdx].m_as = m_lods[lodIdx].m_blas.get();
			asBarriers[lodIdx].m_previousUsage = AccelerationStructureUsageBit::kNone;
			asBarriers[lodIdx].m_nextUsage = AccelerationStructureUsageBit::kBuild;
		}

		cmdb->setPipelineBarrier({}, {&bufferBarrier, 1}, {&asBarriers[0], m_lods.getSize()});

		// Build BLASes
		for(U32 lodIdx = 0; lodIdx < m_lods.getSize(); ++lodIdx)
		{
			cmdb->buildAccelerationStructure(m_lods[lodIdx].m_blas);
		}

		// Barriers again: builds done, make the BLASes readable.
		for(U32 lodIdx = 0; lodIdx < m_lods.getSize(); ++lodIdx)
		{
			asBarriers[lodIdx].m_as = m_lods[lodIdx].m_blas.get();
			asBarriers[lodIdx].m_previousUsage = AccelerationStructureUsageBit::kBuild;
			asBarriers[lodIdx].m_nextUsage = AccelerationStructureUsageBit::kAllRead;
		}

		cmdb->setPipelineBarrier({}, {}, {&asBarriers[0], m_lods.getSize()});
	}
	else
	{
		// Only set a barrier: return the buffer to its non-transfer usages.
		BufferBarrierInfo bufferBarrier;
		bufferBarrier.m_buffer = unifiedGeometryBuffer.get();
		bufferBarrier.m_offset = 0;
		bufferBarrier.m_size = kMaxPtrSize;
		bufferBarrier.m_previousUsage = BufferUsageBit::kTransferDestination;
		bufferBarrier.m_nextUsage = unifiedGeometryBufferNonTransferUsage;
		cmdb->setPipelineBarrier({}, {&bufferBarrier, 1}, {});
	}

	// Finalize: submit, and tie every staging allocation's release to the submission fence
	// so the memory is recycled only after the GPU has consumed it.
	FencePtr fence;
	cmdb->flush({}, &fence);

	for(U32 i = 0; i < handleCount; ++i)
	{
		transferAlloc.release(handles[i], fence);
	}

	return Error::kNone;
}
  298. } // end namespace anki