MeshResource.cpp 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330
  1. // Copyright (C) 2009-2023, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/Resource/MeshResource.h>
  6. #include <AnKi/Resource/ResourceManager.h>
  7. #include <AnKi/Resource/MeshBinaryLoader.h>
  8. #include <AnKi/Resource/AsyncLoader.h>
  9. #include <AnKi/Util/Functions.h>
  10. #include <AnKi/Util/Filesystem.h>
  11. namespace anki {
/// State shared between the synchronous and asynchronous load paths: the mesh
/// being populated plus the binary loader that owns the parsed file data.
class MeshResource::LoadContext
{
public:
	// Holds a ref to the mesh so it stays alive while an async upload is in flight.
	MeshResourcePtr m_mesh;

	// Parses the .ankimesh binary and later feeds the staging uploads.
	MeshBinaryLoader m_loader;

	LoadContext(MeshResource* mesh)
		: m_mesh(mesh)
		, m_loader(&ResourceMemoryPool::getSingleton())
	{
	}
};
/// Mesh upload async task. Submitted to the resource AsyncLoader; when executed
/// it performs the GPU upload half of the load (see MeshResource::loadAsync).
class MeshResource::LoadTask : public AsyncLoaderTask
{
public:
	MeshResource::LoadContext m_ctx;

	LoadTask(MeshResource* mesh)
		: m_ctx(mesh)
	{
	}

	// AsyncLoaderTask entry point: runs on the loader thread.
	Error operator()([[maybe_unused]] AsyncLoaderTaskContext& ctx) final
	{
		return m_ctx.m_mesh->loadAsync(m_ctx.m_loader);
	}

	// Pool the async loader uses to allocate/free this task.
	static BaseMemoryPool& getMemoryPool()
	{
		return ResourceMemoryPool::getSingleton();
	}
};
  41. MeshResource::MeshResource()
  42. {
  43. }
  44. MeshResource::~MeshResource()
  45. {
  46. for(Lod& lod : m_lods)
  47. {
  48. UnifiedGeometryBuffer::getSingleton().deferredFree(lod.m_indexBufferAllocationToken);
  49. for(VertexStreamId stream : EnumIterable(VertexStreamId::kMeshRelatedFirst, VertexStreamId::kMeshRelatedCount))
  50. {
  51. UnifiedGeometryBuffer::getSingleton().deferredFree(lod.m_vertexBuffersAllocationToken[stream]);
  52. }
  53. }
  54. }
/// Loads a mesh: parses the binary header, allocates GPU memory for every LOD
/// (indices, vertex streams, optionally a BLAS), then either uploads the data
/// inline (sync) or hands the upload off to the async loader.
/// @param filename Resource path of the mesh binary.
/// @param async    If true, the GPU upload runs on the async loader thread.
Error MeshResource::load(const ResourceFilename& filename, Bool async)
{
	UniquePtr<LoadTask> task;
	LoadContext* ctx;
	LoadContext localCtx(this);

	String basename;
	getFilepathFilename(filename, basename);

	const Bool rayTracingEnabled = GrManager::getSingleton().getDeviceCapabilities().m_rayTracingEnabled;

	// Pick the load context: for async loads it must live inside the task
	// (it outlives this function); for sync loads the stack copy suffices.
	if(async)
	{
		task.reset(ResourceManager::getSingleton().getAsyncLoader().newTask<LoadTask>(this));
		ctx = &task->m_ctx;
	}
	else
	{
		task.reset(nullptr);
		ctx = &localCtx;
	}

	// Open file
	MeshBinaryLoader& loader = ctx->m_loader;
	ANKI_CHECK(loader.load(filename));
	const MeshBinaryHeader& header = loader.getHeader();

	// Misc
	m_indexType = header.m_indexType;
	m_aabb.setMin(header.m_aabbMin);
	m_aabb.setMax(header.m_aabbMax);
	// Positions are stored dequantized via scale+translation; only the first
	// scale component is used (uniform scale assumed — TODO confirm).
	m_positionsScale = header.m_vertexAttributes[VertexStreamId::kPosition].m_scale[0];
	m_positionsTranslation = Vec3(&header.m_vertexAttributes[VertexStreamId::kPosition].m_translation[0]);

	// Submeshes
	m_subMeshes.resize(header.m_subMeshCount);
	for(U32 i = 0; i < m_subMeshes.getSize(); ++i)
	{
		m_subMeshes[i].m_firstIndices = loader.getSubMeshes()[i].m_firstIndices;
		m_subMeshes[i].m_indexCounts = loader.getSubMeshes()[i].m_indexCounts;
		m_subMeshes[i].m_aabb.setMin(loader.getSubMeshes()[i].m_aabbMin);
		m_subMeshes[i].m_aabb.setMax(loader.getSubMeshes()[i].m_aabbMax);
	}

	// LODs. Iterated from the last (coarsest) LOD down to 0 — presumably so
	// allocation order inside the unified buffer matches LOD priority; confirm.
	m_lods.resize(header.m_lodCount);
	for(I32 l = I32(header.m_lodCount - 1); l >= 0; --l)
	{
		Lod& lod = m_lods[l];

		// Index stuff
		lod.m_indexCount = header.m_totalIndexCounts[l];
		ANKI_ASSERT((lod.m_indexCount % 3) == 0 && "Expecting triangles");
		const PtrSize indexBufferSize = PtrSize(lod.m_indexCount) * getIndexSize(m_indexType);
		// Alignment equals the index size so index offsets stay index-aligned.
		lod.m_indexBufferAllocationToken = UnifiedGeometryBuffer::getSingleton().allocate(indexBufferSize, getIndexSize(m_indexType));

		// Vertex stuff: allocate one sub-range per stream present in the file.
		lod.m_vertexCount = header.m_totalVertexCounts[l];
		for(VertexStreamId stream : EnumIterable(VertexStreamId::kMeshRelatedFirst, VertexStreamId::kMeshRelatedCount))
		{
			if(header.m_vertexAttributes[stream].m_format == Format::kNone)
			{
				continue;
			}
			m_presentVertStreams |= VertexStreamMask(1 << stream);
			lod.m_vertexBuffersAllocationToken[stream] =
				UnifiedGeometryBuffer::getSingleton().allocateFormat(kMeshRelatedVertexStreamFormats[stream], lod.m_vertexCount);
		}

		// BLAS: create (but don't build yet) a bottom-level acceleration
		// structure pointing at the index/position ranges allocated above.
		// The actual build happens in loadAsync() after the data is uploaded.
		if(rayTracingEnabled)
		{
			AccelerationStructureInitInfo inf(ResourceString().sprintf("%s_%s", "Blas", basename.cstr()));
			inf.m_type = AccelerationStructureType::kBottomLevel;
			inf.m_bottomLevel.m_indexBuffer = &UnifiedGeometryBuffer::getSingleton().getBuffer();
			inf.m_bottomLevel.m_indexBufferOffset = lod.m_indexBufferAllocationToken.getOffset();
			inf.m_bottomLevel.m_indexCount = lod.m_indexCount;
			inf.m_bottomLevel.m_indexType = m_indexType;
			inf.m_bottomLevel.m_positionBuffer = &UnifiedGeometryBuffer::getSingleton().getBuffer();
			inf.m_bottomLevel.m_positionBufferOffset = lod.m_vertexBuffersAllocationToken[VertexStreamId::kPosition].getOffset();
			inf.m_bottomLevel.m_positionStride = getFormatInfo(kMeshRelatedVertexStreamFormats[VertexStreamId::kPosition]).m_texelSize;
			inf.m_bottomLevel.m_positionsFormat = kMeshRelatedVertexStreamFormats[VertexStreamId::kPosition];
			inf.m_bottomLevel.m_positionCount = lod.m_vertexCount;
			lod.m_blas = GrManager::getSingleton().newAccelerationStructure(inf);
		}
	}

	// Clear the buffers. Only needed for async loads: the mesh may be drawn
	// before the upload task finishes, so zero-fill the ranges now to avoid
	// rendering stale/garbage geometry (presumably — confirm with renderer).
	if(async)
	{
		CommandBufferInitInfo cmdbinit("MeshResourceClear");
		cmdbinit.m_flags = CommandBufferFlag::kSmallBatch | CommandBufferFlag::kGeneralWork;
		CommandBufferPtr cmdb = GrManager::getSingleton().newCommandBuffer(cmdbinit);

		for(const Lod& lod : m_lods)
		{
			cmdb->fillBuffer(&UnifiedGeometryBuffer::getSingleton().getBuffer(), lod.m_indexBufferAllocationToken.getOffset(),
							 PtrSize(lod.m_indexCount) * getIndexSize(m_indexType), 0);

			for(VertexStreamId stream : EnumIterable(VertexStreamId::kMeshRelatedFirst, VertexStreamId::kMeshRelatedCount))
			{
				if(header.m_vertexAttributes[stream].m_format != Format::kNone)
				{
					cmdb->fillBuffer(&UnifiedGeometryBuffer::getSingleton().getBuffer(), lod.m_vertexBuffersAllocationToken[stream].getOffset(),
									 lod.m_vertexBuffersAllocationToken[stream].getAllocatedSize(), 0);
				}
			}
		}

		// Make the clears visible to vertex fetch before any draw uses them.
		const BufferBarrierInfo barrier = {&UnifiedGeometryBuffer::getSingleton().getBuffer(), BufferUsageBit::kTransferDestination,
										   BufferUsageBit::kVertex, 0, kMaxPtrSize};
		cmdb->setPipelineBarrier({}, {&barrier, 1}, {});

		cmdb->flush();
	}

	// Submit the loading task
	if(async)
	{
		ResourceManager::getSingleton().getAsyncLoader().submitTask(task.get());
		// Release ownership without deleting: the async loader now owns the
		// task and will free it through LoadTask::getMemoryPool().
		LoadTask* pTask;
		task.moveAndReset(pTask);
	}
	else
	{
		// Sync path: do the "async" upload work inline, right now.
		ANKI_CHECK(loadAsync(loader));
	}

	return Error::kNone;
}
/// GPU upload half of the load. Copies index and vertex data through transfer
/// staging buffers into the unified geometry buffer, then (if ray tracing is
/// enabled) builds the per-LOD BLASes. Runs either on the async loader thread
/// or inline from load() when loading synchronously.
/// @param loader Binary loader already holding the parsed mesh file.
Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
{
	GrManager& gr = GrManager::getSingleton();
	TransferGpuAllocator& transferAlloc = ResourceManager::getSingleton().getTransferGpuAllocator();

	// Worst case: one staging handle per vertex stream plus one for the index
	// buffer, per LOD.
	Array<TransferGpuAllocatorHandle, kMaxLodCount*(U32(VertexStreamId::kMeshRelatedCount) + 1)> handles;
	U32 handleCount = 0;

	Buffer* unifiedGeometryBuffer = &UnifiedGeometryBuffer::getSingleton().getBuffer();
	// All usage bits of the buffer except transfer-dst; used as the "everything
	// else" side of the transfer barriers below.
	const BufferUsageBit unifiedGeometryBufferNonTransferUsage = unifiedGeometryBuffer->getBufferUsage() ^ BufferUsageBit::kTransferDestination;

	CommandBufferInitInfo cmdbinit;
	cmdbinit.m_flags = CommandBufferFlag::kSmallBatch | CommandBufferFlag::kGeneralWork;
	CommandBufferPtr cmdb = gr.newCommandBuffer(cmdbinit);

	// Set transfer to transfer barrier because of the clear that happened while sync loading
	const BufferBarrierInfo barrier = {unifiedGeometryBuffer, unifiedGeometryBufferNonTransferUsage, BufferUsageBit::kTransferDestination, 0,
									   kMaxPtrSize};
	cmdb->setPipelineBarrier({}, {&barrier, 1}, {});

	// Upload index and vertex buffers
	for(U32 lodIdx = 0; lodIdx < m_lods.getSize(); ++lodIdx)
	{
		const Lod& lod = m_lods[lodIdx];

		// Upload index buffer: file -> mapped staging memory -> GPU copy.
		{
			TransferGpuAllocatorHandle& handle = handles[handleCount++];
			const PtrSize indexBufferSize = PtrSize(lod.m_indexCount) * getIndexSize(m_indexType);
			ANKI_CHECK(transferAlloc.allocate(indexBufferSize, handle));
			void* data = handle.getMappedMemory();
			ANKI_ASSERT(data);

			ANKI_CHECK(loader.storeIndexBuffer(lodIdx, data, indexBufferSize));

			cmdb->copyBufferToBuffer(&handle.getBuffer(), handle.getOffset(), unifiedGeometryBuffer, lod.m_indexBufferAllocationToken.getOffset(),
									 handle.getRange());
		}

		// Upload vert buffers
		for(VertexStreamId stream : EnumIterable(VertexStreamId::kMeshRelatedFirst, VertexStreamId::kMeshRelatedCount))
		{
			// Skip streams the file doesn't contain (mask set in load()).
			if(!(m_presentVertStreams & VertexStreamMask(1 << stream)))
			{
				continue;
			}

			TransferGpuAllocatorHandle& handle = handles[handleCount++];
			const PtrSize vertexBufferSize = PtrSize(lod.m_vertexCount) * getFormatInfo(kMeshRelatedVertexStreamFormats[stream]).m_texelSize;
			ANKI_CHECK(transferAlloc.allocate(vertexBufferSize, handle));
			U8* data = static_cast<U8*>(handle.getMappedMemory());
			ANKI_ASSERT(data);

			// Load to staging
			ANKI_CHECK(loader.storeVertexBuffer(lodIdx, U32(stream), data, vertexBufferSize));

			// Copy
			cmdb->copyBufferToBuffer(&handle.getBuffer(), handle.getOffset(), unifiedGeometryBuffer,
									 lod.m_vertexBuffersAllocationToken[stream].getOffset(), handle.getRange());
		}
	}

	if(gr.getDeviceCapabilities().m_rayTracingEnabled)
	{
		// Build BLASes

		// Set the barriers: uploads must finish before the BLAS builds read
		// the geometry, and the BLASes must be transitioned for building.
		BufferBarrierInfo bufferBarrier;
		bufferBarrier.m_buffer = unifiedGeometryBuffer;
		bufferBarrier.m_offset = 0;
		bufferBarrier.m_range = kMaxPtrSize;
		bufferBarrier.m_previousUsage = BufferUsageBit::kTransferDestination;
		bufferBarrier.m_nextUsage = unifiedGeometryBufferNonTransferUsage;

		Array<AccelerationStructureBarrierInfo, kMaxLodCount> asBarriers;
		for(U32 lodIdx = 0; lodIdx < m_lods.getSize(); ++lodIdx)
		{
			asBarriers[lodIdx].m_as = m_lods[lodIdx].m_blas.get();
			asBarriers[lodIdx].m_previousUsage = AccelerationStructureUsageBit::kNone;
			asBarriers[lodIdx].m_nextUsage = AccelerationStructureUsageBit::kBuild;
		}

		cmdb->setPipelineBarrier({}, {&bufferBarrier, 1}, {&asBarriers[0], m_lods.getSize()});

		// Build BLASes
		for(U32 lodIdx = 0; lodIdx < m_lods.getSize(); ++lodIdx)
		{
			cmdb->buildAccelerationStructure(m_lods[lodIdx].m_blas.get());
		}

		// Barriers again: builds done -> BLASes readable by everything.
		for(U32 lodIdx = 0; lodIdx < m_lods.getSize(); ++lodIdx)
		{
			asBarriers[lodIdx].m_as = m_lods[lodIdx].m_blas.get();
			asBarriers[lodIdx].m_previousUsage = AccelerationStructureUsageBit::kBuild;
			asBarriers[lodIdx].m_nextUsage = AccelerationStructureUsageBit::kAllRead;
		}

		cmdb->setPipelineBarrier({}, {}, {&asBarriers[0], m_lods.getSize()});
	}
	else
	{
		// Only set a barrier: uploads done -> buffer usable by the rest of
		// the pipeline (vertex fetch etc.).
		BufferBarrierInfo bufferBarrier;
		bufferBarrier.m_buffer = unifiedGeometryBuffer;
		bufferBarrier.m_offset = 0;
		bufferBarrier.m_range = kMaxPtrSize;
		bufferBarrier.m_previousUsage = BufferUsageBit::kTransferDestination;
		bufferBarrier.m_nextUsage = unifiedGeometryBufferNonTransferUsage;
		cmdb->setPipelineBarrier({}, {&bufferBarrier, 1}, {});
	}

	// Finalize: submit, then hand each staging allocation back together with
	// the submit fence so the allocator recycles it only after the GPU copies
	// have completed.
	FencePtr fence;
	cmdb->flush({}, &fence);

	for(U32 i = 0; i < handleCount; ++i)
	{
		transferAlloc.release(handles[i], fence);
	}

	return Error::kNone;
}
  269. } // end namespace anki