CommandBufferFactory.cpp

// Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
// All rights reserved.
// Code licensed under the BSD License.
// http://www.anki3d.org/LICENSE

#include <AnKi/Gr/Vulkan/CommandBufferFactory.h>
#include <AnKi/Util/Tracer.h>

namespace anki
{

void MicroCommandBuffer::destroy()
{
    reset();

    if(m_handle)
    {
        vkFreeCommandBuffers(m_threadAlloc->m_factory->m_dev,
                             m_threadAlloc->m_pools[getQueueTypeFromCommandBufferFlags(m_flags)], 1, &m_handle);
        m_handle = {};
    }
}
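
// Added commentary (inferred from this file): a MicroCommandBuffer is pooled rather than
// re-created. destroy() above returns the VkCommandBuffer to its thread's pool, whereas
// reset() below only rewinds per-recording state (the GrObject references, the stack
// allocator and the fence) so the same handle can be handed out and recorded again.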

void MicroCommandBuffer::reset()
{
    ANKI_TRACE_SCOPED_EVENT(VK_COMMAND_BUFFER_RESET);

    ANKI_ASSERT(m_refcount.load() == 0);
    ANKI_ASSERT(!m_fence.isCreated() || m_fence->done());

    for(GrObjectType type : EnumIterable<GrObjectType>())
    {
        m_objectRefs[type].destroy(m_fastAlloc);
    }

    m_fastAlloc.getMemoryPool().reset();

    m_fence = {};
}
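
// Added commentary (inferred from this file): each CommandBufferThreadAllocator owns one
// VkCommandPool per queue family. VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT is what
// makes per-buffer recycling possible: with that flag set, a previously recorded command
// buffer may be reset individually (implicitly by the next vkBeginCommandBuffer, which
// happens outside this file) instead of resetting the whole pool.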

Error CommandBufferThreadAllocator::init()
{
    for(QueueType qtype : EnumIterable<QueueType>())
    {
        VkCommandPoolCreateInfo ci = {};
        ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        ci.queueFamilyIndex = m_factory->m_queueFamilies[qtype];

        ANKI_VK_CHECK(vkCreateCommandPool(m_factory->m_dev, &ci, nullptr, &m_pools[qtype]));
    }

    return Error::NONE;
}

void CommandBufferThreadAllocator::destroyList(IntrusiveList<MicroCommandBuffer>& list)
{
    while(!list.isEmpty())
    {
        MicroCommandBuffer* ptr = list.popFront();
        ptr->destroy();
        getAllocator().deleteInstance(ptr);
#if ANKI_EXTRA_CHECKS
        m_createdCmdbs.fetchSub(1);
#endif
    }
}

void CommandBufferThreadAllocator::destroyLists()
{
    for(U i = 0; i < 2; ++i)
    {
        for(U j = 0; j < 2; ++j)
        {
            for(QueueType qtype : EnumIterable<QueueType>())
            {
                CmdbType& type = m_types[i][j][qtype];

                destroyList(type.m_deletedCmdbs);
                destroyList(type.m_readyCmdbs);
                destroyList(type.m_inUseCmdbs);
            }
        }
    }
}

void CommandBufferThreadAllocator::destroy()
{
    // Iterate by reference so clearing the handle actually nulls the stored pool
    for(VkCommandPool& pool : m_pools)
    {
        if(pool)
        {
            vkDestroyCommandPool(m_factory->m_dev, pool, nullptr);
            pool = {};
        }
    }

    ANKI_ASSERT(m_createdCmdbs.load() == 0 && "Someone still holds references to command buffers");
}
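
// Added commentary (inferred from this file): newCommandBuffer() prefers recycling over
// allocation. Buffers released by their owners wait in m_deletedCmdbs; primaries are first
// parked in m_inUseCmdbs because the GPU may still be executing them, and are handed out
// again only once their fence (if any) has signalled. Secondaries are never submitted on
// their own, so they can be recycled immediately. Only when nothing reusable is found is a
// fresh VkCommandBuffer allocated from the thread's pool.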

Error CommandBufferThreadAllocator::newCommandBuffer(CommandBufferFlag cmdbFlags, MicroCommandBufferPtr& outPtr,
                                                     Bool& createdNew)
{
    ANKI_ASSERT(!!(cmdbFlags & CommandBufferFlag::COMPUTE_WORK) ^ !!(cmdbFlags & CommandBufferFlag::GENERAL_WORK));

    createdNew = false;

    const Bool secondLevel = !!(cmdbFlags & CommandBufferFlag::SECOND_LEVEL);
    const Bool smallBatch = !!(cmdbFlags & CommandBufferFlag::SMALL_BATCH);
    CmdbType& type = m_types[secondLevel][smallBatch][getQueueTypeFromCommandBufferFlags(cmdbFlags)];

    // Move the deleted to (possibly) in-use or ready
    {
        LockGuard<Mutex> lock(type.m_deletedMtx);

        while(!type.m_deletedCmdbs.isEmpty())
        {
            MicroCommandBuffer* ptr = type.m_deletedCmdbs.popFront();

            if(secondLevel)
            {
                type.m_readyCmdbs.pushFront(ptr);
                ptr->reset();
            }
            else
            {
                type.m_inUseCmdbs.pushFront(ptr);
            }
        }
    }

    // Reset the in-use command buffers and try to get one available
    MicroCommandBuffer* out = nullptr;
    if(!secondLevel)
    {
        // Primary

        // Try to reuse a ready buffer
        if(!type.m_readyCmdbs.isEmpty())
        {
            out = type.m_readyCmdbs.popFront();
        }

        // Do a sweep and move in-use buffers to ready
        IntrusiveList<MicroCommandBuffer> inUseCmdbs; // Push to temporary because we are iterating
        while(!type.m_inUseCmdbs.isEmpty())
        {
            MicroCommandBuffer* inUseCmdb = type.m_inUseCmdbs.popFront();

            if(!inUseCmdb->m_fence.isCreated() || inUseCmdb->m_fence->done())
            {
                // It's ready
                if(out)
                {
                    type.m_readyCmdbs.pushFront(inUseCmdb);
                    inUseCmdb->reset();
                }
                else
                {
                    out = inUseCmdb;
                }
            }
            else
            {
                inUseCmdbs.pushBack(inUseCmdb);
            }
        }

        ANKI_ASSERT(type.m_inUseCmdbs.isEmpty());
        type.m_inUseCmdbs = std::move(inUseCmdbs);
    }
    else
    {
        // Secondary
        ANKI_ASSERT(type.m_inUseCmdbs.isEmpty());

        if(!type.m_readyCmdbs.isEmpty())
        {
            out = type.m_readyCmdbs.popFront();
        }
    }

    if(ANKI_UNLIKELY(out == nullptr))
    {
        // Create a new one
        VkCommandBufferAllocateInfo ci = {};
        ci.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
        ci.commandPool = m_pools[getQueueTypeFromCommandBufferFlags(cmdbFlags)];
        ci.level = (secondLevel) ? VK_COMMAND_BUFFER_LEVEL_SECONDARY : VK_COMMAND_BUFFER_LEVEL_PRIMARY;
        ci.commandBufferCount = 1;

        ANKI_TRACE_INC_COUNTER(VK_COMMAND_BUFFER_CREATE, 1);
        VkCommandBuffer cmdb;
        ANKI_VK_CHECK(vkAllocateCommandBuffers(m_factory->m_dev, &ci, &cmdb));

        MicroCommandBuffer* newCmdb = getAllocator().newInstance<MicroCommandBuffer>(this);
#if ANKI_EXTRA_CHECKS
        m_createdCmdbs.fetchAdd(1);
#endif

        newCmdb->m_fastAlloc =
            StackAllocator<U8>(m_factory->m_alloc.getMemoryPool().getAllocationCallback(),
                               m_factory->m_alloc.getMemoryPool().getAllocationCallbackUserData(), 256_KB, 2.0f);

        newCmdb->m_handle = cmdb;
        newCmdb->m_flags = cmdbFlags;

        out = newCmdb;
        createdNew = true;
    }
    else
    {
        out->reset();
    }

    ANKI_ASSERT(out && out->m_refcount.load() == 0);
    ANKI_ASSERT(out->m_flags == cmdbFlags);
    outPtr.reset(out);
    return Error::NONE;
}
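
// Added commentary (inferred from this file): deleteCommandBuffer() only parks the buffer
// in the deleted list under m_deletedMtx; the actual recycling happens lazily inside the
// next newCommandBuffer() call on the same thread allocator. It is presumably invoked by
// the MicroCommandBufferPtr deleter once the last reference is dropped (the smart pointer
// type is defined in the header, not here).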

void CommandBufferThreadAllocator::deleteCommandBuffer(MicroCommandBuffer* ptr)
{
    ANKI_ASSERT(ptr);

    const Bool secondLevel = !!(ptr->m_flags & CommandBufferFlag::SECOND_LEVEL);
    const Bool smallBatch = !!(ptr->m_flags & CommandBufferFlag::SMALL_BATCH);
    CmdbType& type = m_types[secondLevel][smallBatch][getQueueTypeFromCommandBufferFlags(ptr->m_flags)];

    LockGuard<Mutex> lock(type.m_deletedMtx);
    type.m_deletedCmdbs.pushBack(ptr);
}

Error CommandBufferFactory::init(GrAllocator<U8> alloc, VkDevice dev, Array<U32, U(QueueType::COUNT)> queueFamilies)
{
    ANKI_ASSERT(dev);

    m_alloc = alloc;
    m_dev = dev;
    m_queueFamilies = queueFamilies;
    return Error::NONE;
}

void CommandBufferFactory::destroy()
{
    // Run 2 times because destroyLists() populates other allocators' lists
    for(U i = 0; i < 2; ++i)
    {
        for(CommandBufferThreadAllocator* alloc : m_threadAllocs)
        {
            alloc->destroyLists();
        }
    }

    for(CommandBufferThreadAllocator* talloc : m_threadAllocs)
    {
        talloc->destroy();
        m_alloc.deleteInstance(talloc);
    }

    m_threadAllocs.destroy(m_alloc);
}
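
// Added commentary (inferred from this file): the factory keeps one thread allocator per
// calling thread, created lazily. Lookup takes the read lock and binary-searches the array
// (kept sorted by ThreadId); on a miss the write lock is taken, the search is repeated
// (double-checked locking), and only then is a new allocator created, appended and the
// array re-sorted.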

Error CommandBufferFactory::newCommandBuffer(ThreadId tid, CommandBufferFlag cmdbFlags, MicroCommandBufferPtr& ptr)
{
    CommandBufferThreadAllocator* alloc = nullptr;

    // Get the thread allocator
    {
        class Comp
        {
        public:
            Bool operator()(const CommandBufferThreadAllocator* a, ThreadId tid) const
            {
                return a->m_tid < tid;
            }

            Bool operator()(ThreadId tid, const CommandBufferThreadAllocator* a) const
            {
                return tid < a->m_tid;
            }
        };

        // Find using binary search
        {
            RLockGuard<RWMutex> lock(m_threadAllocMtx);
            auto it = binarySearch(m_threadAllocs.getBegin(), m_threadAllocs.getEnd(), tid, Comp());
            alloc = (it != m_threadAllocs.getEnd()) ? (*it) : nullptr;
        }

        if(ANKI_UNLIKELY(alloc == nullptr))
        {
            WLockGuard<RWMutex> lock(m_threadAllocMtx);

            // Check again
            auto it = binarySearch(m_threadAllocs.getBegin(), m_threadAllocs.getEnd(), tid, Comp());
            alloc = (it != m_threadAllocs.getEnd()) ? (*it) : nullptr;

            if(alloc == nullptr)
            {
                alloc = m_alloc.newInstance<CommandBufferThreadAllocator>(this, tid);

                m_threadAllocs.resize(m_alloc, m_threadAllocs.getSize() + 1);
                m_threadAllocs[m_threadAllocs.getSize() - 1] = alloc;

                // Sort for fast find
                std::sort(m_threadAllocs.getBegin(), m_threadAllocs.getEnd(),
                          [](const CommandBufferThreadAllocator* a, const CommandBufferThreadAllocator* b) {
                              return a->m_tid < b->m_tid;
                          });

                ANKI_CHECK(alloc->init());
            }
        }
    }

    ANKI_ASSERT(alloc);
    ANKI_ASSERT(alloc->m_tid == tid);

    Bool createdNew;
    ANKI_CHECK(alloc->newCommandBuffer(cmdbFlags, ptr, createdNew));

    if(createdNew)
    {
        m_createdCmdBufferCount.fetchAdd(1);
    }

    return Error::NONE;
}
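
// Usage sketch (hypothetical, added for illustration; 'factory' and 'tid' are assumed to be
// a CommandBufferFactory and the calling thread's ThreadId, which this file does not show):
//
//   MicroCommandBufferPtr micro;
//   ANKI_CHECK(factory.newCommandBuffer(tid, CommandBufferFlag::GENERAL_WORK, micro));
//   // ... record Vulkan commands into the wrapped VkCommandBuffer ...
//   micro = {}; // dropping the last reference eventually parks the buffer for recycling
//
// Releasing the last reference funnels the buffer back through deleteCommandBuffer() above,
// so the next newCommandBuffer() call on the same thread can reuse it.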

} // end namespace anki