// GpuMemoryManager.cpp
  1. // Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/Gr/Vulkan/GpuMemoryManager.h>
  6. #include <AnKi/Util/List.h>
  7. namespace anki {
/// Descriptor of one allocation class: fixed-size slots sub-allocated out of a fixed-size memory chunk.
class ClassInf
{
public:
	PtrSize m_slotSize; ///< Size of each sub-allocation served from this class.
	PtrSize m_chunkSize; ///< Size of the VkDeviceMemory chunk the slots are carved from.
};
/// Allocation classes for the regular memory types. Each entry pairs a slot size with the chunk it is carved from
/// (e.g. 16kB chunks split into 256B slots, so allocsPerChunk = chunkSize / slotSize).
static constexpr Array<ClassInf, 7> CLASSES{{{256_B, 16_KB},
											 {4_KB, 256_KB},
											 {128_KB, 8_MB},
											 {1_MB, 64_MB},
											 {16_MB, 128_MB},
											 {64_MB, 256_MB},
											 {128_MB, 256_MB}}};

/// Special classes for the ReBAR memory. Have that as a special case because it's so limited and needs special care.
static constexpr Array<ClassInf, 3> REBAR_CLASSES{{{1_MB, 1_MB}, {12_MB, 12_MB}, {24_MB, 24_MB}}};
/// Bookkeeping for a single VkDeviceMemory chunk. Intrusive-list-enabled so Interface can keep per-class
/// free-lists of vacant chunks without extra allocations.
class GpuMemoryManager::Memory final :
	public ClassGpuAllocatorMemory,
	public IntrusiveListEnabled<GpuMemoryManager::Memory>
{
public:
	VkDeviceMemory m_handle = VK_NULL_HANDLE; ///< The raw Vulkan allocation.
	void* m_mappedAddress = nullptr; ///< Non-null only while the chunk is mapped (see Interface::mapMemory).
	SpinLock m_mtx; ///< Protects m_mappedAddress.
	U8 m_classIdx = MAX_U8; ///< Index into the owning Interface's class table.
};
/// Per-(memory type, tiling) backend of ClassGpuAllocator: creates, recycles, maps and destroys the
/// VkDeviceMemory chunks that the class allocator sub-allocates from.
class GpuMemoryManager::Interface final : public ClassGpuAllocatorInterface
{
public:
	GrAllocator<U8> m_alloc; ///< CPU allocator for the Memory bookkeeping objects.
	Array<IntrusiveList<Memory>, CLASSES.getSize()> m_vacantMemory; ///< Per-class free-lists of recycled chunks.
	Array<ClassInf, CLASSES.getSize()> m_classes = {}; ///< Active class table (CLASSES, or a REBAR_CLASSES prefix).
	U8 m_classCount = 0; ///< Number of valid entries in m_classes.
	Mutex m_mtx; ///< Protects m_vacantMemory.
	VkDevice m_dev = VK_NULL_HANDLE;
	U8 m_memTypeIdx = MAX_U8; ///< The Vulkan memory type index this interface allocates from.
	Bool m_exposesBufferGpuAddress = false; ///< When true, chunks get VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT.

	/// Provide a chunk for classIdx: recycle a vacant one, or allocate a fresh VkDeviceMemory.
	Error allocate(U32 classIdx, ClassGpuAllocatorMemory*& cmem) override
	{
		ANKI_ASSERT(classIdx < m_classCount);

		Memory* mem;
		LockGuard<Mutex> lock(m_mtx);

		if(!m_vacantMemory[classIdx].isEmpty())
		{
			// Recycle
			mem = &m_vacantMemory[classIdx].getFront();
			m_vacantMemory[classIdx].popFront();
		}
		else
		{
			// Create new
			VkMemoryAllocateInfo ci = {};
			ci.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
			ci.allocationSize = m_classes[classIdx].m_chunkSize;
			ci.memoryTypeIndex = m_memTypeIdx;

			// May get chained into ci.pNext below; must stay alive until vkAllocateMemory returns (same scope, OK)
			VkMemoryAllocateFlagsInfo flags = {};
			flags.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
			flags.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT;
			if(m_exposesBufferGpuAddress)
			{
				ci.pNext = &flags;
			}

			VkDeviceMemory memHandle;
			// All VkResult error codes are negative; LOGF is presumably fatal, so no error escapes this path
			if(ANKI_UNLIKELY(vkAllocateMemory(m_dev, &ci, nullptr, &memHandle) < 0))
			{
				ANKI_VK_LOGF("Out of GPU memory. Mem type index %u, size %zu", m_memTypeIdx,
							 m_classes[classIdx].m_chunkSize);
			}

			mem = m_alloc.newInstance<Memory>();
			mem->m_handle = memHandle;
			mem->m_classIdx = U8(classIdx);
		}

		ANKI_ASSERT(mem);
		ANKI_ASSERT(mem->m_handle);
		ANKI_ASSERT(mem->m_classIdx == classIdx);
		ANKI_ASSERT(mem->m_mappedAddress == nullptr); // free() unmaps, so recycled chunks arrive unmapped
		cmem = mem;
		return Error::NONE;
	}

	/// Return a chunk to its per-class free-list. The VkDeviceMemory is kept for reuse, not freed
	/// (collectGarbage() does the actual release).
	void free(ClassGpuAllocatorMemory* cmem) override
	{
		ANKI_ASSERT(cmem);

		Memory* mem = static_cast<Memory*>(cmem);
		ANKI_ASSERT(mem->m_handle);

		LockGuard<Mutex> lock(m_mtx);
		m_vacantMemory[mem->m_classIdx].pushBack(mem);

		// Unmap
		if(mem->m_mappedAddress)
		{
			vkUnmapMemory(m_dev, mem->m_handle);
			mem->m_mappedAddress = nullptr;
		}
	}

	U32 getClassCount() const override
	{
		return m_classCount;
	}

	void getClassInfo(U32 classIdx, PtrSize& slotSize, PtrSize& chunkSize) const override
	{
		ANKI_ASSERT(classIdx < m_classCount);
		slotSize = m_classes[classIdx].m_slotSize;
		chunkSize = m_classes[classIdx].m_chunkSize;
	}

	/// Release every vacant chunk back to the driver (unmap first if still mapped).
	void collectGarbage()
	{
		LockGuard<Mutex> lock(m_mtx);

		for(U classIdx = 0; classIdx < m_classCount; ++classIdx)
		{
			while(!m_vacantMemory[classIdx].isEmpty())
			{
				Memory* mem = &m_vacantMemory[classIdx].getFront();
				m_vacantMemory[classIdx].popFront();

				if(mem->m_mappedAddress)
				{
					vkUnmapMemory(m_dev, mem->m_handle);
				}

				vkFreeMemory(m_dev, mem->m_handle, nullptr);

				m_alloc.deleteInstance(mem);
			}
		}
	}

	// Map memory. Lazily maps the whole chunk exactly once; callers offset into it. Guarded by the
	// chunk's own spinlock, so concurrent mappers of the same chunk get the same address.
	void* mapMemory(ClassGpuAllocatorMemory* cmem)
	{
		ANKI_ASSERT(cmem);

		Memory* mem = static_cast<Memory*>(cmem);
		void* out;

		LockGuard<SpinLock> lock(mem->m_mtx);
		if(mem->m_mappedAddress)
		{
			out = mem->m_mappedAddress;
		}
		else
		{
			ANKI_VK_CHECKF(vkMapMemory(m_dev, mem->m_handle, 0, m_classes[mem->m_classIdx].m_chunkSize, 0, &out));
			mem->m_mappedAddress = out;
		}

		ANKI_ASSERT(out);
		return out;
	}
};
/// ClassGpuAllocator plus a flag that records whether it serves a device-local (GPU) or host (CPU) heap.
class GpuMemoryManager::ClassAllocator : public ClassGpuAllocator
{
public:
	Bool m_isDeviceMemory; ///< True when the backing heap has VK_MEMORY_HEAP_DEVICE_LOCAL_BIT.
};
GpuMemoryManager::~GpuMemoryManager()
{
	// Nothing to do: teardown happens explicitly through destroy(). Defined out-of-line so the nested
	// types (Interface, ClassAllocator) are complete where the members are destroyed.
}
  156. void GpuMemoryManager::destroy()
  157. {
  158. for(U32 i = 0; i < m_ifaces.getSize(); ++i)
  159. {
  160. for(U32 j = 0; j < 2; j++)
  161. {
  162. m_ifaces[i][j].collectGarbage();
  163. }
  164. }
  165. m_ifaces.destroy(m_alloc);
  166. m_callocs.destroy(m_alloc);
  167. }
  168. void GpuMemoryManager::init(VkPhysicalDevice pdev, VkDevice dev, GrAllocator<U8> alloc, Bool exposeBufferGpuAddress)
  169. {
  170. ANKI_ASSERT(pdev);
  171. ANKI_ASSERT(dev);
  172. // Print some info
  173. ANKI_VK_LOGI("Initializing memory manager");
  174. for(const ClassInf& c : CLASSES)
  175. {
  176. ANKI_VK_LOGI("\tGPU mem class. Chunk size: %lu, slotSize: %lu, allocsPerChunk %lu", c.m_chunkSize, c.m_slotSize,
  177. c.m_chunkSize / c.m_slotSize);
  178. }
  179. vkGetPhysicalDeviceMemoryProperties(pdev, &m_memoryProperties);
  180. m_alloc = alloc;
  181. m_ifaces.create(alloc, m_memoryProperties.memoryTypeCount);
  182. for(U32 memTypeIdx = 0; memTypeIdx < m_ifaces.getSize(); ++memTypeIdx)
  183. {
  184. for(U32 linear = 0; linear < 2; ++linear)
  185. {
  186. Interface& iface = m_ifaces[memTypeIdx][linear];
  187. iface.m_alloc = alloc;
  188. iface.m_dev = dev;
  189. iface.m_memTypeIdx = U8(memTypeIdx);
  190. iface.m_exposesBufferGpuAddress = (linear == 1) && exposeBufferGpuAddress;
  191. // Find if it's ReBAR
  192. const VkMemoryPropertyFlags props = m_memoryProperties.memoryTypes[memTypeIdx].propertyFlags;
  193. const VkMemoryPropertyFlags reBarProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
  194. | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
  195. | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
  196. const PtrSize heapSize =
  197. m_memoryProperties.memoryHeaps[m_memoryProperties.memoryTypes[memTypeIdx].heapIndex].size;
  198. const Bool isReBar = props == reBarProps && heapSize <= 256_MB;
  199. if(isReBar)
  200. {
  201. ANKI_VK_LOGI("Memory type %u is ReBAR", memTypeIdx);
  202. }
  203. // Choose different classes
  204. if(!isReBar)
  205. {
  206. iface.m_classCount = CLASSES.getSize();
  207. iface.m_classes = CLASSES;
  208. }
  209. else
  210. {
  211. iface.m_classCount = REBAR_CLASSES.getSize();
  212. memcpy(&iface.m_classes[0], &REBAR_CLASSES[0], REBAR_CLASSES.getSizeInBytes());
  213. }
  214. }
  215. }
  216. // One allocator per linear/non-linear resources
  217. m_callocs.create(alloc, m_memoryProperties.memoryTypeCount);
  218. for(U32 memTypeIdx = 0; memTypeIdx < m_callocs.getSize(); ++memTypeIdx)
  219. {
  220. for(U32 linear = 0; linear < 2; ++linear)
  221. {
  222. m_callocs[memTypeIdx][linear].init(m_alloc, &m_ifaces[memTypeIdx][linear]);
  223. const U32 heapIdx = m_memoryProperties.memoryTypes[memTypeIdx].heapIndex;
  224. m_callocs[memTypeIdx][linear].m_isDeviceMemory =
  225. !!(m_memoryProperties.memoryHeaps[heapIdx].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT);
  226. }
  227. }
  228. }
  229. void GpuMemoryManager::allocateMemory(U32 memTypeIdx, PtrSize size, U32 alignment, Bool linearResource,
  230. GpuMemoryHandle& handle)
  231. {
  232. ClassGpuAllocator& calloc = m_callocs[memTypeIdx][linearResource];
  233. const Error err = calloc.allocate(size, alignment, handle.m_classHandle);
  234. (void)err;
  235. handle.m_memory = static_cast<Memory*>(handle.m_classHandle.m_memory)->m_handle;
  236. handle.m_offset = handle.m_classHandle.m_offset;
  237. handle.m_linear = linearResource;
  238. handle.m_memTypeIdx = U8(memTypeIdx);
  239. }
  240. void GpuMemoryManager::freeMemory(GpuMemoryHandle& handle)
  241. {
  242. ANKI_ASSERT(handle);
  243. ClassGpuAllocator& calloc = m_callocs[handle.m_memTypeIdx][handle.m_linear];
  244. calloc.free(handle.m_classHandle);
  245. handle = {};
  246. }
  247. void* GpuMemoryManager::getMappedAddress(GpuMemoryHandle& handle)
  248. {
  249. ANKI_ASSERT(handle);
  250. Interface& iface = m_ifaces[handle.m_memTypeIdx][handle.m_linear];
  251. U8* out = static_cast<U8*>(iface.mapMemory(handle.m_classHandle.m_memory));
  252. return static_cast<void*>(out + handle.m_offset);
  253. }
  254. U32 GpuMemoryManager::findMemoryType(U32 resourceMemTypeBits, VkMemoryPropertyFlags preferFlags,
  255. VkMemoryPropertyFlags avoidFlags) const
  256. {
  257. U32 prefered = MAX_U32;
  258. // Iterate all mem types
  259. for(U32 i = 0; i < m_memoryProperties.memoryTypeCount; i++)
  260. {
  261. if(resourceMemTypeBits & (1u << i))
  262. {
  263. const VkMemoryPropertyFlags flags = m_memoryProperties.memoryTypes[i].propertyFlags;
  264. if((flags & preferFlags) == preferFlags && (flags & avoidFlags) == 0)
  265. {
  266. // It's the candidate we want
  267. if(prefered == MAX_U32)
  268. {
  269. prefered = i;
  270. }
  271. else
  272. {
  273. // On some Intel drivers there are identical memory types pointing to different heaps. Chose the
  274. // biggest heap
  275. const PtrSize crntHeapSize =
  276. m_memoryProperties.memoryHeaps[m_memoryProperties.memoryTypes[i].heapIndex].size;
  277. const PtrSize prevHeapSize =
  278. m_memoryProperties.memoryHeaps[m_memoryProperties.memoryTypes[prefered].heapIndex].size;
  279. if(crntHeapSize > prevHeapSize)
  280. {
  281. prefered = i;
  282. }
  283. }
  284. }
  285. }
  286. }
  287. return prefered;
  288. }
  289. void GpuMemoryManager::getAllocatedMemory(PtrSize& gpuMemory, PtrSize& cpuMemory) const
  290. {
  291. gpuMemory = 0;
  292. cpuMemory = 0;
  293. for(U32 memTypeIdx = 0; memTypeIdx < m_callocs.getSize(); ++memTypeIdx)
  294. {
  295. for(U32 linear = 0; linear < 2; ++linear)
  296. {
  297. if(m_callocs[memTypeIdx][linear].m_isDeviceMemory)
  298. {
  299. gpuMemory += m_callocs[memTypeIdx][linear].getAllocatedMemory();
  300. }
  301. else
  302. {
  303. cpuMemory += m_callocs[memTypeIdx][linear].getAllocatedMemory();
  304. }
  305. }
  306. }
  307. }
  308. } // end namespace anki