// Memory.cpp
  1. // Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/Util/Memory.h>
  6. #include <AnKi/Util/Functions.h>
  7. #include <AnKi/Util/Assert.h>
  8. #include <AnKi/Util/Thread.h>
  9. #include <AnKi/Util/Atomic.h>
  10. #include <AnKi/Util/Logger.h>
  11. #include <cstdlib>
  12. #include <cstring>
  13. #include <cstdio>
  14. namespace anki
  15. {
  16. #if ANKI_MEM_EXTRA_CHECKS
  17. static PoolSignature computePoolSignature(void* ptr)
  18. {
  19. ANKI_ASSERT(ptr);
  20. PtrSize sig64 = ptrToNumber(ptr);
  21. PoolSignature sig = PoolSignature(sig64);
  22. sig ^= 0x5bd1e995;
  23. sig ^= sig << 24;
  24. ANKI_ASSERT(sig != 0);
  25. return sig;
  26. }
/// Header prepended to every HeapMemoryPool allocation in extra-checks builds.
class AllocationHeader
{
public:
	// Full size of the allocation, header included
	PtrSize m_allocationSize;
	// Signature of the owning pool (see computePoolSignature)
	PoolSignature m_signature;
};

// Max client alignment the extra-checks header scheme supports
constexpr U32 MAX_ALIGNMENT = 64;
// Header size rounded up to MAX_ALIGNMENT so the user pointer stays aligned
constexpr U32 ALLOCATION_HEADER_SIZE = getAlignedRoundUp(MAX_ALIGNMENT, sizeof(AllocationHeader));
  35. #endif
// Fatal log for allocation failures (LOGF aborts)
#define ANKI_CREATION_OOM_ACTION() ANKI_UTIL_LOGF("Out of memory")
// Non-fatal log for allocation failures; the caller will return nullptr
#define ANKI_OOM_ACTION() ANKI_UTIL_LOGE("Out of memory. Expect segfault")
  38. template<typename TPtr, typename TSize>
  39. static void invalidateMemory(TPtr ptr, TSize size)
  40. {
  41. #if ANKI_MEM_EXTRA_CHECKS
  42. memset(static_cast<void*>(ptr), 0xCC, size);
  43. #endif
  44. }
/// Allocate size bytes aligned to alignmentBytes using the platform's aligned
/// allocator. Returns nullptr (after logging an error) on failure.
void* mallocAligned(PtrSize size, PtrSize alignmentBytes)
{
	ANKI_ASSERT(size > 0);
	ANKI_ASSERT(alignmentBytes > 0);

#if ANKI_POSIX
# if !ANKI_OS_ANDROID
	void* out = nullptr;
	// posix_memalign requires the alignment to be a multiple of sizeof(void*)
	U alignment = getAlignedRoundUp(alignmentBytes, sizeof(void*));
	int err = posix_memalign(&out, alignment, size);
	if(!err)
	{
		ANKI_ASSERT(out != nullptr);
		// Make sure it's aligned
		ANKI_ASSERT(isAligned(alignmentBytes, out));
	}
	else
	{
		ANKI_UTIL_LOGE("mallocAligned() failed");
	}

	return out;
# else
	// Android path uses memalign instead of posix_memalign
	void* out = memalign(getAlignedRoundUp(alignmentBytes, sizeof(void*)), size);
	if(out)
	{
		// Make sure it's aligned
		ANKI_ASSERT(isAligned(alignmentBytes, out));
	}
	else
	{
		ANKI_UTIL_LOGE("memalign() failed");
	}

	return out;
# endif
#elif ANKI_OS_WINDOWS
	void* out = _aligned_malloc(size, alignmentBytes);
	if(out)
	{
		// Make sure it's aligned
		ANKI_ASSERT(isAligned(alignmentBytes, out));
	}
	else
	{
		ANKI_UTIL_LOGE("_aligned_malloc() failed");
	}

	return out;
#else
# error "Unimplemented"
#endif
}
/// Free memory that was allocated with mallocAligned().
void freeAligned(void* ptr)
{
#if ANKI_POSIX
	// posix_memalign/memalign memory is released with regular free()
	::free(ptr);
#elif ANKI_OS_WINDOWS
	// _aligned_malloc memory must go through _aligned_free
	_aligned_free(ptr);
#else
# error "Unimplemented"
#endif
}
  104. void* allocAligned(void* userData, void* ptr, PtrSize size, PtrSize alignment)
  105. {
  106. (void)userData;
  107. void* out;
  108. if(ptr == nullptr)
  109. {
  110. // Allocate
  111. ANKI_ASSERT(size > 0);
  112. out = mallocAligned(size, alignment);
  113. }
  114. else
  115. {
  116. // Deallocate
  117. ANKI_ASSERT(size == 0);
  118. ANKI_ASSERT(alignment == 0);
  119. freeAligned(ptr);
  120. out = nullptr;
  121. }
  122. return out;
  123. }
BaseMemoryPool::~BaseMemoryPool()
{
	// All external references must be dropped before the pool dies
	ANKI_ASSERT(m_refcount.load() == 0 && "Refcount should be zero");
}
Bool BaseMemoryPool::isInitialized() const
{
	// init() sets the allocation callback, so it doubles as the "initialized" flag
	return m_allocCb != nullptr;
}
/// Construct an uninitialized heap pool; init() must be called before use.
HeapMemoryPool::HeapMemoryPool()
	: BaseMemoryPool(Type::HEAP)
{
}
  136. HeapMemoryPool::~HeapMemoryPool()
  137. {
  138. const U32 count = m_allocationsCount.load();
  139. if(count != 0)
  140. {
  141. ANKI_UTIL_LOGW("Memory pool destroyed before all memory being released "
  142. "(%u deallocations missed)",
  143. count);
  144. }
  145. }
/// Initialize the pool with the aligned-allocation callback to delegate to.
void HeapMemoryPool::init(AllocAlignedCallback allocCb, void* allocCbUserData)
{
	ANKI_ASSERT(!isInitialized());
	ANKI_ASSERT(m_allocCb == nullptr); // Same condition as above; init() runs once
	ANKI_ASSERT(allocCb != nullptr);

	m_allocCb = allocCb;
	m_allocCbUserData = allocCbUserData;

#if ANKI_MEM_EXTRA_CHECKS
	// Per-pool signature used to tag allocation headers
	m_signature = computePoolSignature(this);
#endif
}
/// Allocate size bytes with the given alignment via the user callback.
/// Returns nullptr (after logging) on failure. In extra-checks builds a hidden
/// header recording the pool signature and size is prepended.
void* HeapMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
	ANKI_ASSERT(isInitialized());

#if ANKI_MEM_EXTRA_CHECKS
	// The header occupies MAX_ALIGNMENT bytes, so the returned pointer keeps
	// its alignment only for requests up to MAX_ALIGNMENT
	ANKI_ASSERT(alignment <= MAX_ALIGNMENT && "Wrong assumption");
	size += ALLOCATION_HEADER_SIZE;
#endif

	void* mem = m_allocCb(m_allocCbUserData, nullptr, size, alignment);
	if(mem != nullptr)
	{
		m_allocationsCount.fetchAdd(1);

#if ANKI_MEM_EXTRA_CHECKS
		// Fill in the header and hand back the memory right after it
		memset(mem, 0, ALLOCATION_HEADER_SIZE);
		AllocationHeader& header = *static_cast<AllocationHeader*>(mem);
		header.m_signature = m_signature;
		header.m_allocationSize = size; // Includes the header itself
		mem = static_cast<void*>(static_cast<U8*>(mem) + ALLOCATION_HEADER_SIZE);
#endif
	}
	else
	{
		ANKI_OOM_ACTION();
	}

	return mem;
}
/// Free memory returned by allocate(). Freeing nullptr is a no-op.
void HeapMemoryPool::free(void* ptr)
{
	ANKI_ASSERT(isInitialized());

	if(ANKI_UNLIKELY(ptr == nullptr))
	{
		return;
	}

#if ANKI_MEM_EXTRA_CHECKS
	// Step back to the hidden header and check it was written by this pool
	U8* memU8 = static_cast<U8*>(ptr) - ALLOCATION_HEADER_SIZE;
	AllocationHeader& header = *reinterpret_cast<AllocationHeader*>(memU8);
	if(header.m_signature != m_signature)
	{
		ANKI_UTIL_LOGE("Signature missmatch on free");
	}

	ptr = static_cast<void*>(memU8);
	// Poison the whole allocation (header included) to surface use-after-free
	invalidateMemory(ptr, header.m_allocationSize);
#endif

	m_allocationsCount.fetchSub(1);
	m_allocCb(m_allocCbUserData, ptr, 0, 0);
}
/// Construct an uninitialized stack pool; init() must be called before use.
StackMemoryPool::StackMemoryPool()
	: BaseMemoryPool(Type::STACK)
{
}
  206. StackMemoryPool::~StackMemoryPool()
  207. {
  208. // Iterate all until you find an unused
  209. for(Chunk& ch : m_chunks)
  210. {
  211. if(ch.m_baseMem != nullptr)
  212. {
  213. ch.check();
  214. invalidateMemory(ch.m_baseMem, ch.m_size);
  215. m_allocCb(m_allocCbUserData, ch.m_baseMem, 0, 0);
  216. }
  217. else
  218. {
  219. break;
  220. }
  221. }
  222. // Do some error checks
  223. const U32 allocCount = m_allocationsCount.load();
  224. if(!m_ignoreDeallocationErrors && allocCount != 0)
  225. {
  226. ANKI_UTIL_LOGW("Forgot to deallocate");
  227. }
  228. }
  229. void StackMemoryPool::init(AllocAlignedCallback allocCb, void* allocCbUserData, PtrSize initialChunkSize,
  230. F32 nextChunkScale, PtrSize nextChunkBias, Bool ignoreDeallocationErrors,
  231. PtrSize alignmentBytes)
  232. {
  233. ANKI_ASSERT(!isInitialized());
  234. ANKI_ASSERT(allocCb);
  235. ANKI_ASSERT(initialChunkSize > 0);
  236. ANKI_ASSERT(nextChunkScale >= 1.0);
  237. ANKI_ASSERT(alignmentBytes > 0);
  238. m_allocCb = allocCb;
  239. m_allocCbUserData = allocCbUserData;
  240. m_alignmentBytes = alignmentBytes;
  241. m_initialChunkSize = initialChunkSize;
  242. m_nextChunkScale = nextChunkScale;
  243. m_nextChunkBias = nextChunkBias;
  244. m_ignoreDeallocationErrors = ignoreDeallocationErrors;
  245. }
/// Thread-safe stack allocation. The fast path bumps the current chunk's
/// cursor with a lock-free fetchAdd; when the chunk overflows, a new chunk is
/// created or recycled under a mutex. Returns nullptr on failure (after
/// logging).
void* StackMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
	ANKI_ASSERT(isInitialized());
	ANKI_ASSERT(alignment <= m_alignmentBytes);
	(void)alignment;

	// Round every allocation to the pool alignment so the cursor stays aligned
	size = getAlignedRoundUp(m_alignmentBytes, size);
	ANKI_ASSERT(size > 0);

	U8* out = nullptr;
	while(true)
	{
		// Try to allocate from the current chunk, if there is one
		Chunk* crntChunk = nullptr;
		const I32 crntChunkIdx = m_crntChunkIdx.load();
		if(crntChunkIdx >= 0)
		{
			crntChunk = &m_chunks[crntChunkIdx];
			crntChunk->check();
			// Atomic bump; may overshoot the chunk end, which is checked below
			out = crntChunk->m_mem.fetchAdd(size);
			ANKI_ASSERT(out >= crntChunk->m_baseMem);
		}

		if(crntChunk && out + size <= crntChunk->m_baseMem + crntChunk->m_size)
		{
			// All is fine, there is enough space in the chunk
			m_allocationsCount.fetchAdd(1);
			break;
		}
		else
		{
			// Need new chunk
			LockGuard<Mutex> lock(m_lock);

			// Make sure that only one thread will create a new chunk
			const Bool someOtherThreadCreateAChunkWhileIWasHoldingTheLock = m_crntChunkIdx.load() != crntChunkIdx;
			if(someOtherThreadCreateAChunkWhileIWasHoldingTheLock)
			{
				// Retry the fast path against the chunk that thread published
				continue;
			}

			// We can create a new chunk
			ANKI_ASSERT(crntChunkIdx >= -1);
			if(U32(crntChunkIdx + 1) >= m_chunks.getSize())
			{
				// The fixed chunk array is exhausted
				out = nullptr;
				ANKI_UTIL_LOGE("Number of chunks is not enough");
				ANKI_OOM_ACTION();
				break;
			}

			// Compute the memory of the new chunk. Don't look at the previous chunk
			PtrSize newChunkSize = m_initialChunkSize;
			for(I i = 0; i < crntChunkIdx + 1; ++i)
			{
				newChunkSize = PtrSize(F64(newChunkSize) * m_nextChunkScale) + m_nextChunkBias;
			}
			newChunkSize = max(size, newChunkSize); // Can't have the allocation fail
			alignRoundUp(m_alignmentBytes, newChunkSize); // Always align at the end

			// Point to the next chunk
			Chunk* newChunk = &m_chunks[crntChunkIdx + 1];
			if(newChunk->m_baseMem == nullptr || newChunk->m_size != newChunkSize)
			{
				// Chunk is empty or its memory doesn't match the expected, need to (re)initialize it
				if(newChunk->m_baseMem)
				{
					m_allocCb(m_allocCbUserData, newChunk->m_baseMem, 0, 0);
					m_allocatedMemory -= newChunk->m_size;
				}

				void* mem = m_allocCb(m_allocCbUserData, nullptr, newChunkSize, m_alignmentBytes);
				if(mem != nullptr)
				{
					invalidateMemory(mem, newChunkSize);
					newChunk->m_baseMem = static_cast<U8*>(mem);
					newChunk->m_mem.setNonAtomically(newChunk->m_baseMem);
					newChunk->m_size = newChunkSize;
					m_allocatedMemory += newChunk->m_size;

					// Publish the chunk index only after the chunk is fully set up
					const I32 idx = m_crntChunkIdx.fetchAdd(1);
					ANKI_ASSERT(idx == crntChunkIdx);
					(void)idx;
				}
				else
				{
					out = nullptr;
					ANKI_OOM_ACTION();
					break;
				}
			}
			else
			{
				// Will recycle
				newChunk->checkReset();
				invalidateMemory(newChunk->m_baseMem, newChunk->m_size);
				const I32 idx = m_crntChunkIdx.fetchAdd(1);
				ANKI_ASSERT(idx == crntChunkIdx);
				(void)idx;
			}
		}
	}

	return static_cast<void*>(out);
}
  341. void StackMemoryPool::free(void* ptr)
  342. {
  343. ANKI_ASSERT(isInitialized());
  344. if(ANKI_UNLIKELY(ptr == nullptr))
  345. {
  346. return;
  347. }
  348. // ptr shouldn't be null or not aligned. If not aligned it was not allocated by this class
  349. ANKI_ASSERT(ptr != nullptr && isAligned(m_alignmentBytes, ptr));
  350. const U32 count = m_allocationsCount.fetchSub(1);
  351. ANKI_ASSERT(count > 0);
  352. (void)count;
  353. }
  354. void StackMemoryPool::reset()
  355. {
  356. ANKI_ASSERT(isInitialized());
  357. // Iterate all until you find an unused
  358. for(Chunk& ch : m_chunks)
  359. {
  360. if(ch.m_baseMem != nullptr)
  361. {
  362. ch.check();
  363. ch.m_mem.store(ch.m_baseMem);
  364. invalidateMemory(ch.m_baseMem, ch.m_size);
  365. }
  366. else
  367. {
  368. break;
  369. }
  370. }
  371. // Set the crnt chunk
  372. m_crntChunkIdx.setNonAtomically(-1);
  373. // Reset allocation count and do some error checks
  374. const U32 allocCount = m_allocationsCount.exchange(0);
  375. if(!m_ignoreDeallocationErrors && allocCount != 0)
  376. {
  377. ANKI_UTIL_LOGW("Forgot to deallocate");
  378. }
  379. }
/// Construct an uninitialized chain pool; init() must be called before use.
ChainMemoryPool::ChainMemoryPool()
	: BaseMemoryPool(Type::CHAIN)
{
}
  384. ChainMemoryPool::~ChainMemoryPool()
  385. {
  386. if(m_allocationsCount.load() != 0)
  387. {
  388. ANKI_UTIL_LOGW("Memory pool destroyed before all memory being released");
  389. }
  390. Chunk* ch = m_headChunk;
  391. while(ch)
  392. {
  393. Chunk* next = ch->m_next;
  394. destroyChunk(ch);
  395. ch = next;
  396. }
  397. }
  398. void ChainMemoryPool::init(AllocAlignedCallback allocCb, void* allocCbUserData, PtrSize initialChunkSize,
  399. F32 nextChunkScale, PtrSize nextChunkBias, PtrSize alignmentBytes)
  400. {
  401. ANKI_ASSERT(!isInitialized());
  402. ANKI_ASSERT(initialChunkSize > 0);
  403. ANKI_ASSERT(nextChunkScale >= 1.0);
  404. ANKI_ASSERT(alignmentBytes > 0);
  405. // Set all values
  406. m_allocCb = allocCb;
  407. m_allocCbUserData = allocCbUserData;
  408. m_alignmentBytes = alignmentBytes;
  409. m_initSize = initialChunkSize;
  410. m_scale = nextChunkScale;
  411. m_bias = nextChunkBias;
  412. m_headerSize = max<PtrSize>(m_alignmentBytes, sizeof(Chunk*));
  413. // Initial size should be > 0
  414. ANKI_ASSERT(m_initSize > 0 && "Wrong arg");
  415. // On fixed initial size is the same as the max
  416. if(m_scale == 0.0 && m_bias == 0)
  417. {
  418. ANKI_ASSERT(0 && "Wrong arg");
  419. }
  420. }
/// Allocate from the tail chunk, growing the chain with a new chunk when the
/// tail is missing or full. Returns nullptr if a new chunk couldn't be made.
void* ChainMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
	ANKI_ASSERT(isInitialized());

	Chunk* ch;
	void* mem = nullptr;

	LockGuard<SpinLock> lock(m_lock);

	// Get chunk
	ch = m_tailChunk;

	// Create new chunk if needed (no tail yet, or the tail couldn't fit it)
	if(ch == nullptr || (mem = allocateFromChunk(ch, size, alignment)) == nullptr)
	{
		// Create new chunk
		PtrSize chunkSize = computeNewChunkSize(size);
		ch = createNewChunk(chunkSize);

		// Chunk creation failed
		if(ch == nullptr)
		{
			return mem; // Still nullptr here
		}
	}

	if(mem == nullptr)
	{
		// Retry on the fresh chunk; computeNewChunkSize() sized it to fit
		mem = allocateFromChunk(ch, size, alignment);
		ANKI_ASSERT(mem != nullptr && "The chunk should have space");
	}

	m_allocationsCount.fetchAdd(1);
	return mem;
}
/// Free an allocation; when its chunk's allocation count hits zero the whole
/// chunk is destroyed. Freeing nullptr is a no-op.
void ChainMemoryPool::free(void* ptr)
{
	ANKI_ASSERT(isInitialized());

	if(ANKI_UNLIKELY(ptr == nullptr))
	{
		return;
	}

	// Get the chunk: its pointer lives in the header right before the user ptr
	// (written by allocateFromChunk())
	U8* mem = static_cast<U8*>(ptr);
	mem -= m_headerSize;
	Chunk* chunk = *reinterpret_cast<Chunk**>(mem);

	ANKI_ASSERT(chunk != nullptr);
	ANKI_ASSERT((mem >= chunk->m_memory && mem < (chunk->m_memory + chunk->m_memsize)) && "Wrong chunk");

	LockGuard<SpinLock> lock(m_lock);

	// Decrease the deallocation refcount and if it's zero delete the chunk
	ANKI_ASSERT(chunk->m_allocationsCount > 0);
	if(--chunk->m_allocationsCount == 0)
	{
		// Chunk is empty. Delete it
		destroyChunk(chunk);
	}

	m_allocationsCount.fetchSub(1);
}
  472. PtrSize ChainMemoryPool::getChunksCount() const
  473. {
  474. ANKI_ASSERT(isInitialized());
  475. PtrSize count = 0;
  476. Chunk* ch = m_headChunk;
  477. while(ch)
  478. {
  479. ++count;
  480. ch = ch->m_next;
  481. }
  482. return count;
  483. }
  484. PtrSize ChainMemoryPool::getAllocatedSize() const
  485. {
  486. ANKI_ASSERT(isInitialized());
  487. PtrSize sum = 0;
  488. Chunk* ch = m_headChunk;
  489. while(ch)
  490. {
  491. sum += ch->m_top - ch->m_memory;
  492. ch = ch->m_next;
  493. }
  494. return sum;
  495. }
  496. PtrSize ChainMemoryPool::computeNewChunkSize(PtrSize size) const
  497. {
  498. size += m_headerSize;
  499. PtrSize crntMaxSize;
  500. if(m_tailChunk != nullptr)
  501. {
  502. // Get the size of previous
  503. crntMaxSize = m_tailChunk->m_memsize;
  504. // Compute new size
  505. crntMaxSize = PtrSize(F32(crntMaxSize) * m_scale) + m_bias;
  506. }
  507. else
  508. {
  509. // No chunks. Choose initial size
  510. ANKI_ASSERT(m_headChunk == nullptr);
  511. crntMaxSize = m_initSize;
  512. }
  513. crntMaxSize = max(crntMaxSize, size);
  514. ANKI_ASSERT(crntMaxSize > 0);
  515. return crntMaxSize;
  516. }
  517. ChainMemoryPool::Chunk* ChainMemoryPool::createNewChunk(PtrSize size)
  518. {
  519. ANKI_ASSERT(size > 0);
  520. // Allocate memory and chunk in one go
  521. PtrSize chunkAllocSize = getAlignedRoundUp(m_alignmentBytes, sizeof(Chunk));
  522. PtrSize memAllocSize = getAlignedRoundUp(m_alignmentBytes, size);
  523. PtrSize allocationSize = chunkAllocSize + memAllocSize;
  524. Chunk* chunk = reinterpret_cast<Chunk*>(m_allocCb(m_allocCbUserData, nullptr, allocationSize, m_alignmentBytes));
  525. if(chunk)
  526. {
  527. invalidateMemory(chunk, allocationSize);
  528. // Construct it
  529. memset(chunk, 0, sizeof(Chunk));
  530. // Initialize it
  531. chunk->m_memory = reinterpret_cast<U8*>(chunk) + chunkAllocSize;
  532. chunk->m_memsize = memAllocSize;
  533. chunk->m_top = chunk->m_memory;
  534. // Register it
  535. if(m_tailChunk)
  536. {
  537. m_tailChunk->m_next = chunk;
  538. chunk->m_prev = m_tailChunk;
  539. m_tailChunk = chunk;
  540. }
  541. else
  542. {
  543. ANKI_ASSERT(m_headChunk == nullptr);
  544. m_headChunk = m_tailChunk = chunk;
  545. }
  546. }
  547. else
  548. {
  549. ANKI_OOM_ACTION();
  550. }
  551. return chunk;
  552. }
  553. void* ChainMemoryPool::allocateFromChunk(Chunk* ch, PtrSize size, PtrSize alignment)
  554. {
  555. ANKI_ASSERT(ch);
  556. ANKI_ASSERT(ch->m_top <= ch->m_memory + ch->m_memsize);
  557. U8* mem = ch->m_top;
  558. PtrSize memV = ptrToNumber(mem);
  559. alignRoundUp(m_alignmentBytes, memV);
  560. mem = numberToPtr<U8*>(memV);
  561. U8* newTop = mem + m_headerSize + size;
  562. if(newTop <= ch->m_memory + ch->m_memsize)
  563. {
  564. *reinterpret_cast<Chunk**>(mem) = ch;
  565. mem += m_headerSize;
  566. ch->m_top = newTop;
  567. ++ch->m_allocationsCount;
  568. }
  569. else
  570. {
  571. // Chunk is full. Need a new one
  572. mem = nullptr;
  573. }
  574. return mem;
  575. }
  576. void ChainMemoryPool::destroyChunk(Chunk* ch)
  577. {
  578. ANKI_ASSERT(ch);
  579. if(ch == m_tailChunk)
  580. {
  581. m_tailChunk = ch->m_prev;
  582. }
  583. if(ch == m_headChunk)
  584. {
  585. m_headChunk = ch->m_next;
  586. }
  587. if(ch->m_prev)
  588. {
  589. ANKI_ASSERT(ch->m_prev->m_next == ch);
  590. ch->m_prev->m_next = ch->m_next;
  591. }
  592. if(ch->m_next)
  593. {
  594. ANKI_ASSERT(ch->m_next->m_prev == ch);
  595. ch->m_next->m_prev = ch->m_prev;
  596. }
  597. invalidateMemory(ch, getAlignedRoundUp(m_alignmentBytes, sizeof(Chunk)) + ch->m_memsize);
  598. m_allocCb(m_allocCbUserData, ch, 0, 0);
  599. }
  600. } // end namespace anki