// Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
// All rights reserved.
// Code licensed under the BSD License.
// http://www.anki3d.org/LICENSE

#include <AnKi/Util/Memory.h>
#include <AnKi/Util/Functions.h>
#include <AnKi/Util/Assert.h>
#include <AnKi/Util/Thread.h>
#include <AnKi/Util/Atomic.h>
#include <AnKi/Util/Logger.h>
#include <cstdlib>
#include <cstring>
#include <cstdio>

namespace anki {

#if ANKI_MEM_EXTRA_CHECKS
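// Derive a per-pool signature from the pool's own address. The XOR constant
// (which appears to be borrowed from MurmurHash2) and the shift only scramble
// the bits; the assert below guards against the scrambling producing zero.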
static PoolSignature computePoolSignature(void* ptr)
{
    ANKI_ASSERT(ptr);
    PtrSize sig64 = ptrToNumber(ptr);
    PoolSignature sig = PoolSignature(sig64);
    sig ^= 0x5bd1e995;
    sig ^= sig << 24;
    ANKI_ASSERT(sig != 0);
    return sig;
}

class AllocationHeader
{
public:
    PtrSize m_allocationSize;
    PoolSignature m_signature;
};

constexpr U32 MAX_ALIGNMENT = 64;
constexpr U32 ALLOCATION_HEADER_SIZE = getAlignedRoundUp(MAX_ALIGNMENT, sizeof(AllocationHeader));
#endif
#define ANKI_CREATION_OOM_ACTION() ANKI_UTIL_LOGF("Out of memory")
#define ANKI_OOM_ACTION() ANKI_UTIL_LOGE("Out of memory. Expect segfault")

template<typename TPtr, typename TSize>
static void invalidateMemory(TPtr ptr, TSize size)
{
#if ANKI_MEM_EXTRA_CHECKS
    // Fill freed or recycled memory with a recognizable pattern to surface use-after-free bugs
    memset(static_cast<void*>(ptr), 0xCC, size);
#endif
}
void* mallocAligned(PtrSize size, PtrSize alignmentBytes)
{
    ANKI_ASSERT(size > 0);
    ANKI_ASSERT(alignmentBytes > 0);

#if ANKI_POSIX
#	if !ANKI_OS_ANDROID
    void* out = nullptr;
    U alignment = getAlignedRoundUp(alignmentBytes, sizeof(void*));
    int err = posix_memalign(&out, alignment, size);
    if(!err)
    {
        ANKI_ASSERT(out != nullptr);
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_UTIL_LOGE("mallocAligned() failed");
    }

    return out;
#	else
    void* out = memalign(getAlignedRoundUp(alignmentBytes, sizeof(void*)), size);
    if(out)
    {
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_UTIL_LOGE("memalign() failed");
    }

    return out;
#	endif
#elif ANKI_OS_WINDOWS
    void* out = _aligned_malloc(size, alignmentBytes);
    if(out)
    {
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_UTIL_LOGE("_aligned_malloc() failed");
    }

    return out;
#else
#	error "Unimplemented"
#endif
}

void freeAligned(void* ptr)
{
#if ANKI_POSIX
    ::free(ptr);
#elif ANKI_OS_WINDOWS
    _aligned_free(ptr);
#else
#	error "Unimplemented"
#endif
}
void* allocAligned(void* userData, void* ptr, PtrSize size, PtrSize alignment)
{
    (void)userData;
    void* out;

    if(ptr == nullptr)
    {
        // Allocate
        ANKI_ASSERT(size > 0);
        out = mallocAligned(size, alignment);
    }
    else
    {
        // Deallocate
        ANKI_ASSERT(size == 0);
        ANKI_ASSERT(alignment == 0);

        freeAligned(ptr);
        out = nullptr;
    }

    return out;
}
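// Usage sketch (illustrative): the same callback serves as both allocator and
// deallocator. A null ptr requests an allocation; a non-null ptr with
// size == 0 and alignment == 0 requests a free.
//
//   void* p = allocAligned(nullptr, nullptr, 256, 16); // allocate 256 bytes
//   allocAligned(nullptr, p, 0, 0);                    // free them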
BaseMemoryPool::~BaseMemoryPool()
{
    ANKI_ASSERT(m_refcount.load() == 0 && "Refcount should be zero");
}

Bool BaseMemoryPool::isInitialized() const
{
    return m_allocCb != nullptr;
}

HeapMemoryPool::HeapMemoryPool()
    : BaseMemoryPool(Type::HEAP)
{
}

HeapMemoryPool::~HeapMemoryPool()
{
    const U32 count = m_allocationsCount.load();
    if(count != 0)
    {
        ANKI_UTIL_LOGW("Memory pool destroyed before all of its memory was released (%u deallocations missed)",
                       count);
    }
}
void HeapMemoryPool::init(AllocAlignedCallback allocCb, void* allocCbUserData)
{
    ANKI_ASSERT(!isInitialized());
    ANKI_ASSERT(m_allocCb == nullptr);
    ANKI_ASSERT(allocCb != nullptr);

    m_allocCb = allocCb;
    m_allocCbUserData = allocCbUserData;

#if ANKI_MEM_EXTRA_CHECKS
    m_signature = computePoolSignature(this);
#endif
}
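// With ANKI_MEM_EXTRA_CHECKS enabled, every allocation is prefixed by an
// AllocationHeader padded to ALLOCATION_HEADER_SIZE, so the pointer handed to
// the caller keeps (up to) MAX_ALIGNMENT-byte alignment:
//
//   [AllocationHeader | padding][user memory ...]
//   ^ block from m_allocCb      ^ pointer returned by allocate()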
void* HeapMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isInitialized());

#if ANKI_MEM_EXTRA_CHECKS
    ANKI_ASSERT(alignment <= MAX_ALIGNMENT && "Wrong assumption");
    size += ALLOCATION_HEADER_SIZE;
#endif

    void* mem = m_allocCb(m_allocCbUserData, nullptr, size, alignment);

    if(mem != nullptr)
    {
        m_allocationsCount.fetchAdd(1);

#if ANKI_MEM_EXTRA_CHECKS
        memset(mem, 0, ALLOCATION_HEADER_SIZE);
        AllocationHeader& header = *static_cast<AllocationHeader*>(mem);
        header.m_signature = m_signature;
        header.m_allocationSize = size;

        mem = static_cast<void*>(static_cast<U8*>(mem) + ALLOCATION_HEADER_SIZE);
#endif
    }
    else
    {
        ANKI_OOM_ACTION();
    }

    return mem;
}
void HeapMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isInitialized());

    if(ANKI_UNLIKELY(ptr == nullptr))
    {
        return;
    }

#if ANKI_MEM_EXTRA_CHECKS
    U8* memU8 = static_cast<U8*>(ptr) - ALLOCATION_HEADER_SIZE;
    AllocationHeader& header = *reinterpret_cast<AllocationHeader*>(memU8);
    if(header.m_signature != m_signature)
    {
        ANKI_UTIL_LOGE("Signature mismatch on free");
    }

    ptr = static_cast<void*>(memU8);
    invalidateMemory(ptr, header.m_allocationSize);
#endif

    m_allocationsCount.fetchSub(1);
    m_allocCb(m_allocCbUserData, ptr, 0, 0);
}
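// Usage sketch (illustrative; uses the allocAligned callback defined above):
//
//   HeapMemoryPool pool;
//   pool.init(allocAligned, nullptr);
//   void* p = pool.allocate(128, 16);
//   pool.free(p);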
StackMemoryPool::StackMemoryPool()
    : BaseMemoryPool(Type::STACK)
{
}

StackMemoryPool::~StackMemoryPool()
{
    // Free the chunks. They are populated front to back, so stop at the first unused one
    for(Chunk& ch : m_chunks)
    {
        if(ch.m_baseMem != nullptr)
        {
            ch.check();
            invalidateMemory(ch.m_baseMem, ch.m_size);
            m_allocCb(m_allocCbUserData, ch.m_baseMem, 0, 0);
        }
        else
        {
            break;
        }
    }

    // Do some error checks
    const U32 allocCount = m_allocationsCount.load();
    if(!m_ignoreDeallocationErrors && allocCount != 0)
    {
        ANKI_UTIL_LOGW("Forgot to deallocate");
    }
}
void StackMemoryPool::init(AllocAlignedCallback allocCb, void* allocCbUserData, PtrSize initialChunkSize,
                           F32 nextChunkScale, PtrSize nextChunkBias, Bool ignoreDeallocationErrors,
                           PtrSize alignmentBytes)
{
    ANKI_ASSERT(!isInitialized());
    ANKI_ASSERT(allocCb);
    ANKI_ASSERT(initialChunkSize > 0);
    ANKI_ASSERT(nextChunkScale >= 1.0);
    ANKI_ASSERT(alignmentBytes > 0);

    m_allocCb = allocCb;
    m_allocCbUserData = allocCbUserData;
    m_alignmentBytes = alignmentBytes;
    m_initialChunkSize = initialChunkSize;
    m_nextChunkScale = nextChunkScale;
    m_nextChunkBias = nextChunkBias;
    m_ignoreDeallocationErrors = ignoreDeallocationErrors;
}
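// Chunk N's size is derived by applying size = size * nextChunkScale +
// nextChunkBias to initialChunkSize once per existing chunk, so chunks grow
// geometrically. The fast path of allocate() below is lock-free: a fetchAdd
// bumps the current chunk's top pointer, and only when the chunk overflows
// does a thread take m_lock to create or recycle the next chunk.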
void* StackMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isInitialized());
    ANKI_ASSERT(alignment <= m_alignmentBytes);
    (void)alignment;

    size = getAlignedRoundUp(m_alignmentBytes, size);
    ANKI_ASSERT(size > 0);

    U8* out = nullptr;
    while(true)
    {
        // Try to allocate from the current chunk, if there is one
        Chunk* crntChunk = nullptr;
        const I32 crntChunkIdx = m_crntChunkIdx.load();
        if(crntChunkIdx >= 0)
        {
            crntChunk = &m_chunks[crntChunkIdx];
            crntChunk->check();
            out = crntChunk->m_mem.fetchAdd(size);
            ANKI_ASSERT(out >= crntChunk->m_baseMem);
        }

        if(crntChunk && out + size <= crntChunk->m_baseMem + crntChunk->m_size)
        {
            // All is fine, there is enough space in the chunk
            m_allocationsCount.fetchAdd(1);
            break;
        }
        else
        {
            // Need a new chunk
            LockGuard<Mutex> lock(m_lock);

            // Make sure that only one thread will create a new chunk
            const Bool someOtherThreadCreatedAChunkWhileIWasAcquiringTheLock = m_crntChunkIdx.load() != crntChunkIdx;
            if(someOtherThreadCreatedAChunkWhileIWasAcquiringTheLock)
            {
                continue;
            }

            // We can create a new chunk
            ANKI_ASSERT(crntChunkIdx >= -1);
            if(U32(crntChunkIdx + 1) >= m_chunks.getSize())
            {
                out = nullptr;
                ANKI_UTIL_LOGE("Out of chunks");
                ANKI_OOM_ACTION();
                break;
            }

            // Compute the size of the new chunk. Don't look at the previous chunk, derive it from the initial size
            PtrSize newChunkSize = m_initialChunkSize;
            for(I i = 0; i < crntChunkIdx + 1; ++i)
            {
                newChunkSize = PtrSize(F64(newChunkSize) * m_nextChunkScale) + m_nextChunkBias;
            }
            newChunkSize = max(size, newChunkSize); // Can't have the allocation fail
            alignRoundUp(m_alignmentBytes, newChunkSize); // Always align at the end

            // Point to the next chunk
            Chunk* newChunk = &m_chunks[crntChunkIdx + 1];
            if(newChunk->m_baseMem == nullptr || newChunk->m_size != newChunkSize)
            {
                // Chunk is uninitialized or its size doesn't match the expected one, need to (re)initialize it
                if(newChunk->m_baseMem)
                {
                    m_allocCb(m_allocCbUserData, newChunk->m_baseMem, 0, 0);
                    m_allocatedMemory -= newChunk->m_size;
                }

                void* mem = m_allocCb(m_allocCbUserData, nullptr, newChunkSize, m_alignmentBytes);
                if(mem != nullptr)
                {
                    invalidateMemory(mem, newChunkSize);

                    newChunk->m_baseMem = static_cast<U8*>(mem);
                    newChunk->m_mem.setNonAtomically(newChunk->m_baseMem);
                    newChunk->m_size = newChunkSize;
                    m_allocatedMemory += newChunk->m_size;

                    const I32 idx = m_crntChunkIdx.fetchAdd(1);
                    ANKI_ASSERT(idx == crntChunkIdx);
                    (void)idx;
                }
                else
                {
                    out = nullptr;
                    ANKI_OOM_ACTION();
                    break;
                }
            }
            else
            {
                // Chunk is already allocated with the right size. Recycle it
                newChunk->checkReset();
                invalidateMemory(newChunk->m_baseMem, newChunk->m_size);

                const I32 idx = m_crntChunkIdx.fetchAdd(1);
                ANKI_ASSERT(idx == crntChunkIdx);
                (void)idx;
            }
        }
    }

    return static_cast<void*>(out);
}
void StackMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isInitialized());

    if(ANKI_UNLIKELY(ptr == nullptr))
    {
        return;
    }

    // ptr shouldn't be null or misaligned. If it's misaligned it was not allocated by this pool
    ANKI_ASSERT(ptr != nullptr && isAligned(m_alignmentBytes, ptr));

    const U32 count = m_allocationsCount.fetchSub(1);
    ANKI_ASSERT(count > 0);
    (void)count;
}
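// Note that free() only updates the allocation counter. The memory itself is
// reclaimed in bulk by reset() or by the destructor, as is usual for stack
// (linear) allocators.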
void StackMemoryPool::reset()
{
    ANKI_ASSERT(isInitialized());

    // Rewind the chunks. They are populated front to back, so stop at the first unused one
    for(Chunk& ch : m_chunks)
    {
        if(ch.m_baseMem != nullptr)
        {
            ch.check();
            ch.m_mem.store(ch.m_baseMem);
            invalidateMemory(ch.m_baseMem, ch.m_size);
        }
        else
        {
            break;
        }
    }

    // Reset the current chunk index
    m_crntChunkIdx.setNonAtomically(-1);

    // Reset the allocation count and do some error checks
    const U32 allocCount = m_allocationsCount.exchange(0);
    if(!m_ignoreDeallocationErrors && allocCount != 0)
    {
        ANKI_UTIL_LOGW("Forgot to deallocate");
    }
}
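// Usage sketch (illustrative; the argument values are hypothetical):
//
//   StackMemoryPool pool;
//   pool.init(allocAligned, nullptr, 1024 * 1024, 2.0f, 0, false, 16);
//   void* a = pool.allocate(64, 16); // bump-allocated from the current chunk
//   pool.free(a);                    // bookkeeping only
//   pool.reset();                    // rewind all chunks at once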
ChainMemoryPool::ChainMemoryPool()
    : BaseMemoryPool(Type::CHAIN)
{
}

ChainMemoryPool::~ChainMemoryPool()
{
    if(m_allocationsCount.load() != 0)
    {
        ANKI_UTIL_LOGW("Memory pool destroyed before all of its memory was released");
    }

    Chunk* ch = m_headChunk;
    while(ch)
    {
        Chunk* next = ch->m_next;
        destroyChunk(ch);
        ch = next;
    }
}
void ChainMemoryPool::init(AllocAlignedCallback allocCb, void* allocCbUserData, PtrSize initialChunkSize,
                           F32 nextChunkScale, PtrSize nextChunkBias, PtrSize alignmentBytes)
{
    ANKI_ASSERT(!isInitialized());
    ANKI_ASSERT(initialChunkSize > 0);
    ANKI_ASSERT(nextChunkScale >= 1.0);
    ANKI_ASSERT(alignmentBytes > 0);

    // Set all values
    m_allocCb = allocCb;
    m_allocCbUserData = allocCbUserData;
    m_alignmentBytes = alignmentBytes;
    m_initSize = initialChunkSize;
    m_scale = nextChunkScale;
    m_bias = nextChunkBias;
    m_headerSize = max<PtrSize>(m_alignmentBytes, sizeof(Chunk*));

    // Initial size should be > 0
    ANKI_ASSERT(m_initSize > 0 && "Wrong arg");

    // A zero scale combined with a zero bias means the chunks could never grow, which doesn't make sense
    if(m_scale == 0.0 && m_bias == 0)
    {
        ANKI_ASSERT(0 && "Wrong arg");
    }
}
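// Every allocation carries a header of m_headerSize bytes that stores the
// owning Chunk*, so free() can locate the chunk in O(1):
//
//   [Chunk* | padding to m_headerSize][user memory ...]
//   ^ header written by allocateFromChunk ^ pointer returned by allocate()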
void* ChainMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isInitialized());

    Chunk* ch;
    void* mem = nullptr;

    LockGuard<SpinLock> lock(m_lock);

    // Get the current chunk
    ch = m_tailChunk;

    // Create a new chunk if there is none or the current one is full
    if(ch == nullptr || (mem = allocateFromChunk(ch, size, alignment)) == nullptr)
    {
        PtrSize chunkSize = computeNewChunkSize(size);
        ch = createNewChunk(chunkSize);

        // Chunk creation failed
        if(ch == nullptr)
        {
            return mem;
        }
    }

    if(mem == nullptr)
    {
        mem = allocateFromChunk(ch, size, alignment);
        ANKI_ASSERT(mem != nullptr && "The chunk should have space");
    }

    m_allocationsCount.fetchAdd(1);
    return mem;
}
void ChainMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isInitialized());

    if(ANKI_UNLIKELY(ptr == nullptr))
    {
        return;
    }

    // Get the chunk from the header that precedes the allocation
    U8* mem = static_cast<U8*>(ptr);
    mem -= m_headerSize;
    Chunk* chunk = *reinterpret_cast<Chunk**>(mem);

    ANKI_ASSERT(chunk != nullptr);
    ANKI_ASSERT((mem >= chunk->m_memory && mem < (chunk->m_memory + chunk->m_memsize)) && "Wrong chunk");

    LockGuard<SpinLock> lock(m_lock);

    // Decrease the chunk's allocation count and if it reaches zero delete the chunk
    ANKI_ASSERT(chunk->m_allocationsCount > 0);
    if(--chunk->m_allocationsCount == 0)
    {
        // Chunk is empty. Delete it
        destroyChunk(chunk);
    }

    m_allocationsCount.fetchSub(1);
}
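// Unlike StackMemoryPool, a chain pool returns memory eagerly: the moment a
// chunk's last allocation is freed, the whole chunk is handed back to the
// allocation callback.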
PtrSize ChainMemoryPool::getChunksCount() const
{
    ANKI_ASSERT(isInitialized());
    PtrSize count = 0;
    Chunk* ch = m_headChunk;
    while(ch)
    {
        ++count;
        ch = ch->m_next;
    }

    return count;
}

PtrSize ChainMemoryPool::getAllocatedSize() const
{
    ANKI_ASSERT(isInitialized());
    PtrSize sum = 0;
    Chunk* ch = m_headChunk;
    while(ch)
    {
        sum += ch->m_top - ch->m_memory;
        ch = ch->m_next;
    }

    return sum;
}
PtrSize ChainMemoryPool::computeNewChunkSize(PtrSize size) const
{
    size += m_headerSize;

    PtrSize crntMaxSize;
    if(m_tailChunk != nullptr)
    {
        // Get the size of the previous chunk
        crntMaxSize = m_tailChunk->m_memsize;

        // Compute the new size
        crntMaxSize = PtrSize(F32(crntMaxSize) * m_scale) + m_bias;
    }
    else
    {
        // No chunks. Choose the initial size
        ANKI_ASSERT(m_headChunk == nullptr);
        crntMaxSize = m_initSize;
    }

    // The new chunk must at least fit the requested allocation
    crntMaxSize = max(crntMaxSize, size);
    ANKI_ASSERT(crntMaxSize > 0);
    return crntMaxSize;
}
ChainMemoryPool::Chunk* ChainMemoryPool::createNewChunk(PtrSize size)
{
    ANKI_ASSERT(size > 0);

    // Allocate memory and chunk in one go
    PtrSize chunkAllocSize = getAlignedRoundUp(m_alignmentBytes, sizeof(Chunk));
    PtrSize memAllocSize = getAlignedRoundUp(m_alignmentBytes, size);
    PtrSize allocationSize = chunkAllocSize + memAllocSize;

    Chunk* chunk = reinterpret_cast<Chunk*>(m_allocCb(m_allocCbUserData, nullptr, allocationSize, m_alignmentBytes));
    if(chunk)
    {
        invalidateMemory(chunk, allocationSize);

        // Construct it
        memset(chunk, 0, sizeof(Chunk));

        // Initialize it
        chunk->m_memory = reinterpret_cast<U8*>(chunk) + chunkAllocSize;
        chunk->m_memsize = memAllocSize;
        chunk->m_top = chunk->m_memory;

        // Register it in the chunk list
        if(m_tailChunk)
        {
            m_tailChunk->m_next = chunk;
            chunk->m_prev = m_tailChunk;
            m_tailChunk = chunk;
        }
        else
        {
            ANKI_ASSERT(m_headChunk == nullptr);
            m_headChunk = m_tailChunk = chunk;
        }
    }
    else
    {
        ANKI_OOM_ACTION();
    }

    return chunk;
}
void* ChainMemoryPool::allocateFromChunk(Chunk* ch, PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(ch);
    ANKI_ASSERT(ch->m_top <= ch->m_memory + ch->m_memsize);

    // Align the chunk's top pointer
    U8* mem = ch->m_top;
    PtrSize memV = ptrToNumber(mem);
    alignRoundUp(m_alignmentBytes, memV);
    mem = numberToPtr<U8*>(memV);

    U8* newTop = mem + m_headerSize + size;
    if(newTop <= ch->m_memory + ch->m_memsize)
    {
        // Write the header and return the memory right after it
        *reinterpret_cast<Chunk**>(mem) = ch;
        mem += m_headerSize;

        ch->m_top = newTop;
        ++ch->m_allocationsCount;
    }
    else
    {
        // Chunk is full. Need a new one
        mem = nullptr;
    }

    return mem;
}
void ChainMemoryPool::destroyChunk(Chunk* ch)
{
    ANKI_ASSERT(ch);

    // Unlink the chunk from the list
    if(ch == m_tailChunk)
    {
        m_tailChunk = ch->m_prev;
    }

    if(ch == m_headChunk)
    {
        m_headChunk = ch->m_next;
    }

    if(ch->m_prev)
    {
        ANKI_ASSERT(ch->m_prev->m_next == ch);
        ch->m_prev->m_next = ch->m_next;
    }

    if(ch->m_next)
    {
        ANKI_ASSERT(ch->m_next->m_prev == ch);
        ch->m_next->m_prev = ch->m_prev;
    }

    invalidateMemory(ch, getAlignedRoundUp(m_alignmentBytes, sizeof(Chunk)) + ch->m_memsize);
    m_allocCb(m_allocCbUserData, ch, 0, 0);
}
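// Usage sketch (illustrative; the argument values are hypothetical):
//
//   ChainMemoryPool pool;
//   pool.init(allocAligned, nullptr, 4096, 2.0f, 0, 16);
//   void* a = pool.allocate(100, 16);
//   void* b = pool.allocate(200, 16);
//   pool.free(a);
//   pool.free(b); // last allocation in its chunk, so the chunk is destroyed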
} // end namespace anki