// Copyright (C) 2009-2020, Panagiotis Christopoulos Charitos and contributors.
// All rights reserved.
// Code licensed under the BSD License.
// http://www.anki3d.org/LICENSE

#include <anki/util/Memory.h>
#include <anki/util/Functions.h>
#include <anki/util/Assert.h>
#include <anki/util/NonCopyable.h>
#include <anki/util/Thread.h>
#include <anki/util/Atomic.h>
#include <anki/util/Logger.h>
#include <cstdlib>
#include <cstring>
#include <cstdio>

namespace anki
{

#define ANKI_MEM_SIGNATURES ANKI_EXTRA_CHECKS

#if ANKI_MEM_SIGNATURES
using Signature = U32;
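
// computeSignature() derives a per-pool value from the pool's own address. In ANKI_MEM_SIGNATURES builds every
// HeapMemoryPool allocation is prefixed with a header that stores this value, and HeapMemoryPool::free() verifies
// it, catching pointers that are freed through the wrong pool.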
static Signature computeSignature(void* ptr)
{
    ANKI_ASSERT(ptr);
    PtrSize sig64 = ptrToNumber(ptr);
    Signature sig = Signature(sig64);
    sig ^= 0x5bd1e995;
    sig ^= sig << 24;
    ANKI_ASSERT(sig != 0);
    return sig;
}
#endif

#define ANKI_CREATION_OOM_ACTION() ANKI_UTIL_LOGF("Out of memory")
#define ANKI_OOM_ACTION() ANKI_UTIL_LOGE("Out of memory. Expect segfault")
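
// In ANKI_EXTRA_CHECKS builds invalidateMemory() floods fresh or freed pool memory with the 0xCC byte pattern so
// that use of uninitialized or recycled memory produces a recognizable value instead of silently "working".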
template<typename TPtr, typename TSize>
static void invalidateMemory(TPtr ptr, TSize size)
{
#if ANKI_EXTRA_CHECKS
    memset(static_cast<void*>(ptr), 0xCC, size);
#endif
}

void* mallocAligned(PtrSize size, PtrSize alignmentBytes)
{
    ANKI_ASSERT(size > 0);
    ANKI_ASSERT(alignmentBytes > 0);

#if ANKI_POSIX
# if !ANKI_OS_ANDROID
    void* out = nullptr;
    U alignment = getAlignedRoundUp(alignmentBytes, sizeof(void*));
    int err = posix_memalign(&out, alignment, size);

    if(!err)
    {
        ANKI_ASSERT(out != nullptr);
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_UTIL_LOGE("mallocAligned() failed");
    }

    return out;
# else
    void* out = memalign(getAlignedRoundUp(alignmentBytes, sizeof(void*)), size);

    if(out)
    {
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_UTIL_LOGE("memalign() failed");
    }

    return out;
# endif
#elif ANKI_OS_WINDOWS
    void* out = _aligned_malloc(size, alignmentBytes);

    if(out)
    {
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_UTIL_LOGE("_aligned_malloc() failed");
    }

    return out;
#else
# error "Unimplemented"
#endif
}

void freeAligned(void* ptr)
{
#if ANKI_POSIX
    ::free(ptr);
#elif ANKI_OS_WINDOWS
    _aligned_free(ptr);
#else
# error "Unimplemented"
#endif
}
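
// Usage sketch for the pair above (hypothetical values):
//   void* p = mallocAligned(256, 64); // 256 bytes, aligned to 64
//   if(p)
//   {
//       // ... use p ...
//       freeAligned(p);
//   }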

void* allocAligned(void* userData, void* ptr, PtrSize size, PtrSize alignment)
{
    (void)userData;
    void* out;

    if(ptr == nullptr)
    {
        // Allocate
        ANKI_ASSERT(size > 0);
        out = mallocAligned(size, alignment);
    }
    else
    {
        // Deallocate
        ANKI_ASSERT(size == 0);
        ANKI_ASSERT(alignment == 0);

        freeAligned(ptr);
        out = nullptr;
    }

    return out;
}
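
// allocAligned() implements the AllocAlignedCallback contract that the pools below expect: pass ptr == nullptr to
// allocate size bytes at the given alignment, or pass a previously returned pointer with size == 0 and
// alignment == 0 to free it. It can therefore be handed directly to the pools' create() methods as allocCb.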

BaseMemoryPool::~BaseMemoryPool()
{
    ANKI_ASSERT(m_refcount.load() == 0 && "Refcount should be zero");
}

Bool BaseMemoryPool::isCreated() const
{
    return m_allocCb != nullptr;
}

HeapMemoryPool::HeapMemoryPool()
    : BaseMemoryPool(Type::HEAP)
{
}

HeapMemoryPool::~HeapMemoryPool()
{
    const U32 count = m_allocationsCount.load();
    if(count != 0)
    {
        ANKI_UTIL_LOGW("Memory pool destroyed before all memory was released (%u deallocations missed)", count);
    }
}

void HeapMemoryPool::create(AllocAlignedCallback allocCb, void* allocCbUserData)
{
    ANKI_ASSERT(!isCreated());
    ANKI_ASSERT(m_allocCb == nullptr);
    ANKI_ASSERT(allocCb != nullptr);

    m_allocCb = allocCb;
    m_allocCbUserData = allocCbUserData;
#if ANKI_MEM_SIGNATURES
    m_signature = computeSignature(this);
    m_headerSize = getAlignedRoundUp(MAX_ALIGNMENT, sizeof(Signature));
#endif
}
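
// The header is rounded up to MAX_ALIGNMENT rather than sizeof(Signature) so that offsetting the user pointer by
// m_headerSize cannot break the caller's requested alignment, which allocate() caps at MAX_ALIGNMENT.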

void* HeapMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isCreated());
#if ANKI_MEM_SIGNATURES
    ANKI_ASSERT(alignment <= MAX_ALIGNMENT && "Wrong assumption");
    size += m_headerSize;
#endif

    void* mem = m_allocCb(m_allocCbUserData, nullptr, size, alignment);
    if(mem != nullptr)
    {
        m_allocationsCount.fetchAdd(1);

#if ANKI_MEM_SIGNATURES
        memset(mem, 0, m_headerSize);
        memcpy(mem, &m_signature, sizeof(m_signature));
        U8* memU8 = static_cast<U8*>(mem);
        memU8 += m_headerSize;
        mem = static_cast<void*>(memU8);
#endif
    }
    else
    {
        ANKI_OOM_ACTION();
    }

    return mem;
}

void HeapMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isCreated());

    if(ANKI_UNLIKELY(ptr == nullptr))
    {
        return;
    }

#if ANKI_MEM_SIGNATURES
    U8* memU8 = static_cast<U8*>(ptr);
    memU8 -= m_headerSize;
    if(memcmp(memU8, &m_signature, sizeof(m_signature)) != 0)
    {
        ANKI_UTIL_LOGE("Signature mismatch on free");
    }

    ptr = static_cast<void*>(memU8);
#endif
    m_allocationsCount.fetchSub(1);
    m_allocCb(m_allocCbUserData, ptr, 0, 0);
}
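
// Minimal usage sketch (hypothetical, reusing the allocAligned() callback defined above):
//   HeapMemoryPool pool;
//   pool.create(allocAligned, nullptr);
//   void* p = pool.allocate(128, 16);
//   // ... use p ...
//   pool.free(p);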

StackMemoryPool::StackMemoryPool()
    : BaseMemoryPool(Type::STACK)
{
}

StackMemoryPool::~StackMemoryPool()
{
    // Iterate the chunks and free them, stopping at the first unused one
    for(Chunk& ch : m_chunks)
    {
        if(ch.m_baseMem != nullptr)
        {
            ch.check();
            invalidateMemory(ch.m_baseMem, ch.m_size);
            m_allocCb(m_allocCbUserData, ch.m_baseMem, 0, 0);
        }
        else
        {
            break;
        }
    }

    // Do some error checks
    auto allocCount = m_allocationsCount.load();
    if(!m_ignoreDeallocationErrors && allocCount != 0)
    {
        ANKI_UTIL_LOGW("Forgot to deallocate");
    }
}

void StackMemoryPool::create(AllocAlignedCallback allocCb, void* allocCbUserData, PtrSize initialChunkSize,
                             F32 nextChunkScale, PtrSize nextChunkBias, Bool ignoreDeallocationErrors,
                             PtrSize alignmentBytes)
{
    ANKI_ASSERT(!isCreated());
    ANKI_ASSERT(allocCb);
    ANKI_ASSERT(initialChunkSize > 0);
    ANKI_ASSERT(nextChunkScale >= 1.0);
    ANKI_ASSERT(alignmentBytes > 0);

    m_allocCb = allocCb;
    m_allocCbUserData = allocCbUserData;
    m_alignmentBytes = alignmentBytes;
    m_initialChunkSize = initialChunkSize;
    m_nextChunkScale = nextChunkScale;
    m_nextChunkBias = nextChunkBias;
    m_ignoreDeallocationErrors = ignoreDeallocationErrors;

    // Create the first chunk
    void* mem = m_allocCb(m_allocCbUserData, nullptr, m_initialChunkSize, m_alignmentBytes);
    if(mem != nullptr)
    {
        invalidateMemory(mem, m_initialChunkSize);
        m_chunks[0].m_baseMem = static_cast<U8*>(mem);
        m_chunks[0].m_mem.store(m_chunks[0].m_baseMem);
        m_chunks[0].m_size = initialChunkSize;
        ANKI_ASSERT(m_crntChunkIdx.load() == 0);
    }
    else
    {
        ANKI_CREATION_OOM_ACTION();
    }
}

void* StackMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isCreated());
    ANKI_ASSERT(alignment <= m_alignmentBytes);
    (void)alignment;

    size = getAlignedRoundUp(m_alignmentBytes, size);
    ANKI_ASSERT(size > 0);
    ANKI_ASSERT(size <= m_initialChunkSize && "The chunks should have enough space to hold at least one allocation");

    Chunk* crntChunk = nullptr;
    Bool retry = true;
    U8* out = nullptr;
    do
    {
        crntChunk = &m_chunks[m_crntChunkIdx.load()];
        crntChunk->check();
        out = crntChunk->m_mem.fetchAdd(size);
        ANKI_ASSERT(out >= crntChunk->m_baseMem);

        if(PtrSize(out + size - crntChunk->m_baseMem) <= crntChunk->m_size)
        {
            // All is fine, there is enough space in the chunk
            retry = false;
            m_allocationsCount.fetchAdd(1);
        }
        else
        {
            // Need new chunk
            LockGuard<Mutex> lock(m_lock);

            // Make sure that only one thread will create a new chunk
            if(&m_chunks[m_crntChunkIdx.load()] == crntChunk)
            {
                // We can create a new chunk
                PtrSize oldChunkSize = crntChunk->m_size;
                ++crntChunk;
                if(crntChunk >= m_chunks.getEnd())
                {
                    ANKI_UTIL_LOGE("Number of chunks is not enough. Expect a crash");
                }

                if(crntChunk->m_baseMem == nullptr)
                {
                    // Need to create a new chunk
                    PtrSize newChunkSize = PtrSize(F32(oldChunkSize) * m_nextChunkScale) + m_nextChunkBias;
                    alignRoundUp(m_alignmentBytes, newChunkSize);

                    void* mem = m_allocCb(m_allocCbUserData, nullptr, newChunkSize, m_alignmentBytes);
                    if(mem != nullptr)
                    {
                        invalidateMemory(mem, newChunkSize);
                        crntChunk->m_baseMem = static_cast<U8*>(mem);
                        crntChunk->m_mem.store(crntChunk->m_baseMem);
                        crntChunk->m_size = newChunkSize;
                        U idx = m_crntChunkIdx.fetchAdd(1);
                        ANKI_ASSERT(&m_chunks[idx] == crntChunk - 1);
                        (void)idx;
                    }
                    else
                    {
                        out = nullptr;
                        retry = false;
                        ANKI_OOM_ACTION();
                    }
                }
                else
                {
                    // Need to recycle one
                    crntChunk->checkReset();
                    invalidateMemory(crntChunk->m_baseMem, crntChunk->m_size);
                    U idx = m_crntChunkIdx.fetchAdd(1);
                    ANKI_ASSERT(&m_chunks[idx] == crntChunk - 1);
                    (void)idx;
                }
            }
        }
    } while(retry);

    return static_cast<void*>(out);
}
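
// How allocate() stays mostly lock-free: the fast path is a single atomic fetchAdd on the current chunk's top
// pointer. Only a thread whose bump overflows the chunk takes m_lock, and the re-check of m_crntChunkIdx under the
// lock guarantees that just one of the racing threads creates or recycles the next chunk; the rest simply loop and
// retry against the new current chunk.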

void StackMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isCreated());

    if(ANKI_UNLIKELY(ptr == nullptr))
    {
        return;
    }

    // If ptr is not aligned then it was not allocated by this pool
    ANKI_ASSERT(isAligned(m_alignmentBytes, ptr));

    auto count = m_allocationsCount.fetchSub(1);
    ANKI_ASSERT(count > 0);
    (void)count;
}
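
// Note that free() returns no memory to the chunks; it only maintains the allocation counter that is used for leak
// detection. The memory itself is reclaimed wholesale by reset() or the destructor.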

void StackMemoryPool::reset()
{
    ANKI_ASSERT(isCreated());

    // Iterate the chunks and rewind them, stopping at the first unused one
    for(Chunk& ch : m_chunks)
    {
        if(ch.m_baseMem != nullptr)
        {
            ch.check();
            ch.m_mem.store(ch.m_baseMem);
            invalidateMemory(ch.m_baseMem, ch.m_size);
        }
        else
        {
            break;
        }
    }

    // Set the crnt chunk
    m_chunks[0].checkReset();
    m_crntChunkIdx.store(0);

    // Reset the allocation count and do some error checks
    auto allocCount = m_allocationsCount.exchange(0);
    if(!m_ignoreDeallocationErrors && allocCount != 0)
    {
        ANKI_UTIL_LOGW("Forgot to deallocate");
    }
}

PtrSize StackMemoryPool::getMemoryCapacity() const
{
    PtrSize sum = 0;
    U crntChunkIdx = m_crntChunkIdx.load();
    for(U i = 0; i <= crntChunkIdx; ++i)
    {
        sum += m_chunks[i].m_size;
    }

    return sum;
}
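
// Typical per-frame usage sketch (hypothetical values; the `true` argument ignores missed free() calls, since
// everything is reclaimed by reset() anyway):
//   StackMemoryPool pool;
//   pool.create(allocAligned, nullptr, 1024 * 1024, 2.0f, 0, true, 16);
//   void* a = pool.allocate(256, 16); // cheap atomic bump
//   void* b = pool.allocate(512, 16);
//   // ... frame work using a and b ...
//   pool.reset(); // reclaim everything at once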

ChainMemoryPool::ChainMemoryPool()
    : BaseMemoryPool(Type::CHAIN)
{
}

ChainMemoryPool::~ChainMemoryPool()
{
    if(m_allocationsCount.load() != 0)
    {
        ANKI_UTIL_LOGW("Memory pool destroyed before all memory was released");
    }

    Chunk* ch = m_headChunk;
    while(ch)
    {
        Chunk* next = ch->m_next;
        destroyChunk(ch);
        ch = next;
    }

    if(m_lock)
    {
        ANKI_ASSERT(m_allocCb);
        m_lock->~SpinLock();
        m_allocCb(m_allocCbUserData, m_lock, 0, 0);
    }
}

void ChainMemoryPool::create(AllocAlignedCallback allocCb, void* allocCbUserData, PtrSize initialChunkSize,
                             F32 nextChunkScale, PtrSize nextChunkBias, PtrSize alignmentBytes)
{
    ANKI_ASSERT(!isCreated());
    ANKI_ASSERT(initialChunkSize > 0);
    ANKI_ASSERT(nextChunkScale >= 1.0);
    ANKI_ASSERT(alignmentBytes > 0);

    // Set all values
    m_allocCb = allocCb;
    m_allocCbUserData = allocCbUserData;
    m_alignmentBytes = alignmentBytes;
    m_initSize = initialChunkSize;
    m_scale = nextChunkScale;
    m_bias = nextChunkBias;
    m_headerSize = max<PtrSize>(m_alignmentBytes, sizeof(Chunk*));

    m_lock = reinterpret_cast<SpinLock*>(m_allocCb(m_allocCbUserData, nullptr, sizeof(SpinLock), alignof(SpinLock)));
    if(!m_lock)
    {
        ANKI_CREATION_OOM_ACTION();
    }
    ::new(m_lock) SpinLock();

    // Initial size should be > 0
    ANKI_ASSERT(m_initSize > 0 && "Wrong arg");

    // The pool could never grow a new chunk if both the scale and the bias were zero
    if(m_scale == 0.0 && m_bias == 0)
    {
        ANKI_ASSERT(0 && "Wrong arg");
    }
}

void* ChainMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isCreated());

    Chunk* ch;
    void* mem = nullptr;

    LockGuard<SpinLock> lock(*m_lock);

    // Get chunk
    ch = m_tailChunk;

    // Create new chunk if needed
    if(ch == nullptr || (mem = allocateFromChunk(ch, size, alignment)) == nullptr)
    {
        // Create new chunk
        PtrSize chunkSize = computeNewChunkSize(size);
        ch = createNewChunk(chunkSize);

        // Chunk creation failed
        if(ch == nullptr)
        {
            return mem;
        }
    }

    if(mem == nullptr)
    {
        mem = allocateFromChunk(ch, size, alignment);
        ANKI_ASSERT(mem != nullptr && "The chunk should have space");
    }

    m_allocationsCount.fetchAdd(1);
    return mem;
}

void ChainMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isCreated());

    if(ANKI_UNLIKELY(ptr == nullptr))
    {
        return;
    }

    // Get the chunk
    U8* mem = static_cast<U8*>(ptr);
    mem -= m_headerSize;
    Chunk* chunk = *reinterpret_cast<Chunk**>(mem);
    ANKI_ASSERT(chunk != nullptr);
    ANKI_ASSERT((mem >= chunk->m_memory && mem < (chunk->m_memory + chunk->m_memsize)) && "Wrong chunk");

    LockGuard<SpinLock> lock(*m_lock);

    // Decrease the chunk's allocation count and, if it drops to zero, delete the chunk
    ANKI_ASSERT(chunk->m_allocationsCount > 0);
    if(--chunk->m_allocationsCount == 0)
    {
        // Chunk is empty. Delete it
        destroyChunk(chunk);
    }

    m_allocationsCount.fetchSub(1);
}
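
// The chain pool can free individual allocations because allocateFromChunk() writes the owning Chunk* into the
// m_headerSize bytes that precede every returned pointer. free() steps back over that header, finds the chunk and,
// once the chunk's last allocation is gone, hands the whole chunk back to the allocator.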

PtrSize ChainMemoryPool::getChunksCount() const
{
    ANKI_ASSERT(isCreated());
    PtrSize count = 0;
    Chunk* ch = m_headChunk;
    while(ch)
    {
        ++count;
        ch = ch->m_next;
    }

    return count;
}

PtrSize ChainMemoryPool::getAllocatedSize() const
{
    ANKI_ASSERT(isCreated());
    PtrSize sum = 0;
    Chunk* ch = m_headChunk;
    while(ch)
    {
        sum += ch->m_top - ch->m_memory;
        ch = ch->m_next;
    }

    return sum;
}

PtrSize ChainMemoryPool::computeNewChunkSize(PtrSize size) const
{
    size += m_headerSize;

    PtrSize crntMaxSize;
    if(m_tailChunk != nullptr)
    {
        // Get the size of the previous chunk
        crntMaxSize = m_tailChunk->m_memsize;

        // Compute the new size
        crntMaxSize = PtrSize(F32(crntMaxSize) * m_scale) + m_bias;
    }
    else
    {
        // No chunks. Choose the initial size
        ANKI_ASSERT(m_headChunk == nullptr);
        crntMaxSize = m_initSize;
    }

    crntMaxSize = max(crntMaxSize, size);
    ANKI_ASSERT(crntMaxSize > 0);
    return crntMaxSize;
}
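
// Worked example of the growth rule above (newSize = oldSize * scale + bias, clamped to at least the requested size
// plus the header): with initialChunkSize = 1024, nextChunkScale = 2.0 and nextChunkBias = 0 the chunks come out as
// 1024, 2048, 4096, ... unless a single allocation needs more, in which case that allocation dictates the size.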

ChainMemoryPool::Chunk* ChainMemoryPool::createNewChunk(PtrSize size)
{
    ANKI_ASSERT(size > 0);

    // Allocate memory and chunk in one go
    PtrSize chunkAllocSize = getAlignedRoundUp(m_alignmentBytes, sizeof(Chunk));
    PtrSize memAllocSize = getAlignedRoundUp(m_alignmentBytes, size);
    PtrSize allocationSize = chunkAllocSize + memAllocSize;

    Chunk* chunk = reinterpret_cast<Chunk*>(m_allocCb(m_allocCbUserData, nullptr, allocationSize, m_alignmentBytes));
    if(chunk)
    {
        invalidateMemory(chunk, allocationSize);

        // Construct it
        memset(chunk, 0, sizeof(Chunk));

        // Initialize it
        chunk->m_memory = reinterpret_cast<U8*>(chunk) + chunkAllocSize;
        chunk->m_memsize = memAllocSize;
        chunk->m_top = chunk->m_memory;

        // Register it
        if(m_tailChunk)
        {
            m_tailChunk->m_next = chunk;
            chunk->m_prev = m_tailChunk;
            m_tailChunk = chunk;
        }
        else
        {
            ANKI_ASSERT(m_headChunk == nullptr);
            m_headChunk = m_tailChunk = chunk;
        }
    }
    else
    {
        ANKI_OOM_ACTION();
    }

    return chunk;
}

void* ChainMemoryPool::allocateFromChunk(Chunk* ch, PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(ch);
    ANKI_ASSERT(ch->m_top <= ch->m_memory + ch->m_memsize);

    U8* mem = ch->m_top;
    PtrSize memV = ptrToNumber(mem);
    alignRoundUp(m_alignmentBytes, memV);
    mem = numberToPtr<U8*>(memV);
    U8* newTop = mem + m_headerSize + size;

    if(newTop <= ch->m_memory + ch->m_memsize)
    {
        *reinterpret_cast<Chunk**>(mem) = ch;
        mem += m_headerSize;

        ch->m_top = newTop;
        ++ch->m_allocationsCount;
    }
    else
    {
        // Chunk is full. Need a new one
        mem = nullptr;
    }

    return mem;
}

void ChainMemoryPool::destroyChunk(Chunk* ch)
{
    ANKI_ASSERT(ch);

    if(ch == m_tailChunk)
    {
        m_tailChunk = ch->m_prev;
    }

    if(ch == m_headChunk)
    {
        m_headChunk = ch->m_next;
    }

    if(ch->m_prev)
    {
        ANKI_ASSERT(ch->m_prev->m_next == ch);
        ch->m_prev->m_next = ch->m_next;
    }

    if(ch->m_next)
    {
        ANKI_ASSERT(ch->m_next->m_prev == ch);
        ch->m_next->m_prev = ch->m_prev;
    }

    invalidateMemory(ch, getAlignedRoundUp(m_alignmentBytes, sizeof(Chunk)) + ch->m_memsize);
    m_allocCb(m_allocCbUserData, ch, 0, 0);
}
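
// Minimal usage sketch (hypothetical values):
//   ChainMemoryPool pool;
//   pool.create(allocAligned, nullptr, 4096, 2.0f, 0, 16);
//   void* p = pool.allocate(100, 16);
//   pool.free(p); // unlike StackMemoryPool::free(), this actually reclaims memory once a chunk empties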

} // end namespace anki