Memory.cpp

// Copyright (C) 2009-2019, Panagiotis Christopoulos Charitos and contributors.
// All rights reserved.
// Code licensed under the BSD License.
// http://www.anki3d.org/LICENSE

#include <anki/util/Memory.h>
#include <anki/util/Functions.h>
#include <anki/util/Assert.h>
#include <anki/util/NonCopyable.h>
#include <anki/util/Thread.h>
#include <anki/util/Atomic.h>
#include <anki/util/Logger.h>
#include <cstdlib>
#include <cstring>
#include <cstdio>

namespace anki
{

#define ANKI_MEM_SIGNATURES ANKI_EXTRA_CHECKS

#if ANKI_MEM_SIGNATURES
using Signature = U32;

static Signature computeSignature(void* ptr)
{
    ANKI_ASSERT(ptr);
    PtrSize sig64 = reinterpret_cast<PtrSize>(ptr);
    Signature sig = Signature(sig64);
    sig ^= 0x5bd1e995;
    sig ^= sig << 24;
    ANKI_ASSERT(sig != 0);
    return sig;
}
#endif
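
// The signature is derived from the pool's own address and is written into a
// small header in front of every allocation (see HeapMemoryPool::allocate).
// Freeing a pointer through the wrong pool then trips the signature check in
// HeapMemoryPool::free().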

#define ANKI_CREATION_OOM_ACTION() ANKI_UTIL_LOGF("Out of memory")
#define ANKI_OOM_ACTION() ANKI_UTIL_LOGE("Out of memory. Expect segfault")

template<typename TPtr, typename TSize>
static void invalidateMemory(TPtr ptr, TSize size)
{
#if ANKI_EXTRA_CHECKS
    // Fill with a recognizable garbage pattern so that reads of freed or
    // uninitialized pool memory fail loudly while debugging
    memset(static_cast<void*>(ptr), 0xCC, size);
#endif
}

void* mallocAligned(PtrSize size, PtrSize alignmentBytes)
{
    ANKI_ASSERT(size > 0);
    ANKI_ASSERT(alignmentBytes > 0);

#if ANKI_POSIX
#    if ANKI_OS != ANKI_OS_ANDROID
    void* out = nullptr;
    // posix_memalign() requires an alignment that is a multiple of sizeof(void*)
    U alignment = getAlignedRoundUp(alignmentBytes, sizeof(void*));
    int err = posix_memalign(&out, alignment, size);

    if(!err)
    {
        ANKI_ASSERT(out != nullptr);
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_UTIL_LOGE("mallocAligned() failed");
    }

    return out;
#    else
    void* out = memalign(getAlignedRoundUp(alignmentBytes, sizeof(void*)), size);

    if(out)
    {
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_UTIL_LOGE("memalign() failed");
    }

    return out;
#    endif
#elif ANKI_OS == ANKI_OS_WINDOWS
    void* out = _aligned_malloc(size, alignmentBytes);

    if(out)
    {
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_UTIL_LOGE("_aligned_malloc() failed");
    }

    return out;
#else
#    error "Unimplemented"
#endif
}

void freeAligned(void* ptr)
{
#if ANKI_POSIX
    ::free(ptr);
#elif ANKI_OS == ANKI_OS_WINDOWS
    _aligned_free(ptr);
#else
#    error "Unimplemented"
#endif
}

void* allocAligned(void* userData, void* ptr, PtrSize size, PtrSize alignment)
{
    (void)userData;
    void* out;

    if(ptr == nullptr)
    {
        // Allocate
        ANKI_ASSERT(size > 0);
        out = mallocAligned(size, alignment);
    }
    else
    {
        // Deallocate
        ANKI_ASSERT(size == 0);
        ANKI_ASSERT(alignment == 0);

        freeAligned(ptr);
        out = nullptr;
    }

    return out;
}
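
// Usage sketch of the callback convention above (illustrative only): the same
// callback both allocates and frees, which is what the pools below expect.
//
//   void* p = allocAligned(nullptr, nullptr, 256, 16); // allocate 256 B, 16 B aligned
//   allocAligned(nullptr, p, 0, 0);                    // free it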

BaseMemoryPool::~BaseMemoryPool()
{
    ANKI_ASSERT(m_refcount.load() == 0 && "Refcount should be zero");
}

Bool BaseMemoryPool::isCreated() const
{
    return m_allocCb != nullptr;
}

void* BaseMemoryPool::allocate(PtrSize size, PtrSize alignmentBytes)
{
    void* out = nullptr;
    switch(m_type)
    {
    case Type::HEAP:
        out = static_cast<HeapMemoryPool*>(this)->allocate(size, alignmentBytes);
        break;
    case Type::STACK:
        out = static_cast<StackMemoryPool*>(this)->allocate(size, alignmentBytes);
        break;
    case Type::CHAIN:
        out = static_cast<ChainMemoryPool*>(this)->allocate(size, alignmentBytes);
        break;
    default:
        ANKI_ASSERT(0);
    }

    return out;
}

void BaseMemoryPool::free(void* ptr)
{
    switch(m_type)
    {
    case Type::HEAP:
        static_cast<HeapMemoryPool*>(this)->free(ptr);
        break;
    case Type::STACK:
        static_cast<StackMemoryPool*>(this)->free(ptr);
        break;
    case Type::CHAIN:
        static_cast<ChainMemoryPool*>(this)->free(ptr);
        break;
    default:
        ANKI_ASSERT(0);
    }
}

HeapMemoryPool::HeapMemoryPool()
    : BaseMemoryPool(Type::HEAP)
{
}

HeapMemoryPool::~HeapMemoryPool()
{
    U count = m_allocationsCount.load();
    if(count != 0)
    {
        ANKI_UTIL_LOGW("Memory pool destroyed before all of its memory was released "
                       "(%u deallocations missed)",
                       count);
    }
}

void HeapMemoryPool::create(AllocAlignedCallback allocCb, void* allocCbUserData)
{
    ANKI_ASSERT(!isCreated());
    ANKI_ASSERT(allocCb != nullptr);

    m_allocCb = allocCb;
    m_allocCbUserData = allocCbUserData;
#if ANKI_MEM_SIGNATURES
    m_signature = computeSignature(this);
    m_headerSize = getAlignedRoundUp(MAX_ALIGNMENT, sizeof(Signature));
#endif
}
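
// With ANKI_MEM_SIGNATURES enabled every allocation carries a hidden header:
//   [signature | zero padding up to m_headerSize][user memory]
// m_headerSize is sizeof(Signature) rounded up to MAX_ALIGNMENT so the user
// pointer keeps its requested alignment.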

void* HeapMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isCreated());

#if ANKI_MEM_SIGNATURES
    ANKI_ASSERT(alignment <= MAX_ALIGNMENT && "Wrong assumption");
    size += m_headerSize;
#endif

    void* mem = m_allocCb(m_allocCbUserData, nullptr, size, alignment);

    if(mem != nullptr)
    {
        m_allocationsCount.fetchAdd(1);

#if ANKI_MEM_SIGNATURES
        memset(mem, 0, m_headerSize);
        memcpy(mem, &m_signature, sizeof(m_signature));
        U8* memU8 = static_cast<U8*>(mem);
        memU8 += m_headerSize;
        mem = static_cast<void*>(memU8);
#endif
    }
    else
    {
        ANKI_OOM_ACTION();
    }

    return mem;
}

void HeapMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isCreated());

#if ANKI_MEM_SIGNATURES
    U8* memU8 = static_cast<U8*>(ptr);
    memU8 -= m_headerSize;
    if(memcmp(memU8, &m_signature, sizeof(m_signature)) != 0)
    {
        ANKI_UTIL_LOGE("Signature mismatch on free");
    }

    ptr = static_cast<void*>(memU8);
#endif

    m_allocationsCount.fetchSub(1);
    m_allocCb(m_allocCbUserData, ptr, 0, 0);
}
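
// Minimal usage sketch (assumes the default allocAligned callback defined
// above; any AllocAlignedCallback works):
//   HeapMemoryPool pool;
//   pool.create(allocAligned, nullptr);
//   void* p = pool.allocate(128, 16);
//   pool.free(p);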

StackMemoryPool::StackMemoryPool()
    : BaseMemoryPool(Type::STACK)
{
}

StackMemoryPool::~StackMemoryPool()
{
    // Iterate the chunks until we hit an unused one
    for(Chunk& ch : m_chunks)
    {
        if(ch.m_baseMem != nullptr)
        {
            ch.check();
            invalidateMemory(ch.m_baseMem, ch.m_size);
            m_allocCb(m_allocCbUserData, ch.m_baseMem, 0, 0);
        }
        else
        {
            break;
        }
    }

    // Do some error checks
    auto allocCount = m_allocationsCount.load();
    if(!m_ignoreDeallocationErrors && allocCount != 0)
    {
        ANKI_UTIL_LOGW("Forgot to deallocate");
    }
}

void StackMemoryPool::create(AllocAlignedCallback allocCb,
    void* allocCbUserData,
    PtrSize initialChunkSize,
    F32 nextChunkScale,
    PtrSize nextChunkBias,
    Bool ignoreDeallocationErrors,
    PtrSize alignmentBytes)
{
    ANKI_ASSERT(!isCreated());
    ANKI_ASSERT(allocCb);
    ANKI_ASSERT(initialChunkSize > 0);
    ANKI_ASSERT(nextChunkScale >= 1.0);
    ANKI_ASSERT(alignmentBytes > 0);

    m_allocCb = allocCb;
    m_allocCbUserData = allocCbUserData;
    m_alignmentBytes = alignmentBytes;
    m_initialChunkSize = initialChunkSize;
    m_nextChunkScale = nextChunkScale;
    m_nextChunkBias = nextChunkBias;
    m_ignoreDeallocationErrors = ignoreDeallocationErrors;

    // Create the first chunk
    void* mem = m_allocCb(m_allocCbUserData, nullptr, m_initialChunkSize, m_alignmentBytes);
    if(mem != nullptr)
    {
        invalidateMemory(mem, m_initialChunkSize);
        m_chunks[0].m_baseMem = static_cast<U8*>(mem);
        m_chunks[0].m_mem.store(m_chunks[0].m_baseMem);
        m_chunks[0].m_size = initialChunkSize;
        ANKI_ASSERT(m_crntChunkIdx.load() == 0);
    }
    else
    {
        ANKI_CREATION_OOM_ACTION();
    }
}

void* StackMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isCreated());
    ANKI_ASSERT(alignment <= m_alignmentBytes);
    (void)alignment;

    size = getAlignedRoundUp(m_alignmentBytes, size);
    ANKI_ASSERT(size > 0);
    ANKI_ASSERT(size <= m_initialChunkSize && "The chunks should have enough space to hold at least one allocation");

    Chunk* crntChunk = nullptr;
    Bool retry = true;
    U8* out = nullptr;
    do
    {
        crntChunk = &m_chunks[m_crntChunkIdx.load()];
        crntChunk->check();
        out = crntChunk->m_mem.fetchAdd(size);
        ANKI_ASSERT(out >= crntChunk->m_baseMem);

        if(PtrSize(out + size - crntChunk->m_baseMem) <= crntChunk->m_size)
        {
            // All is fine, there is enough space in the chunk
            retry = false;
            m_allocationsCount.fetchAdd(1);
        }
        else
        {
            // Need a new chunk
            LockGuard<Mutex> lock(m_lock);

            // Make sure that only one thread will create a new chunk
            if(&m_chunks[m_crntChunkIdx.load()] == crntChunk)
            {
                // We can create a new chunk
                PtrSize oldChunkSize = crntChunk->m_size;

                ++crntChunk;
                if(crntChunk >= m_chunks.getEnd())
                {
                    ANKI_UTIL_LOGE("Number of chunks is not enough. Expect a crash");
                }

                if(crntChunk->m_baseMem == nullptr)
                {
                    // Need to create a new chunk
                    PtrSize newChunkSize = PtrSize(F32(oldChunkSize) * m_nextChunkScale) + m_nextChunkBias;
                    alignRoundUp(m_alignmentBytes, newChunkSize);

                    void* mem = m_allocCb(m_allocCbUserData, nullptr, newChunkSize, m_alignmentBytes);
                    if(mem != nullptr)
                    {
                        invalidateMemory(mem, newChunkSize);
                        crntChunk->m_baseMem = static_cast<U8*>(mem);
                        crntChunk->m_mem.store(crntChunk->m_baseMem);
                        crntChunk->m_size = newChunkSize;

                        U idx = m_crntChunkIdx.fetchAdd(1);
                        ANKI_ASSERT(&m_chunks[idx] == crntChunk - 1);
                        (void)idx;
                    }
                    else
                    {
                        out = nullptr;
                        retry = false;
                        ANKI_OOM_ACTION();
                    }
                }
                else
                {
                    // Recycle a chunk that was created in a previous round
                    crntChunk->checkReset();
                    invalidateMemory(crntChunk->m_baseMem, crntChunk->m_size);

                    U idx = m_crntChunkIdx.fetchAdd(1);
                    ANKI_ASSERT(&m_chunks[idx] == crntChunk - 1);
                    (void)idx;
                }
            }
        }
    } while(retry);

    return static_cast<void*>(out);
}
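
// Note on the loop above: the fast path is lock-free. Every thread bumps the
// current chunk's top pointer with an atomic fetchAdd; m_lock is only taken
// when a chunk overflows, and the re-check of m_crntChunkIdx under the lock
// guarantees that a single thread creates (or recycles) the next chunk while
// the rest simply retry.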

void StackMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isCreated());

    // ptr shouldn't be null or misaligned. If it's misaligned it wasn't
    // allocated by this pool
    ANKI_ASSERT(ptr != nullptr && isAligned(m_alignmentBytes, ptr));

    auto count = m_allocationsCount.fetchSub(1);
    ANKI_ASSERT(count > 0);
    (void)count;
}
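
// free() above is bookkeeping only: stack-allocated memory is reclaimed in
// bulk by reset() or by the destructor.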

void StackMemoryPool::reset()
{
    ANKI_ASSERT(isCreated());

    // Iterate the chunks until we hit an unused one
    for(Chunk& ch : m_chunks)
    {
        if(ch.m_baseMem != nullptr)
        {
            ch.check();
            ch.m_mem.store(ch.m_baseMem);
            invalidateMemory(ch.m_baseMem, ch.m_size);
        }
        else
        {
            break;
        }
    }

    // Reset the current chunk index
    m_chunks[0].checkReset();
    m_crntChunkIdx.store(0);

    // Reset the allocation count and do some error checks
    auto allocCount = m_allocationsCount.exchange(0);
    if(!m_ignoreDeallocationErrors && allocCount != 0)
    {
        ANKI_UTIL_LOGW("Forgot to deallocate");
    }
}

PtrSize StackMemoryPool::getMemoryCapacity() const
{
    PtrSize sum = 0;
    U crntChunkIdx = m_crntChunkIdx.load();
    for(U i = 0; i <= crntChunkIdx; ++i)
    {
        sum += m_chunks[i].m_size;
    }

    return sum;
}
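
// Minimal usage sketch (parameter values are illustrative):
//   StackMemoryPool pool;
//   pool.create(allocAligned, nullptr, 1024, 2.0f, 0, false, 16);
//   void* p = pool.allocate(64, 16);
//   pool.free(p); // bookkeeping only
//   pool.reset(); // the memory becomes reusable here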

ChainMemoryPool::ChainMemoryPool()
    : BaseMemoryPool(Type::CHAIN)
{
}

ChainMemoryPool::~ChainMemoryPool()
{
    if(m_allocationsCount.load() != 0)
    {
        ANKI_UTIL_LOGW("Memory pool destroyed before all of its memory was released");
    }

    Chunk* ch = m_headChunk;
    while(ch)
    {
        Chunk* next = ch->m_next;
        destroyChunk(ch);
        ch = next;
    }

    if(m_lock)
    {
        ANKI_ASSERT(m_allocCb);
        m_lock->~SpinLock();
        m_allocCb(m_allocCbUserData, m_lock, 0, 0);
    }
}

void ChainMemoryPool::create(AllocAlignedCallback allocCb,
    void* allocCbUserData,
    PtrSize initialChunkSize,
    F32 nextChunkScale,
    PtrSize nextChunkBias,
    PtrSize alignmentBytes)
{
    ANKI_ASSERT(!isCreated());
    ANKI_ASSERT(initialChunkSize > 0);
    ANKI_ASSERT(nextChunkScale >= 1.0);
    ANKI_ASSERT(alignmentBytes > 0);

    // Set all values
    m_allocCb = allocCb;
    m_allocCbUserData = allocCbUserData;
    m_alignmentBytes = alignmentBytes;
    m_initSize = initialChunkSize;
    m_scale = nextChunkScale;
    m_bias = nextChunkBias;
    m_headerSize = max(m_alignmentBytes, sizeof(Chunk*));

    m_lock = reinterpret_cast<SpinLock*>(m_allocCb(m_allocCbUserData, nullptr, sizeof(SpinLock), alignof(SpinLock)));
    if(!m_lock)
    {
        ANKI_CREATION_OOM_ACTION();
    }
    ::new(m_lock) SpinLock();

    // A scale of zero with a zero bias would mean the pool can never grow
    if(m_scale == 0.0 && m_bias == 0)
    {
        ANKI_ASSERT(0 && "Wrong arg");
    }
}
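
// Minimal usage sketch (parameter values are illustrative):
//   ChainMemoryPool pool;
//   pool.create(allocAligned, nullptr, 256, 2.0f, 0, 16);
//   void* p = pool.allocate(100, 16);
//   pool.free(p); // may destroy the owning chunk if it becomes empty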

void* ChainMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isCreated());

    Chunk* ch;
    void* mem = nullptr;

    LockGuard<SpinLock> lock(*m_lock);

    // Try the tail chunk first; create a new chunk if there is none or the
    // tail is full
    ch = m_tailChunk;
    if(ch == nullptr || (mem = allocateFromChunk(ch, size, alignment)) == nullptr)
    {
        // Create a new chunk
        PtrSize chunkSize = computeNewChunkSize(size);
        ch = createNewChunk(chunkSize);

        // Chunk creation failed
        if(ch == nullptr)
        {
            return mem;
        }
    }

    if(mem == nullptr)
    {
        mem = allocateFromChunk(ch, size, alignment);
        ANKI_ASSERT(mem != nullptr && "The chunk should have space");
    }

    m_allocationsCount.fetchAdd(1);
    return mem;
}
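
// Note: computeNewChunkSize() below grows chunks geometrically (m_scale) plus
// a constant bias, and never returns less than the request itself, so the
// second allocateFromChunk() call above cannot fail.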

void ChainMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isCreated());

    if(ANKI_UNLIKELY(ptr == nullptr))
    {
        return;
    }

    // Get the chunk from the allocation's header
    U8* mem = static_cast<U8*>(ptr);
    mem -= m_headerSize;
    Chunk* chunk = *reinterpret_cast<Chunk**>(mem);
    ANKI_ASSERT(chunk != nullptr);
    ANKI_ASSERT((mem >= chunk->m_memory && mem < (chunk->m_memory + chunk->m_memsize)) && "Wrong chunk");

    LockGuard<SpinLock> lock(*m_lock);

    // Decrease the chunk's allocation count and if it's zero delete the chunk
    ANKI_ASSERT(chunk->m_allocationsCount > 0);
    if(--chunk->m_allocationsCount == 0)
    {
        // Chunk is empty. Delete it
        destroyChunk(chunk);
    }

    m_allocationsCount.fetchSub(1);
}
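
// free() relies on the Chunk* back-pointer stored in each allocation's header
// (written by allocateFromChunk below): chunks count their live allocations
// and are destroyed as soon as that count drops to zero.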

PtrSize ChainMemoryPool::getChunksCount() const
{
    ANKI_ASSERT(isCreated());

    PtrSize count = 0;
    Chunk* ch = m_headChunk;
    while(ch)
    {
        ++count;
        ch = ch->m_next;
    }

    return count;
}

PtrSize ChainMemoryPool::getAllocatedSize() const
{
    ANKI_ASSERT(isCreated());

    PtrSize sum = 0;
    Chunk* ch = m_headChunk;
    while(ch)
    {
        sum += ch->m_top - ch->m_memory;
        ch = ch->m_next;
    }

    return sum;
}

PtrSize ChainMemoryPool::computeNewChunkSize(PtrSize size) const
{
    size += m_headerSize;

    PtrSize crntMaxSize;
    if(m_tailChunk != nullptr)
    {
        // Get the size of the previous chunk
        crntMaxSize = m_tailChunk->m_memsize;

        // Compute the new size
        crntMaxSize = PtrSize(F32(crntMaxSize) * m_scale) + m_bias;
    }
    else
    {
        // No chunks. Choose the initial size
        ANKI_ASSERT(m_headChunk == nullptr);
        crntMaxSize = m_initSize;
    }

    // The chunk must at least fit the requested allocation
    crntMaxSize = max(crntMaxSize, size);
    ANKI_ASSERT(crntMaxSize > 0);

    return crntMaxSize;
}

ChainMemoryPool::Chunk* ChainMemoryPool::createNewChunk(PtrSize size)
{
    ANKI_ASSERT(size > 0);

    // Allocate memory and chunk in one go
    PtrSize chunkAllocSize = getAlignedRoundUp(m_alignmentBytes, sizeof(Chunk));
    PtrSize memAllocSize = getAlignedRoundUp(m_alignmentBytes, size);
    PtrSize allocationSize = chunkAllocSize + memAllocSize;

    Chunk* chunk = reinterpret_cast<Chunk*>(m_allocCb(m_allocCbUserData, nullptr, allocationSize, m_alignmentBytes));
    if(chunk)
    {
        invalidateMemory(chunk, allocationSize);

        // Construct it
        memset(chunk, 0, sizeof(Chunk));

        // Initialize it
        chunk->m_memory = reinterpret_cast<U8*>(chunk) + chunkAllocSize;
        chunk->m_memsize = memAllocSize;
        chunk->m_top = chunk->m_memory;

        // Register it
        if(m_tailChunk)
        {
            m_tailChunk->m_next = chunk;
            chunk->m_prev = m_tailChunk;
            m_tailChunk = chunk;
        }
        else
        {
            ANKI_ASSERT(m_headChunk == nullptr);
            m_headChunk = m_tailChunk = chunk;
        }
    }
    else
    {
        ANKI_OOM_ACTION();
    }

    return chunk;
}

void* ChainMemoryPool::allocateFromChunk(Chunk* ch, PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(ch);
    ANKI_ASSERT(ch->m_top <= ch->m_memory + ch->m_memsize);

    U8* mem = ch->m_top;
    PtrSize memV = ptrToNumber(mem);
    alignRoundUp(m_alignmentBytes, memV);
    mem = numberToPtr<U8*>(memV);
    U8* newTop = mem + m_headerSize + size;

    if(newTop <= ch->m_memory + ch->m_memsize)
    {
        // Store the owning chunk in the header and return the memory past it
        *reinterpret_cast<Chunk**>(mem) = ch;
        mem += m_headerSize;

        ch->m_top = newTop;
        ++ch->m_allocationsCount;
    }
    else
    {
        // Chunk is full. Need a new one
        mem = nullptr;
    }

    return mem;
}
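
// Layout produced by allocateFromChunk:
//   [Chunk* back-pointer | padding up to m_headerSize][user memory]
// m_headerSize is max(m_alignmentBytes, sizeof(Chunk*)) (see create()) so the
// user pointer stays aligned.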

void ChainMemoryPool::destroyChunk(Chunk* ch)
{
    ANKI_ASSERT(ch);

    // Unlink the chunk from the list
    if(ch == m_tailChunk)
    {
        m_tailChunk = ch->m_prev;
    }

    if(ch == m_headChunk)
    {
        m_headChunk = ch->m_next;
    }

    if(ch->m_prev)
    {
        ANKI_ASSERT(ch->m_prev->m_next == ch);
        ch->m_prev->m_next = ch->m_next;
    }

    if(ch->m_next)
    {
        ANKI_ASSERT(ch->m_next->m_prev == ch);
        ch->m_next->m_prev = ch->m_prev;
    }

    invalidateMemory(ch, getAlignedRoundUp(m_alignmentBytes, sizeof(Chunk)) + ch->m_memsize);
    m_allocCb(m_allocCbUserData, ch, 0, 0);
}

} // end namespace anki