// Memory.cpp
  1. // Copyright (C) 2009-2016, Panagiotis Christopoulos Charitos.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <anki/util/Memory.h>
  6. #include <anki/util/Functions.h>
  7. #include <anki/util/Assert.h>
  8. #include <anki/util/NonCopyable.h>
  9. #include <anki/util/Thread.h>
  10. #include <anki/util/Atomic.h>
  11. #include <anki/util/Logger.h>
  12. #include <cstdlib>
  13. #include <cstring>
  14. #include <cstdio>
  15. namespace anki
  16. {
  17. //==============================================================================
  18. // Misc =
  19. //==============================================================================
  20. #define ANKI_MEM_SIGNATURES ANKI_DEBUG
  21. #if ANKI_MEM_SIGNATURES
  22. using Signature = U32;
  23. static Signature computeSignature(void* ptr)
  24. {
  25. ANKI_ASSERT(ptr);
  26. PtrSize sig64 = reinterpret_cast<PtrSize>(ptr);
  27. Signature sig = sig64;
  28. sig ^= 0x5bd1e995;
  29. sig ^= sig << 24;
  30. ANKI_ASSERT(sig != 0);
  31. return sig;
  32. }
  33. #endif
  34. #define ANKI_CREATION_OOM_ACTION() ANKI_LOGF("Out of memory")
  35. #define ANKI_OOM_ACTION() ANKI_LOGE("Out of memory. Expect segfault")
  36. template<typename TPtr, typename TSize>
  37. static void invalidateMemory(TPtr ptr, TSize size)
  38. {
  39. #if ANKI_DEBUG
  40. memset(static_cast<void*>(ptr), 0xCC, size);
  41. #endif
  42. }
  43. //==============================================================================
  44. // Other =
  45. //==============================================================================
  46. //==============================================================================
/// Allocate `size` bytes aligned to `alignmentBytes` using the platform's
/// native aligned allocator. Returns nullptr on failure after logging an
/// error; never throws.
void* mallocAligned(PtrSize size, PtrSize alignmentBytes)
{
    ANKI_ASSERT(size > 0);
    ANKI_ASSERT(alignmentBytes > 0);

#if ANKI_POSIX
#if ANKI_OS != ANKI_OS_ANDROID
    void* out = nullptr;
    // Round the alignment so it satisfies posix_memalign's requirement that
    // the alignment be a multiple of sizeof(void*)
    U alignment = getAlignedRoundUp(alignmentBytes, sizeof(void*));
    int err = posix_memalign(&out, alignment, size);

    if(!err)
    {
        ANKI_ASSERT(out != nullptr);
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_LOGE("mallocAligned() failed");
    }

    return out;
#else
    // Android's libc lacks posix_memalign; fall back to the older memalign
    void* out =
        memalign(getAlignedRoundUp(alignmentBytes, sizeof(void*)), size);

    if(out)
    {
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_LOGE("memalign() failed");
    }

    return out;
#endif
#elif ANKI_OS == ANKI_OS_WINDOWS
    void* out = _aligned_malloc(size, alignmentBytes);

    if(out)
    {
        // Make sure it's aligned
        ANKI_ASSERT(isAligned(alignmentBytes, out));
    }
    else
    {
        ANKI_LOGE("_aligned_malloc() failed");
    }

    return out;
#else
#error "Unimplemented"
#endif
}
  97. //==============================================================================
/// Release memory previously returned by mallocAligned(). Must use the
/// matching platform deallocator (_aligned_free on Windows).
void freeAligned(void* ptr)
{
#if ANKI_POSIX
    // posix_memalign/memalign memory is released with plain free()
    ::free(ptr);
#elif ANKI_OS == ANKI_OS_WINDOWS
    _aligned_free(ptr);
#else
#error "Unimplemented"
#endif
}
  108. //==============================================================================
  109. void* allocAligned(void* userData, void* ptr, PtrSize size, PtrSize alignment)
  110. {
  111. (void)userData;
  112. void* out;
  113. if(ptr == nullptr)
  114. {
  115. // Allocate
  116. ANKI_ASSERT(size > 0);
  117. out = mallocAligned(size, alignment);
  118. }
  119. else
  120. {
  121. // Deallocate
  122. ANKI_ASSERT(size == 0);
  123. ANKI_ASSERT(alignment == 0);
  124. freeAligned(ptr);
  125. out = nullptr;
  126. }
  127. return out;
  128. }
  129. //==============================================================================
  130. // BaseMemoryPool =
  131. //==============================================================================
  132. //==============================================================================
BaseMemoryPool::~BaseMemoryPool()
{
    // A pool must not be destroyed while somebody still holds a reference
    ANKI_ASSERT(m_refcount.load() == 0 && "Refcount should be zero");
}
  137. //==============================================================================
  138. Bool BaseMemoryPool::isCreated() const
  139. {
  140. return m_allocCb != nullptr;
  141. }
  142. //==============================================================================
  143. void* BaseMemoryPool::allocate(PtrSize size, PtrSize alignmentBytes)
  144. {
  145. void* out = nullptr;
  146. switch(m_type)
  147. {
  148. case Type::HEAP:
  149. out =
  150. static_cast<HeapMemoryPool*>(this)->allocate(size, alignmentBytes);
  151. break;
  152. case Type::STACK:
  153. out =
  154. static_cast<StackMemoryPool*>(this)->allocate(size, alignmentBytes);
  155. break;
  156. case Type::CHAIN:
  157. out =
  158. static_cast<ChainMemoryPool*>(this)->allocate(size, alignmentBytes);
  159. break;
  160. default:
  161. ANKI_ASSERT(0);
  162. }
  163. return out;
  164. }
  165. //==============================================================================
  166. void BaseMemoryPool::free(void* ptr)
  167. {
  168. switch(m_type)
  169. {
  170. case Type::HEAP:
  171. static_cast<HeapMemoryPool*>(this)->free(ptr);
  172. break;
  173. case Type::STACK:
  174. static_cast<StackMemoryPool*>(this)->free(ptr);
  175. break;
  176. case Type::CHAIN:
  177. static_cast<ChainMemoryPool*>(this)->free(ptr);
  178. break;
  179. default:
  180. ANKI_ASSERT(0);
  181. }
  182. }
  183. //==============================================================================
  184. // HeapMemoryPool =
  185. //==============================================================================
  186. //==============================================================================
// Tag the base class so BaseMemoryPool::allocate()/free() dispatch here
HeapMemoryPool::HeapMemoryPool()
    : BaseMemoryPool(Type::HEAP)
{
}
  191. //==============================================================================
  192. HeapMemoryPool::~HeapMemoryPool()
  193. {
  194. if(m_allocationsCount.load() != 0)
  195. {
  196. ANKI_LOGW("Memory pool destroyed before all memory being released");
  197. }
  198. }
  199. //==============================================================================
/// Initialize the pool with its backing allocation callback. Must be called
/// exactly once before any allocate()/free().
void HeapMemoryPool::create(AllocAlignedCallback allocCb, void* allocCbUserData)
{
    ANKI_ASSERT(!isCreated());
    ANKI_ASSERT(m_allocCb == nullptr);
    ANKI_ASSERT(allocCb != nullptr);

    m_allocCb = allocCb;
    m_allocCbUserData = allocCbUserData;

#if ANKI_MEM_SIGNATURES
    // In debug builds every allocation carries a pool-specific signature in a
    // small header so free() can detect frees from the wrong pool. The header
    // is padded to MAX_ALIGNMENT so the user pointer stays aligned.
    m_signature = computeSignature(this);
    m_headerSize = getAlignedRoundUp(MAX_ALIGNMENT, sizeof(Signature));
#endif
}
  212. //==============================================================================
/// Allocate memory through the callback. Returns nullptr on failure (after
/// logging via ANKI_OOM_ACTION).
void* HeapMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isCreated());

#if ANKI_MEM_SIGNATURES
    // The signature header is padded only to MAX_ALIGNMENT, so it cannot keep
    // the user pointer aligned for anything stricter than that
    ANKI_ASSERT(alignment <= MAX_ALIGNMENT && "Wrong assumption");
    size += m_headerSize;
#endif

    void* mem = m_allocCb(m_allocCbUserData, nullptr, size, alignment);

    if(mem != nullptr)
    {
        m_allocationsCount.fetchAdd(1);

#if ANKI_MEM_SIGNATURES
        // Write the pool signature at the front of the block and hand the
        // caller the memory right after the header
        memset(mem, 0, m_headerSize);
        memcpy(mem, &m_signature, sizeof(m_signature));
        U8* memU8 = static_cast<U8*>(mem);
        memU8 += m_headerSize;
        mem = static_cast<void*>(memU8);
#endif
    }
    else
    {
        ANKI_OOM_ACTION();
    }

    return mem;
}
  238. //==============================================================================
/// Free memory previously returned by allocate().
void HeapMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isCreated());

#if ANKI_MEM_SIGNATURES
    // Step back over the header and verify the signature belongs to this
    // pool. On mismatch only an error is logged; the free still proceeds.
    U8* memU8 = static_cast<U8*>(ptr);
    memU8 -= m_headerSize;
    if(memcmp(memU8, &m_signature, sizeof(m_signature)) != 0)
    {
        ANKI_LOGE("Signature missmatch on free");
    }

    ptr = static_cast<void*>(memU8);
#endif

    m_allocationsCount.fetchSub(1);
    m_allocCb(m_allocCbUserData, ptr, 0, 0);
}
  254. //==============================================================================
  255. // StackMemoryPool =
  256. //==============================================================================
  257. //==============================================================================
// Tag the base class so BaseMemoryPool::allocate()/free() dispatch here
StackMemoryPool::StackMemoryPool()
    : BaseMemoryPool(Type::STACK)
{
}
  262. //==============================================================================
  263. StackMemoryPool::~StackMemoryPool()
  264. {
  265. // Iterate all until you find an unused
  266. for(Chunk& ch : m_chunks)
  267. {
  268. if(ch.m_baseMem != nullptr)
  269. {
  270. ch.check();
  271. invalidateMemory(ch.m_baseMem, ch.m_size);
  272. m_allocCb(m_allocCbUserData, ch.m_baseMem, 0, 0);
  273. }
  274. else
  275. {
  276. break;
  277. }
  278. }
  279. // Do some error checks
  280. auto allocCount = m_allocationsCount.load();
  281. if(!m_ignoreDeallocationErrors && allocCount != 0)
  282. {
  283. ANKI_LOGW("Forgot to deallocate");
  284. }
  285. }
  286. //==============================================================================
  287. void StackMemoryPool::create(AllocAlignedCallback allocCb,
  288. void* allocCbUserData,
  289. PtrSize initialChunkSize,
  290. F32 nextChunkScale,
  291. PtrSize nextChunkBias,
  292. Bool ignoreDeallocationErrors,
  293. PtrSize alignmentBytes)
  294. {
  295. ANKI_ASSERT(!isCreated());
  296. ANKI_ASSERT(allocCb);
  297. ANKI_ASSERT(initialChunkSize > 0);
  298. ANKI_ASSERT(nextChunkScale >= 1.0);
  299. ANKI_ASSERT(alignmentBytes > 0);
  300. m_allocCb = allocCb;
  301. m_allocCbUserData = allocCbUserData;
  302. m_alignmentBytes = alignmentBytes;
  303. m_initialChunkSize = initialChunkSize;
  304. m_nextChunkScale = nextChunkScale;
  305. m_nextChunkBias = nextChunkBias;
  306. m_ignoreDeallocationErrors = ignoreDeallocationErrors;
  307. // Create the first chunk
  308. void* mem = m_allocCb(
  309. m_allocCbUserData, nullptr, m_initialChunkSize, m_alignmentBytes);
  310. if(mem != nullptr)
  311. {
  312. invalidateMemory(mem, m_initialChunkSize);
  313. m_chunks[0].m_baseMem = static_cast<U8*>(mem);
  314. m_chunks[0].m_mem.store(m_chunks[0].m_baseMem);
  315. m_chunks[0].m_size = initialChunkSize;
  316. ANKI_ASSERT(m_crntChunkIdx.load() == 0);
  317. }
  318. else
  319. {
  320. ANKI_CREATION_OOM_ACTION();
  321. }
  322. }
  323. //==============================================================================
/// Thread-safe bump allocation. The fast path is a single atomic fetchAdd on
/// the current chunk's top pointer; the mutex is taken only when a chunk
/// overflows and the pool must move to (create or recycle) the next chunk.
/// Returns nullptr on OOM.
void* StackMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isCreated());
    ANKI_ASSERT(alignment <= m_alignmentBytes);
    (void)alignment;

    // Pad every allocation to the pool alignment so the next bump stays
    // aligned as well
    size = getAlignedRoundUp(m_alignmentBytes, size);
    ANKI_ASSERT(size > 0);
    ANKI_ASSERT(size <= m_initialChunkSize
        && "The chunks should have enough "
        "space to hold at least one allocation");

    Chunk* crntChunk = nullptr;
    Bool retry = true;
    U8* out = nullptr;

    do
    {
        crntChunk = &m_chunks[m_crntChunkIdx.load()];
        crntChunk->check();

        // Optimistically claim the space with an atomic bump
        out = crntChunk->m_mem.fetchAdd(size);
        ANKI_ASSERT(out >= crntChunk->m_baseMem);

        if(PtrSize(out + size - crntChunk->m_baseMem) <= crntChunk->m_size)
        {
            // All is fine, there is enough space in the chunk
            retry = false;
            m_allocationsCount.fetchAdd(1);
        }
        else
        {
            // The claim overflowed the chunk. Need new chunk
            LockGuard<Mutex> lock(m_lock);

            // Make sure that only one thread will create a new chunk. If the
            // current index moved since we overflowed, another thread already
            // advanced it; just loop and retry on the new chunk.
            if(&m_chunks[m_crntChunkIdx.load()] == crntChunk)
            {
                // We can create a new chunk
                PtrSize oldChunkSize = crntChunk->m_size;
                ++crntChunk;
                if(crntChunk >= m_chunks.getEnd())
                {
                    // Out of chunk slots; the index increment below will walk
                    // off the array
                    ANKI_LOGE("Number of chunks is not enough. Expect a crash");
                }

                if(crntChunk->m_baseMem == nullptr)
                {
                    // Need to create a new chunk, scaled up from the previous
                    // one's size
                    PtrSize newChunkSize =
                        oldChunkSize * m_nextChunkScale + m_nextChunkBias;
                    alignRoundUp(m_alignmentBytes, newChunkSize);

                    void* mem = m_allocCb(m_allocCbUserData,
                        nullptr,
                        newChunkSize,
                        m_alignmentBytes);

                    if(mem != nullptr)
                    {
                        invalidateMemory(mem, newChunkSize);
                        crntChunk->m_baseMem = static_cast<U8*>(mem);
                        crntChunk->m_mem.store(crntChunk->m_baseMem);
                        crntChunk->m_size = newChunkSize;

                        // Publish the new chunk; the retry loop picks it up
                        U idx = m_crntChunkIdx.fetchAdd(1);
                        ANKI_ASSERT(&m_chunks[idx] == crntChunk - 1);
                        (void)idx;
                    }
                    else
                    {
                        out = nullptr;
                        retry = false;
                        ANKI_OOM_ACTION();
                    }
                }
                else
                {
                    // Need to recycle one that was created earlier (e.g.
                    // before a reset())
                    crntChunk->checkReset();
                    invalidateMemory(crntChunk->m_baseMem, crntChunk->m_size);

                    U idx = m_crntChunkIdx.fetchAdd(1);
                    ANKI_ASSERT(&m_chunks[idx] == crntChunk - 1);
                    (void)idx;
                }
            }
        }
    } while(retry);

    return static_cast<void*>(out);
}
  404. //==============================================================================
  405. void StackMemoryPool::free(void* ptr)
  406. {
  407. ANKI_ASSERT(isCreated());
  408. // ptr shouldn't be null or not aligned. If not aligned it was not
  409. // allocated by this class
  410. ANKI_ASSERT(ptr != nullptr && isAligned(m_alignmentBytes, ptr));
  411. auto count = m_allocationsCount.fetchSub(1);
  412. ANKI_ASSERT(count > 0);
  413. (void)count;
  414. }
  415. //==============================================================================
  416. void StackMemoryPool::reset()
  417. {
  418. ANKI_ASSERT(isCreated());
  419. // Iterate all until you find an unused
  420. for(Chunk& ch : m_chunks)
  421. {
  422. if(ch.m_baseMem != nullptr)
  423. {
  424. ch.check();
  425. ch.m_mem.store(ch.m_baseMem);
  426. invalidateMemory(ch.m_baseMem, ch.m_size);
  427. }
  428. else
  429. {
  430. break;
  431. }
  432. }
  433. // Set the crnt chunk
  434. m_chunks[0].checkReset();
  435. m_crntChunkIdx.store(0);
  436. // Reset allocation count and do some error checks
  437. auto allocCount = m_allocationsCount.exchange(0);
  438. if(!m_ignoreDeallocationErrors && allocCount != 0)
  439. {
  440. ANKI_LOGW("Forgot to deallocate");
  441. }
  442. }
  443. //==============================================================================
  444. PtrSize StackMemoryPool::getMemoryCapacity() const
  445. {
  446. PtrSize sum = 0;
  447. U crntChunkIdx = m_crntChunkIdx.load();
  448. for(U i = 0; i <= crntChunkIdx; ++i)
  449. {
  450. sum += m_chunks[i].m_size;
  451. }
  452. return sum;
  453. }
  454. //==============================================================================
  455. // ChainMemoryPool =
  456. //==============================================================================
  457. //==============================================================================
// Tag the base class so BaseMemoryPool::allocate()/free() dispatch here
ChainMemoryPool::ChainMemoryPool()
    : BaseMemoryPool(Type::CHAIN)
{
}
  462. //==============================================================================
  463. ChainMemoryPool::~ChainMemoryPool()
  464. {
  465. if(m_allocationsCount.load() != 0)
  466. {
  467. ANKI_LOGW("Memory pool destroyed before all memory being released");
  468. }
  469. Chunk* ch = m_headChunk;
  470. while(ch)
  471. {
  472. Chunk* next = ch->m_next;
  473. destroyChunk(ch);
  474. ch = next;
  475. }
  476. if(m_lock)
  477. {
  478. ANKI_ASSERT(m_allocCb);
  479. m_lock->~SpinLock();
  480. m_allocCb(m_allocCbUserData, m_lock, 0, 0);
  481. }
  482. }
  483. //==============================================================================
  484. void ChainMemoryPool::create(AllocAlignedCallback allocCb,
  485. void* allocCbUserData,
  486. PtrSize initialChunkSize,
  487. F32 nextChunkScale,
  488. PtrSize nextChunkBias,
  489. PtrSize alignmentBytes)
  490. {
  491. ANKI_ASSERT(!isCreated());
  492. ANKI_ASSERT(initialChunkSize > 0);
  493. ANKI_ASSERT(nextChunkScale >= 1.0);
  494. ANKI_ASSERT(alignmentBytes > 0);
  495. // Set all values
  496. m_allocCb = allocCb;
  497. m_allocCbUserData = allocCbUserData;
  498. m_alignmentBytes = alignmentBytes;
  499. m_initSize = initialChunkSize;
  500. m_scale = nextChunkScale;
  501. m_bias = nextChunkBias;
  502. m_headerSize = max(m_alignmentBytes, sizeof(Chunk*));
  503. m_lock = reinterpret_cast<SpinLock*>(m_allocCb(
  504. m_allocCbUserData, nullptr, sizeof(SpinLock), alignof(SpinLock)));
  505. if(!m_lock)
  506. {
  507. ANKI_CREATION_OOM_ACTION();
  508. }
  509. ::new(m_lock) SpinLock();
  510. // Initial size should be > 0
  511. ANKI_ASSERT(m_initSize > 0 && "Wrong arg");
  512. // On fixed initial size is the same as the max
  513. if(m_scale == 0.0 && m_bias == 0)
  514. {
  515. ANKI_ASSERT(0 && "Wrong arg");
  516. }
  517. }
  518. //==============================================================================
/// Allocate from the tail chunk, creating a new chunk when there is no tail
/// or the tail is full. Returns nullptr if a needed chunk cannot be created.
void* ChainMemoryPool::allocate(PtrSize size, PtrSize alignment)
{
    ANKI_ASSERT(isCreated());

    Chunk* ch;
    void* mem = nullptr;

    LockGuard<SpinLock> lock(*m_lock);

    // Get chunk
    ch = m_tailChunk;

    // Create new chunk if needed. NOTE: the condition also attempts the
    // allocation on the existing tail; mem stays nullptr only if that failed
    if(ch == nullptr
        || (mem = allocateFromChunk(ch, size, alignment)) == nullptr)
    {
        // Create new chunk
        PtrSize chunkSize = computeNewChunkSize(size);
        ch = createNewChunk(chunkSize);

        // Chunk creation failed
        if(ch == nullptr)
        {
            return mem;
        }
    }

    if(mem == nullptr)
    {
        // Fresh chunk was sized to fit this allocation, so this cannot fail
        mem = allocateFromChunk(ch, size, alignment);
        ANKI_ASSERT(mem != nullptr && "The chunk should have space");
    }

    m_allocationsCount.fetchAdd(1);

    return mem;
}
  548. //==============================================================================
/// Free an allocation. The owning chunk is recovered from the hidden header
/// in front of the pointer; a chunk whose last allocation is freed is
/// destroyed immediately. Freeing nullptr is a no-op.
void ChainMemoryPool::free(void* ptr)
{
    ANKI_ASSERT(isCreated());

    if(ANKI_UNLIKELY(ptr == nullptr))
    {
        return;
    }

    // Get the chunk: the Chunk* was stored m_headerSize bytes before the
    // user pointer by allocateFromChunk()
    U8* mem = static_cast<U8*>(ptr);
    mem -= m_headerSize;
    Chunk* chunk = *reinterpret_cast<Chunk**>(mem);

    ANKI_ASSERT(chunk != nullptr);
    ANKI_ASSERT(
        (mem >= chunk->m_memory && mem < (chunk->m_memory + chunk->m_memsize))
        && "Wrong chunk");

    LockGuard<SpinLock> lock(*m_lock);

    // Decrease the deallocation refcount and if it's zero delete the chunk
    ANKI_ASSERT(chunk->m_allocationsCount > 0);
    if(--chunk->m_allocationsCount == 0)
    {
        // Chunk is empty. Delete it
        destroyChunk(chunk);
    }

    m_allocationsCount.fetchSub(1);
}
  574. //==============================================================================
  575. PtrSize ChainMemoryPool::getChunksCount() const
  576. {
  577. ANKI_ASSERT(isCreated());
  578. PtrSize count = 0;
  579. Chunk* ch = m_headChunk;
  580. while(ch)
  581. {
  582. ++count;
  583. ch = ch->m_next;
  584. }
  585. return count;
  586. }
  587. //==============================================================================
  588. PtrSize ChainMemoryPool::getAllocatedSize() const
  589. {
  590. ANKI_ASSERT(isCreated());
  591. PtrSize sum = 0;
  592. Chunk* ch = m_headChunk;
  593. while(ch)
  594. {
  595. sum += ch->m_top - ch->m_memory;
  596. ch = ch->m_next;
  597. }
  598. return sum;
  599. }
  600. //==============================================================================
  601. PtrSize ChainMemoryPool::computeNewChunkSize(PtrSize size) const
  602. {
  603. size += m_headerSize;
  604. PtrSize crntMaxSize;
  605. if(m_tailChunk != nullptr)
  606. {
  607. // Get the size of previous
  608. crntMaxSize = m_tailChunk->m_memsize;
  609. // Compute new size
  610. crntMaxSize = F32(crntMaxSize) * m_scale + m_bias;
  611. }
  612. else
  613. {
  614. // No chunks. Choose initial size
  615. ANKI_ASSERT(m_headChunk == nullptr);
  616. crntMaxSize = m_initSize;
  617. }
  618. crntMaxSize = max(crntMaxSize, size);
  619. ANKI_ASSERT(crntMaxSize > 0);
  620. return crntMaxSize;
  621. }
  622. //==============================================================================
  623. ChainMemoryPool::Chunk* ChainMemoryPool::createNewChunk(PtrSize size)
  624. {
  625. ANKI_ASSERT(size > 0);
  626. // Allocate memory and chunk in one go
  627. PtrSize chunkAllocSize = getAlignedRoundUp(m_alignmentBytes, sizeof(Chunk));
  628. PtrSize memAllocSize = getAlignedRoundUp(m_alignmentBytes, size);
  629. PtrSize allocationSize = chunkAllocSize + memAllocSize;
  630. Chunk* chunk = reinterpret_cast<Chunk*>(m_allocCb(
  631. m_allocCbUserData, nullptr, allocationSize, m_alignmentBytes));
  632. if(chunk)
  633. {
  634. invalidateMemory(chunk, allocationSize);
  635. // Construct it
  636. memset(chunk, 0, sizeof(Chunk));
  637. // Initialize it
  638. chunk->m_memory = reinterpret_cast<U8*>(chunk) + chunkAllocSize;
  639. chunk->m_memsize = memAllocSize;
  640. chunk->m_top = chunk->m_memory;
  641. // Register it
  642. if(m_tailChunk)
  643. {
  644. m_tailChunk->m_next = chunk;
  645. chunk->m_prev = m_tailChunk;
  646. m_tailChunk = chunk;
  647. }
  648. else
  649. {
  650. ANKI_ASSERT(m_headChunk == nullptr);
  651. m_headChunk = m_tailChunk = chunk;
  652. }
  653. }
  654. else
  655. {
  656. ANKI_OOM_ACTION();
  657. }
  658. return chunk;
  659. }
  660. //==============================================================================
  661. void* ChainMemoryPool::allocateFromChunk(
  662. Chunk* ch, PtrSize size, PtrSize alignment)
  663. {
  664. ANKI_ASSERT(ch);
  665. ANKI_ASSERT(ch->m_top <= ch->m_memory + ch->m_memsize);
  666. U8* mem = ch->m_top;
  667. alignRoundUp(m_alignmentBytes, mem);
  668. U8* newTop = mem + m_headerSize + size;
  669. if(newTop <= ch->m_memory + ch->m_memsize)
  670. {
  671. *reinterpret_cast<Chunk**>(mem) = ch;
  672. mem += m_headerSize;
  673. ch->m_top = newTop;
  674. ++ch->m_allocationsCount;
  675. }
  676. else
  677. {
  678. // Chunk is full. Need a new one
  679. mem = nullptr;
  680. }
  681. return mem;
  682. }
  683. //==============================================================================
  684. void ChainMemoryPool::destroyChunk(Chunk* ch)
  685. {
  686. ANKI_ASSERT(ch);
  687. if(ch == m_tailChunk)
  688. {
  689. m_tailChunk = ch->m_prev;
  690. }
  691. if(ch == m_headChunk)
  692. {
  693. m_headChunk = ch->m_next;
  694. }
  695. if(ch->m_prev)
  696. {
  697. ANKI_ASSERT(ch->m_prev->m_next == ch);
  698. ch->m_prev->m_next = ch->m_next;
  699. }
  700. if(ch->m_next)
  701. {
  702. ANKI_ASSERT(ch->m_next->m_prev == ch);
  703. ch->m_next->m_prev = ch->m_prev;
  704. }
  705. invalidateMemory(
  706. ch, getAlignedRoundUp(m_alignmentBytes, sizeof(Chunk)) + ch->m_memsize);
  707. m_allocCb(m_allocCbUserData, ch, 0, 0);
  708. }
  709. } // end namespace anki