
Refactor the stack allocators

Panagiotis Christopoulos Charitos, 4 years ago
parent
commit
57abed8760
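
This commit pulls the chunk bookkeeping of StackMemoryPool out into a reusable StackAllocatorBuilder<TChunk, TInterface, TLock> template (AnKi/Util/StackAllocatorBuilder.h), renames BuddyAllocator to BuddyAllocatorBuilder, and renames getAllocationsCount()/m_allocationsCount to getAllocationCount()/m_allocationCount. For pool users the visible change is small; a rough sketch of the updated calls (illustrative only, not taken from the diff):

#include <AnKi/Util/Memory.h>

using namespace anki;

void example(AllocAlignedCallback allocCb, void* userData)
{
	StackMemoryPool pool;
	// nextChunkScale is now F64 and alignmentBytes is now U32.
	pool.init(allocCb, userData, 1024, 2.0, 0, true, ANKI_SAFE_ALIGNMENT);

	void* p = pool.allocate(25, 1);

	// Renamed from getAllocationsCount().
	const U32 count = pool.getAllocationCount();
	(void)count;

	pool.free(p);
	pool.reset();
}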

+ 2 - 2
AnKi/Core/GpuMemoryManager.h

@@ -8,7 +8,7 @@
 #include <AnKi/Core/Common.h>
 #include <AnKi/Gr/Buffer.h>
 #include <AnKi/Gr/Utils/FrameGpuAllocator.h>
-#include <AnKi/Util/BuddyAllocator.h>
+#include <AnKi/Util/BuddyAllocatorBuilder.h>
 
 namespace anki {
 
@@ -44,7 +44,7 @@ public:
 private:
 	GrManager* m_gr = nullptr;
 	BufferPtr m_vertBuffer;
-	BuddyAllocator<> m_buddyAllocator;
+	BuddyAllocatorBuilder<> m_buddyAllocator;
 };
 
 enum class StagingGpuMemoryType : U8

+ 3 - 3
AnKi/Resource/ResourceManager.cpp

@@ -106,7 +106,7 @@ Error ResourceManager::loadResource(const CString& filename, ResourcePtr<T>& out
 		auto& pool = m_tmpAlloc.getMemoryPool();
 
 		{
-			U allocsCountBefore = pool.getAllocationsCount();
+			U allocsCountBefore = pool.getAllocationCount();
 			(void)allocsCountBefore;
 
 			err = ptr->load(filename, async);
@@ -117,7 +117,7 @@ Error ResourceManager::loadResource(const CString& filename, ResourcePtr<T>& out
 				return err;
 			}
 
-			ANKI_ASSERT(pool.getAllocationsCount() == allocsCountBefore && "Forgot to deallocate");
+			ANKI_ASSERT(pool.getAllocationCount() == allocsCountBefore && "Forgot to deallocate");
 		}
 
 		ptr->setFilename(filename);
@@ -125,7 +125,7 @@ Error ResourceManager::loadResource(const CString& filename, ResourcePtr<T>& out
 
 		// Reset the memory pool if no-one is using it.
 		// NOTE: Check because resources load other resources
-		if(pool.getAllocationsCount() == 0)
+		if(pool.getAllocationCount() == 0)
 		{
 			pool.reset();
 		}

+ 2 - 1
AnKi/Util.h

@@ -41,7 +41,8 @@
 #include <AnKi/Util/Xml.h>
 #include <AnKi/Util/F16.h>
 #include <AnKi/Util/Function.h>
-#include <AnKi/Util/BuddyAllocator.h>
+#include <AnKi/Util/BuddyAllocatorBuilder.h>
+#include <AnKi/Util/StackAllocatorBuilder.h>
 
 /// @defgroup util Utilities (like STL)
 

+ 8 - 8
AnKi/Util/BuddyAllocator.h → AnKi/Util/BuddyAllocatorBuilder.h

@@ -15,33 +15,33 @@ namespace anki {
 /// This is a generic implementation of a buddy allocator.
 /// @tparam T_MAX_MEMORY_RANGE_LOG2 The max memory to allocate.
 template<U32 T_MAX_MEMORY_RANGE_LOG2 = 32>
-class BuddyAllocator
+class BuddyAllocatorBuilder
 {
 public:
 	/// The type of the address.
 	using Address = std::conditional_t<(T_MAX_MEMORY_RANGE_LOG2 > 32), PtrSize, U32>;
 
-	BuddyAllocator()
+	BuddyAllocatorBuilder()
 	{
 	}
 
 	/// @copydoc init
-	BuddyAllocator(GenericMemoryPoolAllocator<U8> alloc, U32 maxMemoryRangeLog2)
+	BuddyAllocatorBuilder(GenericMemoryPoolAllocator<U8> alloc, U32 maxMemoryRangeLog2)
 	{
 		init(alloc, maxMemoryRangeLog2);
 	}
 
-	BuddyAllocator(const BuddyAllocator&) = delete; // Non-copyable
+	BuddyAllocatorBuilder(const BuddyAllocatorBuilder&) = delete; // Non-copyable
 
-	~BuddyAllocator()
+	~BuddyAllocatorBuilder()
 	{
 		destroy();
 	}
 
-	BuddyAllocator& operator=(const BuddyAllocator&) = delete; // Non-copyable
+	BuddyAllocatorBuilder& operator=(const BuddyAllocatorBuilder&) = delete; // Non-copyable
 
 	/// Init the allocator.
-	/// @param alloc The allocator used for internal structures of the BuddyAllocator.
+	/// @param alloc The allocator used for internal structures of the BuddyAllocatorBuilder.
 	/// @param maxMemoryRangeLog2 The max memory to allocate.
 	void init(GenericMemoryPoolAllocator<U8> alloc, U32 maxMemoryRangeLog2);
 
@@ -109,4 +109,4 @@ private:
 
 } // end namespace anki
 
-#include <AnKi/Util/BuddyAllocator.inl.h>
+#include <AnKi/Util/BuddyAllocatorBuilder.inl.h>
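
For callers only the name changes. A minimal usage sketch against the renamed class, mirroring the updated test further down (the allocator parameter type is taken from init()'s signature; everything else is illustrative):

#include <AnKi/Util/BuddyAllocatorBuilder.h>

using namespace anki;

void buddyExample(GenericMemoryPoolAllocator<U8> alloc)
{
	// 2^4 = 16 addressable bytes.
	BuddyAllocatorBuilder<4> buddy(alloc, 4);

	U32 addr;
	if(buddy.allocate(1, addr))
	{
		// Addresses are offsets into the caller's memory range, not pointers,
		// and must be freed with the same size before the builder is destroyed.
		buddy.free(addr, 1);
	}
}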

+ 7 - 7
AnKi/Util/BuddyAllocator.inl.h → AnKi/Util/BuddyAllocatorBuilder.inl.h

@@ -3,12 +3,12 @@
 // Code licensed under the BSD License.
 // http://www.anki3d.org/LICENSE
 
-#include <AnKi/Util/BuddyAllocator.h>
+#include <AnKi/Util/BuddyAllocatorBuilder.h>
 
 namespace anki {
 
 template<U32 T_MAX_MEMORY_RANGE_LOG2>
-void BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::init(GenericMemoryPoolAllocator<U8> alloc, U32 maxMemoryRangeLog2)
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::init(GenericMemoryPoolAllocator<U8> alloc, U32 maxMemoryRangeLog2)
 {
 	ANKI_ASSERT(maxMemoryRangeLog2 >= 1 && maxMemoryRangeLog2 <= T_MAX_MEMORY_RANGE_LOG2);
 	ANKI_ASSERT(m_freeLists.getSize() == 0 && m_userAllocatedSize == 0 && m_realAllocatedSize == 0);
@@ -21,7 +21,7 @@ void BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::init(GenericMemoryPoolAllocator<U8
 }
 
 template<U32 T_MAX_MEMORY_RANGE_LOG2>
-void BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::destroy()
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::destroy()
 {
 	ANKI_ASSERT(m_userAllocatedSize == 0 && "Forgot to free all memory");
 	m_freeLists.destroy(m_alloc);
@@ -31,7 +31,7 @@ void BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::destroy()
 }
 
 template<U32 T_MAX_MEMORY_RANGE_LOG2>
-Bool BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::allocate(PtrSize size, Address& outAddress)
+Bool BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::allocate(PtrSize size, Address& outAddress)
 {
 	ANKI_ASSERT(size > 0 && size <= m_maxMemoryRange);
 
@@ -85,7 +85,7 @@ Bool BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::allocate(PtrSize size, Address& ou
 }
 
 template<U32 T_MAX_MEMORY_RANGE_LOG2>
-void BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::free(Address address, PtrSize size)
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::free(Address address, PtrSize size)
 {
 	const PtrSize alignedSize = nextPowerOfTwo(size);
 	freeInternal(address, alignedSize);
@@ -107,7 +107,7 @@ void BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::free(Address address, PtrSize size
 }
 
 template<U32 T_MAX_MEMORY_RANGE_LOG2>
-void BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::freeInternal(PtrSize address, PtrSize size)
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::freeInternal(PtrSize address, PtrSize size)
 {
 	ANKI_ASSERT(isPowerOfTwo(size));
 	ANKI_ASSERT(address + size <= m_maxMemoryRange);
@@ -155,7 +155,7 @@ void BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::freeInternal(PtrSize address, PtrS
 }
 
 template<U32 T_MAX_MEMORY_RANGE_LOG2>
-void BuddyAllocator<T_MAX_MEMORY_RANGE_LOG2>::debugPrint() const
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::debugPrint() const
 {
 	constexpr PtrSize MAX_MEMORY_RANGE = pow2<PtrSize>(T_MAX_MEMORY_RANGE_LOG2);
 

+ 64 - 181
AnKi/Util/Memory.cpp

@@ -57,10 +57,10 @@ void* mallocAligned(PtrSize size, PtrSize alignmentBytes)
 #if ANKI_POSIX
 #	if !ANKI_OS_ANDROID
 	void* out = nullptr;
-	U alignment = getAlignedRoundUp(alignmentBytes, sizeof(void*));
+	PtrSize alignment = getAlignedRoundUp(alignmentBytes, sizeof(void*));
 	int err = posix_memalign(&out, alignment, size);
 
-	if(!err)
+	if(ANKI_LIKELY(!err))
 	{
 		ANKI_ASSERT(out != nullptr);
 		// Make sure it's aligned
@@ -158,7 +158,7 @@ HeapMemoryPool::HeapMemoryPool()
 
 HeapMemoryPool::~HeapMemoryPool()
 {
-	const U32 count = m_allocationsCount.load();
+	const U32 count = m_allocationCount.load();
 	if(count != 0)
 	{
 		ANKI_UTIL_LOGW("Memory pool destroyed before all memory being released "
@@ -192,7 +192,7 @@ void* HeapMemoryPool::allocate(PtrSize size, PtrSize alignment)
 
 	if(mem != nullptr)
 	{
-		m_allocationsCount.fetchAdd(1);
+		m_allocationCount.fetchAdd(1);
 
 #if ANKI_MEM_EXTRA_CHECKS
 		memset(mem, 0, ALLOCATION_HEADER_SIZE);
@@ -231,10 +231,44 @@ void HeapMemoryPool::free(void* ptr)
 	ptr = static_cast<void*>(memU8);
 	invalidateMemory(ptr, header.m_allocationSize);
 #endif
-	m_allocationsCount.fetchSub(1);
+	m_allocationCount.fetchSub(1);
 	m_allocCb(m_allocCbUserData, ptr, 0, 0);
 }
 
+Error StackMemoryPool::StackAllocatorBuilderInterface::allocateChunk(PtrSize size, Chunk*& out)
+{
+	ANKI_ASSERT(size > 0);
+
+	const PtrSize fullChunkSize = offsetof(Chunk, m_memoryStart) + size;
+
+	void* mem = m_parent->m_allocCb(m_parent->m_allocCbUserData, nullptr, fullChunkSize, MAX_ALIGNMENT);
+
+	if(ANKI_LIKELY(mem))
+	{
+		out = static_cast<Chunk*>(mem);
+		invalidateMemory(&out->m_memoryStart[0], size);
+	}
+	else
+	{
+		ANKI_OOM_ACTION();
+		return Error::OUT_OF_MEMORY;
+	}
+
+	return Error::NONE;
+}
+
+void StackMemoryPool::StackAllocatorBuilderInterface::freeChunk(Chunk* chunk)
+{
+	ANKI_ASSERT(chunk);
+	m_parent->m_allocCb(m_parent->m_allocCbUserData, chunk, 0, 0);
+}
+
+void StackMemoryPool::StackAllocatorBuilderInterface::recycleChunk(Chunk& chunk)
+{
+	ANKI_ASSERT(chunk.m_chunkSize > 0);
+	invalidateMemory(&chunk.m_memoryStart[0], chunk.m_chunkSize);
+}
+
 StackMemoryPool::StackMemoryPool()
 	: BaseMemoryPool(Type::STACK)
 {
@@ -242,166 +276,41 @@ StackMemoryPool::StackMemoryPool()
 
 StackMemoryPool::~StackMemoryPool()
 {
-	// Iterate all until you find an unused
-	for(Chunk& ch : m_chunks)
-	{
-		if(ch.m_baseMem != nullptr)
-		{
-			ch.check();
-
-			invalidateMemory(ch.m_baseMem, ch.m_size);
-			m_allocCb(m_allocCbUserData, ch.m_baseMem, 0, 0);
-		}
-		else
-		{
-			break;
-		}
-	}
-
-	// Do some error checks
-	const U32 allocCount = m_allocationsCount.load();
-	if(!m_ignoreDeallocationErrors && allocCount != 0)
-	{
-		ANKI_UTIL_LOGW("Forgot to deallocate");
-	}
 }
 
 void StackMemoryPool::init(AllocAlignedCallback allocCb, void* allocCbUserData, PtrSize initialChunkSize,
-						   F32 nextChunkScale, PtrSize nextChunkBias, Bool ignoreDeallocationErrors,
-						   PtrSize alignmentBytes)
+						   F64 nextChunkScale, PtrSize nextChunkBias, Bool ignoreDeallocationErrors, U32 alignmentBytes)
 {
 	ANKI_ASSERT(!isInitialized());
 	ANKI_ASSERT(allocCb);
 	ANKI_ASSERT(initialChunkSize > 0);
 	ANKI_ASSERT(nextChunkScale >= 1.0);
-	ANKI_ASSERT(alignmentBytes > 0);
+	ANKI_ASSERT(alignmentBytes > 0 && alignmentBytes <= MAX_ALIGNMENT);
 
 	m_allocCb = allocCb;
 	m_allocCbUserData = allocCbUserData;
-	m_alignmentBytes = alignmentBytes;
-	m_initialChunkSize = initialChunkSize;
-	m_nextChunkScale = nextChunkScale;
-	m_nextChunkBias = nextChunkBias;
-	m_ignoreDeallocationErrors = ignoreDeallocationErrors;
+	m_builder.getInterface().m_parent = this;
+	m_builder.getInterface().m_alignmentBytes = alignmentBytes;
+	m_builder.getInterface().m_ignoreDeallocationErrors = ignoreDeallocationErrors;
+	m_builder.getInterface().m_initialChunkSize = initialChunkSize;
+	m_builder.getInterface().m_nextChunkScale = nextChunkScale;
+	m_builder.getInterface().m_nextChunkBias = nextChunkBias;
 }
 
 void* StackMemoryPool::allocate(PtrSize size, PtrSize alignment)
 {
 	ANKI_ASSERT(isInitialized());
-	ANKI_ASSERT(alignment <= m_alignmentBytes);
-	(void)alignment;
-
-	size = getAlignedRoundUp(m_alignmentBytes, size);
-	ANKI_ASSERT(size > 0);
-
-	U8* out = nullptr;
 
-	while(true)
+	Chunk* chunk;
+	PtrSize offset;
+	if(m_builder.allocate(size, alignment, chunk, offset))
 	{
-		// Try to allocate from the current chunk, if there is one
-		Chunk* crntChunk = nullptr;
-		const I32 crntChunkIdx = m_crntChunkIdx.load();
-		if(crntChunkIdx >= 0)
-		{
-			crntChunk = &m_chunks[crntChunkIdx];
-			crntChunk->check();
-
-			out = crntChunk->m_mem.fetchAdd(size);
-			ANKI_ASSERT(out >= crntChunk->m_baseMem);
-		}
-
-		if(crntChunk && out + size <= crntChunk->m_baseMem + crntChunk->m_size)
-		{
-			// All is fine, there is enough space in the chunk
-
-			m_allocationsCount.fetchAdd(1);
-			break;
-		}
-		else
-		{
-			// Need new chunk
-
-			LockGuard<Mutex> lock(m_lock);
-
-			// Make sure that only one thread will create a new chunk
-			const Bool someOtherThreadCreateAChunkWhileIWasHoldingTheLock = m_crntChunkIdx.load() != crntChunkIdx;
-			if(someOtherThreadCreateAChunkWhileIWasHoldingTheLock)
-			{
-				continue;
-			}
-
-			// We can create a new chunk
-
-			ANKI_ASSERT(crntChunkIdx >= -1);
-			if(U32(crntChunkIdx + 1) >= m_chunks.getSize())
-			{
-				out = nullptr;
-				ANKI_UTIL_LOGE("Number of chunks is not enough");
-				ANKI_OOM_ACTION();
-				break;
-			}
-
-			// Compute the memory of the new chunk. Don't look at the previous chunk
-			PtrSize newChunkSize = m_initialChunkSize;
-			for(I i = 0; i < crntChunkIdx + 1; ++i)
-			{
-				newChunkSize = PtrSize(F64(newChunkSize) * m_nextChunkScale) + m_nextChunkBias;
-			}
-
-			newChunkSize = max(size, newChunkSize); // Can't have the allocation fail
-			alignRoundUp(m_alignmentBytes, newChunkSize); // Always align at the end
-
-			// Point to the next chunk
-			Chunk* newChunk = &m_chunks[crntChunkIdx + 1];
-
-			if(newChunk->m_baseMem == nullptr || newChunk->m_size != newChunkSize)
-			{
-				// Chunk is empty or its memory doesn't match the expected, need to (re)initialize it
-
-				if(newChunk->m_baseMem)
-				{
-					m_allocCb(m_allocCbUserData, newChunk->m_baseMem, 0, 0);
-					m_allocatedMemory -= newChunk->m_size;
-				}
-
-				void* mem = m_allocCb(m_allocCbUserData, nullptr, newChunkSize, m_alignmentBytes);
-
-				if(mem != nullptr)
-				{
-					invalidateMemory(mem, newChunkSize);
-
-					newChunk->m_baseMem = static_cast<U8*>(mem);
-					newChunk->m_mem.setNonAtomically(newChunk->m_baseMem);
-					newChunk->m_size = newChunkSize;
-
-					m_allocatedMemory += newChunk->m_size;
-
-					const I32 idx = m_crntChunkIdx.fetchAdd(1);
-					ANKI_ASSERT(idx == crntChunkIdx);
-					(void)idx;
-				}
-				else
-				{
-					out = nullptr;
-					ANKI_OOM_ACTION();
-					break;
-				}
-			}
-			else
-			{
-				// Will recycle
-
-				newChunk->checkReset();
-				invalidateMemory(newChunk->m_baseMem, newChunk->m_size);
-
-				const I32 idx = m_crntChunkIdx.fetchAdd(1);
-				ANKI_ASSERT(idx == crntChunkIdx);
-				(void)idx;
-			}
-		}
+		return nullptr;
 	}
 
-	return static_cast<void*>(out);
+	m_allocationCount.fetchAdd(1);
+	const PtrSize address = ptrToNumber(&chunk->m_memoryStart[0]) + offset;
+	return numberToPtr<void*>(address);
 }
 
 void StackMemoryPool::free(void* ptr)
@@ -413,43 +322,17 @@ void StackMemoryPool::free(void* ptr)
 		return;
 	}
 
-	// ptr shouldn't be null or not aligned. If not aligned it was not allocated by this class
-	ANKI_ASSERT(ptr != nullptr && isAligned(m_alignmentBytes, ptr));
-
-	const U32 count = m_allocationsCount.fetchSub(1);
+	const U32 count = m_allocationCount.fetchSub(1);
 	ANKI_ASSERT(count > 0);
 	(void)count;
+	m_builder.free();
 }
 
 void StackMemoryPool::reset()
 {
 	ANKI_ASSERT(isInitialized());
-
-	// Iterate all until you find an unused
-	for(Chunk& ch : m_chunks)
-	{
-		if(ch.m_baseMem != nullptr)
-		{
-			ch.check();
-			ch.m_mem.store(ch.m_baseMem);
-
-			invalidateMemory(ch.m_baseMem, ch.m_size);
-		}
-		else
-		{
-			break;
-		}
-	}
-
-	// Set the crnt chunk
-	m_crntChunkIdx.setNonAtomically(-1);
-
-	// Reset allocation count and do some error checks
-	const U32 allocCount = m_allocationsCount.exchange(0);
-	if(!m_ignoreDeallocationErrors && allocCount != 0)
-	{
-		ANKI_UTIL_LOGW("Forgot to deallocate");
-	}
+	m_builder.reset();
+	m_allocationCount.store(0);
 }
 
 ChainMemoryPool::ChainMemoryPool()
@@ -459,7 +342,7 @@ ChainMemoryPool::ChainMemoryPool()
 
 ChainMemoryPool::~ChainMemoryPool()
 {
-	if(m_allocationsCount.load() != 0)
+	if(m_allocationCount.load() != 0)
 	{
 		ANKI_UTIL_LOGW("Memory pool destroyed before all memory being released");
 	}
@@ -532,7 +415,7 @@ void* ChainMemoryPool::allocate(PtrSize size, PtrSize alignment)
 		ANKI_ASSERT(mem != nullptr && "The chunk should have space");
 	}
 
-	m_allocationsCount.fetchAdd(1);
+	m_allocationCount.fetchAdd(1);
 
 	return mem;
 }
@@ -557,14 +440,14 @@ void ChainMemoryPool::free(void* ptr)
 	LockGuard<SpinLock> lock(m_lock);
 
 	// Decrease the deallocation refcount and if it's zero delete the chunk
-	ANKI_ASSERT(chunk->m_allocationsCount > 0);
-	if(--chunk->m_allocationsCount == 0)
+	ANKI_ASSERT(chunk->m_allocationCount > 0);
+	if(--chunk->m_allocationCount == 0)
 	{
 		// Chunk is empty. Delete it
 		destroyChunk(chunk);
 	}
 
-	m_allocationsCount.fetchSub(1);
+	m_allocationCount.fetchSub(1);
 }
 
 PtrSize ChainMemoryPool::getChunksCount() const
@@ -686,7 +569,7 @@ void* ChainMemoryPool::allocateFromChunk(Chunk* ch, PtrSize size, PtrSize alignm
 		mem += m_headerSize;
 
 		ch->m_top = newTop;
-		++ch->m_allocationsCount;
+		++ch->m_allocationCount;
 	}
 	else
 	{
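
The StackMemoryPool changes above hinge on the new Chunk layout: allocateChunk() asks the callback for header plus payload in one block (offsetof(Chunk, m_memoryStart) + size), and allocate() turns the builder's (chunk, offset) result back into a pointer. A simplified standalone model of that layout and pointer math (field names mirror the diff, everything else is illustrative):

#include <cstddef>

// Header followed by inline payload, like StackMemoryPool::Chunk above.
struct ChunkModel
{
	ChunkModel* m_nextChunk;
	std::size_t m_offsetInChunk; // Atomic<PtrSize> in the real code
	std::size_t m_chunkSize;
	alignas(16) unsigned char m_memoryStart[1];
};

// Size passed to the allocation callback for a payload of `size` bytes.
std::size_t fullChunkSize(std::size_t size)
{
	return offsetof(ChunkModel, m_memoryStart) + size;
}

// What StackMemoryPool::allocate() returns for a (chunk, offset) pair.
void* userPointer(ChunkModel* chunk, std::size_t offset)
{
	return static_cast<void*>(&chunk->m_memoryStart[0] + offset);
}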

+ 70 - 48
AnKi/Util/Memory.h

@@ -10,6 +10,7 @@
 #include <AnKi/Util/Assert.h>
 #include <AnKi/Util/Array.h>
 #include <AnKi/Util/Thread.h>
+#include <AnKi/Util/StackAllocatorBuilder.h>
 #include <utility> // For forward
 
 namespace anki {
@@ -89,9 +90,9 @@ public:
 	}
 
 	/// Return number of allocations
-	U32 getAllocationsCount() const
+	U32 getAllocationCount() const
 	{
-		return m_allocationsCount.load();
+		return m_allocationCount.load();
 	}
 
 protected:
@@ -111,7 +112,7 @@ protected:
 	void* m_allocCbUserData = nullptr;
 
 	/// Allocations count.
-	Atomic<U32> m_allocationsCount = {0};
+	Atomic<U32> m_allocationCount = {0};
 
 	BaseMemoryPool(Type type)
 		: m_type(type)
@@ -181,9 +182,9 @@ public:
 	/// @param ignoreDeallocationErrors Method free() may fail if the ptr is not in the top of the stack. Set that to
 	///        true to suppress such errors.
 	/// @param alignmentBytes The maximum supported alignment for returned memory.
-	void init(AllocAlignedCallback allocCb, void* allocCbUserData, PtrSize initialChunkSize, F32 nextChunkScale = 2.0,
+	void init(AllocAlignedCallback allocCb, void* allocCbUserData, PtrSize initialChunkSize, F64 nextChunkScale = 2.0,
 			  PtrSize nextChunkBias = 0, Bool ignoreDeallocationErrors = true,
-			  PtrSize alignmentBytes = ANKI_SAFE_ALIGNMENT);
+			  U32 alignmentBytes = ANKI_SAFE_ALIGNMENT);
 
 	/// Allocate aligned memory.
 	/// @param size The size to allocate.
@@ -205,69 +206,90 @@ public:
 	/// @note It's not thread safe with other methods.
 	PtrSize getMemoryCapacity() const
 	{
-		return m_allocatedMemory;
+		return m_builder.getMemoryCapacity();
 	}
 
 private:
-	/// The memory chunk.
-	class Chunk
+	/// This is the absolute max alignment.
+	static constexpr U32 MAX_ALIGNMENT = ANKI_SAFE_ALIGNMENT;
+
+	/// This is the chunk the StackAllocatorBuilder will be allocating.
+	class alignas(MAX_ALIGNMENT) Chunk
 	{
 	public:
-		/// The base memory of the chunk.
-		U8* m_baseMem = nullptr;
+		/// Required by StackAllocatorBuilder.
+		Chunk* m_nextChunk;
+
+		/// Required by StackAllocatorBuilder.
+		Atomic<PtrSize> m_offsetInChunk;
 
-		/// The moving ptr for the next allocation.
-		Atomic<U8*> m_mem = {nullptr};
+		/// Required by StackAllocatorBuilder.
+		PtrSize m_chunkSize;
 
-		/// The chunk size.
-		PtrSize m_size = 0;
+		/// The start of the actual CPU memory.
+		alignas(MAX_ALIGNMENT) U8 m_memoryStart[1];
+	};
+
+	/// Implements the StackAllocatorBuilder TInterface
+	class StackAllocatorBuilderInterface
+	{
+	public:
+		StackMemoryPool* m_parent = nullptr;
 
-		/// Check that it's initialized.
-		void check() const
+		PtrSize m_alignmentBytes = 0;
+
+		Bool m_ignoreDeallocationErrors = false;
+
+		PtrSize m_initialChunkSize = 0;
+
+		F64 m_nextChunkScale = 0.0;
+
+		PtrSize m_nextChunkBias = 0;
+
+		// The rest of the functions implement the StackAllocatorBuilder TInterface.
+
+		PtrSize getMaxAlignment() const
 		{
-			ANKI_ASSERT(m_baseMem != nullptr);
-			ANKI_ASSERT(m_mem.load() >= m_baseMem);
-			ANKI_ASSERT(m_size > 0);
+			ANKI_ASSERT(m_alignmentBytes > 0);
+			return m_alignmentBytes;
 		}
 
-		// Check that it's in reset state.
-		void checkReset() const
+		PtrSize getInitialChunkSize() const
 		{
-			ANKI_ASSERT(m_baseMem != nullptr);
-			ANKI_ASSERT(m_mem.load() == m_baseMem);
-			ANKI_ASSERT(m_size > 0);
+			ANKI_ASSERT(m_initialChunkSize > 0);
			return m_initialChunkSize;
 		}
-	};
 
-	/// Alignment of allocations
-	PtrSize m_alignmentBytes = 0;
-
-	/// The size of the first chunk.
-	PtrSize m_initialChunkSize = 0;
-
-	/// Chunk scale.
-	F32 m_nextChunkScale = 0.0;
+		F64 getNextChunkGrowScale() const
+		{
+			ANKI_ASSERT(m_nextChunkScale >= 1.0);
+			return m_nextChunkScale;
+		}
 
-	/// Chunk bias.
-	PtrSize m_nextChunkBias = 0;
+		PtrSize getNextChunkGrowBias() const
+		{
+			return m_nextChunkBias;
+		}
 
-	/// Allocated memory.
-	PtrSize m_allocatedMemory = 0;
+		Bool ignoreDeallocationErrors() const
		{
			return m_ignoreDeallocationErrors;
		}
 
-	/// Ignore deallocation errors.
-	Bool m_ignoreDeallocationErrors = false;
+		Error allocateChunk(PtrSize size, Chunk*& out);
 
-	/// The current chunk. Chose the more strict memory order to avoid compiler re-ordering of instructions
-	Atomic<I32, AtomicMemoryOrder::SEQ_CST> m_crntChunkIdx = {-1};
+		void freeChunk(Chunk* chunk);
 
-	/// The max number of chunks.
-	static const U MAX_CHUNKS = 256;
+		void recycleChunk(Chunk& chunk);
 
-	/// The chunks.
-	Array<Chunk, MAX_CHUNKS> m_chunks;
+		Atomic<U32>* getAllocationCount()
		{
			return &m_parent->m_allocationCount;
		}
+	};
 
-	/// Protect the m_crntChunkIdx.
-	Mutex m_lock;
+	/// The allocator helper.
+	StackAllocatorBuilder<Chunk, StackAllocatorBuilderInterface, Mutex> m_builder;
 };
 
 /// Chain memory pool. Almost similar to StackMemoryPool but more flexible and at the same time a bit slower.
@@ -323,7 +345,7 @@ private:
 		U8* m_top = nullptr;
 
 		/// Used to identify if the chunk can be deleted
-		PtrSize m_allocationsCount = 0;
+		PtrSize m_allocationCount = 0;
 
 		/// Previous chunk in the list
 		Chunk* m_prev = nullptr;

+ 105 - 0
AnKi/Util/StackAllocatorBuilder.h

@@ -0,0 +1,105 @@
+// Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
+// All rights reserved.
+// Code licensed under the BSD License.
+// http://www.anki3d.org/LICENSE
+
+#pragma once
+
+#include <AnKi/Util/Atomic.h>
+#include <AnKi/Util/Common.h>
+
+namespace anki {
+
+/// @addtogroup util_memory
+/// @{
+
+/// This is a convenience class used to build stack memory allocators.
+/// @tparam TChunk This is the type of the internally allocated chunks. It should have the following members:
+///                @code
+///                TChunk* m_nextChunk;
+///                Atomic<PtrSize> m_offsetInChunk;
+///                PtrSize m_chunkSize;
+///                @endcode
+/// @tparam TInterface This is the type of the interface that contains various info. Should have the following members:
+///                    @code
+///                    U32 getMaxAlignment();
+///                    PtrSize getInitialChunkSize();
+///                    F64 getNextChunkGrowScale();
+///                    PtrSize getNextChunkGrowBias();
+///                    Bool ignoreDeallocationErrors();
+///                    Error allocateChunk(PtrSize size, TChunk*& out);
+///                    void freeChunk(TChunk* out);
+///                    void recycleChunk(TChunk& out);
+///                    Atomic<U32>* getAllocationCount(); // It's optional
+///                    @endcode
+/// @tparam TLock This an optional lock. Can be a Mutex or SpinLock or some dummy class.
+template<typename TChunk, typename TInterface, typename TLock>
+class StackAllocatorBuilder
+{
+public:
+	/// Create.
+	StackAllocatorBuilder() = default;
+
+	/// Destroy.
+	~StackAllocatorBuilder();
+
+	/// Allocate memory.
+	/// @param size The size to allocate.
+	/// @param alignment The alignment of the returned address.
+	/// @param[out] chunk The chunk that the memory belongs to.
+	/// @param[out] offset The offset inside the chunk.
+	/// @note This is thread safe with itself.
+	ANKI_USE_RESULT Error allocate(PtrSize size, PtrSize alignment, TChunk*& chunk, PtrSize& offset);
+
+	/// Free memory. Doesn't do anything special, only some bookkeeping.
+	void free();
+
+	/// Reset all the memory chunks.
+	/// @note Not thread safe. Don't call it while calling allocate.
+	void reset();
+
+	/// Access the interface.
+	/// @note Not thread safe. Don't call it while calling allocate.
+	TInterface& getInterface()
+	{
+		return m_interface;
+	}
+
+	/// Access the interface.
+	/// @note Not thread safe. Don't call it while calling allocate.
+	const TInterface& getInterface() const
+	{
+		return m_interface;
+	}
+
+	/// Get the total memory consumed by the chunks.
+	/// @note Not thread safe. Don't call it while calling allocate.
+	PtrSize getMemoryCapacity() const
+	{
+		return m_memoryCapacity;
+	}
+
+private:
+	/// The current chunk. Chose the more strict memory order to avoid compiler re-ordering of instructions
+	Atomic<TChunk*, AtomicMemoryOrder::SEQ_CST> m_crntChunk = {nullptr};
+
+	/// The beginning of the chunk list.
+	TChunk* m_chunksListHead = nullptr;
+
+	/// The memory allocated by all chunks.
+	PtrSize m_memoryCapacity = 0;
+
+	/// Number of chunks allocated.
+	U32 m_chunkCount = 0;
+
+	/// The interface as described in the class docs.
+	TInterface m_interface;
+
+	/// An optional lock.
+	TLock m_lock;
+};
+/// @}
+
+} // end namespace anki
+
+#include <AnKi/Util/StackAllocatorBuilder.inl.h>
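
For context, a minimal sketch of a chunk/interface pair that satisfies the contract documented above, backed by the C heap (names and constants are illustrative; the real in-engine user is StackMemoryPool in AnKi/Util/Memory.h, changed earlier in this diff):

#include <AnKi/Util/StackAllocatorBuilder.h>
#include <AnKi/Util/Thread.h> // for Mutex
#include <cstddef>
#include <cstdlib>

using namespace anki;

// Chunk with the three required members plus inline payload storage.
class ExampleChunk
{
public:
	ExampleChunk* m_nextChunk;
	Atomic<PtrSize> m_offsetInChunk;
	PtrSize m_chunkSize;
	U8 m_payload[1]; // header and payload are allocated as one block
};

// Interface with the required methods.
class ExampleInterface
{
public:
	// malloc() only guarantees alignof(std::max_align_t), so advertise that.
	PtrSize getMaxAlignment() const { return alignof(std::max_align_t); }
	PtrSize getInitialChunkSize() const { return 1024; }
	F64 getNextChunkGrowScale() const { return 2.0; }
	PtrSize getNextChunkGrowBias() const { return 0; }
	Bool ignoreDeallocationErrors() const { return true; }

	Error allocateChunk(PtrSize size, ExampleChunk*& out)
	{
		void* mem = std::malloc(offsetof(ExampleChunk, m_payload) + size);
		if(mem == nullptr)
		{
			return Error::OUT_OF_MEMORY;
		}
		out = static_cast<ExampleChunk*>(mem);
		return Error::NONE;
	}

	void freeChunk(ExampleChunk* chunk) { std::free(chunk); }
	void recycleChunk(ExampleChunk&) {}
	Atomic<U32>* getAllocationCount() { return nullptr; } // opt out of counting
};

using ExampleBuilder = StackAllocatorBuilder<ExampleChunk, ExampleInterface, Mutex>;

StackMemoryPool wires its own Chunk and StackAllocatorBuilderInterface into this same template, which is the intended usage pattern.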

+ 187 - 0
AnKi/Util/StackAllocatorBuilder.inl.h

@@ -0,0 +1,187 @@
+// Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
+// All rights reserved.
+// Code licensed under the BSD License.
+// http://www.anki3d.org/LICENSE
+
+#include <AnKi/Util/StackAllocatorBuilder.h>
+#include <AnKi/Util/Functions.h>
+#include <AnKi/Util/Logger.h>
+
+namespace anki {
+
+template<typename TChunk, typename TInterface, typename TLock>
+StackAllocatorBuilder<TChunk, TInterface, TLock>::~StackAllocatorBuilder()
+{
+	// Free chunks
+	TChunk* chunk = m_chunksListHead;
+	while(chunk)
+	{
+		TChunk* next = chunk->m_nextChunk;
+		m_interface.freeChunk(chunk);
+		chunk = next;
+	}
+
+	// Do some error checks
+	Atomic<U32>* allocationCount = m_interface.getAllocationCount();
+	if(allocationCount)
+	{
+		const U32 allocCount = allocationCount->load();
+		if(!m_interface.ignoreDeallocationErrors() && allocCount != 0)
+		{
+			ANKI_UTIL_LOGW("Forgot to deallocate");
+		}
+	}
+}
+
+template<typename TChunk, typename TInterface, typename TLock>
+Error StackAllocatorBuilder<TChunk, TInterface, TLock>::allocate(PtrSize size, PtrSize alignment, TChunk*& chunk,
+																 PtrSize& offset)
+{
+	ANKI_ASSERT(alignment <= m_interface.getMaxAlignment());
+	(void)alignment;
+
+	size = getAlignedRoundUp(m_interface.getMaxAlignment(), size);
+	ANKI_ASSERT(size > 0);
+
+	chunk = nullptr;
+	offset = MAX_PTR_SIZE;
+
+	while(true)
+	{
+		// Try to allocate from the current chunk, if there is one
+		TChunk* crntChunk = m_crntChunk.load();
+		if(crntChunk)
+		{
+			offset = crntChunk->m_offsetInChunk.fetchAdd(size);
+		}
+
+		if(crntChunk && offset + size <= crntChunk->m_chunkSize)
+		{
+			// All is fine, there is enough space in the chunk
+
+			chunk = crntChunk;
+
+			Atomic<U32>* allocationCount = m_interface.getAllocationCount();
+			if(allocationCount)
+			{
+				allocationCount->fetchAdd(1);
+			}
+
+			break;
+		}
+		else
+		{
+			// Need new chunk
+
+			LockGuard<TLock> lock(m_lock);
+
+			// Make sure that only one thread will create a new chunk
+			const Bool someOtherThreadCreatedAChunkWhileIWasHoldingTheLock = m_crntChunk.load() != crntChunk;
+			if(someOtherThreadCreatedAChunkWhileIWasHoldingTheLock)
+			{
+				continue;
+			}
+
+			// We can create a new chunk
+
+			// Compute the memory of the new chunk. Don't look at any previous chunk
+			PtrSize nextChunkSize = m_interface.getInitialChunkSize();
+			ANKI_ASSERT(nextChunkSize > 0);
+			for(U32 i = 0; i < m_chunkCount; ++i)
+			{
+				const F64 scale = m_interface.getNextChunkGrowScale();
+				ANKI_ASSERT(scale >= 1.0);
+				nextChunkSize = PtrSize(F64(nextChunkSize) * scale) + m_interface.getNextChunkGrowBias();
+				ANKI_ASSERT(nextChunkSize > 0);
+			}
+
+			nextChunkSize = max(size, nextChunkSize); // Can't have the allocation fail
+			alignRoundUp(m_interface.getMaxAlignment(), nextChunkSize); // Align again
+
+			TChunk* nextChunk = (crntChunk) ? crntChunk->m_nextChunk : nullptr;
+
+			if(nextChunk && nextChunk->m_chunkSize == nextChunkSize)
+			{
+				// Will recycle
+
+				crntChunk->m_nextChunk->m_offsetInChunk.store(0);
+				m_interface.recycleChunk(*nextChunk);
+				m_crntChunk.store(nextChunk);
+			}
+			else
+			{
+				// There is no chunk or there is but it's too small
+
+				// Do that first because it might throw error
+				TChunk* newNextChunk;
+				ANKI_CHECK(m_interface.allocateChunk(nextChunkSize, newNextChunk));
+				newNextChunk->m_nextChunk = nullptr;
+				newNextChunk->m_offsetInChunk.setNonAtomically(0);
+				newNextChunk->m_chunkSize = nextChunkSize;
+				++m_chunkCount;
+
+				// Remove the existing next chunk if there is one
+				TChunk* nextNextChunk = nullptr;
+				if(nextChunk)
+				{
+					nextNextChunk = nextChunk->m_nextChunk;
+					m_interface.freeChunk(nextChunk);
+					nextChunk = nullptr;
+					--m_chunkCount;
+				}
+
+				// Do list stuff
+				if(crntChunk)
+				{
+					crntChunk->m_nextChunk = newNextChunk;
+					ANKI_ASSERT(m_chunksListHead != nullptr);
+				}
+				else
+				{
+					ANKI_ASSERT(m_chunksListHead == nullptr);
+					m_chunksListHead = newNextChunk;
+				}
+
+				newNextChunk->m_nextChunk = nextNextChunk;
+
+				m_crntChunk.store(newNextChunk);
+
+				m_memoryCapacity += nextChunkSize;
+			}
+		}
+	}
+
+	ANKI_ASSERT(chunk && offset != MAX_PTR_SIZE);
+	return Error::NONE;
+}
+
+template<typename TChunk, typename TInterface, typename TLock>
+void StackAllocatorBuilder<TChunk, TInterface, TLock>::free()
+{
+	Atomic<U32>* allocationCount = m_interface.getAllocationCount();
+	if(allocationCount)
+	{
+		const U32 count = allocationCount->fetchSub(1);
+		ANKI_ASSERT(count > 0);
+		(void)count;
+	}
+}
+
+template<typename TChunk, typename TInterface, typename TLock>
+void StackAllocatorBuilder<TChunk, TInterface, TLock>::reset()
+{
+	m_crntChunk.setNonAtomically(m_chunksListHead);
+
+	// Reset allocation count and do some error checks
+	Atomic<U32>* allocationCount = m_interface.getAllocationCount();
+	if(allocationCount)
+	{
+		const U32 allocCount = allocationCount->exchange(0);
+		if(!m_interface.ignoreDeallocationErrors() && allocCount != 0)
+		{
+			ANKI_UTIL_LOGW("Forgot to deallocate");
+		}
+	}
+}
+
+} // end namespace anki

+ 4 - 4
Tests/Util/BuddyAllocator.cpp → Tests/Util/BuddyAllocatorBuilder.cpp

@@ -4,17 +4,17 @@
 // http://www.anki3d.org/LICENSE
 
 #include <Tests/Framework/Framework.h>
-#include <AnKi/Util/BuddyAllocator.h>
+#include <AnKi/Util/BuddyAllocatorBuilder.h>
 
 namespace anki {
 
-ANKI_TEST(Util, BuddyAllocator)
+ANKI_TEST(Util, BuddyAllocatorBuilder)
 {
 	HeapAllocator<U8> alloc(allocAligned, nullptr);
 
 	// Simple
 	{
-		BuddyAllocator<4> buddy(alloc, 4);
+		BuddyAllocatorBuilder<4> buddy(alloc, 4);
 
 		Array<U32, 2> addr;
 		Bool success = buddy.allocate(1, addr[0]);
@@ -32,7 +32,7 @@ ANKI_TEST(Util, BuddyAllocator)
 
 	// Fuzzy
 	{
-		BuddyAllocator<32> buddy(alloc, 32);
+		BuddyAllocatorBuilder<32> buddy(alloc, 32);
 		std::vector<std::pair<U32, U32>> allocations;
 		for(U32 it = 0; it < 1000; ++it)
 		{

+ 5 - 5
Tests/Util/Memory.cpp

@@ -57,10 +57,10 @@ ANKI_TEST(Util, StackMemoryPool)
 
 		void* a = pool.allocate(25, 1);
 		ANKI_TEST_EXPECT_NEQ(a, nullptr);
-		ANKI_TEST_EXPECT_EQ(pool.getAllocationsCount(), 1);
+		ANKI_TEST_EXPECT_EQ(pool.getAllocationCount(), 1);
 
 		pool.free(a);
-		ANKI_TEST_EXPECT_EQ(pool.getAllocationsCount(), 0);
+		ANKI_TEST_EXPECT_EQ(pool.getAllocationCount(), 0);
 
 		// Allocate a few
 		const U SIZE = 75;
@@ -72,11 +72,11 @@ ANKI_TEST(Util, StackMemoryPool)
 		ANKI_TEST_EXPECT_NEQ(a, nullptr);
 		a = pool.allocate(SIZE, 1);
 		ANKI_TEST_EXPECT_NEQ(a, nullptr);
-		ANKI_TEST_EXPECT_EQ(pool.getAllocationsCount(), 4);
+		ANKI_TEST_EXPECT_EQ(pool.getAllocationCount(), 4);
 
 		// Reset
 		pool.reset();
-		ANKI_TEST_EXPECT_EQ(pool.getAllocationsCount(), 0);
+		ANKI_TEST_EXPECT_EQ(pool.getAllocationCount(), 0);
 
 		// Allocate again
 		a = pool.allocate(SIZE, 1);
@@ -87,7 +87,7 @@ ANKI_TEST(Util, StackMemoryPool)
 		ANKI_TEST_EXPECT_NEQ(a, nullptr);
 		a = pool.allocate(SIZE, 1);
 		ANKI_TEST_EXPECT_NEQ(a, nullptr);
-		ANKI_TEST_EXPECT_EQ(pool.getAllocationsCount(), 4);
+		ANKI_TEST_EXPECT_EQ(pool.getAllocationCount(), 4);
 	}
 
 	// Parallel