Browse Source

Remove the StackGpuAllocator

Panagiotis Christopoulos Charitos 4 years ago
parent
commit
52cacc7082

+ 0 - 1
AnKi/Gr.h

@@ -23,7 +23,6 @@
 #include <AnKi/Gr/Utils/ClassGpuAllocator.h>
 #include <AnKi/Gr/Utils/FrameGpuAllocator.h>
 #include <AnKi/Gr/Utils/Functions.h>
-#include <AnKi/Gr/Utils/StackGpuAllocator.h>
 
 /// @defgroup graphics Graphics API abstraction
 

+ 1 - 2
AnKi/Gr/CMakeLists.txt

@@ -5,8 +5,7 @@ set(COMMON
 	"ShaderProgram.cpp"
 	"Utils/ClassGpuAllocator.cpp"
 	"Utils/FrameGpuAllocator.cpp"
-	"Utils/Functions.cpp"
-	"Utils/StackGpuAllocator.cpp")
+	"Utils/Functions.cpp")
 
 foreach(S ${COMMON})
 	anki_add_source_files("${CMAKE_CURRENT_SOURCE_DIR}/${S}")

+ 0 - 136
AnKi/Gr/Utils/StackGpuAllocator.cpp

@@ -1,136 +0,0 @@
-// Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
-// All rights reserved.
-// Code licensed under the BSD License.
-// http://www.anki3d.org/LICENSE
-
-#include <AnKi/Gr/Utils/StackGpuAllocator.h>
-
-namespace anki {
-
-class StackGpuAllocatorChunk
-{
-public:
-	StackGpuAllocatorChunk* m_next;
-	StackGpuAllocatorMemory* m_mem;
-	Atomic<PtrSize> m_offset;
-	PtrSize m_size;
-};
-
-StackGpuAllocator::~StackGpuAllocator()
-{
-	Chunk* chunk = m_chunkListHead;
-	while(chunk)
-	{
-		if(chunk->m_mem)
-		{
-			m_iface->free(chunk->m_mem);
-		}
-
-		Chunk* next = chunk->m_next;
-		m_alloc.deleteInstance(chunk);
-		chunk = next;
-	}
-}
-
-void StackGpuAllocator::init(GenericMemoryPoolAllocator<U8> alloc, StackGpuAllocatorInterface* iface)
-{
-	ANKI_ASSERT(iface);
-	m_alloc = alloc;
-	m_iface = iface;
-	iface->getChunkGrowInfo(m_scale, m_bias, m_initialSize);
-	ANKI_ASSERT(m_scale >= 1.0);
-	ANKI_ASSERT(m_initialSize > 0);
-
-	m_alignment = iface->getMaxAlignment();
-	ANKI_ASSERT(m_alignment > 0);
-
-	alignRoundUp(m_alignment, m_initialSize);
-}
-
-Error StackGpuAllocator::allocate(PtrSize size, StackGpuAllocatorHandle& handle)
-{
-	alignRoundUp(m_alignment, size);
-	ANKI_ASSERT(size > 0);
-	ANKI_ASSERT(size <= m_initialSize && "The chunks should have enough space to hold at least one allocation");
-
-	Chunk* crntChunk;
-	Bool retry = true;
-
-	do
-	{
-		crntChunk = m_crntChunk.load();
-		PtrSize offset;
-
-		if(crntChunk && ((offset = crntChunk->m_offset.fetchAdd(size)) + size) <= crntChunk->m_size)
-		{
-			// All is fine, there is enough space in the chunk
-
-			handle.m_memory = crntChunk->m_mem;
-			handle.m_offset = offset;
-
-			retry = false;
-		}
-		else
-		{
-			// Need new chunk
-
-			LockGuard<Mutex> lock(m_lock);
-
-			// Make sure that only one thread will create a new chunk
-			if(m_crntChunk.load() == crntChunk)
-			{
-				// We can create a new chunk
-
-				if(crntChunk == nullptr || crntChunk->m_next == nullptr)
-				{
-					// Need to create a new chunk
-
-					Chunk* newChunk = m_alloc.newInstance<Chunk>();
-
-					if(crntChunk)
-					{
-						crntChunk->m_next = newChunk;
-						newChunk->m_size = PtrSize(F32(crntChunk->m_size) * m_scale + F32(m_bias));
-					}
-					else
-					{
-						newChunk->m_size = m_initialSize;
-
-						if(m_chunkListHead == nullptr)
-						{
-							m_chunkListHead = newChunk;
-						}
-					}
-					alignRoundUp(m_alignment, newChunk->m_size);
-
-					newChunk->m_next = nullptr;
-					newChunk->m_offset.setNonAtomically(0);
-					ANKI_CHECK(m_iface->allocate(newChunk->m_size, newChunk->m_mem));
-
-					m_crntChunk.store(newChunk);
-				}
-				else
-				{
-					// Need to recycle one
-
-					crntChunk->m_next->m_offset.setNonAtomically(0);
-
-					m_crntChunk.store(crntChunk->m_next);
-				}
-			}
-		}
-	} while(retry);
-
-	return Error::NONE;
-}
-
-void StackGpuAllocator::reset()
-{
-	m_crntChunk.setNonAtomically(m_chunkListHead);
-	if(m_chunkListHead)
-	{
-		m_chunkListHead->m_offset.setNonAtomically(0);
-	}
-}
-
-} // end namespace anki

+ 0 - 101
AnKi/Gr/Utils/StackGpuAllocator.h

@@ -1,101 +0,0 @@
-// Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
-// All rights reserved.
-// Code licensed under the BSD License.
-// http://www.anki3d.org/LICENSE
-
-#pragma once
-
-#include <AnKi/Gr/Common.h>
-
-namespace anki {
-
-// Forward
-class StackGpuAllocatorChunk;
-
-/// @addtogroup graphics
-/// @{
-
-/// The user defined output of an allocation.
-class StackGpuAllocatorMemory
-{
-};
-
-/// The user defined methods to allocate memory.
-class StackGpuAllocatorInterface
-{
-public:
-	virtual ~StackGpuAllocatorInterface()
-	{
-	}
-
-	/// Allocate memory. Should be thread safe.
-	virtual ANKI_USE_RESULT Error allocate(PtrSize size, StackGpuAllocatorMemory*& mem) = 0;
-
-	/// Free memory. Should be thread safe.
-	virtual void free(StackGpuAllocatorMemory* mem) = 0;
-
-	/// If the current chunk of the linear allocator is of size N then the next chunk will be of size N*scale+bias.
-	virtual void getChunkGrowInfo(F32& scale, PtrSize& bias, PtrSize& initialSize) = 0;
-
-	virtual U32 getMaxAlignment() = 0;
-};
-
-/// The output of an allocation.
-class StackGpuAllocatorHandle
-{
-	friend class StackGpuAllocator;
-
-public:
-	StackGpuAllocatorMemory* m_memory = nullptr;
-	PtrSize m_offset = 0;
-
-	explicit operator Bool() const
-	{
-		return m_memory != nullptr;
-	}
-};
-
-/// Linear based allocator.
-class StackGpuAllocator
-{
-public:
-	StackGpuAllocator() = default;
-
-	StackGpuAllocator(const StackGpuAllocator&) = delete; // Non-copyable
-
-	~StackGpuAllocator();
-
-	StackGpuAllocator& operator=(const StackGpuAllocator&) = delete; // Non-copyable
-
-	void init(GenericMemoryPoolAllocator<U8> alloc, StackGpuAllocatorInterface* iface);
-
-	/// Allocate memory.
-	ANKI_USE_RESULT Error allocate(PtrSize size, StackGpuAllocatorHandle& handle);
-
-	/// Reset all the memory chunks. Not thread safe.
-	void reset();
-
-	StackGpuAllocatorInterface* getInterface() const
-	{
-		ANKI_ASSERT(m_iface);
-		return m_iface;
-	}
-
-private:
-	using Chunk = StackGpuAllocatorChunk;
-
-	GenericMemoryPoolAllocator<U8> m_alloc;
-	StackGpuAllocatorInterface* m_iface = nullptr;
-
-	Chunk* m_chunkListHead = nullptr;
-	Atomic<Chunk*> m_crntChunk = {nullptr};
-	Mutex m_lock;
-
-	F32 m_scale = 0.0;
-	PtrSize m_bias = 0;
-	PtrSize m_initialSize = 0;
-	U32 m_alignment = 0;
-};
-/// @}
-
-} // end namespace anki

+ 50 - 96
AnKi/Resource/TransferGpuAllocator.cpp

@@ -11,75 +11,25 @@
 
 namespace anki {
 
-class TransferGpuAllocator::Memory : public StackGpuAllocatorMemory
+Error TransferGpuAllocator::StackAllocatorBuilderInterface::allocateChunk(PtrSize size, Chunk*& out)
 {
-public:
-	BufferPtr m_buffer;
-	void* m_mappedMemory;
-};
+	out = m_parent->m_alloc.newInstance<Chunk>();
 
-class TransferGpuAllocator::Interface : public StackGpuAllocatorInterface
-{
-public:
-	GrManager* m_gr;
-	ResourceAllocator<U8> m_alloc;
-
-	ResourceAllocator<U8> getAllocator() const
-	{
-		return m_alloc;
-	}
-
-	ANKI_USE_RESULT Error allocate(PtrSize size, StackGpuAllocatorMemory*& mem) final
-	{
-		TransferGpuAllocator::Memory* mm = m_alloc.newInstance<TransferGpuAllocator::Memory>();
-
-		mm->m_buffer = m_gr->newBuffer(
-			BufferInitInfo(size, BufferUsageBit::TRANSFER_SOURCE, BufferMapAccessBit::WRITE, "Transfer"));
-		mm->m_mappedMemory = mm->m_buffer->map(0, size, BufferMapAccessBit::WRITE);
+	BufferInitInfo bufferInit(size, BufferUsageBit::TRANSFER_SOURCE, BufferMapAccessBit::WRITE, "Transfer");
+	out->m_buffer = m_parent->m_gr->newBuffer(bufferInit);
 
-		mem = mm;
-		return Error::NONE;
-	}
-
-	void free(StackGpuAllocatorMemory* mem) final
-	{
-		ANKI_ASSERT(mem);
-
-		TransferGpuAllocator::Memory* mm = static_cast<TransferGpuAllocator::Memory*>(mem);
-		if(mm->m_mappedMemory)
-		{
-			mm->m_buffer->unmap();
-		}
-		m_alloc.deleteInstance(mm);
-	}
+	out->m_mappedBuffer = out->m_buffer->map(0, MAX_PTR_SIZE, BufferMapAccessBit::WRITE);
 
-	void getChunkGrowInfo(F32& scale, PtrSize& bias, PtrSize& initialSize) final
-	{
-		scale = 1.5;
-		bias = 0;
-		initialSize = TransferGpuAllocator::CHUNK_INITIAL_SIZE;
-	}
-
-	U32 getMaxAlignment() final
-	{
-		return TransferGpuAllocator::GPU_BUFFER_ALIGNMENT;
-	}
-};
-
-BufferPtr TransferGpuAllocatorHandle::getBuffer() const
-{
-	ANKI_ASSERT(m_handle.m_memory);
-	const TransferGpuAllocator::Memory* mm = static_cast<const TransferGpuAllocator::Memory*>(m_handle.m_memory);
-	ANKI_ASSERT(mm->m_buffer);
-	return mm->m_buffer;
+	return Error::NONE;
 }
 
-void* TransferGpuAllocatorHandle::getMappedMemory() const
+void TransferGpuAllocator::StackAllocatorBuilderInterface::freeChunk(Chunk* chunk)
 {
-	ANKI_ASSERT(m_handle.m_memory);
-	const TransferGpuAllocator::Memory* mm = static_cast<const TransferGpuAllocator::Memory*>(m_handle.m_memory);
-	ANKI_ASSERT(mm->m_mappedMemory);
-	return static_cast<U8*>(mm->m_mappedMemory) + m_handle.m_offset;
+	ANKI_ASSERT(chunk);
+
+	chunk->m_buffer->unmap();
+
+	m_parent->m_alloc.deleteInstance(chunk);
 }
 
 TransferGpuAllocator::TransferGpuAllocator()
@@ -88,10 +38,10 @@ TransferGpuAllocator::TransferGpuAllocator()
 
 TransferGpuAllocator::~TransferGpuAllocator()
 {
-	for(Frame& frame : m_frames)
+	for(Pool& pool : m_pools)
 	{
-		ANKI_ASSERT(frame.m_pendingReleases == 0);
-		frame.m_fences.destroy(m_alloc);
+		ANKI_ASSERT(pool.m_pendingReleases == 0);
+		pool.m_fences.destroy(m_alloc);
 	}
 }
 
@@ -100,16 +50,12 @@ Error TransferGpuAllocator::init(PtrSize maxSize, GrManager* gr, ResourceAllocat
 	m_alloc = alloc;
 	m_gr = gr;
 
-	m_maxAllocSize = getAlignedRoundUp(CHUNK_INITIAL_SIZE * FRAME_COUNT, maxSize);
+	m_maxAllocSize = getAlignedRoundUp(CHUNK_INITIAL_SIZE * POOL_COUNT, maxSize);
 	ANKI_RESOURCE_LOGI("Will use %luMB of memory for transfer scratch", m_maxAllocSize / 1024 / 1024);
 
-	m_interface.reset(m_alloc.newInstance<Interface>());
-	m_interface->m_gr = gr;
-	m_interface->m_alloc = alloc;
-
-	for(Frame& frame : m_frames)
+	for(Pool& pool : m_pools)
 	{
-		frame.m_stackAlloc.init(m_alloc, m_interface.get());
+		pool.m_stackAlloc.getInterface().m_parent = this;
 	}
 
 	return Error::NONE;
@@ -119,52 +65,60 @@ Error TransferGpuAllocator::allocate(PtrSize size, TransferGpuAllocatorHandle& h
 {
 	ANKI_TRACE_SCOPED_EVENT(RSRC_ALLOCATE_TRANSFER);
 
-	const PtrSize frameSize = m_maxAllocSize / FRAME_COUNT;
+	const PtrSize poolSize = m_maxAllocSize / POOL_COUNT;
 
 	LockGuard<Mutex> lock(m_mtx);
 
-	Frame* frame;
-	if(m_crntFrameAllocatedSize + size <= frameSize)
+	Pool* pool;
+	if(m_crntPoolAllocatedSize + size <= poolSize)
 	{
-		// Have enough space in the frame
+		// Have enough space in the pool
 
-		frame = &m_frames[m_frameCount];
+		pool = &m_pools[m_crntPool];
 	}
 	else
 	{
-		// Don't have enough space. Wait for next frame
+		// Don't have enough space. Wait for one pool used in the past
 
-		m_frameCount = U8((m_frameCount + 1) % FRAME_COUNT);
-		Frame& nextFrame = m_frames[m_frameCount];
+		m_crntPool = U8((m_crntPool + 1) % POOL_COUNT);
+		pool = &m_pools[m_crntPool];
 
 		// Wait for all memory to be released
-		while(nextFrame.m_pendingReleases != 0)
+		while(pool->m_pendingReleases != 0)
 		{
 			m_condVar.wait(m_mtx);
 		}
 
-		// Wait all fences
-		while(!nextFrame.m_fences.isEmpty())
+		// All memory is released, loop until all fences are triggered
+		while(!pool->m_fences.isEmpty())
 		{
-			FencePtr fence = nextFrame.m_fences.getFront();
+			FencePtr fence = pool->m_fences.getFront();
 
 			const Bool done = fence->clientWait(MAX_FENCE_WAIT_TIME);
 			if(done)
 			{
-				nextFrame.m_fences.popFront(m_alloc);
+				pool->m_fences.popFront(m_alloc);
 			}
 		}
 
-		nextFrame.m_stackAlloc.reset();
-		m_crntFrameAllocatedSize = 0;
-		frame = &nextFrame;
+		pool->m_stackAlloc.reset();
+		m_crntPoolAllocatedSize = 0;
 	}
 
-	ANKI_CHECK(frame->m_stackAlloc.allocate(size, handle.m_handle));
+	Chunk* chunk;
+	PtrSize offset;
+	const Error err = pool->m_stackAlloc.allocate(size, GPU_BUFFER_ALIGNMENT, chunk, offset);
+	ANKI_ASSERT(!err);
+	(void)err;
+
+	handle.m_buffer = chunk->m_buffer;
+	handle.m_mappedMemory = static_cast<U8*>(chunk->m_mappedBuffer) + offset;
+	handle.m_offsetInBuffer = offset;
 	handle.m_range = size;
-	handle.m_frame = U8(frame - &m_frames[0]);
-	m_crntFrameAllocatedSize += size;
-	++frame->m_pendingReleases;
+	handle.m_pool = U8(pool - &m_pools[0]);
+
+	m_crntPoolAllocatedSize += size;
+	++pool->m_pendingReleases;
 
 	return Error::NONE;
 }
@@ -174,15 +128,15 @@ void TransferGpuAllocator::release(TransferGpuAllocatorHandle& handle, FencePtr
 	ANKI_ASSERT(fence);
 	ANKI_ASSERT(handle.valid());
 
-	Frame& frame = m_frames[handle.m_frame];
+	Pool& pool = m_pools[handle.m_pool];
 
 	{
 		LockGuard<Mutex> lock(m_mtx);
 
-		frame.m_fences.pushBack(m_alloc, fence);
+		pool.m_fences.pushBack(m_alloc, fence);
 
-		ANKI_ASSERT(frame.m_pendingReleases > 0);
-		--frame.m_pendingReleases;
+		ANKI_ASSERT(pool.m_pendingReleases > 0);
+		--pool.m_pendingReleases;
 
 		m_condVar.notifyOne();
 	}

+ 115 - 27
AnKi/Resource/TransferGpuAllocator.h

@@ -6,8 +6,9 @@
 #pragma once
 
 #include <AnKi/Resource/Common.h>
-#include <AnKi/Gr/Utils/StackGpuAllocator.h>
+#include <AnKi/Util/StackAllocatorBuilder.h>
 #include <AnKi/Util/List.h>
+#include <AnKi/Gr/Buffer.h>
 
 namespace anki {
 
@@ -38,46 +39,57 @@ public:
 
 	TransferGpuAllocatorHandle& operator=(TransferGpuAllocatorHandle&& b)
 	{
-		m_handle = b.m_handle;
+		m_buffer = b.m_buffer;
+		m_mappedMemory = b.m_mappedMemory;
+		m_offsetInBuffer = b.m_offsetInBuffer;
 		m_range = b.m_range;
-		m_frame = b.m_frame;
+		m_pool = b.m_pool;
 		b.invalidate();
 		return *this;
 	}
 
-	BufferPtr getBuffer() const;
+	const BufferPtr& getBuffer() const
+	{
+		return m_buffer;
+	}
 
-	void* getMappedMemory() const;
+	void* getMappedMemory() const
+	{
+		ANKI_ASSERT(m_mappedMemory);
+		return m_mappedMemory;
+	}
 
 	PtrSize getOffset() const
 	{
-		ANKI_ASSERT(m_handle);
-		return m_handle.m_offset;
+		ANKI_ASSERT(m_offsetInBuffer != MAX_PTR_SIZE);
+		return m_offsetInBuffer;
 	}
 
 	PtrSize getRange() const
 	{
-		ANKI_ASSERT(m_handle);
 		ANKI_ASSERT(m_range != 0);
 		return m_range;
 	}
 
 private:
-	StackGpuAllocatorHandle m_handle;
+	BufferPtr m_buffer;
+	void* m_mappedMemory = nullptr;
+	PtrSize m_offsetInBuffer = MAX_PTR_SIZE;
 	PtrSize m_range = 0;
-	U8 m_frame = MAX_U8;
+	U8 m_pool = MAX_U8;
 
 	Bool valid() const
 	{
-		return m_range != 0 && m_frame < MAX_U8;
+		return m_range != 0 && m_pool < MAX_U8;
 	}
 
 	void invalidate()
 	{
-		m_handle.m_memory = nullptr;
-		m_handle.m_offset = 0;
+		m_buffer.reset(nullptr);
+		m_mappedMemory = nullptr;
+		m_offsetInBuffer = MAX_PTR_SIZE;
 		m_range = 0;
-		m_frame = MAX_U8;
+		m_pool = MAX_U8;
 	}
 };
 
@@ -91,8 +103,8 @@ public:
 	/// of the buffer to image copies.
 	static constexpr U32 GPU_BUFFER_ALIGNMENT = 16 * 3;
 
-	static const U32 FRAME_COUNT = 3;
-	static const PtrSize CHUNK_INITIAL_SIZE = 64_MB;
+	static constexpr U32 POOL_COUNT = 3;
+	static constexpr PtrSize CHUNK_INITIAL_SIZE = 64_MB;
 	static constexpr Second MAX_FENCE_WAIT_TIME = 500.0_ms;
 
 	TransferGpuAllocator();
@@ -109,28 +121,104 @@ public:
 	void release(TransferGpuAllocatorHandle& handle, FencePtr fence);
 
 private:
-	class Interface;
-	class Memory;
+	/// This is the chunk the StackAllocatorBuilder will be allocating.
+	class Chunk
+	{
+	public:
+		/// Required by StackAllocatorBuilder.
+		Chunk* m_nextChunk;
 
-	ResourceAllocator<U8> m_alloc;
-	GrManager* m_gr = nullptr;
-	PtrSize m_maxAllocSize = 0;
+		/// Required by StackAllocatorBuilder.
+		Atomic<PtrSize> m_offsetInChunk;
 
-	UniquePtr<Interface> m_interface;
+		/// Required by StackAllocatorBuilder.
+		PtrSize m_chunkSize;
 
-	class Frame
+		/// The GPU memory.
+		BufferPtr m_buffer;
+
+		/// Points to the mapped m_buffer.
+		void* m_mappedBuffer;
+	};
+
+	/// Implements the StackAllocatorBuilder TInterface
+	class StackAllocatorBuilderInterface
 	{
 	public:
-		StackGpuAllocator m_stackAlloc;
+		TransferGpuAllocator* m_parent = nullptr;
+
+		// The rest of the functions implement the StackAllocatorBuilder TInterface.
+
+		constexpr PtrSize getMaxAlignment()
+		{
+			return GPU_BUFFER_ALIGNMENT;
+		}
+
+		constexpr PtrSize getInitialChunkSize() const
+		{
+			return CHUNK_INITIAL_SIZE;
+		}
+
+		constexpr F64 getNextChunkGrowScale() const
+		{
+			return 1.5;
+		}
+
+		constexpr PtrSize getNextChunkGrowBias() const
+		{
+			return 0;
+		}
+
+		constexpr Bool ignoreDeallocationErrors() const
+		{
+			return false;
+		}
+
+		Error allocateChunk(PtrSize size, Chunk*& out);
+
+		void freeChunk(Chunk* chunk);
+
+		void recycleChunk(Chunk& chunk)
+		{
+			// Do nothing
+		}
+
+		constexpr Atomic<U32>* getAllocationCount()
+		{
+			return nullptr;
+		}
+	};
+
+	/// StackAllocatorBuilder doesn't need a lock. There is another lock.
+	class DummyMutex
+	{
+	public:
+		void lock()
+		{
+		}
+
+		void unlock()
+		{
+		}
+	};
+
+	class Pool
+	{
+	public:
+		StackAllocatorBuilder<Chunk, StackAllocatorBuilderInterface, DummyMutex> m_stackAlloc;
 		List<FencePtr> m_fences;
 		U32 m_pendingReleases = 0;
 	};
 
+	ResourceAllocator<U8> m_alloc;
+	GrManager* m_gr = nullptr;
+	PtrSize m_maxAllocSize = 0;
+
 	Mutex m_mtx; ///< Protect all members below.
 	ConditionVariable m_condVar;
-	Array<Frame, FRAME_COUNT> m_frames;
-	U8 m_frameCount = 0;
-	PtrSize m_crntFrameAllocatedSize = 0;
+	Array<Pool, POOL_COUNT> m_pools;
+	U8 m_crntPool = 0;
+	PtrSize m_crntPoolAllocatedSize = 0;
 };
 /// @}
 

+ 0 - 145
Tests/Gr/StackGpuAllocator.cpp

@@ -1,145 +0,0 @@
-// Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
-// All rights reserved.
-// Code licensed under the BSD License.
-// http://www.anki3d.org/LICENSE
-
-#include <AnKi/Gr/Utils/StackGpuAllocator.h>
-#include <AnKi/Util/ThreadHive.h>
-#include <Tests/Framework/Framework.h>
-#include <algorithm>
-
-using namespace anki;
-
-namespace {
-
-const U ALLOCATION_COUNT = 1024;
-const U THREAD_COUNT = 4;
-const U32 MIN_ALLOCATION_SIZE = 256;
-const U32 MAX_ALLOCATION_SIZE = 1024 * 10;
-const U32 ALIGNMENT = 256;
-
-class Mem : public StackGpuAllocatorMemory
-{
-public:
-	void* m_mem = nullptr;
-	PtrSize m_size = 0;
-};
-
-class Interface final : public StackGpuAllocatorInterface
-{
-public:
-	ANKI_USE_RESULT Error allocate(PtrSize size, StackGpuAllocatorMemory*& mem)
-	{
-		Mem* m = new Mem();
-
-		m->m_mem = mallocAligned(size, ALIGNMENT);
-		m->m_size = size;
-		mem = m;
-
-		return Error::NONE;
-	}
-
-	void free(StackGpuAllocatorMemory* mem)
-	{
-		Mem* m = static_cast<Mem*>(mem);
-		freeAligned(m->m_mem);
-		delete m;
-	}
-
-	void getChunkGrowInfo(F32& scale, PtrSize& bias, PtrSize& initialSize)
-	{
-		scale = 2.0;
-		bias = 0;
-		initialSize = ALIGNMENT * 1024;
-	}
-
-	U32 getMaxAlignment()
-	{
-		return ALIGNMENT;
-	}
-};
-
-class Allocation
-{
-public:
-	StackGpuAllocatorHandle m_handle;
-	PtrSize m_size;
-};
-
-class TestContext
-{
-public:
-	StackGpuAllocator* m_salloc;
-	Array<Allocation, ALLOCATION_COUNT> m_allocs;
-	Atomic<U32> m_allocCount;
-};
-
-static void doAllocation(void* arg, U32 threadId, ThreadHive& hive, ThreadHiveSemaphore* sem)
-{
-	TestContext* ctx = static_cast<TestContext*>(arg);
-
-	U allocCount = ctx->m_allocCount.fetchAdd(1);
-	PtrSize allocSize = getRandomRange(MIN_ALLOCATION_SIZE, MAX_ALLOCATION_SIZE);
-	ctx->m_allocs[allocCount].m_size = allocSize;
-	ANKI_TEST_EXPECT_NO_ERR(ctx->m_salloc->allocate(allocSize, ctx->m_allocs[allocCount].m_handle));
-}
-
-} // end anonymous namespace
-
-ANKI_TEST(Gr, StackGpuAllocator)
-{
-	HeapAllocator<U8> alloc(allocAligned, nullptr);
-
-	Interface iface;
-	StackGpuAllocator salloc;
-	salloc.init(alloc, &iface);
-
-	ThreadHive hive(THREAD_COUNT, alloc);
-
-	for(U i = 0; i < 1024; ++i)
-	{
-		TestContext ctx;
-		memset(&ctx, 0, sizeof(ctx));
-		ctx.m_salloc = &salloc;
-
-		ThreadHiveTask task;
-		task.m_callback = doAllocation;
-		task.m_argument = &ctx;
-
-		for(U i = 0; i < ALLOCATION_COUNT; ++i)
-		{
-			hive.submitTasks(&task, 1);
-		}
-
-		hive.waitAllTasks();
-
-		// Make sure memory doesn't overlap
-		std::sort(ctx.m_allocs.getBegin(), ctx.m_allocs.getEnd(), [](const Allocation& a, const Allocation& b) {
-			if(a.m_handle.m_memory != b.m_handle.m_memory)
-			{
-				return a.m_handle.m_memory < b.m_handle.m_memory;
-			}
-
-			if(a.m_handle.m_offset != b.m_handle.m_offset)
-			{
-				return a.m_handle.m_offset <= b.m_handle.m_offset;
-			}
-
-			ANKI_TEST_EXPECT_EQ(1, 0);
-			return true;
-		});
-
-		for(U i = 1; i < ALLOCATION_COUNT; ++i)
-		{
-			const Allocation& a = ctx.m_allocs[i - 1];
-			const Allocation& b = ctx.m_allocs[i];
-
-			if(a.m_handle.m_memory == b.m_handle.m_memory)
-			{
-				ANKI_TEST_EXPECT_LEQ(a.m_handle.m_offset + a.m_size, b.m_handle.m_offset);
-			}
-		}
-
-		salloc.reset();
-	}
-}