Browse Source

Refactor GPU memory pools: rename SegregatedListsGpuAllocator(Token) to SegregatedListsGpuMemoryPool(Token) and replace FrameGpuAllocator with a new StackGpuMemoryPool built on StackAllocatorBuilder

Panagiotis Christopoulos Charitos 3 years ago
parent
commit
cf6852b032

+ 8 - 8
AnKi/Core/GpuMemoryPools.h

@@ -7,8 +7,8 @@
 
 
 #include <AnKi/Core/Common.h>
 #include <AnKi/Core/Common.h>
 #include <AnKi/Gr/Buffer.h>
 #include <AnKi/Gr/Buffer.h>
-#include <AnKi/Gr/Utils/FrameGpuAllocator.h>
-#include <AnKi/Gr/Utils/SegregatedListsGpuAllocator.h>
+#include <AnKi/Gr/Utils/StackGpuMemoryPool.h>
+#include <AnKi/Gr/Utils/SegregatedListsGpuMemoryPool.h>
 
 
 namespace anki {
 namespace anki {
 
 
@@ -30,12 +30,12 @@ public:
 
 
 	void init(HeapMemoryPool* pool, GrManager* gr, const ConfigSet& cfg);
 	void init(HeapMemoryPool* pool, GrManager* gr, const ConfigSet& cfg);
 
 
-	void allocate(PtrSize size, U32 alignment, SegregatedListsGpuAllocatorToken& token)
+	void allocate(PtrSize size, U32 alignment, SegregatedListsGpuMemoryPoolToken& token)
 	{
 	{
 		m_alloc.allocate(size, alignment, token);
 		m_alloc.allocate(size, alignment, token);
 	}
 	}
 
 
-	void free(SegregatedListsGpuAllocatorToken& token)
+	void free(SegregatedListsGpuMemoryPoolToken& token)
 	{
 	{
 		m_alloc.free(token);
 		m_alloc.free(token);
 	}
 	}
@@ -56,7 +56,7 @@ public:
 	}
 	}
 
 
 private:
 private:
-	SegregatedListsGpuAllocator m_alloc;
+	SegregatedListsGpuMemoryPool m_alloc;
 };
 };
 
 
 /// Memory pool for the GPU scene.
 /// Memory pool for the GPU scene.
@@ -71,12 +71,12 @@ public:
 
 
 	void init(HeapMemoryPool* pool, GrManager* gr, const ConfigSet& cfg);
 	void init(HeapMemoryPool* pool, GrManager* gr, const ConfigSet& cfg);
 
 
-	void allocate(PtrSize size, U32 alignment, SegregatedListsGpuAllocatorToken& token)
+	void allocate(PtrSize size, U32 alignment, SegregatedListsGpuMemoryPoolToken& token)
 	{
 	{
 		m_alloc.allocate(size, alignment, token);
 		m_alloc.allocate(size, alignment, token);
 	}
 	}
 
 
-	void free(SegregatedListsGpuAllocatorToken& token)
+	void free(SegregatedListsGpuMemoryPoolToken& token)
 	{
 	{
 		m_alloc.free(token);
 		m_alloc.free(token);
 	}
 	}
@@ -97,7 +97,7 @@ public:
 	}
 	}
 
 
 private:
 private:
-	SegregatedListsGpuAllocator m_alloc;
+	SegregatedListsGpuMemoryPool m_alloc;
 };
 };
 
 
 /// Token that gets returned when requesting for memory to write to a resource.
 /// Token that gets returned when requesting for memory to write to a resource.

+ 1 - 1
AnKi/Gr.h

@@ -21,7 +21,7 @@
 #include <AnKi/Gr/RenderGraph.h>
 #include <AnKi/Gr/RenderGraph.h>
 #include <AnKi/Gr/GrUpscaler.h>
 #include <AnKi/Gr/GrUpscaler.h>
 
 
-#include <AnKi/Gr/Utils/FrameGpuAllocator.h>
+#include <AnKi/Gr/Utils/StackGpuMemoryPool.h>
 #include <AnKi/Gr/Utils/Functions.h>
 #include <AnKi/Gr/Utils/Functions.h>
 
 
 /// @defgroup graphics Graphics API abstraction
 /// @defgroup graphics Graphics API abstraction

+ 4 - 4
AnKi/Gr/CMakeLists.txt

@@ -3,9 +3,9 @@ set(common_sources
 	GrObject.cpp
 	GrObject.cpp
 	RenderGraph.cpp
 	RenderGraph.cpp
 	ShaderProgram.cpp
 	ShaderProgram.cpp
-	Utils/FrameGpuAllocator.cpp
+	Utils/StackGpuMemoryPool.cpp
 	Utils/Functions.cpp
 	Utils/Functions.cpp
-	Utils/SegregatedListsGpuAllocator.cpp)
+	Utils/SegregatedListsGpuMemoryPool.cpp)
 
 
 set(common_headers
 set(common_headers
 	AccelerationStructure.h
 	AccelerationStructure.h
@@ -29,10 +29,10 @@ set(common_headers
 	TextureView.h
 	TextureView.h
 	TimestampQuery.h
 	TimestampQuery.h
 	GrUpscaler.h
 	GrUpscaler.h
-	Utils/FrameGpuAllocator.h
+	Utils/StackGpuMemoryPool.h
 	Utils/Functions.h
 	Utils/Functions.h
 	Utils/InstantiationMacros.h
 	Utils/InstantiationMacros.h
-	Utils/SegregatedListsGpuAllocator.h)
+	Utils/SegregatedListsGpuMemoryPool.h)
 
 
 if(VULKAN)
 if(VULKAN)
 	set(backend_sources
 	set(backend_sources

+ 0 - 92
AnKi/Gr/Utils/FrameGpuAllocator.cpp

@@ -1,92 +0,0 @@
-// Copyright (C) 2009-2022, Panagiotis Christopoulos Charitos and contributors.
-// All rights reserved.
-// Code licensed under the BSD License.
-// http://www.anki3d.org/LICENSE
-
-#include <AnKi/Gr/Utils/FrameGpuAllocator.h>
-
-namespace anki {
-
-void FrameGpuAllocator::init(PtrSize size, U32 alignment, PtrSize maxAllocationSize)
-{
-	ANKI_ASSERT(!isCreated());
-	ANKI_ASSERT(size > 0 && alignment > 0 && maxAllocationSize > 0);
-
-	PtrSize perFrameSize = size / kMaxFramesInFlight;
-	alignRoundDown(alignment, perFrameSize);
-	m_size = perFrameSize * kMaxFramesInFlight;
-
-	m_alignment = alignment;
-	m_maxAllocationSize = maxAllocationSize;
-}
-
-PtrSize FrameGpuAllocator::endFrame()
-{
-	ANKI_ASSERT(isCreated());
-
-	PtrSize perFrameSize = m_size / kMaxFramesInFlight;
-
-	PtrSize crntFrameStartOffset = perFrameSize * (m_frame % kMaxFramesInFlight);
-
-	PtrSize nextFrameStartOffset = perFrameSize * ((m_frame + 1) % kMaxFramesInFlight);
-
-	PtrSize crntOffset = m_offset.exchange(nextFrameStartOffset);
-	ANKI_ASSERT(crntOffset >= crntFrameStartOffset);
-
-	PtrSize bytesUsed = crntOffset - crntFrameStartOffset;
-	PtrSize bytesNotUsed = (bytesUsed > perFrameSize) ? 0 : perFrameSize - bytesUsed;
-
-	++m_frame;
-	return bytesNotUsed;
-}
-
-Error FrameGpuAllocator::allocate(PtrSize originalSize, PtrSize& outOffset)
-{
-	ANKI_ASSERT(isCreated());
-	ANKI_ASSERT(originalSize > 0);
-	Error err = Error::kNone;
-
-	// Align size
-	PtrSize size = getAlignedRoundUp(m_alignment, originalSize);
-	ANKI_ASSERT(size <= m_maxAllocationSize && "Too high!");
-
-	const PtrSize offset = m_offset.fetchAdd(size);
-	const PtrSize perFrameSize = m_size / kMaxFramesInFlight;
-	const PtrSize crntFrameStartOffset = perFrameSize * (m_frame % kMaxFramesInFlight);
-
-	if(offset - crntFrameStartOffset + size <= perFrameSize)
-	{
-		ANKI_ASSERT(isAligned(m_alignment, offset));
-		ANKI_ASSERT((offset + size) <= m_size);
-
-#if ANKI_ENABLE_TRACE
-		m_lastAllocatedSize.store(size);
-#endif
-
-		// Encode token
-		outOffset = offset;
-
-		ANKI_ASSERT(outOffset + originalSize <= m_size);
-	}
-	else
-	{
-		outOffset = kMaxPtrSize;
-		err = Error::kOutOfMemory;
-	}
-
-	return err;
-}
-
-#if ANKI_ENABLE_TRACE
-PtrSize FrameGpuAllocator::getUnallocatedMemorySize() const
-{
-	PtrSize perFrameSize = m_size / kMaxFramesInFlight;
-	PtrSize crntFrameStartOffset = perFrameSize * (m_frame % kMaxFramesInFlight);
-	PtrSize usedSize = m_offset.getNonAtomically() - crntFrameStartOffset + m_lastAllocatedSize.getNonAtomically();
-
-	PtrSize remaining = (perFrameSize >= usedSize) ? (perFrameSize - usedSize) : 0;
-	return remaining;
-}
-#endif
-
-} // end namespace anki

+ 0 - 67
AnKi/Gr/Utils/FrameGpuAllocator.h

@@ -1,67 +0,0 @@
-// Copyright (C) 2009-2022, Panagiotis Christopoulos Charitos and contributors.
-// All rights reserved.
-// Code licensed under the BSD License.
-// http://www.anki3d.org/LICENSE
-
-#pragma once
-
-#include <AnKi/Gr/Common.h>
-
-namespace anki {
-
-/// @addtogroup graphics
-/// @{
-
-/// Manages pre-allocated GPU memory for per frame usage.
-class FrameGpuAllocator
-{
-public:
-	FrameGpuAllocator()
-	{
-	}
-
-	FrameGpuAllocator(const FrameGpuAllocator&) = delete; // Non-copyable
-
-	~FrameGpuAllocator()
-	{
-	}
-
-	FrameGpuAllocator& operator=(const FrameGpuAllocator&) = delete; // Non-copyable
-
-	/// Initialize with pre-allocated always mapped memory.
-	/// @param size The size of the GPU buffer.
-	/// @param alignment The working alignment.
-	/// @param maxAllocationSize The size in @a allocate cannot exceed maxAllocationSize.
-	void init(PtrSize size, U32 alignment, PtrSize maxAllocationSize = kMaxPtrSize);
-
-	/// Allocate memory for a dynamic buffer.
-	Error allocate(PtrSize size, PtrSize& outOffset);
-
-	/// Call this at the end of the frame.
-	/// @return The bytes that were not used. Used for statistics.
-	PtrSize endFrame();
-
-#if ANKI_ENABLE_TRACE
-	/// Call this before endFrame.
-	PtrSize getUnallocatedMemorySize() const;
-#endif
-
-private:
-	PtrSize m_size = 0; ///< The full size of the buffer.
-	U32 m_alignment = 0; ///< Always work in that alignment.
-	PtrSize m_maxAllocationSize = 0; ///< For debugging.
-
-	Atomic<PtrSize> m_offset = {0};
-#if ANKI_ENABLE_TRACE
-	Atomic<PtrSize> m_lastAllocatedSize = {0}; ///< For tracing.
-#endif
-	U64 m_frame = 0;
-
-	Bool isCreated() const
-	{
-		return m_size > 0;
-	}
-};
-/// @}
-
-} // end namespace anki

+ 20 - 20
AnKi/Gr/Utils/SegregatedListsGpuAllocator.cpp → AnKi/Gr/Utils/SegregatedListsGpuMemoryPool.cpp

@@ -3,22 +3,22 @@
 // Code licensed under the BSD License.
 // Code licensed under the BSD License.
 // http://www.anki3d.org/LICENSE
 // http://www.anki3d.org/LICENSE
 
 
-#include <AnKi/Gr/Utils/SegregatedListsGpuAllocator.h>
+#include <AnKi/Gr/Utils/SegregatedListsGpuMemoryPool.h>
 #include <AnKi/Gr/GrManager.h>
 #include <AnKi/Gr/GrManager.h>
 #include <AnKi/Gr/CommandBuffer.h>
 #include <AnKi/Gr/CommandBuffer.h>
 
 
 namespace anki {
 namespace anki {
 
 
-class SegregatedListsGpuAllocator::Chunk : public SegregatedListsAllocatorBuilderChunkBase
+class SegregatedListsGpuMemoryPool::Chunk : public SegregatedListsAllocatorBuilderChunkBase
 {
 {
 public:
 public:
 	PtrSize m_offsetInGpuBuffer;
 	PtrSize m_offsetInGpuBuffer;
 };
 };
 
 
-class SegregatedListsGpuAllocator::BuilderInterface
+class SegregatedListsGpuMemoryPool::BuilderInterface
 {
 {
 public:
 public:
-	SegregatedListsGpuAllocator* m_parent = nullptr;
+	SegregatedListsGpuMemoryPool* m_parent = nullptr;
 
 
 	/// @name Interface methods
 	/// @name Interface methods
 	/// @{
 	/// @{
@@ -54,9 +54,9 @@ public:
 	/// @}
 	/// @}
 };
 };
 
 
-void SegregatedListsGpuAllocator::init(GrManager* gr, BaseMemoryPool* pool, BufferUsageBit gpuBufferUsage,
-									   ConstWeakArray<PtrSize> classUpperSizes, PtrSize initialGpuBufferSize,
-									   CString bufferName, Bool allowCoWs)
+void SegregatedListsGpuMemoryPool::init(GrManager* gr, BaseMemoryPool* pool, BufferUsageBit gpuBufferUsage,
+										ConstWeakArray<PtrSize> classUpperSizes, PtrSize initialGpuBufferSize,
+										CString bufferName, Bool allowCoWs)
 {
 {
 	ANKI_ASSERT(!isInitialized());
 	ANKI_ASSERT(!isInitialized());
 	ANKI_ASSERT(gr && pool);
 	ANKI_ASSERT(gr && pool);
@@ -86,7 +86,7 @@ void SegregatedListsGpuAllocator::init(GrManager* gr, BaseMemoryPool* pool, Buff
 	m_allowCoWs = allowCoWs;
 	m_allowCoWs = allowCoWs;
 }
 }
 
 
-void SegregatedListsGpuAllocator::destroy()
+void SegregatedListsGpuMemoryPool::destroy()
 {
 {
 	if(!isInitialized())
 	if(!isInitialized())
 	{
 	{
@@ -96,9 +96,9 @@ void SegregatedListsGpuAllocator::destroy()
 	m_gr->finish();
 	m_gr->finish();
 	m_gr = nullptr;
 	m_gr = nullptr;
 
 
-	for(DynamicArray<SegregatedListsGpuAllocatorToken>& arr : m_garbage)
+	for(DynamicArray<SegregatedListsGpuMemoryPoolToken>& arr : m_garbage)
 	{
 	{
-		for(const SegregatedListsGpuAllocatorToken& token : arr)
+		for(const SegregatedListsGpuMemoryPoolToken& token : arr)
 		{
 		{
 			m_builder->free(static_cast<Chunk*>(token.m_chunk), token.m_chunkOffset, token.m_size);
 			m_builder->free(static_cast<Chunk*>(token.m_chunk), token.m_chunkOffset, token.m_size);
 		}
 		}
@@ -118,7 +118,7 @@ void SegregatedListsGpuAllocator::destroy()
 	m_deletedChunks.destroy(*m_pool);
 	m_deletedChunks.destroy(*m_pool);
 }
 }
 
 
-Error SegregatedListsGpuAllocator::allocateChunk(Chunk*& newChunk, PtrSize& chunkSize)
+Error SegregatedListsGpuMemoryPool::allocateChunk(Chunk*& newChunk, PtrSize& chunkSize)
 {
 {
 	ANKI_ASSERT(isInitialized());
 	ANKI_ASSERT(isInitialized());
 
 
@@ -154,7 +154,7 @@ Error SegregatedListsGpuAllocator::allocateChunk(Chunk*& newChunk, PtrSize& chun
 		BufferPtr newBuffer = m_gr->newBuffer(buffInit);
 		BufferPtr newBuffer = m_gr->newBuffer(buffInit);
 
 
 		// Do the copy
 		// Do the copy
-		CommandBufferInitInfo cmdbInit("SegregatedListsGpuAllocator CoW");
+		CommandBufferInitInfo cmdbInit("SegregatedListsGpuMemoryPool CoW");
 		cmdbInit.m_flags = CommandBufferFlag::kSmallBatch;
 		cmdbInit.m_flags = CommandBufferFlag::kSmallBatch;
 		CommandBufferPtr cmdb = m_gr->newCommandBuffer(cmdbInit);
 		CommandBufferPtr cmdb = m_gr->newCommandBuffer(cmdbInit);
 
 
@@ -193,16 +193,16 @@ Error SegregatedListsGpuAllocator::allocateChunk(Chunk*& newChunk, PtrSize& chun
 	return Error::kNone;
 	return Error::kNone;
 }
 }
 
 
-void SegregatedListsGpuAllocator::deleteChunk(Chunk* chunk)
+void SegregatedListsGpuMemoryPool::deleteChunk(Chunk* chunk)
 {
 {
 	m_deletedChunks.emplaceBack(*m_pool, chunk);
 	m_deletedChunks.emplaceBack(*m_pool, chunk);
 }
 }
 
 
-void SegregatedListsGpuAllocator::allocate(PtrSize size, U32 alignment, SegregatedListsGpuAllocatorToken& token)
+void SegregatedListsGpuMemoryPool::allocate(PtrSize size, U32 alignment, SegregatedListsGpuMemoryPoolToken& token)
 {
 {
 	ANKI_ASSERT(isInitialized());
 	ANKI_ASSERT(isInitialized());
 	ANKI_ASSERT(size > 0 && alignment > 0 && isPowerOfTwo(alignment));
 	ANKI_ASSERT(size > 0 && alignment > 0 && isPowerOfTwo(alignment));
-	ANKI_ASSERT(token == SegregatedListsGpuAllocatorToken());
+	ANKI_ASSERT(token == SegregatedListsGpuMemoryPoolToken());
 
 
 	LockGuard lock(m_lock);
 	LockGuard lock(m_lock);
 
 
@@ -222,7 +222,7 @@ void SegregatedListsGpuAllocator::allocate(PtrSize size, U32 alignment, Segregat
 	m_allocatedSize += size;
 	m_allocatedSize += size;
 }
 }
 
 
-void SegregatedListsGpuAllocator::free(SegregatedListsGpuAllocatorToken& token)
+void SegregatedListsGpuMemoryPool::free(SegregatedListsGpuMemoryPoolToken& token)
 {
 {
 	ANKI_ASSERT(isInitialized());
 	ANKI_ASSERT(isInitialized());
 
 
@@ -239,7 +239,7 @@ void SegregatedListsGpuAllocator::free(SegregatedListsGpuAllocatorToken& token)
 	token = {};
 	token = {};
 }
 }
 
 
-void SegregatedListsGpuAllocator::endFrame()
+void SegregatedListsGpuMemoryPool::endFrame()
 {
 {
 	ANKI_ASSERT(isInitialized());
 	ANKI_ASSERT(isInitialized());
 
 
@@ -248,7 +248,7 @@ void SegregatedListsGpuAllocator::endFrame()
 	m_frame = (m_frame + 1) % kMaxFramesInFlight;
 	m_frame = (m_frame + 1) % kMaxFramesInFlight;
 
 
 	// Throw out the garbage
 	// Throw out the garbage
-	for(SegregatedListsGpuAllocatorToken& token : m_garbage[m_frame])
+	for(SegregatedListsGpuMemoryPoolToken& token : m_garbage[m_frame])
 	{
 	{
 		m_builder->free(static_cast<Chunk*>(token.m_chunk), token.m_chunkOffset, token.m_size);
 		m_builder->free(static_cast<Chunk*>(token.m_chunk), token.m_chunkOffset, token.m_size);
 
 
@@ -259,8 +259,8 @@ void SegregatedListsGpuAllocator::endFrame()
 	m_garbage[m_frame].destroy(*m_pool);
 	m_garbage[m_frame].destroy(*m_pool);
 }
 }
 
 
-void SegregatedListsGpuAllocator::getStats(F32& externalFragmentation, PtrSize& userAllocatedSize,
-										   PtrSize& totalSize) const
+void SegregatedListsGpuMemoryPool::getStats(F32& externalFragmentation, PtrSize& userAllocatedSize,
+											PtrSize& totalSize) const
 {
 {
 	ANKI_ASSERT(isInitialized());
 	ANKI_ASSERT(isInitialized());
 
 

+ 14 - 14
AnKi/Gr/Utils/SegregatedListsGpuAllocator.h → AnKi/Gr/Utils/SegregatedListsGpuMemoryPool.h

@@ -11,17 +11,17 @@ namespace anki {
 /// @addtogroup graphics
 /// @addtogroup graphics
 /// @{
 /// @{
 
 
-/// The result of an allocation of SegregatedListsGpuAllocator.
-/// @memberof SegregatedListsGpuAllocator
-class SegregatedListsGpuAllocatorToken
+/// The result of an allocation of SegregatedListsGpuMemoryPool.
+/// @memberof SegregatedListsGpuMemoryPool
+class SegregatedListsGpuMemoryPoolToken
 {
 {
-	friend class SegregatedListsGpuAllocator;
+	friend class SegregatedListsGpuMemoryPool;
 
 
 public:
 public:
-	/// The offset in the SegregatedListsGpuAllocatorToken::getBuffer() buffer.
+	/// The offset in the SegregatedListsGpuMemoryPoolToken::getBuffer() buffer.
 	PtrSize m_offset = kMaxPtrSize;
 	PtrSize m_offset = kMaxPtrSize;
 
 
-	Bool operator==(const SegregatedListsGpuAllocatorToken& b) const
+	Bool operator==(const SegregatedListsGpuMemoryPoolToken& b) const
 	{
 	{
 		return m_offset == b.m_offset && m_chunk == b.m_chunk && m_chunkOffset == b.m_chunkOffset && m_size == b.m_size;
 		return m_offset == b.m_offset && m_chunk == b.m_chunk && m_chunkOffset == b.m_chunkOffset && m_size == b.m_size;
 	}
 	}
@@ -34,19 +34,19 @@ private:
 
 
 /// GPU memory allocator based on segregated lists. It allocates a GPU buffer with some initial size. If there is a need
 /// GPU memory allocator based on segregated lists. It allocates a GPU buffer with some initial size. If there is a need
 /// to grow it allocates a bigger buffer and copies contents of the old one to the new (CoW).
 /// to grow it allocates a bigger buffer and copies contents of the old one to the new (CoW).
-class SegregatedListsGpuAllocator
+class SegregatedListsGpuMemoryPool
 {
 {
 public:
 public:
-	SegregatedListsGpuAllocator() = default;
+	SegregatedListsGpuMemoryPool() = default;
 
 
-	~SegregatedListsGpuAllocator()
+	~SegregatedListsGpuMemoryPool()
 	{
 	{
 		destroy();
 		destroy();
 	}
 	}
 
 
-	SegregatedListsGpuAllocator(const SegregatedListsGpuAllocator&) = delete;
+	SegregatedListsGpuMemoryPool(const SegregatedListsGpuMemoryPool&) = delete;
 
 
-	SegregatedListsGpuAllocator& operator=(const SegregatedListsGpuAllocator&) = delete;
+	SegregatedListsGpuMemoryPool& operator=(const SegregatedListsGpuMemoryPool&) = delete;
 
 
 	void init(GrManager* gr, BaseMemoryPool* pool, BufferUsageBit gpuBufferUsage,
 	void init(GrManager* gr, BaseMemoryPool* pool, BufferUsageBit gpuBufferUsage,
 			  ConstWeakArray<PtrSize> classUpperSizes, PtrSize initialGpuBufferSize, CString bufferName,
 			  ConstWeakArray<PtrSize> classUpperSizes, PtrSize initialGpuBufferSize, CString bufferName,
@@ -56,11 +56,11 @@ public:
 
 
 	/// Allocate memory.
 	/// Allocate memory.
 	/// @note It's thread-safe.
 	/// @note It's thread-safe.
-	void allocate(PtrSize size, U32 alignment, SegregatedListsGpuAllocatorToken& token);
+	void allocate(PtrSize size, U32 alignment, SegregatedListsGpuMemoryPoolToken& token);
 
 
 	/// Free memory.
 	/// Free memory.
 	/// @note It's thread-safe.
 	/// @note It's thread-safe.
-	void free(SegregatedListsGpuAllocatorToken& token);
+	void free(SegregatedListsGpuMemoryPoolToken& token);
 
 
 	/// @note It's thread-safe.
 	/// @note It's thread-safe.
 	void endFrame();
 	void endFrame();
@@ -96,7 +96,7 @@ private:
 
 
 	DynamicArray<Chunk*> m_deletedChunks;
 	DynamicArray<Chunk*> m_deletedChunks;
 
 
-	Array<DynamicArray<SegregatedListsGpuAllocatorToken>, kMaxFramesInFlight> m_garbage;
+	Array<DynamicArray<SegregatedListsGpuMemoryPoolToken>, kMaxFramesInFlight> m_garbage;
 	U8 m_frame = 0;
 	U8 m_frame = 0;
 	Bool m_allowCoWs = true;
 	Bool m_allowCoWs = true;
 
 

+ 173 - 0
AnKi/Gr/Utils/StackGpuMemoryPool.cpp

@@ -0,0 +1,173 @@
+// Copyright (C) 2009-2022, Panagiotis Christopoulos Charitos and contributors.
+// All rights reserved.
+// Code licensed under the BSD License.
+// http://www.anki3d.org/LICENSE
+
+#include <AnKi/Gr/Utils/StackGpuMemoryPool.h>
+#include <AnKi/Gr/Buffer.h>
+#include <AnKi/Gr/GrManager.h>
+
+namespace anki {
+
+class StackGpuMemoryPool::Chunk
+{
+public:
+	// Builder interface stuff:
+	Chunk* m_nextChunk = nullptr;
+	Atomic<PtrSize> m_offsetInChunk = {0};
+	PtrSize m_chunkSize = 0;
+
+	// Other stuff:
+	BufferPtr m_buffer;
+	U8* m_mappedMemory = nullptr;
+};
+
+class StackGpuMemoryPool::BuilderInterface
+{
+public:
+	GrManager* m_gr = nullptr;
+	BaseMemoryPool* m_cpuPool = nullptr;
+	PtrSize m_initialSize = 0;
+	F64 m_scale = 0.0;
+	PtrSize m_bias = 0;
+	String m_bufferName;
+	U32 m_alignment = 0;
+	BufferUsageBit m_bufferUsage = BufferUsageBit::kNone;
+	BufferMapAccessBit m_bufferMap = BufferMapAccessBit::kNone;
+	U8 m_chunkCount = 0;
+	Bool m_allowToGrow = false;
+
+	~BuilderInterface()
+	{
+		m_bufferName.destroy(*m_cpuPool);
+	}
+
+	// Builder interface stuff:
+	U32 getMaxAlignment() const
+	{
+		return m_alignment;
+	}
+
+	PtrSize getInitialChunkSize() const
+	{
+		return m_initialSize;
+	}
+
+	F64 getNextChunkGrowScale()
+	{
+		return m_scale;
+	}
+
+	PtrSize getNextChunkGrowBias() const
+	{
+		return m_bias;
+	}
+
+	static constexpr Bool ignoreDeallocationErrors()
+	{
+		return true;
+	}
+
+	Error allocateChunk(PtrSize size, Chunk*& out)
+	{
+		if(!m_allowToGrow && m_chunkCount > 0)
+		{
+			ANKI_GR_LOGE("Memory pool is not allowed to grow");
+			return Error::kOutOfMemory;
+		}
+
+		Chunk* chunk = newInstance<Chunk>(*m_cpuPool);
+
+		BufferInitInfo buffInit(m_bufferName);
+		buffInit.m_size = size;
+		buffInit.m_usage = m_bufferUsage;
+		buffInit.m_mapAccess = m_bufferMap;
+		chunk->m_buffer = m_gr->newBuffer(buffInit);
+
+		if(!!m_bufferMap)
+		{
+			chunk->m_mappedMemory = static_cast<U8*>(chunk->m_buffer->map(0, size, m_bufferMap));
+		}
+
+		out = chunk;
+		++m_chunkCount;
+
+		return Error::kNone;
+	}
+
+	void freeChunk(Chunk* chunk)
+	{
+		if(chunk->m_mappedMemory)
+		{
+			chunk->m_buffer->unmap();
+		}
+
+		deleteInstance(*m_cpuPool, chunk);
+	}
+
+	void recycleChunk([[maybe_unused]] Chunk& out)
+	{
+		// Do nothing
+	}
+
+	Atomic<U32>* getAllocationCount()
+	{
+		return nullptr;
+	};
+};
+
+StackGpuMemoryPool::~StackGpuMemoryPool()
+{
+	if(m_builder)
+	{
+		BaseMemoryPool* cpuPool = m_builder->getInterface().m_cpuPool;
+		deleteInstance(*cpuPool, m_builder);
+	}
+}
+
+void StackGpuMemoryPool::init(GrManager* gr, BaseMemoryPool* cpuPool, PtrSize initialSize, F64 nextChunkGrowScale,
+							  PtrSize nextChunkGrowBias, U32 alignment, BufferUsageBit bufferUsage,
+							  BufferMapAccessBit bufferMapping, Bool allowToGrow, CString bufferName)
+{
+	ANKI_ASSERT(m_builder == nullptr);
+	ANKI_ASSERT(initialSize > 0 && alignment > 0);
+	ANKI_ASSERT(nextChunkGrowScale >= 1.0 && nextChunkGrowBias > 0);
+
+	m_builder = newInstance<Builder>(*cpuPool);
+	BuilderInterface& inter = m_builder->getInterface();
+	inter.m_gr = gr;
+	inter.m_cpuPool = cpuPool;
+	inter.m_initialSize = initialSize;
+	inter.m_scale = nextChunkGrowScale;
+	inter.m_bias = nextChunkGrowBias;
+	inter.m_bufferName.create(*cpuPool, bufferName);
+	inter.m_alignment = alignment;
+	inter.m_bufferUsage = bufferUsage;
+	inter.m_bufferMap = bufferMapping;
+	inter.m_allowToGrow = allowToGrow;
+}
+
+void StackGpuMemoryPool::reset()
+{
+	m_builder->reset();
+}
+
+void StackGpuMemoryPool::allocate(PtrSize size, PtrSize& outOffset, Buffer*& buffer, void*& mappedMemory)
+{
+	Chunk* chunk;
+	PtrSize offset;
+	const Error err = m_builder->allocate(size, 1, chunk, offset);
+	if(err)
+	{
+		ANKI_GR_LOGF("Allocation failed");
+	}
+
+	outOffset = offset;
+	buffer = chunk->m_buffer.get();
+	if(chunk->m_mappedMemory)
+	{
+		mappedMemory = chunk->m_mappedMemory + offset;
+	}
+}
+
+} // end namespace anki

+ 53 - 0
AnKi/Gr/Utils/StackGpuMemoryPool.h

@@ -0,0 +1,53 @@
+// Copyright (C) 2009-2022, Panagiotis Christopoulos Charitos and contributors.
+// All rights reserved.
+// Code licensed under the BSD License.
+// http://www.anki3d.org/LICENSE
+
+#pragma once
+
+#include <AnKi/Gr/Common.h>
+#include <AnKi/Util/StackAllocatorBuilder.h>
+
+namespace anki {
+
+/// @addtogroup graphics
+/// @{
+
+/// Stack memory pool for GPU usage.
+class StackGpuMemoryPool
+{
+public:
+	StackGpuMemoryPool() = default;
+
+	StackGpuMemoryPool(const StackGpuMemoryPool&) = delete; // Non-copyable
+
+	~StackGpuMemoryPool();
+
+	StackGpuMemoryPool& operator=(const StackGpuMemoryPool&) = delete; // Non-copyable
+
+	void init(GrManager* gr, BaseMemoryPool* cpuPool, PtrSize initialSize, F64 nextChunkGrowScale,
+			  PtrSize nextChunkGrowBias, U32 alignment, BufferUsageBit bufferUsage, BufferMapAccessBit bufferMapping,
+			  Bool allowToGrow, CString bufferName);
+
+	/// @note It's thread-safe against other allocate()
+	void allocate(PtrSize size, PtrSize& outOffset, Buffer*& buffer)
+	{
+		void* dummyMapped = nullptr;
+		allocate(size, outOffset, buffer, dummyMapped);
+	}
+
+	/// @note It's thread-safe against other allocate()
+	void allocate(PtrSize size, PtrSize& outOffset, Buffer*& buffer, void*& mappedMemory);
+
+	void reset();
+
+private:
+	class Chunk;
+	class BuilderInterface;
+	using Builder = StackAllocatorBuilder<Chunk, BuilderInterface, Mutex>;
+
+	Builder* m_builder = nullptr;
+};
+/// @}
+
+} // end namespace anki

+ 2 - 2
AnKi/Resource/MeshResource.h

@@ -102,8 +102,8 @@ private:
 	class Lod
 	class Lod
 	{
 	{
 	public:
 	public:
-		SegregatedListsGpuAllocatorToken m_indexBufferAllocationToken;
-		Array<SegregatedListsGpuAllocatorToken, U32(VertexStreamId::kMeshRelatedCount)> m_vertexBuffersAllocationToken;
+		SegregatedListsGpuMemoryPoolToken m_indexBufferAllocationToken;
+		Array<SegregatedListsGpuMemoryPoolToken, U32(VertexStreamId::kMeshRelatedCount)> m_vertexBuffersAllocationToken;
 
 
 		U32 m_indexCount = 0;
 		U32 m_indexCount = 0;
 		U32 m_vertexCount = 0;
 		U32 m_vertexCount = 0;

+ 1 - 2
AnKi/Scene/Components/MoveComponent.h

@@ -149,8 +149,7 @@ private:
 	/// Keep the previous transformation for checking if it moved
 	/// Keep the previous transformation for checking if it moved
 	Transform m_prevWTrf = Transform::getIdentity();
 	Transform m_prevWTrf = Transform::getIdentity();
 
 
-	SceneNode* m_node = nullptr;
-	SegregatedListsGpuAllocatorToken m_gpuSceneTransforms;
+	SegregatedListsGpuMemoryPoolToken m_gpuSceneTransforms;
 
 
 	Bool m_markedForUpdate : 1;
 	Bool m_markedForUpdate : 1;
 	Bool m_ignoreLocalTransform : 1;
 	Bool m_ignoreLocalTransform : 1;

+ 1 - 1
AnKi/Scene/Components/RenderComponent.h

@@ -112,7 +112,7 @@ private:
 	const void* m_rtCallbackUserData = nullptr;
 	const void* m_rtCallbackUserData = nullptr;
 	RenderComponentFlag m_flags = RenderComponentFlag::kNone;
 	RenderComponentFlag m_flags = RenderComponentFlag::kNone;
 
 
-	SegregatedListsGpuAllocatorToken m_gpuSceneRenderableGpuView;
+	SegregatedListsGpuMemoryPoolToken m_gpuSceneRenderableGpuView;
 
 
 	void onDestroy(SceneNode& node);
 	void onDestroy(SceneNode& node);
 };
 };

+ 2 - 2
AnKi/Scene/Components/SkinComponent.h

@@ -106,8 +106,8 @@ private:
 	U8 m_crntBoneTrfs = 0;
 	U8 m_crntBoneTrfs = 0;
 	U8 m_prevBoneTrfs = 1;
 	U8 m_prevBoneTrfs = 1;
 
 
-	SegregatedListsGpuAllocatorToken m_crntBoneTransformsGpuSceneOffset;
-	SegregatedListsGpuAllocatorToken m_prevBoneTransformsGpuSceneOffset;
+	SegregatedListsGpuMemoryPoolToken m_crntBoneTransformsGpuSceneOffset;
+	SegregatedListsGpuMemoryPoolToken m_prevBoneTransformsGpuSceneOffset;
 
 
 	Error update(SceneComponentUpdateInfo& info, Bool& updated);
 	Error update(SceneComponentUpdateInfo& info, Bool& updated);
 
 

+ 1 - 1
AnKi/Scene/Components/SpatialComponent.h

@@ -131,7 +131,7 @@ private:
 
 
 	OctreePlaceable m_octreeInfo;
 	OctreePlaceable m_octreeInfo;
 
 
-	SegregatedListsGpuAllocatorToken m_gpuSceneAabb;
+	SegregatedListsGpuMemoryPoolToken m_gpuSceneAabb;
 
 
 	Bool m_markedForUpdate : 1;
 	Bool m_markedForUpdate : 1;
 	Bool m_placed : 1;
 	Bool m_placed : 1;

+ 1 - 1
AnKi/Util/StackAllocatorBuilder.h

@@ -28,7 +28,7 @@ namespace anki {
 ///                    PtrSize getNextChunkGrowBias();
 ///                    PtrSize getNextChunkGrowBias();
 ///                    Bool ignoreDeallocationErrors();
 ///                    Bool ignoreDeallocationErrors();
 ///                    Error allocateChunk(PtrSize size, TChunk*& out);
 ///                    Error allocateChunk(PtrSize size, TChunk*& out);
-///                    void freeChunk(TChunk* out);
+///                    void freeChunk(TChunk* chunk);
 ///                    void recycleChunk(TChunk& out);
 ///                    void recycleChunk(TChunk& out);
 ///                    Atomic<U32>* getAllocationCount(); // It's optional
 ///                    Atomic<U32>* getAllocationCount(); // It's optional
 ///                    @endcode
 ///                    @endcode