Browse Source

All mesh resources suballocate from a bigger buffer instead of individual buffers

Panagiotis Christopoulos Charitos 4 years ago
parent
commit
d0004ed2c9

+ 9 - 3
AnKi/Core/App.cpp

@@ -24,7 +24,7 @@
 #include <AnKi/Script/ScriptManager.h>
 #include <AnKi/Script/ScriptManager.h>
 #include <AnKi/Resource/ResourceFilesystem.h>
 #include <AnKi/Resource/ResourceFilesystem.h>
 #include <AnKi/Resource/AsyncLoader.h>
 #include <AnKi/Resource/AsyncLoader.h>
-#include <AnKi/Core/GpuMemoryManager.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 #include <AnKi/Ui/UiManager.h>
 #include <AnKi/Ui/UiManager.h>
 #include <AnKi/Ui/Canvas.h>
 #include <AnKi/Ui/Canvas.h>
 #include <csignal>
 #include <csignal>
@@ -288,6 +288,8 @@ void App::cleanup()
 	m_physics = nullptr;
 	m_physics = nullptr;
 	m_heapAlloc.deleteInstance(m_stagingMem);
 	m_heapAlloc.deleteInstance(m_stagingMem);
 	m_stagingMem = nullptr;
 	m_stagingMem = nullptr;
+	m_heapAlloc.deleteInstance(m_vertexMem);
+	m_vertexMem = nullptr;
 	m_heapAlloc.deleteInstance(m_threadHive);
 	m_heapAlloc.deleteInstance(m_threadHive);
 	m_threadHive = nullptr;
 	m_threadHive = nullptr;
 	GrManager::deleteInstance(m_gr);
 	GrManager::deleteInstance(m_gr);
@@ -420,9 +422,12 @@ Error App::initInternal(const ConfigSet& config_, AllocAlignedCallback allocCb,
 	ANKI_CHECK(GrManager::newInstance(grInit, m_gr));
 	ANKI_CHECK(GrManager::newInstance(grInit, m_gr));
 
 
 	//
 	//
-	// Staging mem
+	// GPU mem
 	//
 	//
-	m_stagingMem = m_heapAlloc.newInstance<StagingGpuMemoryManager>();
+	m_vertexMem = m_heapAlloc.newInstance<VertexGpuMemoryPool>();
+	ANKI_CHECK(m_vertexMem->init(m_heapAlloc, m_gr, config));
+
+	m_stagingMem = m_heapAlloc.newInstance<StagingGpuMemoryPool>();
 	ANKI_CHECK(m_stagingMem->init(m_gr, config));
 	ANKI_CHECK(m_stagingMem->init(m_gr, config));
 
 
 	//
 	//
@@ -445,6 +450,7 @@ Error App::initInternal(const ConfigSet& config_, AllocAlignedCallback allocCb,
 	rinit.m_gr = m_gr;
 	rinit.m_gr = m_gr;
 	rinit.m_physics = m_physics;
 	rinit.m_physics = m_physics;
 	rinit.m_resourceFs = m_resourceFs;
 	rinit.m_resourceFs = m_resourceFs;
+	rinit.m_vertexMemory = m_vertexMem;
 	rinit.m_config = &config;
 	rinit.m_config = &config;
 	rinit.m_cacheDir = m_cacheDir.toCString();
 	rinit.m_cacheDir = m_cacheDir.toCString();
 	rinit.m_allocCallback = m_allocCb;
 	rinit.m_allocCallback = m_allocCb;

+ 4 - 2
AnKi/Core/App.h

@@ -26,7 +26,8 @@ class SceneGraph;
 class ScriptManager;
 class ScriptManager;
 class ResourceManager;
 class ResourceManager;
 class ResourceFilesystem;
 class ResourceFilesystem;
-class StagingGpuMemoryManager;
+class StagingGpuMemoryPool;
+class VertexGpuMemoryPool;
 class UiManager;
 class UiManager;
 class UiQueueElement;
 class UiQueueElement;
 class RenderQueue;
 class RenderQueue;
@@ -170,7 +171,8 @@ private:
 	NativeWindow* m_window = nullptr;
 	NativeWindow* m_window = nullptr;
 	Input* m_input = nullptr;
 	Input* m_input = nullptr;
 	GrManager* m_gr = nullptr;
 	GrManager* m_gr = nullptr;
-	StagingGpuMemoryManager* m_stagingMem = nullptr;
+	VertexGpuMemoryPool* m_vertexMem = nullptr;
+	StagingGpuMemoryPool* m_stagingMem = nullptr;
 	PhysicsWorld* m_physics = nullptr;
 	PhysicsWorld* m_physics = nullptr;
 	ResourceFilesystem* m_resourceFs = nullptr;
 	ResourceFilesystem* m_resourceFs = nullptr;
 	ResourceManager* m_resources = nullptr;
 	ResourceManager* m_resources = nullptr;

+ 1 - 1
AnKi/Core/CMakeLists.txt

@@ -1,4 +1,4 @@
-set(SOURCES App.cpp ConfigSet.cpp GpuMemoryManager.cpp DeveloperConsole.cpp CoreTracer.cpp)
+set(SOURCES App.cpp ConfigSet.cpp GpuMemoryPools.cpp DeveloperConsole.cpp CoreTracer.cpp)
 file(GLOB HEADERS *.h)
 file(GLOB HEADERS *.h)
 
 
 if(SDL)
 if(SDL)

+ 1 - 0
AnKi/Core/ConfigDefs.h

@@ -7,6 +7,7 @@ ANKI_CONFIG_OPTION(core_uniformPerFrameMemorySize, 24_MB, 1_MB, 1_GB)
 ANKI_CONFIG_OPTION(core_storagePerFrameMemorySize, 24_MB, 1_MB, 1_GB)
 ANKI_CONFIG_OPTION(core_storagePerFrameMemorySize, 24_MB, 1_MB, 1_GB)
 ANKI_CONFIG_OPTION(core_vertexPerFrameMemorySize, 12_MB, 1_MB, 1_GB)
 ANKI_CONFIG_OPTION(core_vertexPerFrameMemorySize, 12_MB, 1_MB, 1_GB)
 ANKI_CONFIG_OPTION(core_textureBufferPerFrameMemorySize, 1_MB, 1_MB, 1_GB)
 ANKI_CONFIG_OPTION(core_textureBufferPerFrameMemorySize, 1_MB, 1_MB, 1_GB)
+ANKI_CONFIG_OPTION(core_globalVertexMemorySize, 128_MB, 16_MB, 2_GB)
 
 
 ANKI_CONFIG_OPTION(width, 1920, 16, 16 * 1024, "Width")
 ANKI_CONFIG_OPTION(width, 1920, 16, 16 * 1024, "Width")
 ANKI_CONFIG_OPTION(height, 1080, 16, 16 * 1024, "Height")
 ANKI_CONFIG_OPTION(height, 1080, 16, 16 * 1024, "Height")

+ 25 - 14
AnKi/Core/GpuMemoryManager.cpp → AnKi/Core/GpuMemoryPools.cpp

@@ -3,19 +3,19 @@
 // Code licensed under the BSD License.
 // Code licensed under the BSD License.
 // http://www.anki3d.org/LICENSE
 // http://www.anki3d.org/LICENSE
 
 
-#include <AnKi/Core/GpuMemoryManager.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 #include <AnKi/Core/ConfigSet.h>
 #include <AnKi/Core/ConfigSet.h>
 #include <AnKi/Gr/GrManager.h>
 #include <AnKi/Gr/GrManager.h>
 #include <AnKi/Util/Tracer.h>
 #include <AnKi/Util/Tracer.h>
 
 
 namespace anki {
 namespace anki {
 
 
-VertexGpuMemoryManager::~VertexGpuMemoryManager()
+VertexGpuMemoryPool::~VertexGpuMemoryPool()
 {
 {
 	// Do nothing
 	// Do nothing
 }
 }
 
 
-Error VertexGpuMemoryManager::init(GenericMemoryPoolAllocator<U8> alloc, GrManager* gr, const ConfigSet& cfg)
+Error VertexGpuMemoryPool::init(GenericMemoryPoolAllocator<U8> alloc, GrManager* gr, const ConfigSet& cfg)
 {
 {
 	m_gr = gr;
 	m_gr = gr;
 
 
@@ -27,7 +27,13 @@ Error VertexGpuMemoryManager::init(GenericMemoryPoolAllocator<U8> alloc, GrManag
 		ANKI_CORE_LOGE("core_globalVertexMemorySize should be a power of two (because of the buddy allocator)");
 		ANKI_CORE_LOGE("core_globalVertexMemorySize should be a power of two (because of the buddy allocator)");
 		return Error::USER_DATA;
 		return Error::USER_DATA;
 	}
 	}
-	bufferInit.m_usage = BufferUsageBit::VERTEX | BufferUsageBit::INDEX;
+
+	bufferInit.m_usage = BufferUsageBit::VERTEX | BufferUsageBit::INDEX | BufferUsageBit::TRANSFER_DESTINATION;
+	if(gr->getDeviceCapabilities().m_rayTracingEnabled)
+	{
+		bufferInit.m_usage |= BufferUsageBit::ACCELERATION_STRUCTURE_BUILD;
+	}
+
 	m_vertBuffer = gr->newBuffer(bufferInit);
 	m_vertBuffer = gr->newBuffer(bufferInit);
 
 
 	// Init the rest
 	// Init the rest
@@ -36,22 +42,27 @@ Error VertexGpuMemoryManager::init(GenericMemoryPoolAllocator<U8> alloc, GrManag
 	return Error::NONE;
 	return Error::NONE;
 }
 }
 
 
-ANKI_USE_RESULT Error VertexGpuMemoryManager::allocate(PtrSize size, PtrSize& offset)
+ANKI_USE_RESULT Error VertexGpuMemoryPool::allocate(PtrSize size, PtrSize& offset)
 {
 {
 	U32 offset32;
 	U32 offset32;
-	ANKI_CHECK(m_buddyAllocator.allocate(size, offset32));
+	const Bool success = m_buddyAllocator.allocate(size, offset32);
+	if(ANKI_UNLIKELY(!success))
+	{
+		ANKI_CORE_LOGE("Failed to allocate vertex memory of size: %zu", size);
+		return Error::OUT_OF_MEMORY;
+	}
 
 
 	offset = offset32;
 	offset = offset32;
 
 
 	return Error::NONE;
 	return Error::NONE;
 }
 }
 
 
-void VertexGpuMemoryManager::free(PtrSize size, PtrSize offset)
+void VertexGpuMemoryPool::free(PtrSize size, PtrSize offset)
 {
 {
 	m_buddyAllocator.free(U32(offset), size);
 	m_buddyAllocator.free(U32(offset), size);
 }
 }
 
 
-StagingGpuMemoryManager::~StagingGpuMemoryManager()
+StagingGpuMemoryPool::~StagingGpuMemoryPool()
 {
 {
 	m_gr->finish();
 	m_gr->finish();
 
 
@@ -62,7 +73,7 @@ StagingGpuMemoryManager::~StagingGpuMemoryManager()
 	}
 	}
 }
 }
 
 
-Error StagingGpuMemoryManager::init(GrManager* gr, const ConfigSet& cfg)
+Error StagingGpuMemoryPool::init(GrManager* gr, const ConfigSet& cfg)
 {
 {
 	m_gr = gr;
 	m_gr = gr;
 
 
@@ -88,8 +99,8 @@ Error StagingGpuMemoryManager::init(GrManager* gr, const ConfigSet& cfg)
 	return Error::NONE;
 	return Error::NONE;
 }
 }
 
 
-void StagingGpuMemoryManager::initBuffer(StagingGpuMemoryType type, U32 alignment, PtrSize maxAllocSize,
-										 BufferUsageBit usage, GrManager& gr)
+void StagingGpuMemoryPool::initBuffer(StagingGpuMemoryType type, U32 alignment, PtrSize maxAllocSize,
+									  BufferUsageBit usage, GrManager& gr)
 {
 {
 	PerFrameBuffer& perframe = m_perFrameBuffers[type];
 	PerFrameBuffer& perframe = m_perFrameBuffers[type];
 
 
@@ -98,7 +109,7 @@ void StagingGpuMemoryManager::initBuffer(StagingGpuMemoryType type, U32 alignmen
 	perframe.m_mappedMem = static_cast<U8*>(perframe.m_buff->map(0, perframe.m_size, BufferMapAccessBit::WRITE));
 	perframe.m_mappedMem = static_cast<U8*>(perframe.m_buff->map(0, perframe.m_size, BufferMapAccessBit::WRITE));
 }
 }
 
 
-void* StagingGpuMemoryManager::allocateFrame(PtrSize size, StagingGpuMemoryType usage, StagingGpuMemoryToken& token)
+void* StagingGpuMemoryPool::allocateFrame(PtrSize size, StagingGpuMemoryType usage, StagingGpuMemoryToken& token)
 {
 {
 	PerFrameBuffer& buff = m_perFrameBuffers[usage];
 	PerFrameBuffer& buff = m_perFrameBuffers[usage];
 	const Error err = buff.m_alloc.allocate(size, token.m_offset);
 	const Error err = buff.m_alloc.allocate(size, token.m_offset);
@@ -114,7 +125,7 @@ void* StagingGpuMemoryManager::allocateFrame(PtrSize size, StagingGpuMemoryType
 	return buff.m_mappedMem + token.m_offset;
 	return buff.m_mappedMem + token.m_offset;
 }
 }
 
 
-void* StagingGpuMemoryManager::tryAllocateFrame(PtrSize size, StagingGpuMemoryType usage, StagingGpuMemoryToken& token)
+void* StagingGpuMemoryPool::tryAllocateFrame(PtrSize size, StagingGpuMemoryType usage, StagingGpuMemoryToken& token)
 {
 {
 	PerFrameBuffer& buff = m_perFrameBuffers[usage];
 	PerFrameBuffer& buff = m_perFrameBuffers[usage];
 	const Error err = buff.m_alloc.allocate(size, token.m_offset);
 	const Error err = buff.m_alloc.allocate(size, token.m_offset);
@@ -132,7 +143,7 @@ void* StagingGpuMemoryManager::tryAllocateFrame(PtrSize size, StagingGpuMemoryTy
 	}
 	}
 }
 }
 
 
-void StagingGpuMemoryManager::endFrame()
+void StagingGpuMemoryPool::endFrame()
 {
 {
 	for(StagingGpuMemoryType usage = StagingGpuMemoryType::UNIFORM; usage < StagingGpuMemoryType::COUNT; ++usage)
 	for(StagingGpuMemoryType usage = StagingGpuMemoryType::UNIFORM; usage < StagingGpuMemoryType::COUNT; ++usage)
 	{
 	{

+ 11 - 11
AnKi/Core/GpuMemoryManager.h → AnKi/Core/GpuMemoryPools.h

@@ -19,16 +19,16 @@ class ConfigSet;
 /// @{
 /// @{
 
 
 /// Manages vertex and index memory for the whole application.
 /// Manages vertex and index memory for the whole application.
-class VertexGpuMemoryManager
+class VertexGpuMemoryPool
 {
 {
 public:
 public:
-	VertexGpuMemoryManager() = default;
+	VertexGpuMemoryPool() = default;
 
 
-	VertexGpuMemoryManager(const VertexGpuMemoryManager&) = delete; // Non-copyable
+	VertexGpuMemoryPool(const VertexGpuMemoryPool&) = delete; // Non-copyable
 
 
-	~VertexGpuMemoryManager();
+	~VertexGpuMemoryPool();
 
 
-	VertexGpuMemoryManager& operator=(const VertexGpuMemoryManager&) = delete; // Non-copyable
+	VertexGpuMemoryPool& operator=(const VertexGpuMemoryPool&) = delete; // Non-copyable
 
 
 	ANKI_USE_RESULT Error init(GenericMemoryPoolAllocator<U8> alloc, GrManager* gr, const ConfigSet& cfg);
 	ANKI_USE_RESULT Error init(GenericMemoryPoolAllocator<U8> alloc, GrManager* gr, const ConfigSet& cfg);
 
 
@@ -44,7 +44,7 @@ public:
 private:
 private:
 	GrManager* m_gr = nullptr;
 	GrManager* m_gr = nullptr;
 	BufferPtr m_vertBuffer;
 	BufferPtr m_vertBuffer;
-	BuddyAllocatorBuilder<> m_buddyAllocator;
+	BuddyAllocatorBuilder<32, Mutex> m_buddyAllocator;
 };
 };
 
 
 enum class StagingGpuMemoryType : U8
 enum class StagingGpuMemoryType : U8
@@ -87,16 +87,16 @@ public:
 };
 };
 
 
 /// Manages staging GPU memory.
 /// Manages staging GPU memory.
-class StagingGpuMemoryManager
+class StagingGpuMemoryPool
 {
 {
 public:
 public:
-	StagingGpuMemoryManager() = default;
+	StagingGpuMemoryPool() = default;
 
 
-	StagingGpuMemoryManager(const StagingGpuMemoryManager&) = delete; // Non-copyable
+	StagingGpuMemoryPool(const StagingGpuMemoryPool&) = delete; // Non-copyable
 
 
-	~StagingGpuMemoryManager();
+	~StagingGpuMemoryPool();
 
 
-	StagingGpuMemoryManager& operator=(const StagingGpuMemoryManager&) = delete; // Non-copyable
+	StagingGpuMemoryPool& operator=(const StagingGpuMemoryPool&) = delete; // Non-copyable
 
 
 	ANKI_USE_RESULT Error init(GrManager* gr, const ConfigSet& cfg);
 	ANKI_USE_RESULT Error init(GrManager* gr, const ConfigSet& cfg);
 
 

+ 38 - 2
AnKi/Gr/Vulkan/GrManagerImpl.cpp

@@ -431,8 +431,44 @@ Error GrManagerImpl::initInstance(const GrManagerInitInfo& init)
 		return Error::FUNCTION_FAILED;
 		return Error::FUNCTION_FAILED;
 	}
 	}
 
 
-	count = 1;
-	ANKI_VK_CHECK(vkEnumeratePhysicalDevices(m_instance, &count, &m_physicalDevice));
+	// Find the correct physical device
+	{
+		DynamicArrayAuto<VkPhysicalDevice> physicalDevices(m_alloc, count);
+		ANKI_VK_CHECK(vkEnumeratePhysicalDevices(m_instance, &count, &physicalDevices[0]));
+
+		VkPhysicalDevice firstChoice = VK_NULL_HANDLE;
+		VkPhysicalDevice secondChoice = VK_NULL_HANDLE;
+		for(U32 devIdx = 0; devIdx < count; ++devIdx)
+		{
+			VkPhysicalDeviceProperties2 props = {};
+			props.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
+			vkGetPhysicalDeviceProperties2(physicalDevices[devIdx], &props);
+
+			if(props.properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU)
+			{
+				// Found it
+				firstChoice = physicalDevices[devIdx];
+			}
+			else if(props.properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU)
+			{
+				secondChoice = physicalDevices[devIdx];
+			}
+		}
+
+		if(firstChoice != VK_NULL_HANDLE)
+		{
+			m_physicalDevice = firstChoice;
+		}
+		else if(secondChoice != VK_NULL_HANDLE)
+		{
+			m_physicalDevice = secondChoice;
+		}
+		else
+		{
+			ANKI_VK_LOGE("Couldn't find a suitable discrete or integrated physical device");
+			return Error::FUNCTION_FAILED;
+		}
+	}
 
 
 	m_rtPipelineProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR;
 	m_rtPipelineProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR;
 	m_accelerationStructureProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR;
 	m_accelerationStructureProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR;

+ 1 - 1
AnKi/Renderer/ClusterBinning.cpp

@@ -147,7 +147,7 @@ void ClusterBinning::writeClustererBuffers(RenderingContext& ctx)
 
 
 	// Allocate buffers
 	// Allocate buffers
 	ClusteredShadingContext& cs = ctx.m_clusteredShading;
 	ClusteredShadingContext& cs = ctx.m_clusteredShading;
-	StagingGpuMemoryManager& stagingMem = m_r->getStagingGpuMemoryManager();
+	StagingGpuMemoryPool& stagingMem = m_r->getStagingGpuMemory();
 
 
 	cs.m_clusteredShadingUniformsAddress = stagingMem.allocateFrame(
 	cs.m_clusteredShadingUniformsAddress = stagingMem.allocateFrame(
 		sizeof(ClusteredShadingUniforms), StagingGpuMemoryType::UNIFORM, cs.m_clusteredShadingUniformsToken);
 		sizeof(ClusteredShadingUniforms), StagingGpuMemoryType::UNIFORM, cs.m_clusteredShadingUniformsToken);

+ 1 - 1
AnKi/Renderer/Common.h

@@ -6,7 +6,7 @@
 #pragma once
 #pragma once
 
 
 #include <AnKi/Gr.h>
 #include <AnKi/Gr.h>
-#include <AnKi/Core/GpuMemoryManager.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 #include <AnKi/Util/Ptr.h>
 #include <AnKi/Util/Ptr.h>
 #include <AnKi/Shaders/Include/Evsm.h>
 #include <AnKi/Shaders/Include/Evsm.h>
 #include <AnKi/Shaders/Include/ClusteredShadingTypes.h>
 #include <AnKi/Shaders/Include/ClusteredShadingTypes.h>

+ 1 - 1
AnKi/Renderer/Dbg.cpp

@@ -74,7 +74,7 @@ void Dbg::run(RenderPassWorkContext& rgraphCtx, const RenderingContext& ctx)
 	dctx.m_viewProjectionMatrix = ctx.m_renderQueue->m_viewProjectionMatrix;
 	dctx.m_viewProjectionMatrix = ctx.m_renderQueue->m_viewProjectionMatrix;
 	dctx.m_projectionMatrix = ctx.m_renderQueue->m_projectionMatrix;
 	dctx.m_projectionMatrix = ctx.m_renderQueue->m_projectionMatrix;
 	dctx.m_cameraTransform = ctx.m_renderQueue->m_viewMatrix.getInverse();
 	dctx.m_cameraTransform = ctx.m_renderQueue->m_viewMatrix.getInverse();
-	dctx.m_stagingGpuAllocator = &m_r->getStagingGpuMemoryManager();
+	dctx.m_stagingGpuAllocator = &m_r->getStagingGpuMemory();
 	dctx.m_frameAllocator = ctx.m_tempAllocator;
 	dctx.m_frameAllocator = ctx.m_tempAllocator;
 	dctx.m_commandBuffer = cmdb;
 	dctx.m_commandBuffer = cmdb;
 	dctx.m_sampler = m_r->getSamplers().m_trilinearRepeatAniso;
 	dctx.m_sampler = m_r->getSamplers().m_trilinearRepeatAniso;

+ 1 - 1
AnKi/Renderer/Drawer.cpp

@@ -50,7 +50,7 @@ void RenderableDrawer::drawRange(Pass pass, const Mat4& viewMat, const Mat4& vie
 	ctx.m_queueCtx.m_projectionMatrix = Mat4::getIdentity(); // TODO
 	ctx.m_queueCtx.m_projectionMatrix = Mat4::getIdentity(); // TODO
 	ctx.m_queueCtx.m_previousViewProjectionMatrix = prevViewProjMat;
 	ctx.m_queueCtx.m_previousViewProjectionMatrix = prevViewProjMat;
 	ctx.m_queueCtx.m_cameraTransform = ctx.m_queueCtx.m_viewMatrix.getInverse();
 	ctx.m_queueCtx.m_cameraTransform = ctx.m_queueCtx.m_viewMatrix.getInverse();
-	ctx.m_queueCtx.m_stagingGpuAllocator = &m_r->getStagingGpuMemoryManager();
+	ctx.m_queueCtx.m_stagingGpuAllocator = &m_r->getStagingGpuMemory();
 	ctx.m_queueCtx.m_commandBuffer = cmdb;
 	ctx.m_queueCtx.m_commandBuffer = cmdb;
 	ctx.m_queueCtx.m_sampler = sampler;
 	ctx.m_queueCtx.m_sampler = sampler;
 	ctx.m_queueCtx.m_key = RenderingKey(pass, 0, 1, false, false);
 	ctx.m_queueCtx.m_key = RenderingKey(pass, 0, 1, false, false);

+ 1 - 1
AnKi/Renderer/GenericCompute.cpp

@@ -36,7 +36,7 @@ void GenericCompute::run(const RenderingContext& ctx, RenderPassWorkContext& rgr
 
 
 	GenericGpuComputeJobQueueElementContext elementCtx;
 	GenericGpuComputeJobQueueElementContext elementCtx;
 	elementCtx.m_commandBuffer = rgraphCtx.m_commandBuffer;
 	elementCtx.m_commandBuffer = rgraphCtx.m_commandBuffer;
-	elementCtx.m_stagingGpuAllocator = &m_r->getStagingGpuMemoryManager();
+	elementCtx.m_stagingGpuAllocator = &m_r->getStagingGpuMemory();
 	elementCtx.m_viewMatrix = ctx.m_matrices.m_view;
 	elementCtx.m_viewMatrix = ctx.m_matrices.m_view;
 	elementCtx.m_viewProjectionMatrix = ctx.m_matrices.m_viewProjection;
 	elementCtx.m_viewProjectionMatrix = ctx.m_matrices.m_viewProjection;
 	elementCtx.m_projectionMatrix = ctx.m_matrices.m_projection;
 	elementCtx.m_projectionMatrix = ctx.m_matrices.m_projection;

+ 3 - 3
AnKi/Renderer/MainRenderer.cpp

@@ -28,9 +28,9 @@ MainRenderer::~MainRenderer()
 	ANKI_R_LOGI("Destroying main renderer");
 	ANKI_R_LOGI("Destroying main renderer");
 }
 }
 
 
-Error MainRenderer::init(ThreadHive* hive, ResourceManager* resources, GrManager* gr,
-						 StagingGpuMemoryManager* stagingMem, UiManager* ui, AllocAlignedCallback allocCb,
-						 void* allocCbUserData, const ConfigSet& config, Timestamp* globTimestamp)
+Error MainRenderer::init(ThreadHive* hive, ResourceManager* resources, GrManager* gr, StagingGpuMemoryPool* stagingMem,
+						 UiManager* ui, AllocAlignedCallback allocCb, void* allocCbUserData, const ConfigSet& config,
+						 Timestamp* globTimestamp)
 {
 {
 	ANKI_R_LOGI("Initializing main renderer");
 	ANKI_R_LOGI("Initializing main renderer");
 
 

+ 2 - 2
AnKi/Renderer/MainRenderer.h

@@ -14,7 +14,7 @@ namespace anki {
 // Forward
 // Forward
 class ResourceManager;
 class ResourceManager;
 class ConfigSet;
 class ConfigSet;
-class StagingGpuMemoryManager;
+class StagingGpuMemoryPool;
 class UiManager;
 class UiManager;
 
 
 /// @addtogroup renderer
 /// @addtogroup renderer
@@ -38,7 +38,7 @@ public:
 	~MainRenderer();
 	~MainRenderer();
 
 
 	ANKI_USE_RESULT Error init(ThreadHive* hive, ResourceManager* resources, GrManager* gl,
 	ANKI_USE_RESULT Error init(ThreadHive* hive, ResourceManager* resources, GrManager* gl,
-							   StagingGpuMemoryManager* stagingMem, UiManager* ui, AllocAlignedCallback allocCb,
+							   StagingGpuMemoryPool* stagingMem, UiManager* ui, AllocAlignedCallback allocCb,
 							   void* allocCbUserData, const ConfigSet& config, Timestamp* globTimestamp);
 							   void* allocCbUserData, const ConfigSet& config, Timestamp* globTimestamp);
 
 
 	ANKI_USE_RESULT Error render(RenderQueue& rqueue, TexturePtr presentTex);
 	ANKI_USE_RESULT Error render(RenderQueue& rqueue, TexturePtr presentTex);

+ 2 - 2
AnKi/Renderer/RenderQueue.h

@@ -41,7 +41,7 @@ public:
 	RenderingKey m_key;
 	RenderingKey m_key;
 	CommandBufferPtr m_commandBuffer;
 	CommandBufferPtr m_commandBuffer;
 	SamplerPtr m_sampler; ///< A trilinear sampler with anisotropy.
 	SamplerPtr m_sampler; ///< A trilinear sampler with anisotropy.
-	StagingGpuMemoryManager* m_stagingGpuAllocator ANKI_DEBUG_CODE(= nullptr);
+	StagingGpuMemoryPool* m_stagingGpuAllocator ANKI_DEBUG_CODE(= nullptr);
 	StackAllocator<U8> m_frameAllocator;
 	StackAllocator<U8> m_frameAllocator;
 	Bool m_debugDraw; ///< If true the drawcall should be drawing some kind of debug mesh.
 	Bool m_debugDraw; ///< If true the drawcall should be drawing some kind of debug mesh.
 	BitSet<U(RenderQueueDebugDrawFlag::COUNT), U32> m_debugDrawFlags = {false};
 	BitSet<U(RenderQueueDebugDrawFlag::COUNT), U32> m_debugDrawFlags = {false};
@@ -78,7 +78,7 @@ class GenericGpuComputeJobQueueElementContext final : public RenderingMatrices
 {
 {
 public:
 public:
 	CommandBufferPtr m_commandBuffer;
 	CommandBufferPtr m_commandBuffer;
-	StagingGpuMemoryManager* m_stagingGpuAllocator ANKI_DEBUG_CODE(= nullptr);
+	StagingGpuMemoryPool* m_stagingGpuAllocator ANKI_DEBUG_CODE(= nullptr);
 };
 };
 
 
 /// Callback for GenericGpuComputeJobQueueElement.
 /// Callback for GenericGpuComputeJobQueueElement.

+ 1 - 1
AnKi/Renderer/Renderer.cpp

@@ -59,7 +59,7 @@ Renderer::~Renderer()
 	m_currentDebugRtName.destroy(getAllocator());
 	m_currentDebugRtName.destroy(getAllocator());
 }
 }
 
 
-Error Renderer::init(ThreadHive* hive, ResourceManager* resources, GrManager* gl, StagingGpuMemoryManager* stagingMem,
+Error Renderer::init(ThreadHive* hive, ResourceManager* resources, GrManager* gl, StagingGpuMemoryPool* stagingMem,
 					 UiManager* ui, HeapAllocator<U8> alloc, const ConfigSet& config, Timestamp* globTimestamp)
 					 UiManager* ui, HeapAllocator<U8> alloc, const ConfigSet& config, Timestamp* globTimestamp)
 {
 {
 	ANKI_TRACE_SCOPED_EVENT(R_INIT);
 	ANKI_TRACE_SCOPED_EVENT(R_INIT);

+ 5 - 5
AnKi/Renderer/Renderer.h

@@ -10,7 +10,7 @@
 #include <AnKi/Math.h>
 #include <AnKi/Math.h>
 #include <AnKi/Gr.h>
 #include <AnKi/Gr.h>
 #include <AnKi/Resource/Forward.h>
 #include <AnKi/Resource/Forward.h>
-#include <AnKi/Core/GpuMemoryManager.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 #include <AnKi/Collision/Forward.h>
 #include <AnKi/Collision/Forward.h>
 
 
 namespace anki {
 namespace anki {
@@ -18,7 +18,7 @@ namespace anki {
 // Forward
 // Forward
 class ConfigSet;
 class ConfigSet;
 class ResourceManager;
 class ResourceManager;
-class StagingGpuMemoryManager;
+class StagingGpuMemoryPool;
 class UiManager;
 class UiManager;
 
 
 /// @addtogroup renderer
 /// @addtogroup renderer
@@ -73,7 +73,7 @@ public:
 
 
 	/// Init the renderer.
 	/// Init the renderer.
 	ANKI_USE_RESULT Error init(ThreadHive* hive, ResourceManager* resources, GrManager* gr,
 	ANKI_USE_RESULT Error init(ThreadHive* hive, ResourceManager* resources, GrManager* gr,
-							   StagingGpuMemoryManager* stagingMem, UiManager* ui, HeapAllocator<U8> alloc,
+							   StagingGpuMemoryPool* stagingMem, UiManager* ui, HeapAllocator<U8> alloc,
 							   const ConfigSet& config, Timestamp* globTimestamp);
 							   const ConfigSet& config, Timestamp* globTimestamp);
 
 
 	/// This function does all the rendering stages and produces a final result.
 	/// This function does all the rendering stages and produces a final result.
@@ -164,7 +164,7 @@ public:
 		return m_samplers;
 		return m_samplers;
 	}
 	}
 
 
-	StagingGpuMemoryManager& getStagingGpuMemoryManager()
+	StagingGpuMemoryPool& getStagingGpuMemory()
 	{
 	{
 		ANKI_ASSERT(m_stagingMem);
 		ANKI_ASSERT(m_stagingMem);
 		return *m_stagingMem;
 		return *m_stagingMem;
@@ -220,7 +220,7 @@ public:
 private:
 private:
 	ResourceManager* m_resources = nullptr;
 	ResourceManager* m_resources = nullptr;
 	ThreadHive* m_threadHive = nullptr;
 	ThreadHive* m_threadHive = nullptr;
-	StagingGpuMemoryManager* m_stagingMem = nullptr;
+	StagingGpuMemoryPool* m_stagingMem = nullptr;
 	GrManager* m_gr = nullptr;
 	GrManager* m_gr = nullptr;
 	UiManager* m_ui = nullptr;
 	UiManager* m_ui = nullptr;
 	Timestamp* m_globTimestamp;
 	Timestamp* m_globTimestamp;

+ 1 - 1
AnKi/Renderer/RendererObject.cpp

@@ -32,7 +32,7 @@ ResourceManager& RendererObject::getResourceManager()
 
 
 void* RendererObject::allocateFrameStagingMemory(PtrSize size, StagingGpuMemoryType usage, StagingGpuMemoryToken& token)
 void* RendererObject::allocateFrameStagingMemory(PtrSize size, StagingGpuMemoryType usage, StagingGpuMemoryToken& token)
 {
 {
-	return m_r->getStagingGpuMemoryManager().allocateFrame(size, usage, token);
+	return m_r->getStagingGpuMemory().allocateFrame(size, usage, token);
 }
 }
 
 
 void RendererObject::bindUniforms(CommandBufferPtr& cmdb, U32 set, U32 binding,
 void RendererObject::bindUniforms(CommandBufferPtr& cmdb, U32 set, U32 binding,

+ 1 - 1
AnKi/Renderer/RendererObject.h

@@ -10,7 +10,7 @@
 #include <AnKi/Gr.h>
 #include <AnKi/Gr.h>
 #include <AnKi/Resource/ResourceManager.h>
 #include <AnKi/Resource/ResourceManager.h>
 #include <AnKi/Resource/ShaderProgramResource.h>
 #include <AnKi/Resource/ShaderProgramResource.h>
-#include <AnKi/Core/GpuMemoryManager.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 
 
 namespace anki {
 namespace anki {
 
 

+ 58 - 45
AnKi/Resource/MeshResource.cpp

@@ -7,6 +7,7 @@
 #include <AnKi/Resource/ResourceManager.h>
 #include <AnKi/Resource/ResourceManager.h>
 #include <AnKi/Resource/MeshBinaryLoader.h>
 #include <AnKi/Resource/MeshBinaryLoader.h>
 #include <AnKi/Resource/AsyncLoader.h>
 #include <AnKi/Resource/AsyncLoader.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 #include <AnKi/Util/Functions.h>
 #include <AnKi/Util/Functions.h>
 #include <AnKi/Util/Filesystem.h>
 #include <AnKi/Util/Filesystem.h>
 
 
@@ -57,6 +58,17 @@ MeshResource::~MeshResource()
 {
 {
 	m_subMeshes.destroy(getAllocator());
 	m_subMeshes.destroy(getAllocator());
 	m_vertexBufferInfos.destroy(getAllocator());
 	m_vertexBufferInfos.destroy(getAllocator());
+
+	if(m_vertexBuffersOffset != MAX_PTR_SIZE)
+	{
+		getManager().getVertexGpuMemory().free(m_vertexBuffersSize, m_vertexBuffersOffset);
+	}
+
+	if(m_indexBufferOffset != MAX_PTR_SIZE)
+	{
+		const PtrSize indexBufferSize = PtrSize(m_indexCount) * ((m_indexType == IndexType::U32) ? 4 : 2);
+		getManager().getVertexGpuMemory().free(indexBufferSize, m_indexBufferOffset);
+	}
 }
 }
 
 
 Bool MeshResource::isCompatible(const MeshResource& other) const
 Bool MeshResource::isCompatible(const MeshResource& other) const
@@ -91,7 +103,9 @@ Error MeshResource::load(const ResourceFilename& filename, Bool async)
 	ANKI_CHECK(loader.load(filename));
 	ANKI_CHECK(loader.load(filename));
 	const MeshBinaryHeader& header = loader.getHeader();
 	const MeshBinaryHeader& header = loader.getHeader();
 
 
-	// Get submeshes
+	//
+	// Submeshes
+	//
 	m_subMeshes.create(getAllocator(), header.m_subMeshCount);
 	m_subMeshes.create(getAllocator(), header.m_subMeshCount);
 	for(U32 i = 0; i < m_subMeshes.getSize(); ++i)
 	for(U32 i = 0; i < m_subMeshes.getSize(); ++i)
 	{
 	{
@@ -101,45 +115,40 @@ Error MeshResource::load(const ResourceFilename& filename, Bool async)
 		m_subMeshes[i].m_aabb.setMax(loader.getSubMeshes()[i].m_aabbMax);
 		m_subMeshes[i].m_aabb.setMax(loader.getSubMeshes()[i].m_aabbMax);
 	}
 	}
 
 
+	//
 	// Index stuff
 	// Index stuff
+	//
 	m_indexCount = header.m_totalIndexCount;
 	m_indexCount = header.m_totalIndexCount;
 	ANKI_ASSERT((m_indexCount % 3) == 0 && "Expecting triangles");
 	ANKI_ASSERT((m_indexCount % 3) == 0 && "Expecting triangles");
 	m_indexType = header.m_indexType;
 	m_indexType = header.m_indexType;
 
 
-	const PtrSize indexBuffSize = PtrSize(m_indexCount) * ((m_indexType == IndexType::U32) ? 4 : 2);
-
-	BufferUsageBit indexBufferUsage = BufferUsageBit::INDEX | BufferUsageBit::TRANSFER_DESTINATION;
-	if(rayTracingEnabled)
-	{
-		indexBufferUsage |= BufferUsageBit::ACCELERATION_STRUCTURE_BUILD;
-	}
-	m_indexBuffer = getManager().getGrManager().newBuffer(
-		BufferInitInfo(indexBuffSize, indexBufferUsage, BufferMapAccessBit::NONE,
-					   StringAuto(getTempAllocator()).sprintf("%s_%s", "Idx", basename.cstr())));
+	const PtrSize indexBufferSize = PtrSize(m_indexCount) * ((m_indexType == IndexType::U32) ? 4 : 2);
+	ANKI_CHECK(getManager().getVertexGpuMemory().allocate(indexBufferSize, m_indexBufferOffset));
 
 
+	//
 	// Vertex stuff
 	// Vertex stuff
+	//
 	m_vertexCount = header.m_totalVertexCount;
 	m_vertexCount = header.m_totalVertexCount;
 	m_vertexBufferInfos.create(getAllocator(), header.m_vertexBufferCount);
 	m_vertexBufferInfos.create(getAllocator(), header.m_vertexBufferCount);
 
 
-	U32 totalVertexBuffSize = 0;
+	m_vertexBuffersSize = 0;
 	for(U32 i = 0; i < header.m_vertexBufferCount; ++i)
 	for(U32 i = 0; i < header.m_vertexBufferCount; ++i)
 	{
 	{
-		alignRoundUp(MESH_BINARY_BUFFER_ALIGNMENT, totalVertexBuffSize);
+		alignRoundUp(MESH_BINARY_BUFFER_ALIGNMENT, m_vertexBuffersSize);
 
 
-		m_vertexBufferInfos[i].m_offset = totalVertexBuffSize;
+		m_vertexBufferInfos[i].m_offset = m_vertexBuffersSize;
 		m_vertexBufferInfos[i].m_stride = header.m_vertexBuffers[i].m_vertexStride;
 		m_vertexBufferInfos[i].m_stride = header.m_vertexBuffers[i].m_vertexStride;
 
 
-		totalVertexBuffSize += m_vertexCount * m_vertexBufferInfos[i].m_stride;
+		m_vertexBuffersSize += m_vertexCount * m_vertexBufferInfos[i].m_stride;
 	}
 	}
 
 
-	BufferUsageBit vertexBufferUsage = BufferUsageBit::VERTEX | BufferUsageBit::TRANSFER_DESTINATION;
-	if(rayTracingEnabled)
+	ANKI_CHECK(getManager().getVertexGpuMemory().allocate(m_vertexBuffersSize, m_vertexBuffersOffset));
+
+	// Readjust the individual offset now that we have a global offset
+	for(U32 i = 0; i < header.m_vertexBufferCount; ++i)
 	{
 	{
-		vertexBufferUsage |= BufferUsageBit::ACCELERATION_STRUCTURE_BUILD;
+		m_vertexBufferInfos[i].m_offset += m_vertexBuffersOffset;
 	}
 	}
-	m_vertexBuffer = getManager().getGrManager().newBuffer(
-		BufferInitInfo(totalVertexBuffSize, vertexBufferUsage, BufferMapAccessBit::NONE,
-					   StringAuto(getTempAllocator()).sprintf("%s_%s", "Vert", basename.cstr())));
 
 
 	for(VertexAttributeId attrib = VertexAttributeId::FIRST; attrib < VertexAttributeId::COUNT; ++attrib)
 	for(VertexAttributeId attrib = VertexAttributeId::FIRST; attrib < VertexAttributeId::COUNT; ++attrib)
 	{
 	{
@@ -158,33 +167,36 @@ Error MeshResource::load(const ResourceFilename& filename, Bool async)
 	// Other
 	// Other
 	m_aabb.setMin(header.m_aabbMin);
 	m_aabb.setMin(header.m_aabbMin);
 	m_aabb.setMax(header.m_aabbMax);
 	m_aabb.setMax(header.m_aabbMax);
+	m_vertexBuffer = getManager().getVertexGpuMemory().getVertexBuffer();
 
 
+	//
 	// Clear the buffers
 	// Clear the buffers
+	//
 	if(async)
 	if(async)
 	{
 	{
 		CommandBufferInitInfo cmdbinit;
 		CommandBufferInitInfo cmdbinit;
 		cmdbinit.m_flags = CommandBufferFlag::SMALL_BATCH | CommandBufferFlag::GENERAL_WORK;
 		cmdbinit.m_flags = CommandBufferFlag::SMALL_BATCH | CommandBufferFlag::GENERAL_WORK;
 		CommandBufferPtr cmdb = getManager().getGrManager().newCommandBuffer(cmdbinit);
 		CommandBufferPtr cmdb = getManager().getGrManager().newCommandBuffer(cmdbinit);
 
 
-		cmdb->fillBuffer(m_vertexBuffer, 0, MAX_PTR_SIZE, 0);
-		cmdb->fillBuffer(m_indexBuffer, 0, MAX_PTR_SIZE, 0);
+		cmdb->fillBuffer(m_vertexBuffer, m_vertexBuffersOffset, m_vertexBuffersSize, 0);
+		cmdb->fillBuffer(m_vertexBuffer, m_indexBufferOffset, indexBufferSize, 0);
 
 
 		cmdb->setBufferBarrier(m_vertexBuffer, BufferUsageBit::TRANSFER_DESTINATION, BufferUsageBit::VERTEX, 0,
 		cmdb->setBufferBarrier(m_vertexBuffer, BufferUsageBit::TRANSFER_DESTINATION, BufferUsageBit::VERTEX, 0,
 							   MAX_PTR_SIZE);
 							   MAX_PTR_SIZE);
-		cmdb->setBufferBarrier(m_indexBuffer, BufferUsageBit::TRANSFER_DESTINATION, BufferUsageBit::INDEX, 0,
-							   MAX_PTR_SIZE);
 
 
 		cmdb->flush();
 		cmdb->flush();
 	}
 	}
 
 
+	//
 	// Create the BLAS
 	// Create the BLAS
+	//
 	if(rayTracingEnabled)
 	if(rayTracingEnabled)
 	{
 	{
 		AccelerationStructureInitInfo inf(StringAuto(getTempAllocator()).sprintf("%s_%s", "Blas", basename.cstr()));
 		AccelerationStructureInitInfo inf(StringAuto(getTempAllocator()).sprintf("%s_%s", "Blas", basename.cstr()));
 		inf.m_type = AccelerationStructureType::BOTTOM_LEVEL;
 		inf.m_type = AccelerationStructureType::BOTTOM_LEVEL;
 
 
-		inf.m_bottomLevel.m_indexBuffer = m_indexBuffer;
-		inf.m_bottomLevel.m_indexBufferOffset = 0;
+		inf.m_bottomLevel.m_indexBuffer = m_vertexBuffer;
+		inf.m_bottomLevel.m_indexBufferOffset = m_indexBufferOffset;
 		inf.m_bottomLevel.m_indexCount = m_indexCount;
 		inf.m_bottomLevel.m_indexCount = m_indexCount;
 		inf.m_bottomLevel.m_indexType = m_indexType;
 		inf.m_bottomLevel.m_indexType = m_indexType;
 
 
@@ -210,6 +222,8 @@ Error MeshResource::load(const ResourceFilename& filename, Bool async)
 	// Fill the GPU descriptor
 	// Fill the GPU descriptor
 	if(rayTracingEnabled)
 	if(rayTracingEnabled)
 	{
 	{
+		m_meshGpuDescriptor.m_indexBufferPtr = m_vertexBuffer->getGpuAddress() + m_indexBufferOffset;
+
 		U32 bufferIdx;
 		U32 bufferIdx;
 		Format format;
 		Format format;
 		U32 relativeOffset;
 		U32 relativeOffset;
@@ -218,18 +232,18 @@ Error MeshResource::load(const ResourceFilename& filename, Bool async)
 		PtrSize offset;
 		PtrSize offset;
 		PtrSize stride;
 		PtrSize stride;
 		getVertexBufferInfo(bufferIdx, buffer, offset, stride);
 		getVertexBufferInfo(bufferIdx, buffer, offset, stride);
-		m_meshGpuDescriptor.m_indexBufferPtr = m_indexBuffer->getGpuAddress();
-		m_meshGpuDescriptor.m_vertexBufferPtrs[VertexAttributeBufferId::POSITION] = buffer->getGpuAddress();
+		m_meshGpuDescriptor.m_vertexBufferPtrs[VertexAttributeBufferId::POSITION] = buffer->getGpuAddress() + offset;
 
 
 		getVertexAttributeInfo(VertexAttributeId::NORMAL, bufferIdx, format, relativeOffset);
 		getVertexAttributeInfo(VertexAttributeId::NORMAL, bufferIdx, format, relativeOffset);
 		getVertexBufferInfo(bufferIdx, buffer, offset, stride);
 		getVertexBufferInfo(bufferIdx, buffer, offset, stride);
-		m_meshGpuDescriptor.m_vertexBufferPtrs[VertexAttributeBufferId::NORMAL_TANGENT_UV0] = buffer->getGpuAddress();
+		m_meshGpuDescriptor.m_vertexBufferPtrs[VertexAttributeBufferId::NORMAL_TANGENT_UV0] =
+			buffer->getGpuAddress() + offset;
 
 
 		if(hasBoneWeights())
 		if(hasBoneWeights())
 		{
 		{
 			getVertexAttributeInfo(VertexAttributeId::BONE_WEIGHTS, bufferIdx, format, relativeOffset);
 			getVertexAttributeInfo(VertexAttributeId::BONE_WEIGHTS, bufferIdx, format, relativeOffset);
 			getVertexBufferInfo(bufferIdx, buffer, offset, stride);
 			getVertexBufferInfo(bufferIdx, buffer, offset, stride);
-			m_meshGpuDescriptor.m_vertexBufferPtrs[VertexAttributeBufferId::BONE] = buffer->getGpuAddress();
+			m_meshGpuDescriptor.m_vertexBufferPtrs[VertexAttributeBufferId::BONE] = buffer->getGpuAddress() + offset;
 		}
 		}
 
 
 		m_meshGpuDescriptor.m_indexCount = m_indexCount;
 		m_meshGpuDescriptor.m_indexCount = m_indexCount;
@@ -266,23 +280,24 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 	// Set barriers
 	// Set barriers
 	cmdb->setBufferBarrier(m_vertexBuffer, BufferUsageBit::VERTEX, BufferUsageBit::TRANSFER_DESTINATION, 0,
 	cmdb->setBufferBarrier(m_vertexBuffer, BufferUsageBit::VERTEX, BufferUsageBit::TRANSFER_DESTINATION, 0,
 						   MAX_PTR_SIZE);
 						   MAX_PTR_SIZE);
-	cmdb->setBufferBarrier(m_indexBuffer, BufferUsageBit::INDEX, BufferUsageBit::TRANSFER_DESTINATION, 0, MAX_PTR_SIZE);
 
 
 	// Write index buffer
 	// Write index buffer
 	{
 	{
-		ANKI_CHECK(transferAlloc.allocate(m_indexBuffer->getSize(), handles[1]));
+		const PtrSize indexBufferSize = PtrSize(m_indexCount) * ((m_indexType == IndexType::U32) ? 4 : 2);
+
+		ANKI_CHECK(transferAlloc.allocate(indexBufferSize, handles[1]));
 		void* data = handles[1].getMappedMemory();
 		void* data = handles[1].getMappedMemory();
 		ANKI_ASSERT(data);
 		ANKI_ASSERT(data);
 
 
-		ANKI_CHECK(loader.storeIndexBuffer(data, m_indexBuffer->getSize()));
+		ANKI_CHECK(loader.storeIndexBuffer(data, indexBufferSize));
 
 
-		cmdb->copyBufferToBuffer(handles[1].getBuffer(), handles[1].getOffset(), m_indexBuffer, 0,
+		cmdb->copyBufferToBuffer(handles[1].getBuffer(), handles[1].getOffset(), m_vertexBuffer, m_indexBufferOffset,
 								 handles[1].getRange());
 								 handles[1].getRange());
 	}
 	}
 
 
 	// Write vert buff
 	// Write vert buff
 	{
 	{
-		ANKI_CHECK(transferAlloc.allocate(m_vertexBuffer->getSize(), handles[0]));
+		ANKI_CHECK(transferAlloc.allocate(m_vertexBuffersSize, handles[0]));
 		U8* data = static_cast<U8*>(handles[0].getMappedMemory());
 		U8* data = static_cast<U8*>(handles[0].getMappedMemory());
 		ANKI_ASSERT(data);
 		ANKI_ASSERT(data);
 
 
@@ -297,10 +312,10 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 			offset += PtrSize(m_vertexBufferInfos[i].m_stride) * m_vertexCount;
 			offset += PtrSize(m_vertexBufferInfos[i].m_stride) * m_vertexCount;
 		}
 		}
 
 
-		ANKI_ASSERT(offset == m_vertexBuffer->getSize());
+		ANKI_ASSERT(offset == m_vertexBuffersSize);
 
 
 		// Copy
 		// Copy
-		cmdb->copyBufferToBuffer(handles[0].getBuffer(), handles[0].getOffset(), m_vertexBuffer, 0,
+		cmdb->copyBufferToBuffer(handles[0].getBuffer(), handles[0].getOffset(), m_vertexBuffer, m_vertexBuffersOffset,
 								 handles[0].getRange());
 								 handles[0].getRange());
 	}
 	}
 
 
@@ -308,9 +323,9 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 	if(gr.getDeviceCapabilities().m_rayTracingEnabled)
 	if(gr.getDeviceCapabilities().m_rayTracingEnabled)
 	{
 	{
 		cmdb->setBufferBarrier(m_vertexBuffer, BufferUsageBit::TRANSFER_DESTINATION,
 		cmdb->setBufferBarrier(m_vertexBuffer, BufferUsageBit::TRANSFER_DESTINATION,
-							   BufferUsageBit::ACCELERATION_STRUCTURE_BUILD | BufferUsageBit::VERTEX, 0, MAX_PTR_SIZE);
-		cmdb->setBufferBarrier(m_indexBuffer, BufferUsageBit::TRANSFER_DESTINATION,
-							   BufferUsageBit::ACCELERATION_STRUCTURE_BUILD | BufferUsageBit::INDEX, 0, MAX_PTR_SIZE);
+							   BufferUsageBit::ACCELERATION_STRUCTURE_BUILD | BufferUsageBit::VERTEX
+								   | BufferUsageBit::INDEX,
+							   0, MAX_PTR_SIZE);
 
 
 		cmdb->setAccelerationStructureBarrier(m_blas, AccelerationStructureUsageBit::NONE,
 		cmdb->setAccelerationStructureBarrier(m_blas, AccelerationStructureUsageBit::NONE,
 											  AccelerationStructureUsageBit::BUILD);
 											  AccelerationStructureUsageBit::BUILD);
@@ -322,10 +337,8 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 	}
 	}
 	else
 	else
 	{
 	{
-		cmdb->setBufferBarrier(m_vertexBuffer, BufferUsageBit::TRANSFER_DESTINATION, BufferUsageBit::VERTEX, 0,
-							   MAX_PTR_SIZE);
-		cmdb->setBufferBarrier(m_indexBuffer, BufferUsageBit::TRANSFER_DESTINATION, BufferUsageBit::INDEX, 0,
-							   MAX_PTR_SIZE);
+		cmdb->setBufferBarrier(m_vertexBuffer, BufferUsageBit::TRANSFER_DESTINATION,
+							   BufferUsageBit::VERTEX | BufferUsageBit::INDEX, 0, MAX_PTR_SIZE);
 	}
 	}
 
 
 	// Finalize
 	// Finalize

+ 13 - 8
AnKi/Resource/MeshResource.h

@@ -57,8 +57,8 @@ public:
 	/// Get all info around vertex indices.
 	/// Get all info around vertex indices.
 	void getIndexBufferInfo(BufferPtr& buff, PtrSize& buffOffset, U32& indexCount, IndexType& indexType) const
 	void getIndexBufferInfo(BufferPtr& buff, PtrSize& buffOffset, U32& indexCount, IndexType& indexType) const
 	{
 	{
-		buff = m_indexBuffer;
-		buffOffset = 0;
+		buff = m_vertexBuffer;
+		buffOffset = m_indexBufferOffset;
 		indexCount = m_indexCount;
 		indexCount = m_indexCount;
 		indexType = m_indexType;
 		indexType = m_indexType;
 	}
 	}
@@ -113,7 +113,7 @@ public:
 	/// Get the buffer that contains all the indices of all submeshes.
 	/// Get the buffer that contains all the indices of all submeshes.
 	BufferPtr getIndexBuffer() const
 	BufferPtr getIndexBuffer() const
 	{
 	{
-		return m_indexBuffer;
+		return m_vertexBuffer;
 	}
 	}
 
 
 	/// Get the buffer that contains all the vertices of all submeshes.
 	/// Get the buffer that contains all the vertices of all submeshes.
@@ -137,7 +137,7 @@ private:
 	class VertBuffInfo
 	class VertBuffInfo
 	{
 	{
 	public:
 	public:
-		PtrSize m_offset; ///< Offset from the base of m_vertBuff.
+		PtrSize m_offset; ///< Offset from the base of m_vertexBuffer.
 		U32 m_stride;
 		U32 m_stride;
 	};
 	};
 
 
@@ -153,13 +153,18 @@ private:
 	DynamicArray<VertBuffInfo> m_vertexBufferInfos;
 	DynamicArray<VertBuffInfo> m_vertexBufferInfos;
 	Array<AttribInfo, U(VertexAttributeId::COUNT)> m_attributes;
 	Array<AttribInfo, U(VertexAttributeId::COUNT)> m_attributes;
 
 
-	BufferPtr m_indexBuffer;
-	BufferPtr m_vertexBuffer;
-	U32 m_indexCount = 0;
+	BufferPtr m_vertexBuffer; ///< Contains all data (vertices and indices).
+
+	PtrSize m_vertexBuffersOffset = MAX_PTR_SIZE; ///< Used for deallocation.
+	PtrSize m_vertexBuffersSize = 0; ///< Used for deallocation.
 	U32 m_vertexCount = 0;
 	U32 m_vertexCount = 0;
-	Aabb m_aabb;
+
+	PtrSize m_indexBufferOffset = MAX_PTR_SIZE; ///< The offset from the base of m_vertexBuffer.
+	U32 m_indexCount = 0; ///< Total index count as if all submeshes are a single submesh.
 	IndexType m_indexType;
 	IndexType m_indexType;
 
 
+	Aabb m_aabb;
+
 	// RT
 	// RT
 	AccelerationStructurePtr m_blas;
 	AccelerationStructurePtr m_blas;
 	MeshGpuDescriptor m_meshGpuDescriptor;
 	MeshGpuDescriptor m_meshGpuDescriptor;

+ 4 - 4
AnKi/Resource/ModelResource.cpp

@@ -82,7 +82,7 @@ void ModelPatch::getRenderingInfo(const RenderingKey& key, ModelRenderingInfo& i
 
 
 	// Index buff
 	// Index buff
 	inf.m_indexBuffer = m_indexBufferInfos[meshLod].m_buffer;
 	inf.m_indexBuffer = m_indexBufferInfos[meshLod].m_buffer;
-	inf.m_indexBufferOffset = 0;
+	inf.m_indexBufferOffset = m_indexBufferInfos[meshLod].m_offset;
 	inf.m_indexCount = m_indexBufferInfos[meshLod].m_indexCount;
 	inf.m_indexCount = m_indexBufferInfos[meshLod].m_indexCount;
 	inf.m_firstIndex = m_indexBufferInfos[meshLod].m_firstIndex;
 	inf.m_firstIndex = m_indexBufferInfos[meshLod].m_firstIndex;
 	inf.m_indexType = m_indexType;
 	inf.m_indexType = m_indexType;
@@ -236,9 +236,9 @@ Error ModelPatch::init(ModelResource* model, ConstWeakArray<CString> meshFNames,
 				PtrSize offset;
 				PtrSize offset;
 				mesh.getIndexBufferInfo(outIndexBufferInfo.m_buffer, offset, outIndexBufferInfo.m_indexCount,
 				mesh.getIndexBufferInfo(outIndexBufferInfo.m_buffer, offset, outIndexBufferInfo.m_indexCount,
 										indexType);
 										indexType);
-				ANKI_ASSERT(offset == 0);
-				m_indexType = indexType;
+				outIndexBufferInfo.m_offset = offset;
 				outIndexBufferInfo.m_firstIndex = 0;
 				outIndexBufferInfo.m_firstIndex = 0;
+				m_indexType = indexType;
 			}
 			}
 			else
 			else
 			{
 			{
@@ -246,7 +246,7 @@ Error ModelPatch::init(ModelResource* model, ConstWeakArray<CString> meshFNames,
 				PtrSize offset;
 				PtrSize offset;
 				mesh.getIndexBufferInfo(outIndexBufferInfo.m_buffer, offset, outIndexBufferInfo.m_indexCount,
 				mesh.getIndexBufferInfo(outIndexBufferInfo.m_buffer, offset, outIndexBufferInfo.m_indexCount,
 										indexType);
 										indexType);
-				ANKI_ASSERT(offset == 0);
+				outIndexBufferInfo.m_offset = offset;
 				m_indexType = indexType;
 				m_indexType = indexType;
 
 
 				Aabb aabb;
 				Aabb aabb;

+ 1 - 0
AnKi/Resource/ModelResource.h

@@ -157,6 +157,7 @@ private:
 	{
 	{
 	public:
 	public:
 		BufferPtr m_buffer;
 		BufferPtr m_buffer;
+		PtrSize m_offset;
 		U32 m_firstIndex = MAX_U32;
 		U32 m_firstIndex = MAX_U32;
 		U32 m_indexCount = MAX_U32;
 		U32 m_indexCount = MAX_U32;
 	};
 	};

+ 1 - 0
AnKi/Resource/ResourceManager.cpp

@@ -42,6 +42,7 @@ Error ResourceManager::init(ResourceManagerInitInfo& init)
 	m_gr = init.m_gr;
 	m_gr = init.m_gr;
 	m_physics = init.m_physics;
 	m_physics = init.m_physics;
 	m_fs = init.m_resourceFs;
 	m_fs = init.m_resourceFs;
+	m_vertexMem = init.m_vertexMemory;
 	m_alloc = ResourceAllocator<U8>(init.m_allocCallback, init.m_allocCallbackData);
 	m_alloc = ResourceAllocator<U8>(init.m_allocCallback, init.m_allocCallbackData);
 
 
 	m_tmpAlloc = TempResourceAllocator<U8>(init.m_allocCallback, init.m_allocCallbackData, 10_MB);
 	m_tmpAlloc = TempResourceAllocator<U8>(init.m_allocCallback, init.m_allocCallbackData, 10_MB);

+ 9 - 0
AnKi/Resource/ResourceManager.h

@@ -21,6 +21,7 @@ class AsyncLoader;
 class ResourceManagerModel;
 class ResourceManagerModel;
 class ShaderCompilerCache;
 class ShaderCompilerCache;
 class ShaderProgramResourceSystem;
 class ShaderProgramResourceSystem;
+class VertexGpuMemoryPool;
 
 
 /// @addtogroup resource
 /// @addtogroup resource
 /// @{
 /// @{
@@ -94,6 +95,7 @@ public:
 	ResourceFilesystem* m_resourceFs = nullptr;
 	ResourceFilesystem* m_resourceFs = nullptr;
 	const ConfigSet* m_config = nullptr;
 	const ConfigSet* m_config = nullptr;
 	CString m_cacheDir;
 	CString m_cacheDir;
+	VertexGpuMemoryPool* m_vertexMemory = nullptr;
 	AllocAlignedCallback m_allocCallback = 0;
 	AllocAlignedCallback m_allocCallback = 0;
 	void* m_allocCallbackData = nullptr;
 	void* m_allocCallbackData = nullptr;
 };
 };
@@ -214,6 +216,12 @@ public:
 		return *m_shaderProgramSystem;
 		return *m_shaderProgramSystem;
 	}
 	}
 
 
+	VertexGpuMemoryPool& getVertexGpuMemory()
+	{
+		ANKI_ASSERT(m_vertexMem);
+		return *m_vertexMem;
+	}
+
 private:
 private:
 	GrManager* m_gr = nullptr;
 	GrManager* m_gr = nullptr;
 	PhysicsWorld* m_physics = nullptr;
 	PhysicsWorld* m_physics = nullptr;
@@ -224,6 +232,7 @@ private:
 	U32 m_maxImageSize;
 	U32 m_maxImageSize;
 	AsyncLoader* m_asyncLoader = nullptr; ///< Async loading thread
 	AsyncLoader* m_asyncLoader = nullptr; ///< Async loading thread
 	ShaderProgramResourceSystem* m_shaderProgramSystem = nullptr;
 	ShaderProgramResourceSystem* m_shaderProgramSystem = nullptr;
+	VertexGpuMemoryPool* m_vertexMem = nullptr;
 	U64 m_uuid = 0;
 	U64 m_uuid = 0;
 	U64 m_loadRequestCount = 0;
 	U64 m_loadRequestCount = 0;
 	TransferGpuAllocator* m_transferGpuAlloc = nullptr;
 	TransferGpuAllocator* m_transferGpuAlloc = nullptr;

+ 1 - 1
AnKi/Scene/Components/RenderComponent.cpp

@@ -15,7 +15,7 @@ ANKI_SCENE_COMPONENT_STATICS(RenderComponent)
 
 
 void RenderComponent::allocateAndSetupUniforms(const MaterialResourcePtr& mtl, const RenderQueueDrawContext& ctx,
 void RenderComponent::allocateAndSetupUniforms(const MaterialResourcePtr& mtl, const RenderQueueDrawContext& ctx,
 											   ConstWeakArray<Mat4> transforms, ConstWeakArray<Mat4> prevTransforms,
 											   ConstWeakArray<Mat4> transforms, ConstWeakArray<Mat4> prevTransforms,
-											   StagingGpuMemoryManager& alloc)
+											   StagingGpuMemoryPool& alloc)
 {
 {
 	ANKI_ASSERT(transforms.getSize() <= MAX_INSTANCE_COUNT);
 	ANKI_ASSERT(transforms.getSize() <= MAX_INSTANCE_COUNT);
 	ANKI_ASSERT(prevTransforms.getSize() == transforms.getSize());
 	ANKI_ASSERT(prevTransforms.getSize() == transforms.getSize());

+ 2 - 2
AnKi/Scene/Components/RenderComponent.h

@@ -7,7 +7,7 @@
 
 
 #include <AnKi/Scene/Components/SceneComponent.h>
 #include <AnKi/Scene/Components/SceneComponent.h>
 #include <AnKi/Resource/MaterialResource.h>
 #include <AnKi/Resource/MaterialResource.h>
-#include <AnKi/Core/GpuMemoryManager.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 #include <AnKi/Renderer/RenderQueue.h>
 #include <AnKi/Renderer/RenderQueue.h>
 
 
 namespace anki {
 namespace anki {
@@ -103,7 +103,7 @@ public:
 	/// Helper function.
 	/// Helper function.
 	static void allocateAndSetupUniforms(const MaterialResourcePtr& mtl, const RenderQueueDrawContext& ctx,
 	static void allocateAndSetupUniforms(const MaterialResourcePtr& mtl, const RenderQueueDrawContext& ctx,
 										 ConstWeakArray<Mat4> transforms, ConstWeakArray<Mat4> prevTransforms,
 										 ConstWeakArray<Mat4> transforms, ConstWeakArray<Mat4> prevTransforms,
-										 StagingGpuMemoryManager& alloc);
+										 StagingGpuMemoryPool& alloc);
 
 
 private:
 private:
 	RenderQueueDrawCallback m_callback = nullptr;
 	RenderQueueDrawCallback m_callback = nullptr;

+ 5 - 6
AnKi/Scene/DebugDrawer.cpp

@@ -6,14 +6,14 @@
 #include <AnKi/Scene/DebugDrawer.h>
 #include <AnKi/Scene/DebugDrawer.h>
 #include <AnKi/Resource/ResourceManager.h>
 #include <AnKi/Resource/ResourceManager.h>
 #include <AnKi/Renderer/RenderQueue.h>
 #include <AnKi/Renderer/RenderQueue.h>
-#include <AnKi/Core/GpuMemoryManager.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 #include <AnKi/Physics/PhysicsWorld.h>
 #include <AnKi/Physics/PhysicsWorld.h>
 #include <AnKi/Gr/Buffer.h>
 #include <AnKi/Gr/Buffer.h>
 #include <AnKi/Collision.h>
 #include <AnKi/Collision.h>
 
 
 namespace anki {
 namespace anki {
 
 
-void allocateAndPopulateDebugBox(StagingGpuMemoryManager& stagingGpuAllocator, StagingGpuMemoryToken& vertsToken,
+void allocateAndPopulateDebugBox(StagingGpuMemoryPool& stagingGpuAllocator, StagingGpuMemoryToken& vertsToken,
 								 StagingGpuMemoryToken& indicesToken, U32& indexCount)
 								 StagingGpuMemoryToken& indicesToken, U32& indexCount)
 {
 {
 	Vec3* verts = static_cast<Vec3*>(
 	Vec3* verts = static_cast<Vec3*>(
@@ -139,8 +139,7 @@ Error DebugDrawer2::init(ResourceManager* rsrcManager)
 }
 }
 
 
 void DebugDrawer2::drawCubes(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 lineSize, Bool ditherFailedDepth,
 void DebugDrawer2::drawCubes(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 lineSize, Bool ditherFailedDepth,
-							 F32 cubeSideSize, StagingGpuMemoryManager& stagingGpuAllocator,
-							 CommandBufferPtr& cmdb) const
+							 F32 cubeSideSize, StagingGpuMemoryPool& stagingGpuAllocator, CommandBufferPtr& cmdb) const
 {
 {
 	// Set the uniforms
 	// Set the uniforms
 	StagingGpuMemoryToken unisToken;
 	StagingGpuMemoryToken unisToken;
@@ -180,7 +179,7 @@ void DebugDrawer2::drawCubes(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 l
 }
 }
 
 
 void DebugDrawer2::drawLines(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 lineSize, Bool ditherFailedDepth,
 void DebugDrawer2::drawLines(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 lineSize, Bool ditherFailedDepth,
-							 ConstWeakArray<Vec3> linePositions, StagingGpuMemoryManager& stagingGpuAllocator,
+							 ConstWeakArray<Vec3> linePositions, StagingGpuMemoryPool& stagingGpuAllocator,
 							 CommandBufferPtr& cmdb) const
 							 CommandBufferPtr& cmdb) const
 {
 {
 	ANKI_ASSERT(mvps.getSize() > 0);
 	ANKI_ASSERT(mvps.getSize() > 0);
@@ -222,7 +221,7 @@ void DebugDrawer2::drawLines(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 l
 void DebugDrawer2::drawBillboardTextures(const Mat4& projMat, const Mat4& viewMat, ConstWeakArray<Vec3> positions,
 void DebugDrawer2::drawBillboardTextures(const Mat4& projMat, const Mat4& viewMat, ConstWeakArray<Vec3> positions,
 										 const Vec4& color, Bool ditherFailedDepth, TextureViewPtr tex,
 										 const Vec4& color, Bool ditherFailedDepth, TextureViewPtr tex,
 										 SamplerPtr sampler, Vec2 billboardSize,
 										 SamplerPtr sampler, Vec2 billboardSize,
-										 StagingGpuMemoryManager& stagingGpuAllocator, CommandBufferPtr& cmdb) const
+										 StagingGpuMemoryPool& stagingGpuAllocator, CommandBufferPtr& cmdb) const
 {
 {
 	StagingGpuMemoryToken positionsToken;
 	StagingGpuMemoryToken positionsToken;
 	Vec3* verts = static_cast<Vec3*>(
 	Vec3* verts = static_cast<Vec3*>(

+ 10 - 10
AnKi/Scene/DebugDrawer.h

@@ -16,14 +16,14 @@ namespace anki {
 
 
 // Forward
 // Forward
 class RenderQueueDrawContext;
 class RenderQueueDrawContext;
-class StagingGpuMemoryManager;
+class StagingGpuMemoryPool;
 class StagingGpuMemoryToken;
 class StagingGpuMemoryToken;
 
 
 /// @addtogroup renderer
 /// @addtogroup renderer
 /// @{
 /// @{
 
 
 /// Allocate memory for a line cube and populate it.
 /// Allocate memory for a line cube and populate it.
-void allocateAndPopulateDebugBox(StagingGpuMemoryManager& stagingGpuAllocator, StagingGpuMemoryToken& vertsToken,
+void allocateAndPopulateDebugBox(StagingGpuMemoryPool& stagingGpuAllocator, StagingGpuMemoryToken& vertsToken,
 								 StagingGpuMemoryToken& indicesToken, U32& indexCount);
 								 StagingGpuMemoryToken& indicesToken, U32& indexCount);
 
 
 /// Debug drawer.
 /// Debug drawer.
@@ -38,21 +38,21 @@ public:
 	}
 	}
 
 
 	void drawCubes(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 lineSize, Bool ditherFailedDepth, F32 cubeSideSize,
 	void drawCubes(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 lineSize, Bool ditherFailedDepth, F32 cubeSideSize,
-				   StagingGpuMemoryManager& stagingGpuAllocator, CommandBufferPtr& cmdb) const;
+				   StagingGpuMemoryPool& stagingGpuAllocator, CommandBufferPtr& cmdb) const;
 
 
 	void drawCube(const Mat4& mvp, const Vec4& color, F32 lineSize, Bool ditherFailedDepth, F32 cubeSideSize,
 	void drawCube(const Mat4& mvp, const Vec4& color, F32 lineSize, Bool ditherFailedDepth, F32 cubeSideSize,
-				  StagingGpuMemoryManager& stagingGpuAllocator, CommandBufferPtr& cmdb) const
+				  StagingGpuMemoryPool& stagingGpuAllocator, CommandBufferPtr& cmdb) const
 	{
 	{
 		drawCubes(ConstWeakArray<Mat4>(&mvp, 1), color, lineSize, ditherFailedDepth, cubeSideSize, stagingGpuAllocator,
 		drawCubes(ConstWeakArray<Mat4>(&mvp, 1), color, lineSize, ditherFailedDepth, cubeSideSize, stagingGpuAllocator,
 				  cmdb);
 				  cmdb);
 	}
 	}
 
 
 	void drawLines(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 lineSize, Bool ditherFailedDepth,
 	void drawLines(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 lineSize, Bool ditherFailedDepth,
-				   ConstWeakArray<Vec3> linePositions, StagingGpuMemoryManager& stagingGpuAllocator,
+				   ConstWeakArray<Vec3> linePositions, StagingGpuMemoryPool& stagingGpuAllocator,
 				   CommandBufferPtr& cmdb) const;
 				   CommandBufferPtr& cmdb) const;
 
 
 	void drawLine(const Mat4& mvp, const Vec4& color, F32 lineSize, Bool ditherFailedDepth, const Vec3& a,
 	void drawLine(const Mat4& mvp, const Vec4& color, F32 lineSize, Bool ditherFailedDepth, const Vec3& a,
-				  const Vec3& b, StagingGpuMemoryManager& stagingGpuAllocator, CommandBufferPtr& cmdb) const
+				  const Vec3& b, StagingGpuMemoryPool& stagingGpuAllocator, CommandBufferPtr& cmdb) const
 	{
 	{
 		Array<Vec3, 2> points = {a, b};
 		Array<Vec3, 2> points = {a, b};
 		drawLines(ConstWeakArray<Mat4>(&mvp, 1), color, lineSize, ditherFailedDepth, points, stagingGpuAllocator, cmdb);
 		drawLines(ConstWeakArray<Mat4>(&mvp, 1), color, lineSize, ditherFailedDepth, points, stagingGpuAllocator, cmdb);
@@ -60,12 +60,12 @@ public:
 
 
 	void drawBillboardTextures(const Mat4& projMat, const Mat4& viewMat, ConstWeakArray<Vec3> positions,
 	void drawBillboardTextures(const Mat4& projMat, const Mat4& viewMat, ConstWeakArray<Vec3> positions,
 							   const Vec4& color, Bool ditherFailedDepth, TextureViewPtr tex, SamplerPtr sampler,
 							   const Vec4& color, Bool ditherFailedDepth, TextureViewPtr tex, SamplerPtr sampler,
-							   Vec2 billboardSize, StagingGpuMemoryManager& stagingGpuAllocator,
+							   Vec2 billboardSize, StagingGpuMemoryPool& stagingGpuAllocator,
 							   CommandBufferPtr& cmdb) const;
 							   CommandBufferPtr& cmdb) const;
 
 
 	void drawBillboardTexture(const Mat4& projMat, const Mat4& viewMat, Vec3 position, const Vec4& color,
 	void drawBillboardTexture(const Mat4& projMat, const Mat4& viewMat, Vec3 position, const Vec4& color,
 							  Bool ditherFailedDepth, TextureViewPtr tex, SamplerPtr sampler, Vec2 billboardSize,
 							  Bool ditherFailedDepth, TextureViewPtr tex, SamplerPtr sampler, Vec2 billboardSize,
-							  StagingGpuMemoryManager& stagingGpuAllocator, CommandBufferPtr& cmdb) const
+							  StagingGpuMemoryPool& stagingGpuAllocator, CommandBufferPtr& cmdb) const
 	{
 	{
 		drawBillboardTextures(projMat, viewMat, ConstWeakArray<Vec3>(&position, 1), color, ditherFailedDepth, tex,
 		drawBillboardTextures(projMat, viewMat, ConstWeakArray<Vec3>(&position, 1), color, ditherFailedDepth, tex,
 							  sampler, billboardSize, stagingGpuAllocator, cmdb);
 							  sampler, billboardSize, stagingGpuAllocator, cmdb);
@@ -86,7 +86,7 @@ public:
 	{
 	{
 	}
 	}
 
 
-	void start(const Mat4& mvp, CommandBufferPtr& cmdb, StagingGpuMemoryManager* stagingGpuAllocator)
+	void start(const Mat4& mvp, CommandBufferPtr& cmdb, StagingGpuMemoryPool* stagingGpuAllocator)
 	{
 	{
 		ANKI_ASSERT(stagingGpuAllocator);
 		ANKI_ASSERT(stagingGpuAllocator);
 		ANKI_ASSERT(m_vertCount == 0);
 		ANKI_ASSERT(m_vertCount == 0);
@@ -108,7 +108,7 @@ private:
 	const DebugDrawer2* m_dbg; ///< The debug drawer
 	const DebugDrawer2* m_dbg; ///< The debug drawer
 	Mat4 m_mvp = Mat4::getIdentity();
 	Mat4 m_mvp = Mat4::getIdentity();
 	CommandBufferPtr m_cmdb;
 	CommandBufferPtr m_cmdb;
-	StagingGpuMemoryManager* m_stagingGpuAllocator = nullptr;
+	StagingGpuMemoryPool* m_stagingGpuAllocator = nullptr;
 
 
 	// Use a vertex cache because drawLines() is practically called for every line
 	// Use a vertex cache because drawLines() is practically called for every line
 	Array<Vec3, 32> m_vertCache;
 	Array<Vec3, 32> m_vertCache;

+ 5 - 5
AnKi/Ui/Canvas.cpp

@@ -7,7 +7,7 @@
 #include <AnKi/Ui/Font.h>
 #include <AnKi/Ui/Font.h>
 #include <AnKi/Ui/UiManager.h>
 #include <AnKi/Ui/UiManager.h>
 #include <AnKi/Resource/ResourceManager.h>
 #include <AnKi/Resource/ResourceManager.h>
-#include <AnKi/Core/GpuMemoryManager.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 #include <AnKi/Input/Input.h>
 #include <AnKi/Input/Input.h>
 #include <AnKi/Gr/Sampler.h>
 #include <AnKi/Gr/Sampler.h>
 #include <AnKi/Gr/GrManager.h>
 #include <AnKi/Gr/GrManager.h>
@@ -236,10 +236,10 @@ void Canvas::appendToCommandBufferInternal(CommandBufferPtr& cmdb)
 			return;
 			return;
 		}
 		}
 
 
-		ImDrawVert* verts = static_cast<ImDrawVert*>(m_manager->getStagingGpuMemoryManager().allocateFrame(
-			verticesSize, StagingGpuMemoryType::VERTEX, vertsToken));
-		ImDrawIdx* indices = static_cast<ImDrawIdx*>(m_manager->getStagingGpuMemoryManager().allocateFrame(
-			indicesSize, StagingGpuMemoryType::VERTEX, indicesToken));
+		ImDrawVert* verts = static_cast<ImDrawVert*>(
+			m_manager->getStagingGpuMemory().allocateFrame(verticesSize, StagingGpuMemoryType::VERTEX, vertsToken));
+		ImDrawIdx* indices = static_cast<ImDrawIdx*>(
+			m_manager->getStagingGpuMemory().allocateFrame(indicesSize, StagingGpuMemoryType::VERTEX, indicesToken));
 
 
 		for(I n = 0; n < drawData.CmdListsCount; ++n)
 		for(I n = 0; n < drawData.CmdListsCount; ++n)
 		{
 		{

+ 1 - 1
AnKi/Ui/UiManager.cpp

@@ -18,7 +18,7 @@ UiManager::~UiManager()
 }
 }
 
 
 Error UiManager::init(AllocAlignedCallback allocCallback, void* allocCallbackUserData, ResourceManager* resources,
 Error UiManager::init(AllocAlignedCallback allocCallback, void* allocCallbackUserData, ResourceManager* resources,
-					  GrManager* gr, StagingGpuMemoryManager* gpuMem, Input* input)
+					  GrManager* gr, StagingGpuMemoryPool* gpuMem, Input* input)
 {
 {
 	ANKI_ASSERT(resources);
 	ANKI_ASSERT(resources);
 	ANKI_ASSERT(gr);
 	ANKI_ASSERT(gr);

+ 4 - 5
AnKi/Ui/UiManager.h

@@ -12,7 +12,7 @@ namespace anki {
 // Forward
 // Forward
 class ResourceManager;
 class ResourceManager;
 class GrManager;
 class GrManager;
-class StagingGpuMemoryManager;
+class StagingGpuMemoryPool;
 class Input;
 class Input;
 
 
 /// @addtogroup ui
 /// @addtogroup ui
@@ -27,8 +27,7 @@ public:
 	~UiManager();
 	~UiManager();
 
 
 	ANKI_USE_RESULT Error init(AllocAlignedCallback allocCallback, void* allocCallbackUserData,
 	ANKI_USE_RESULT Error init(AllocAlignedCallback allocCallback, void* allocCallbackUserData,
-							   ResourceManager* resources, GrManager* gr, StagingGpuMemoryManager* gpuMem,
-							   Input* input);
+							   ResourceManager* resources, GrManager* gr, StagingGpuMemoryPool* gpuMem, Input* input);
 
 
 	UiAllocator getAllocator() const
 	UiAllocator getAllocator() const
 	{
 	{
@@ -47,7 +46,7 @@ public:
 		return *m_gr;
 		return *m_gr;
 	}
 	}
 
 
-	StagingGpuMemoryManager& getStagingGpuMemoryManager()
+	StagingGpuMemoryPool& getStagingGpuMemory()
 	{
 	{
 		ANKI_ASSERT(m_gpuMem);
 		ANKI_ASSERT(m_gpuMem);
 		return *m_gpuMem;
 		return *m_gpuMem;
@@ -80,7 +79,7 @@ private:
 	UiAllocator m_alloc;
 	UiAllocator m_alloc;
 	ResourceManager* m_resources = nullptr;
 	ResourceManager* m_resources = nullptr;
 	GrManager* m_gr = nullptr;
 	GrManager* m_gr = nullptr;
-	StagingGpuMemoryManager* m_gpuMem = nullptr;
+	StagingGpuMemoryPool* m_gpuMem = nullptr;
 	Input* m_input = nullptr;
 	Input* m_input = nullptr;
 };
 };
 /// @}
 /// @}

+ 8 - 9
AnKi/Util/BuddyAllocatorBuilder.h

@@ -14,7 +14,8 @@ namespace anki {
 
 
 /// This is a generic implementation of a buddy allocator.
 /// This is a generic implementation of a buddy allocator.
 /// @tparam T_MAX_MEMORY_RANGE_LOG2 The max memory to allocate.
 /// @tparam T_MAX_MEMORY_RANGE_LOG2 The max memory to allocate.
-template<U32 T_MAX_MEMORY_RANGE_LOG2 = 32>
+/// @tparam TLock This is an optional lock. Can be a Mutex or SpinLock or some dummy class.
+template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
 class BuddyAllocatorBuilder
 class BuddyAllocatorBuilder
 {
 {
 public:
 public:
@@ -50,7 +51,7 @@ public:
 
 
 	/// Allocate memory.
 	/// Allocate memory.
 	/// @param size The size of the allocation.
 	/// @param size The size of the allocation.
-	/// @param[out] address The returned address if the allocation didn't fail.
+	/// @param[out] address The returned address if the allocation didn't fail. It will stay untouched if it failed.
 	/// @return True if the allocation succeeded.
 	/// @return True if the allocation succeeded.
 	ANKI_USE_RESULT Bool allocate(PtrSize size, Address& address);
 	ANKI_USE_RESULT Bool allocate(PtrSize size, Address& address);
 
 
@@ -63,11 +64,8 @@ public:
 	void debugPrint() const;
 	void debugPrint() const;
 
 
 	/// Get some info.
 	/// Get some info.
-	void getInfo(PtrSize& userAllocatedSize, PtrSize& realAllocatedSize) const
-	{
-		userAllocatedSize = m_userAllocatedSize;
-		realAllocatedSize = m_realAllocatedSize;
-	}
+	void getInfo(PtrSize& userAllocatedSize, PtrSize& realAllocatedSize, F64& externalFragmentation,
+				 F64& internalFragmentation) const;
 
 
 private:
 private:
 	template<typename T>
 	template<typename T>
@@ -86,8 +84,9 @@ private:
 	GenericMemoryPoolAllocator<U8> m_alloc;
 	GenericMemoryPoolAllocator<U8> m_alloc;
 	DynamicArray<FreeList> m_freeLists;
 	DynamicArray<FreeList> m_freeLists;
 	PtrSize m_maxMemoryRange = 0;
 	PtrSize m_maxMemoryRange = 0;
-	PtrSize m_userAllocatedSize = 0;
-	PtrSize m_realAllocatedSize = 0;
+	PtrSize m_userAllocatedSize = 0; ///< The total amount of memory requested by the user.
+	PtrSize m_realAllocatedSize = 0; ///< The total amount of memory actually allocated.
+	mutable TLock m_mutex;
 
 
 	U32 orderCount() const
 	U32 orderCount() const
 	{
 	{

+ 52 - 12
AnKi/Util/BuddyAllocatorBuilder.inl.h

@@ -7,8 +7,9 @@
 
 
 namespace anki {
 namespace anki {
 
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::init(GenericMemoryPoolAllocator<U8> alloc, U32 maxMemoryRangeLog2)
+template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::init(GenericMemoryPoolAllocator<U8> alloc,
+																 U32 maxMemoryRangeLog2)
 {
 {
 	ANKI_ASSERT(maxMemoryRangeLog2 >= 1 && maxMemoryRangeLog2 <= T_MAX_MEMORY_RANGE_LOG2);
 	ANKI_ASSERT(maxMemoryRangeLog2 >= 1 && maxMemoryRangeLog2 <= T_MAX_MEMORY_RANGE_LOG2);
 	ANKI_ASSERT(m_freeLists.getSize() == 0 && m_userAllocatedSize == 0 && m_realAllocatedSize == 0);
 	ANKI_ASSERT(m_freeLists.getSize() == 0 && m_userAllocatedSize == 0 && m_realAllocatedSize == 0);
@@ -20,8 +21,8 @@ void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::init(GenericMemoryPoolAlloc
 	m_freeLists.create(m_alloc, orderCount);
 	m_freeLists.create(m_alloc, orderCount);
 }
 }
 
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::destroy()
+template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::destroy()
 {
 {
 	ANKI_ASSERT(m_userAllocatedSize == 0 && "Forgot to free all memory");
 	ANKI_ASSERT(m_userAllocatedSize == 0 && "Forgot to free all memory");
 	m_freeLists.destroy(m_alloc);
 	m_freeLists.destroy(m_alloc);
@@ -30,11 +31,13 @@ void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::destroy()
 	m_realAllocatedSize = 0;
 	m_realAllocatedSize = 0;
 }
 }
 
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2>
-Bool BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::allocate(PtrSize size, Address& outAddress)
+template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
+Bool BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::allocate(PtrSize size, Address& outAddress)
 {
 {
 	ANKI_ASSERT(size > 0 && size <= m_maxMemoryRange);
 	ANKI_ASSERT(size > 0 && size <= m_maxMemoryRange);
 
 
+	LockGuard<TLock> lock(m_mutex);
+
 	// Lazy initialize
 	// Lazy initialize
 	if(m_userAllocatedSize == 0)
 	if(m_userAllocatedSize == 0)
 	{
 	{
@@ -84,10 +87,13 @@ Bool BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::allocate(PtrSize size, Addr
 	return true;
 	return true;
 }
 }
 
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::free(Address address, PtrSize size)
+template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::free(Address address, PtrSize size)
 {
 {
 	const PtrSize alignedSize = nextPowerOfTwo(size);
 	const PtrSize alignedSize = nextPowerOfTwo(size);
+
+	LockGuard<TLock> lock(m_mutex);
+
 	freeInternal(address, alignedSize);
 	freeInternal(address, alignedSize);
 
 
 	ANKI_ASSERT(m_userAllocatedSize >= size);
 	ANKI_ASSERT(m_userAllocatedSize >= size);
@@ -106,9 +112,10 @@ void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::free(Address address, PtrSi
 	}
 	}
 }
 }
 
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::freeInternal(PtrSize address, PtrSize size)
+template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::freeInternal(PtrSize address, PtrSize size)
 {
 {
+	ANKI_ASSERT(size);
 	ANKI_ASSERT(isPowerOfTwo(size));
 	ANKI_ASSERT(isPowerOfTwo(size));
 	ANKI_ASSERT(address + size <= m_maxMemoryRange);
 	ANKI_ASSERT(address + size <= m_maxMemoryRange);
 
 
@@ -154,14 +161,16 @@ void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::freeInternal(PtrSize addres
 	}
 	}
 }
 }
 
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::debugPrint() const
+template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::debugPrint() const
 {
 {
 	constexpr PtrSize MAX_MEMORY_RANGE = pow2<PtrSize>(T_MAX_MEMORY_RANGE_LOG2);
 	constexpr PtrSize MAX_MEMORY_RANGE = pow2<PtrSize>(T_MAX_MEMORY_RANGE_LOG2);
 
 
 	// Allocate because we can't possibly have that in the stack
 	// Allocate because we can't possibly have that in the stack
 	BitSet<MAX_MEMORY_RANGE>* freeBytes = m_alloc.newInstance<BitSet<MAX_MEMORY_RANGE>>(false);
 	BitSet<MAX_MEMORY_RANGE>* freeBytes = m_alloc.newInstance<BitSet<MAX_MEMORY_RANGE>>(false);
 
 
+	LockGuard<TLock> lock(m_mutex);
+
 	for(I32 order = orderCount() - 1; order >= 0; --order)
 	for(I32 order = orderCount() - 1; order >= 0; --order)
 	{
 	{
 		const PtrSize orderSize = pow2<PtrSize>(order);
 		const PtrSize orderSize = pow2<PtrSize>(order);
@@ -186,4 +195,35 @@ void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2>::debugPrint() const
 	m_alloc.deleteInstance(freeBytes);
 	m_alloc.deleteInstance(freeBytes);
 }
 }
 
 
+template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
+void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::getInfo(PtrSize& userAllocatedSize,
+																	PtrSize& realAllocatedSize,
+																	F64& externalFragmentation,
+																	F64& internalFragmentation) const
+{
+	LockGuard<TLock> lock(m_mutex);
+
+	userAllocatedSize = m_userAllocatedSize;
+	realAllocatedSize = m_realAllocatedSize;
+
+	// Compute external fragmentation (Wikipedia has the definition)
+	U32 order = 0;
+	U32 orderWithTheBiggestBlock = MAX_U32;
+	for(const FreeList& list : m_freeLists)
+	{
+		if(list.getSize())
+		{
+			orderWithTheBiggestBlock = order;
+		}
+		++order;
+	}
+	const PtrSize biggestBlockSize =
+		(orderWithTheBiggestBlock == MAX_U32) ? m_maxMemoryRange : pow2<PtrSize>(orderWithTheBiggestBlock);
+	const PtrSize realFreeMemory = m_maxMemoryRange - m_realAllocatedSize;
+	externalFragmentation = 1.0 - F64(biggestBlockSize) / F64(realFreeMemory);
+
+	// Internal fragmentation
+	internalFragmentation = 1.0 - F64(m_userAllocatedSize) / F64(m_realAllocatedSize);
+}
+
 } // end namespace anki
 } // end namespace anki

+ 3 - 3
Tests/Gr/Gr.cpp

@@ -10,7 +10,7 @@
 #include <AnKi/Input/Input.h>
 #include <AnKi/Input/Input.h>
 #include <AnKi/Core/ConfigSet.h>
 #include <AnKi/Core/ConfigSet.h>
 #include <AnKi/Util/HighRezTimer.h>
 #include <AnKi/Util/HighRezTimer.h>
-#include <AnKi/Core/GpuMemoryManager.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 #include <AnKi/Resource/TransferGpuAllocator.h>
 #include <AnKi/Resource/TransferGpuAllocator.h>
 #include <AnKi/ShaderCompiler/Glslang.h>
 #include <AnKi/ShaderCompiler/Glslang.h>
 #include <AnKi/ShaderCompiler/ShaderProgramParser.h>
 #include <AnKi/ShaderCompiler/ShaderProgramParser.h>
@@ -251,11 +251,11 @@ void main()
 
 
 static NativeWindow* win = nullptr;
 static NativeWindow* win = nullptr;
 static GrManager* gr = nullptr;
 static GrManager* gr = nullptr;
-static StagingGpuMemoryManager* stagingMem = nullptr;
+static StagingGpuMemoryPool* stagingMem = nullptr;
 static Input* input = nullptr;
 static Input* input = nullptr;
 
 
 #define COMMON_BEGIN() \
 #define COMMON_BEGIN() \
-	stagingMem = new StagingGpuMemoryManager(); \
+	stagingMem = new StagingGpuMemoryPool(); \
 	ConfigSet cfg = DefaultConfigSet::get(); \
 	ConfigSet cfg = DefaultConfigSet::get(); \
 	cfg.set("width", WIDTH); \
 	cfg.set("width", WIDTH); \
 	cfg.set("height", HEIGHT); \
 	cfg.set("height", HEIGHT); \

+ 2 - 2
Tests/Ui/Ui.cpp

@@ -8,7 +8,7 @@
 #include <AnKi/Util/HighRezTimer.h>
 #include <AnKi/Util/HighRezTimer.h>
 #include <AnKi/Ui.h>
 #include <AnKi/Ui.h>
 #include <AnKi/Input.h>
 #include <AnKi/Input.h>
-#include <AnKi/Core/GpuMemoryManager.h>
+#include <AnKi/Core/GpuMemoryPools.h>
 
 
 namespace anki {
 namespace anki {
 
 
@@ -70,7 +70,7 @@ ANKI_TEST(Ui, Ui)
 	ResourceManager* resource = createResourceManager(cfg, gr, physics, fs);
 	ResourceManager* resource = createResourceManager(cfg, gr, physics, fs);
 	UiManager* ui = new UiManager();
 	UiManager* ui = new UiManager();
 
 
-	StagingGpuMemoryManager* stagingMem = new StagingGpuMemoryManager();
+	StagingGpuMemoryPool* stagingMem = new StagingGpuMemoryPool();
 	ANKI_TEST_EXPECT_NO_ERR(stagingMem->init(gr, cfg));
 	ANKI_TEST_EXPECT_NO_ERR(stagingMem->init(gr, cfg));
 
 
 	HeapAllocator<U8> alloc(allocAligned, nullptr);
 	HeapAllocator<U8> alloc(allocAligned, nullptr);

+ 12 - 4
Tests/Util/BuddyAllocatorBuilder.cpp

@@ -14,7 +14,7 @@ ANKI_TEST(Util, BuddyAllocatorBuilder)
 
 
 	// Simple
 	// Simple
 	{
 	{
-		BuddyAllocatorBuilder<4> buddy(alloc, 4);
+		BuddyAllocatorBuilder<4, Mutex> buddy(alloc, 4);
 
 
 		Array<U32, 2> addr;
 		Array<U32, 2> addr;
 		Bool success = buddy.allocate(1, addr[0]);
 		Bool success = buddy.allocate(1, addr[0]);
@@ -32,15 +32,15 @@ ANKI_TEST(Util, BuddyAllocatorBuilder)
 
 
 	// Fuzzy
 	// Fuzzy
 	{
 	{
-		BuddyAllocatorBuilder<32> buddy(alloc, 32);
+		BuddyAllocatorBuilder<32, Mutex> buddy(alloc, 32);
 		std::vector<std::pair<U32, U32>> allocations;
 		std::vector<std::pair<U32, U32>> allocations;
-		for(U32 it = 0; it < 1000; ++it)
+		for(U32 it = 0; it < 10000; ++it)
 		{
 		{
 			if((getRandom() % 2) == 0)
 			if((getRandom() % 2) == 0)
 			{
 			{
 				// Do an allocation
 				// Do an allocation
 				U32 addr;
 				U32 addr;
-				const U32 size = max<U32>(getRandom() % 512, 1);
+				const U32 size = max<U32>(getRandom() % 256_MB, 1);
 				const Bool success = buddy.allocate(size, addr);
 				const Bool success = buddy.allocate(size, addr);
 				if(success)
 				if(success)
 				{
 				{
@@ -60,6 +60,14 @@ ANKI_TEST(Util, BuddyAllocatorBuilder)
 			}
 			}
 		}
 		}
 
 
+		// Get the fragmentation
+		PtrSize userAllocatedSize, realAllocatedSize;
+		F64 externalFragmentation, internalFragmentation;
+		buddy.getInfo(userAllocatedSize, realAllocatedSize, externalFragmentation, internalFragmentation);
+		ANKI_TEST_LOGI("Memory info: userAllocatedSize %zu, realAllocatedSize %zu, externalFragmentation %f, "
+					   "internalFragmentation %f",
+					   userAllocatedSize, realAllocatedSize, externalFragmentation, internalFragmentation);
+
 		// Remove the remaining
 		// Remove the remaining
 		for(const auto& pair : allocations)
 		for(const auto& pair : allocations)
 		{
 		{