Browse Source

Enable the new async loading path

Panagiotis Christopoulos Charitos 9 years ago
parent
commit
9664212b8a

+ 8 - 0
include/anki/gr/common/GpuFrameRingAllocator.h

@@ -43,12 +43,20 @@ public:
 	/// @return The bytes that were not used. Used for statistics.
 	PtrSize endFrame();
 
+#if ANKI_ENABLE_TRACE
+	/// Call this before endFrame.
+	PtrSize getUnallocatedMemorySize() const;
+#endif
+
 private:
 	PtrSize m_size = 0; ///< The full size of the buffer.
 	U32 m_alignment = 0; ///< Always work in that alignment.
 	PtrSize m_maxAllocationSize = 0; ///< For debugging.
 
 	Atomic<PtrSize> m_offset = {0};
+#if ANKI_ENABLE_TRACE
+	Atomic<PtrSize> m_lastAllocatedSize = {0}; ///< For tracing.
+#endif
 	U64 m_frame = 0;
 
 	Bool isCreated() const

+ 20 - 7
include/anki/gr/gl/DynamicMemoryManager.h

@@ -6,7 +6,6 @@
 #pragma once
 
 #include <anki/gr/gl/Common.h>
-#include <anki/gr/common/GpuBlockAllocator.h>
 #include <anki/gr/common/GpuFrameRingAllocator.h>
 
 namespace anki
@@ -28,17 +27,31 @@ public:
 
 	~DynamicMemoryManager();
 
-	void init(GenericMemoryPoolAllocator<U8> alloc, const ConfigSet& cfg);
+	void initMainThread(
+		GenericMemoryPoolAllocator<U8> alloc, const ConfigSet& cfg);
 
-	void destroy();
+	void initRenderThread();
+
+	void destroyRenderThread();
+
+	void endFrame();
 
 	ANKI_USE_RESULT void* allocatePerFrame(
 		BufferUsage usage, PtrSize size, DynamicBufferToken& handle);
 
-	ANKI_USE_RESULT void* allocatePersistent(
-		BufferUsage usage, PtrSize size, DynamicBufferToken& handle);
+	void* getBaseAddress(BufferUsage usage) const
+	{
+		void* addr = m_buffers[usage].m_mappedMem;
+		ANKI_ASSERT(addr);
+		return addr;
+	}
 
-	void freePersistent(BufferUsage usage, const DynamicBufferToken& handle);
+	GLuint getGlName(BufferUsage usage) const
+	{
+		GLuint name = m_buffers[usage].m_name;
+		ANKI_ASSERT(name);
+		return name;
+	}
 
 private:
 	class alignas(16) Aligned16Type
@@ -50,10 +63,10 @@ private:
 	class DynamicBuffer
 	{
 	public:
+		PtrSize m_size = 0;
 		GLuint m_name = 0;
 		DynamicArray<Aligned16Type> m_cpuBuff;
 		U8* m_mappedMem = nullptr;
-		GpuBlockAllocator m_persistentAlloc;
 		GpuFrameRingAllocator m_frameAlloc;
 	};
 

+ 8 - 69
include/anki/gr/gl/GlState.h

@@ -6,6 +6,7 @@
 #pragma once
 
 #include <anki/gr/gl/Common.h>
+#include <anki/gr/gl/DynamicMemoryManager.h>
 #include <anki/util/DynamicArray.h>
 
 namespace anki
@@ -77,24 +78,7 @@ public:
 	Bool m_depthWriteMask = true;
 	/// @}
 
-	/// Ring buffers
-	/// @{
-
-	// Ring buffer for dynamic buffers.
-	class DynamicBuffer
-	{
-	public:
-		GLuint m_name = 0;
-		U8* m_address = nullptr; ///< Host address of the buffer.
-		PtrSize m_size = 0; ///< This is aligned compared to GLOBAL_XXX_SIZE
-		U32 m_alignment = 0; ///< Always work in that alignment.
-		U32 m_maxAllocationSize = 0; ///< For debugging.
-		Atomic<PtrSize> m_currentOffset = {0};
-		Atomic<PtrSize> m_bytesUsed = {0}; ///< Per frame. For debugging.
-	};
-
-	Array<DynamicBuffer, U(BufferUsage::COUNT)> m_dynamicBuffers;
-	/// @}
+	DynamicMemoryManager m_dynamicMemoryManager;
 
 	GlState(GrManager* manager)
 		: m_manager(manager)
@@ -102,71 +86,26 @@ public:
 	}
 
 	/// Call this from the main thread.
-	void init0(const ConfigSet& config);
+	void initMainThread(const ConfigSet& config);
 
 	/// Call this from the rendering thread.
-	void init1();
+	void initRenderThread();
 
 	/// Call this from the rendering thread.
 	void destroy();
 
 	/// Allocate memory for a dynamic buffer.
 	void* allocateDynamicMemory(
-		PtrSize size, BufferUsage usage, DynamicBufferToken& token);
-
-	void checkDynamicMemoryConsumption();
+		PtrSize size, BufferUsage usage, DynamicBufferToken& token)
+	{
+		return m_dynamicMemoryManager.allocatePerFrame(usage, size, token);
+	}
 
 	void flushVertexState();
 
 private:
 	GrManager* m_manager;
-
-	class alignas(16) Aligned16Type
-	{
-		U8 _m_val[16];
-	};
-
-	DynamicArray<Aligned16Type> m_transferBuffer;
-
-	void initDynamicBuffer(
-		GLenum target, U32 aligment, U32 maxAllocationSize, BufferUsage usage);
 };
-
-//==============================================================================
-inline void* GlState::allocateDynamicMemory(
-	PtrSize originalSize, BufferUsage usage, DynamicBufferToken& token)
-{
-	ANKI_ASSERT(originalSize > 0);
-
-	DynamicBuffer& buff = m_dynamicBuffers[usage];
-	ANKI_ASSERT(buff.m_address);
-
-	// Align size
-	PtrSize size = getAlignedRoundUp(buff.m_alignment, originalSize);
-	ANKI_ASSERT(size <= buff.m_maxAllocationSize && "Too high!");
-
-	// Allocate
-	PtrSize offset = 0;
-
-	// Run in loop in case the begining of the range falls inside but the end
-	// outside the buffer
-	do
-	{
-		offset = buff.m_currentOffset.fetchAdd(size);
-		offset = offset % buff.m_size;
-	} while((offset + size) > buff.m_size);
-
-	ANKI_ASSERT(isAligned(buff.m_alignment, buff.m_address + offset));
-	ANKI_ASSERT((offset + size) <= buff.m_size);
-
-	buff.m_bytesUsed.fetchAdd(size);
-
-	// Encode token
-	token.m_offset = offset;
-	token.m_range = originalSize;
-
-	return static_cast<void*>(buff.m_address + offset);
-}
 /// @}
 
 } // end namespace anki

+ 20 - 22
include/anki/resource/AsyncLoader.h

@@ -49,9 +49,23 @@ public:
 
 	void init(const HeapAllocator<U8>& alloc);
 
+	/// Submit a task.
+	void submitTask(AsyncLoaderTask* task);
+
 	/// Create a new asynchronous loading task.
 	template<typename TTask, typename... TArgs>
-	void newTask(TArgs&&... args);
+	TTask* newTask(TArgs&&... args)
+	{
+		return m_alloc.template newInstance<TTask>(
+			std::forward<TArgs>(args)...);
+	}
+
+	/// Create and submit a new asynchronous loading task.
+	template<typename TTask, typename... TArgs>
+	void submitNewTask(TArgs&&... args)
+	{
+		submitTask(newTask<TTask>(std::forward<TArgs>(args)...));
+	}
 
 	/// Pause the loader. This method will block the main thread for the current
 	/// async task to finish. The rest of the tasks in the queue will not be
@@ -61,6 +75,11 @@ public:
 	/// Resume the async loading.
 	void resume();
 
+	HeapAllocator<U8> getAllocator() const
+	{
+		return m_alloc;
+	}
+
 private:
 	HeapAllocator<U8> m_alloc;
 	Thread m_thread;
@@ -80,27 +99,6 @@ private:
 
 	void stop();
 };
-
-//==============================================================================
-template<typename TTask, typename... TArgs>
-inline void AsyncLoader::newTask(TArgs&&... args)
-{
-	TTask* newTask =
-		m_alloc.template newInstance<TTask>(std::forward<TArgs>(args)...);
-
-	// Append task to the list
-	{
-		LockGuard<Mutex> lock(m_mtx);
-		m_taskQueue.pushBack(newTask);
-
-		if(!m_paused)
-		{
-			// Wake up the thread if it's not paused
-			m_condVar.notifyOne();
-		}
-	}
-}
-
 /// @}
 
 } // end namespace anki

+ 0 - 3
include/anki/resource/Mesh.h

@@ -107,9 +107,6 @@ protected:
 
 	BufferPtr m_vertBuff;
 	BufferPtr m_indicesBuff;
-
-	/// Create the VBOs using the mesh data
-	void createBuffers(const MeshLoader& loader);
 };
 /// @}
 

+ 5 - 1
include/anki/resource/MeshLoader.h

@@ -104,8 +104,11 @@ public:
 		U32 m_indicesCount = 0;
 	};
 
-	MeshLoader(ResourceManager* manager)
+	MeshLoader(ResourceManager* manager);
+
+	MeshLoader(ResourceManager* manager, GenericMemoryPoolAllocator<U8> alloc)
 		: m_manager(manager)
+		, m_alloc(alloc)
 	{
 	}
 
@@ -161,6 +164,7 @@ private:
 	using MDynamicArray = DynamicArray<T>;
 
 	ResourceManager* m_manager;
+	GenericMemoryPoolAllocator<U8> m_alloc;
 	Header m_header;
 
 	MDynamicArray<U8> m_verts;

+ 2 - 1
sandbox/config.xml

@@ -43,7 +43,8 @@
 	<imageReflectionMaxDistance>30</imageReflectionMaxDistance>
 	<gr.uniformPerFrameMemorySize>16777216</gr.uniformPerFrameMemorySize>
 	<gr.storagePerFrameMemorySize>16777216</gr.storagePerFrameMemorySize>
-	<gr.transferPersistentMemorySize>33554432</gr.transferPersistentMemorySize>
+	<gr.transferPerFrameMemorySize>67108864</gr.transferPerFrameMemorySize>
+	<gr.vertexPerFrameMemorySize>16777216</gr.vertexPerFrameMemorySize>
 	<maxTextureSize>1048576</maxTextureSize>
 	<textureAnisotropy>8</textureAnisotropy>
 	<dataPaths>assets:.</dataPaths>

+ 2 - 2
src/core/App.cpp

@@ -388,9 +388,9 @@ Error App::mainLoop()
 		// Pause and sync async loader. That will force all tasks before the
 		// pause to finish in this frame.
 		m_resources->getAsyncLoader().pause();
-		
+
 		m_gr->swapBuffers();
-		
+
 		// Now resume the loader
 		m_resources->getAsyncLoader().resume();
 

+ 3 - 1
src/core/Config.cpp

@@ -72,7 +72,9 @@ Config::Config()
 	//
 	newOption("gr.uniformPerFrameMemorySize", 1024 * 1024 * 16);
 	newOption("gr.storagePerFrameMemorySize", 1024 * 1024 * 16);
-	newOption("gr.transferPersistentMemorySize", 1024 * 1024 * 32);
+	newOption("gr.vertexPerFrameMemorySize", 1024 * 1024 * 10);
+	newOption(
+		"gr.transferPerFrameMemorySize", (4096 / 4) * (4096 / 4) * 16 * 4);
 
 	//
 	// Resource

+ 22 - 0
src/gr/common/GpuFrameRingAllocator.cpp

@@ -69,9 +69,15 @@ Error GpuFrameRingAllocator::allocate(
 		ANKI_ASSERT(isAligned(m_alignment, offset));
 		ANKI_ASSERT((offset + size) <= m_size);
 
+#if ANKI_ENABLE_TRACE
+		m_lastAllocatedSize.store(size);
+#endif
+
 		// Encode token
 		token.m_offset = offset;
 		token.m_range = originalSize;
+
+		ANKI_ASSERT(token.m_offset + token.m_range <= m_size);
 	}
 	else if(handleOomError)
 	{
@@ -85,4 +91,20 @@ Error GpuFrameRingAllocator::allocate(
 	return err;
 }
 
+//==============================================================================
+#if ANKI_ENABLE_TRACE
+PtrSize GpuFrameRingAllocator::getUnallocatedMemorySize() const
+{
+	PtrSize perFrameSize = m_size / MAX_FRAMES_IN_FLIGHT;
+	PtrSize crntFrameStartOffset =
+		perFrameSize * (m_frame % MAX_FRAMES_IN_FLIGHT);
+	PtrSize usedSize =
+		m_offset.get() - crntFrameStartOffset + m_lastAllocatedSize.get();
+
+	PtrSize remaining =
+		(perFrameSize >= usedSize) ? (perFrameSize - usedSize) : 0;
+	return remaining;
+}
+#endif
+
 } // end namespace anki

+ 4 - 2
src/gr/gl/CommandBuffer.cpp

@@ -489,7 +489,8 @@ public:
 
 	Error operator()(GlState& state)
 	{
-		U8* data = state.m_dynamicBuffers[BufferUsage::TRANSFER].m_address
+		U8* data = static_cast<U8*>(state.m_dynamicMemoryManager.getBaseAddress(
+					   BufferUsage::TRANSFER))
 			+ m_token.m_offset;
 
 		m_handle->getImplementation().write(m_surf, data, m_token.m_range);
@@ -526,7 +527,8 @@ public:
 
 	Error operator()(GlState& state)
 	{
-		U8* data = state.m_dynamicBuffers[BufferUsage::TRANSFER].m_address
+		U8* data = static_cast<U8*>(state.m_dynamicMemoryManager.getBaseAddress(
+					   BufferUsage::TRANSFER))
 			+ m_token.m_offset;
 
 		m_handle->getImplementation().write(data, m_offset, m_token.m_range);

+ 80 - 31
src/gr/gl/DynamicMemoryManager.cpp

@@ -4,7 +4,9 @@
 // http://www.anki3d.org/LICENSE
 
 #include <anki/gr/gl/DynamicMemoryManager.h>
+#include <anki/gr/gl/Error.h>
 #include <anki/core/Config.h>
+#include <anki/core/Trace.h>
 
 namespace anki
 {
@@ -19,13 +21,14 @@ DynamicMemoryManager::~DynamicMemoryManager()
 }
 
 //==============================================================================
-void DynamicMemoryManager::destroy()
+void DynamicMemoryManager::destroyRenderThread()
 {
 	for(DynamicBuffer& buff : m_buffers)
 	{
 		if(buff.m_name != 0)
 		{
 			glDeleteBuffers(1, &buff.m_name);
+			buff.m_name = 0;
 		}
 
 		buff.m_cpuBuff.destroy(m_alloc);
@@ -33,24 +36,41 @@ void DynamicMemoryManager::destroy()
 }
 
 //==============================================================================
-void DynamicMemoryManager::init(
+void DynamicMemoryManager::initMainThread(
 	GenericMemoryPoolAllocator<U8> alloc, const ConfigSet& cfg)
 {
-	const U BUFF_FLAGS = GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT;
-
 	m_alloc = alloc;
 
+	m_buffers[BufferUsage::UNIFORM].m_size =
+		cfg.getNumber("gr.uniformPerFrameMemorySize");
+
+	m_buffers[BufferUsage::STORAGE].m_size =
+		cfg.getNumber("gr.storagePerFrameMemorySize");
+
+	m_buffers[BufferUsage::VERTEX].m_size =
+		cfg.getNumber("gr.vertexPerFrameMemorySize");
+
+	m_buffers[BufferUsage::TRANSFER].m_size =
+		cfg.getNumber("gr.transferPerFrameMemorySize");
+}
+
+//==============================================================================
+void DynamicMemoryManager::initRenderThread()
+{
+	const U BUFF_FLAGS = GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT;
+
 	// Uniform
 	{
 		// Create buffer
-		PtrSize size = cfg.getNumber("gr.uniformPerFrameMemorySize");
 		DynamicBuffer& buff = m_buffers[BufferUsage::UNIFORM];
+		PtrSize size = buff.m_size;
 		glGenBuffers(1, &buff.m_name);
+		glBindBuffer(GL_UNIFORM_BUFFER, buff.m_name);
 
 		// Map it
-		glNamedBufferStorage(buff.m_name, size, nullptr, BUFF_FLAGS);
+		glBufferStorage(GL_UNIFORM_BUFFER, size, nullptr, BUFF_FLAGS);
 		buff.m_mappedMem = static_cast<U8*>(
-			glMapNamedBufferRange(buff.m_name, 0, size, BUFF_FLAGS));
+			glMapBufferRange(GL_UNIFORM_BUFFER, 0, size, BUFF_FLAGS));
 		ANKI_ASSERT(buff.m_mappedMem);
 
 		// Create the allocator
@@ -62,14 +82,15 @@ void DynamicMemoryManager::init(
 	// Storage
 	{
 		// Create buffer
-		PtrSize size = cfg.getNumber("gr.storagePerFrameMemorySize");
 		DynamicBuffer& buff = m_buffers[BufferUsage::STORAGE];
+		PtrSize size = buff.m_size;
 		glGenBuffers(1, &buff.m_name);
+		glBindBuffer(GL_SHADER_STORAGE_BUFFER, buff.m_name);
 
 		// Map it
-		glNamedBufferStorage(buff.m_name, size, nullptr, BUFF_FLAGS);
+		glBufferStorage(GL_SHADER_STORAGE_BUFFER, size, nullptr, BUFF_FLAGS);
 		buff.m_mappedMem = static_cast<U8*>(
-			glMapNamedBufferRange(buff.m_name, 0, size, BUFF_FLAGS));
+			glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, size, BUFF_FLAGS));
 		ANKI_ASSERT(buff.m_mappedMem);
 
 		// Create the allocator
@@ -79,17 +100,32 @@ void DynamicMemoryManager::init(
 		buff.m_frameAlloc.init(size, blockAlignment, MAX_STORAGE_BLOCK_SIZE);
 	}
 
-	// Transfer
+	// Vertex
 	{
-		// Big enough block to hold a texture surface
-		const PtrSize BLOCK_SIZE = (4096 * 4096) / 4 * 16 + 512;
+		// Create buffer
+		DynamicBuffer& buff = m_buffers[BufferUsage::VERTEX];
+		PtrSize size = buff.m_size;
+		glGenBuffers(1, &buff.m_name);
+		glBindBuffer(GL_ARRAY_BUFFER, buff.m_name);
+
+		// Map it
+		glBufferStorage(GL_ARRAY_BUFFER, size, nullptr, BUFF_FLAGS);
+		buff.m_mappedMem = static_cast<U8*>(
+			glMapBufferRange(GL_ARRAY_BUFFER, 0, size, BUFF_FLAGS));
+		ANKI_ASSERT(buff.m_mappedMem);
 
-		PtrSize size = cfg.getNumber("gr.transferPersistentMemorySize");
+		// Create the allocator
+		buff.m_frameAlloc.init(size, 16, MAX_U32);
+	}
+
+	// Transfer
+	{
 		DynamicBuffer& buff = m_buffers[BufferUsage::TRANSFER];
+		PtrSize size = buff.m_size;
 		buff.m_cpuBuff.create(m_alloc, size);
 
 		buff.m_mappedMem = reinterpret_cast<U8*>(&buff.m_cpuBuff[0]);
-		buff.m_persistentAlloc.init(m_alloc, size, BLOCK_SIZE);
+		buff.m_frameAlloc.init(size, 16, MAX_U32);
 	}
 }
 
@@ -98,34 +134,47 @@ void* DynamicMemoryManager::allocatePerFrame(
 	BufferUsage usage, PtrSize size, DynamicBufferToken& handle)
 {
 	DynamicBuffer& buff = m_buffers[usage];
-	Error err = buff.m_frameAlloc.allocate(size, handle, true);
-	(void)err;
-	return buff.m_mappedMem + handle.m_offset;
-}
-
-//==============================================================================
-void* DynamicMemoryManager::allocatePersistent(
-	BufferUsage usage, PtrSize size, DynamicBufferToken& handle)
-{
-	DynamicBuffer& buff = m_buffers[usage];
-	Error err = buff.m_persistentAlloc.allocate(size, 16, handle, false);
+	Error err = buff.m_frameAlloc.allocate(size, handle, false);
 	if(!err)
 	{
 		return buff.m_mappedMem + handle.m_offset;
 	}
 	else
 	{
-		ANKI_LOGW("Out of persistent dynamic memory. Someone should serialize");
+		ANKI_LOGW(
+			"Out of per-frame GPU memory. Someone will have to handle this");
 		return nullptr;
 	}
 }
 
 //==============================================================================
-void DynamicMemoryManager::freePersistent(
-	BufferUsage usage, const DynamicBufferToken& handle)
+void DynamicMemoryManager::endFrame()
 {
-	DynamicBuffer& buff = m_buffers[usage];
-	buff.m_persistentAlloc.free(handle);
+	for(BufferUsage usage = BufferUsage::FIRST; usage < BufferUsage::COUNT;
+		++usage)
+	{
+		DynamicBuffer& buff = m_buffers[usage];
+
+		if(buff.m_mappedMem)
+		{
+			// Increase the counters
+			switch(usage)
+			{
+			case BufferUsage::UNIFORM:
+				ANKI_TRACE_INC_COUNTER(GR_DYNAMIC_UNIFORMS_SIZE,
+					buff.m_frameAlloc.getUnallocatedMemorySize());
+				break;
+			case BufferUsage::STORAGE:
+				ANKI_TRACE_INC_COUNTER(GR_DYNAMIC_STORAGE_SIZE,
+					buff.m_frameAlloc.getUnallocatedMemorySize());
+				break;
+			default:
+				break;
+			}
+
+			buff.m_frameAlloc.endFrame();
+		}
+	}
 }
 
 } // end namespace anki

+ 6 - 105
src/gr/gl/GlState.cpp

@@ -86,22 +86,13 @@ __stdcall
 #endif
 
 //==============================================================================
-void GlState::init0(const ConfigSet& config)
+void GlState::initMainThread(const ConfigSet& config)
 {
-	m_dynamicBuffers[BufferUsage::UNIFORM].m_size =
-		config.getNumber("gr.uniformPerFrameMemorySize");
-
-	m_dynamicBuffers[BufferUsage::STORAGE].m_size =
-		config.getNumber("gr.storagePerFrameMemorySize");
-
-	m_dynamicBuffers[BufferUsage::VERTEX].m_size = 1024;
-
-	m_dynamicBuffers[BufferUsage::TRANSFER].m_size =
-		config.getNumber("gr.transferPersistentMemorySize");
+	m_dynamicMemoryManager.initMainThread(m_manager->getAllocator(), config);
 }
 
 //==============================================================================
-void GlState::init1()
+void GlState::initRenderThread()
 {
 	// GL version
 	GLint major, minor;
@@ -161,105 +152,15 @@ void GlState::init1()
 	// Other
 	memset(&m_vertexBindingStrides[0], 0, sizeof(m_vertexBindingStrides));
 
-	// Init ring buffers
-	GLint64 blockAlignment;
-	glGetInteger64v(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &blockAlignment);
-	initDynamicBuffer(GL_UNIFORM_BUFFER,
-		blockAlignment,
-		MAX_UNIFORM_BLOCK_SIZE,
-		BufferUsage::UNIFORM);
-
-	glGetInteger64v(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT, &blockAlignment);
-	initDynamicBuffer(GL_SHADER_STORAGE_BUFFER,
-		blockAlignment,
-		MAX_STORAGE_BLOCK_SIZE,
-		BufferUsage::STORAGE);
-
-	initDynamicBuffer(GL_ARRAY_BUFFER, 16, MAX_U32, BufferUsage::VERTEX);
-
-	{
-		m_transferBuffer.create(m_manager->getAllocator(),
-			m_dynamicBuffers[BufferUsage::TRANSFER].m_size
-				/ sizeof(Aligned16Type));
-
-		auto& buff = m_dynamicBuffers[BufferUsage::TRANSFER];
-		buff.m_address = reinterpret_cast<U8*>(&m_transferBuffer[0]);
-		ANKI_ASSERT(isAligned(ANKI_SAFE_ALIGNMENT, buff.m_address));
-		buff.m_alignment = ANKI_SAFE_ALIGNMENT;
-		buff.m_maxAllocationSize = MAX_U32;
-	}
-}
-
-//==============================================================================
-void GlState::initDynamicBuffer(
-	GLenum target, U32 alignment, U32 maxAllocationSize, BufferUsage usage)
-{
-	DynamicBuffer& buff = m_dynamicBuffers[usage];
-	ANKI_ASSERT(buff.m_size > 0);
-
-	const U FLAGS = GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT;
-
-	alignRoundUp(alignment, buff.m_size);
-
-	glGenBuffers(1, &buff.m_name);
-
-	glBindBuffer(target, buff.m_name);
-	glBufferStorage(target, buff.m_size, nullptr, FLAGS);
-
-	buff.m_address =
-		static_cast<U8*>(glMapBufferRange(target, 0, buff.m_size, FLAGS));
-	buff.m_alignment = alignment;
-	buff.m_maxAllocationSize = maxAllocationSize;
-}
-
-//==============================================================================
-void GlState::checkDynamicMemoryConsumption()
-{
-	for(BufferUsage usage = BufferUsage::FIRST; usage < BufferUsage::COUNT;
-		++usage)
-	{
-		DynamicBuffer& buff = m_dynamicBuffers[usage];
-
-		if(buff.m_address)
-		{
-			auto bytesUsed = buff.m_bytesUsed.exchange(0);
-			if(bytesUsed >= buff.m_size / MAX_FRAMES_IN_FLIGHT)
-			{
-				ANKI_LOGW("Using too much dynamic memory (mem_type: %u, "
-						  "mem_used: %u). Increase the limit",
-					U(usage),
-					U(bytesUsed));
-			}
-
-			// Increase the counters
-			switch(usage)
-			{
-			case BufferUsage::UNIFORM:
-				ANKI_TRACE_INC_COUNTER(GR_DYNAMIC_UNIFORMS_SIZE, bytesUsed);
-				break;
-			case BufferUsage::STORAGE:
-				ANKI_TRACE_INC_COUNTER(GR_DYNAMIC_STORAGE_SIZE, bytesUsed);
-				break;
-			default:
-				break;
-			}
-		}
-	}
+	// Init dynamic memory
+	m_dynamicMemoryManager.initRenderThread();
 }
 
 //==============================================================================
 void GlState::destroy()
 {
-	for(auto& x : m_dynamicBuffers)
-	{
-		if(x.m_name)
-		{
-			glDeleteBuffers(1, &x.m_name);
-		}
-	}
-
+	m_dynamicMemoryManager.destroyRenderThread();
 	glDeleteVertexArrays(1, &m_defaultVao);
-	m_transferBuffer.destroy(m_manager->getAllocator());
 }
 
 //==============================================================================

+ 2 - 12
src/gr/gl/GrManager.cpp

@@ -53,18 +53,8 @@ void* GrManager::allocateFrameHostVisibleMemory(
 {
 	// Will be used in a thread safe way
 	GlState& state = m_impl->getRenderingThread().getState();
-
-	void* data = state.allocateDynamicMemory(size, usage, token);
-	ANKI_ASSERT(data);
-
-	// Encode token
-	PtrSize offset =
-		static_cast<U8*>(data) - state.m_dynamicBuffers[usage].m_address;
-	ANKI_ASSERT(offset < MAX_U32 && size < MAX_U32);
-	token.m_offset = offset;
-	token.m_range = size;
-
-	return data;
+	void* ptr = state.allocateDynamicMemory(size, usage, token);
+	return ptr;
 }
 
 //==============================================================================

+ 5 - 4
src/gr/gl/RenderingThread.cpp

@@ -149,7 +149,7 @@ void RenderingThread::start(Bool registerMessages, const ConfigSet& config)
 	m_swapBuffersCommands->getImplementation()
 		.pushBackNewCommand<SwapBuffersCommand>(this);
 
-	m_state.init0(config);
+	m_state.initMainThread(config);
 	m_manager->getImplementation().pinContextToCurrentThread(false);
 
 #if !ANKI_DISABLE_GL_RENDERING_THREAD
@@ -203,7 +203,7 @@ void RenderingThread::prepare()
 	m_serverThreadId = Thread::getCurrentThreadId();
 
 	// Init state
-	m_state.init1();
+	m_state.initRenderThread();
 }
 
 //==============================================================================
@@ -313,8 +313,6 @@ void RenderingThread::swapBuffersInternal(GlState& state)
 		m_frameCondVar.notifyOne();
 	}
 
-	state.checkDynamicMemoryConsumption();
-
 	ANKI_TRACE_STOP_EVENT(SWAP_BUFFERS);
 }
 
@@ -334,6 +332,9 @@ void RenderingThread::swapBuffers()
 		m_frameWait = true;
 	}
 #endif
+
+	m_state.m_dynamicMemoryManager.endFrame();
+
 	// ...and then flush a new swap buffers
 	flushCommandBuffer(m_swapBuffersCommands);
 	ANKI_TRACE_STOP_EVENT(SWAP_BUFFERS);

+ 5 - 3
src/gr/gl/ResourceGroupImpl.cpp

@@ -134,7 +134,7 @@ void ResourceGroupImpl::init(const ResourceGroupInitInfo& init)
 									   .getState();
 
 			m_vertBuffNames[i] =
-				state.m_dynamicBuffers[BufferUsage::VERTEX].m_name;
+				state.m_dynamicMemoryManager.getGlName(BufferUsage::VERTEX);
 			m_vertBuffOffsets[i] = MAX_U32;
 
 			++m_vertBindingsCount;
@@ -271,7 +271,8 @@ void ResourceGroupImpl::bind(
 			{
 				glBindBufferRange(GL_UNIFORM_BUFFER,
 					MAX_UNIFORM_BUFFER_BINDINGS * slot + i,
-					state.m_dynamicBuffers[BufferUsage::UNIFORM].m_name,
+					state.m_dynamicMemoryManager.getGlName(
+						BufferUsage::UNIFORM),
 					token.m_offset,
 					token.m_range);
 			}
@@ -305,7 +306,8 @@ void ResourceGroupImpl::bind(
 			{
 				glBindBufferRange(GL_SHADER_STORAGE_BUFFER,
 					MAX_STORAGE_BUFFER_BINDINGS * slot + i,
-					state.m_dynamicBuffers[BufferUsage::STORAGE].m_name,
+					state.m_dynamicMemoryManager.getGlName(
+						BufferUsage::STORAGE),
 					token.m_offset,
 					token.m_range);
 			}

+ 16 - 0
src/resource/AsyncLoader.cpp

@@ -159,4 +159,20 @@ Error AsyncLoader::threadWorker()
 	return err;
 }
 
+//==============================================================================
+void AsyncLoader::submitTask(AsyncLoaderTask* task)
+{
+	ANKI_ASSERT(task);
+
+	// Append task to the list
+	LockGuard<Mutex> lock(m_mtx);
+	m_taskQueue.pushBack(task);
+
+	if(!m_paused)
+	{
+		// Wake up the thread if it's not paused
+		m_condVar.notifyOne();
+	}
+}
+
 } // end namespace anki

+ 101 - 25
src/resource/Mesh.cpp

@@ -6,12 +6,103 @@
 #include <anki/resource/Mesh.h>
 #include <anki/resource/ResourceManager.h>
 #include <anki/resource/MeshLoader.h>
+#include <anki/resource/AsyncLoader.h>
 #include <anki/util/Functions.h>
 #include <anki/misc/Xml.h>
 
 namespace anki
 {
 
+//==============================================================================
+// MeshLoadTask                                                                =
+//==============================================================================
+
+/// Mesh upload async task.
+class MeshLoadTask : public AsyncLoaderTask
+{
+public:
+	ResourceManager* m_manager ANKI_DBG_NULLIFY_PTR;
+	BufferPtr m_vertBuff;
+	BufferPtr m_indicesBuff;
+	MeshLoader m_loader;
+
+	MeshLoadTask(ResourceManager* manager)
+		: m_manager(manager)
+		, m_loader(manager, manager->getAsyncLoader().getAllocator())
+	{
+	}
+
+	Error operator()(AsyncLoaderTaskContext& ctx) final;
+};
+
+//==============================================================================
+Error MeshLoadTask::operator()(AsyncLoaderTaskContext& ctx)
+{
+	GrManager& gr = m_manager->getGrManager();
+	CommandBufferPtr cmdb;
+
+	// Write vert buff
+	if(m_vertBuff)
+	{
+		DynamicBufferToken token;
+		void* data = gr.allocateFrameHostVisibleMemory(
+			m_loader.getVertexDataSize(), BufferUsage::TRANSFER, token);
+
+		if(data)
+		{
+			memcpy(
+				data, m_loader.getVertexData(), m_loader.getVertexDataSize());
+			cmdb = gr.newInstance<CommandBuffer>(CommandBufferInitInfo());
+			cmdb->writeBuffer(m_vertBuff, 0, token);
+			m_vertBuff.reset(nullptr);
+		}
+		else
+		{
+			ctx.m_pause = true;
+			ctx.m_resubmitTask = true;
+			return ErrorCode::NONE;
+		}
+	}
+
+	// Create index buffer
+	{
+		DynamicBufferToken token;
+		void* data = gr.allocateFrameHostVisibleMemory(
+			m_loader.getIndexDataSize(), BufferUsage::TRANSFER, token);
+
+		if(data)
+		{
+			memcpy(data, m_loader.getIndexData(), m_loader.getIndexDataSize());
+
+			if(!cmdb)
+			{
+				cmdb = gr.newInstance<CommandBuffer>(CommandBufferInitInfo());
+			}
+
+			cmdb->writeBuffer(m_indicesBuff, 0, token);
+			cmdb->flush();
+		}
+		else
+		{
+			// Submit prev work
+			if(cmdb)
+			{
+				cmdb->flush();
+			}
+
+			ctx.m_pause = true;
+			ctx.m_resubmitTask = true;
+			return ErrorCode::NONE;
+		}
+	}
+
+	return ErrorCode::NONE;
+}
+
+//==============================================================================
+// Mesh                                                                        =
+//==============================================================================
+
 //==============================================================================
 Mesh::Mesh(ResourceManager* manager)
 	: ResourceObject(manager)
@@ -35,7 +126,10 @@ Bool Mesh::isCompatible(const Mesh& other) const
 //==============================================================================
 Error Mesh::load(const ResourceFilename& filename)
 {
-	MeshLoader loader(&getManager());
+	MeshLoadTask* task =
+		getManager().getAsyncLoader().newTask<MeshLoadTask>(&getManager());
+
+	MeshLoader& loader = task->m_loader;
 	ANKI_CHECK(loader.load(filename));
 
 	const MeshLoader::Header& header = loader.getHeader();
@@ -56,41 +150,23 @@ Error Mesh::load(const ResourceFilename& filename)
 	m_texChannelsCount = header.m_uvsChannelCount;
 	m_weights = loader.hasBoneInfo();
 
-	createBuffers(loader);
-
-	return ErrorCode::NONE;
-}
-
-//==============================================================================
-void Mesh::createBuffers(const MeshLoader& loader)
-{
+	// Allocate the buffers
 	GrManager& gr = getManager().getGrManager();
 
-	CommandBufferPtr cmdb =
-		gr.newInstance<CommandBuffer>(CommandBufferInitInfo());
-
-	// Create vertex buffer
 	m_vertBuff = gr.newInstance<Buffer>(loader.getVertexDataSize(),
 		BufferUsageBit::VERTEX,
 		BufferAccessBit::CLIENT_WRITE);
 
-	DynamicBufferToken token;
-	void* data = gr.allocateFrameHostVisibleMemory(
-		loader.getVertexDataSize(), BufferUsage::TRANSFER, token);
-	memcpy(data, loader.getVertexData(), loader.getVertexDataSize());
-	cmdb->writeBuffer(m_vertBuff, 0, token);
-
-	// Create index buffer
 	m_indicesBuff = gr.newInstance<Buffer>(loader.getIndexDataSize(),
 		BufferUsageBit::INDEX,
 		BufferAccessBit::CLIENT_WRITE);
 
-	data = gr.allocateFrameHostVisibleMemory(
-		loader.getIndexDataSize(), BufferUsage::TRANSFER, token);
-	memcpy(data, loader.getIndexData(), loader.getIndexDataSize());
-	cmdb->writeBuffer(m_indicesBuff, 0, token);
+	// Submit the loading task
+	task->m_indicesBuff = m_indicesBuff;
+	task->m_vertBuff = m_vertBuff;
+	getManager().getAsyncLoader().submitTask(task);
 
-	cmdb->flush();
+	return ErrorCode::NONE;
 }
 
 } // end namespace anki

+ 10 - 6
src/resource/MeshLoader.cpp

@@ -11,21 +11,25 @@ namespace anki
 {
 
 //==============================================================================
-MeshLoader::~MeshLoader()
+MeshLoader::MeshLoader(ResourceManager* manager)
+	: MeshLoader(manager, manager->getTempAllocator())
 {
-	auto alloc = m_manager->getTempAllocator();
+}
 
+//==============================================================================
+MeshLoader::~MeshLoader()
+{
 	// WARNING: Watch the order of deallocation. Reverse of the deallocation to
 	// have successful cleanups
-	m_verts.destroy(alloc);
-	m_indices.destroy(alloc);
-	m_subMeshes.destroy(alloc);
+	m_verts.destroy(m_alloc);
+	m_indices.destroy(m_alloc);
+	m_subMeshes.destroy(m_alloc);
 }
 
 //==============================================================================
 Error MeshLoader::load(const ResourceFilename& filename)
 {
-	auto alloc = m_manager->getTempAllocator();
+	auto& alloc = m_alloc;
 
 	// Load header
 	ResourceFilePtr file;

+ 81 - 49
src/resource/TextureResource.cpp

@@ -12,28 +12,29 @@ namespace anki
 {
 
 //==============================================================================
-// Misc                                                                        =
+// TexUploadTask                                                               =
 //==============================================================================
 
+/// Texture upload async task.
 class TexUploadTask : public AsyncLoaderTask
 {
 public:
-	UniquePtr<ImageLoader> m_loader;
+	ImageLoader m_loader;
 	TexturePtr m_tex;
 	GrManager* m_gr ANKI_DBG_NULLIFY_PTR;
-	U32 m_depth = 0;
-	U8 m_faces = 0;
-
-	TexUploadTask(UniquePtr<ImageLoader>& loader,
-		TexturePtr tex,
-		GrManager* gr,
-		U depth,
-		U faces)
-		: m_loader(std::move(loader))
-		, m_tex(tex)
-		, m_gr(gr)
-		, m_depth(depth)
-		, m_faces(faces)
+	U m_depth = 0;
+	U m_faces = 0;
+
+	class
+	{
+	public:
+		U m_depth = 0;
+		U m_face = 0;
+		U m_mip = 0;
+	} m_ctx;
+
+	TexUploadTask(GenericMemoryPoolAllocator<U8> alloc)
+		: m_loader(alloc)
 	{
 	}
 
@@ -43,33 +44,64 @@ public:
 //==============================================================================
 Error TexUploadTask::operator()(AsyncLoaderTaskContext& ctx)
 {
-	CommandBufferPtr cmdb =
-		m_gr->newInstance<CommandBuffer>(CommandBufferInitInfo());
+	CommandBufferPtr cmdb;
 
 	// Upload the data
-	for(U depth = 0; depth < m_depth; depth++)
+	for(U depth = m_ctx.m_depth; depth < m_depth; ++depth)
 	{
-		for(U face = 0; face < m_faces; face++)
+		for(U face = m_ctx.m_face; face < m_faces; ++face)
 		{
-			for(U level = 0; level < m_loader->getMipLevelsCount(); level++)
+			for(U mip = m_ctx.m_mip; mip < m_loader.getMipLevelsCount(); ++mip)
 			{
 				U surfIdx = max(depth, face);
-				const auto& surf = m_loader->getSurface(level, surfIdx);
+				const auto& surf = m_loader.getSurface(mip, surfIdx);
 
 				DynamicBufferToken token;
 				void* data = m_gr->allocateFrameHostVisibleMemory(
 					surf.m_data.getSize(), BufferUsage::TRANSFER, token);
-				memcpy(data, &surf.m_data[0], surf.m_data.getSize());
 
-				cmdb->textureUpload(
-					m_tex, TextureSurfaceInfo(level, depth, face), token);
+				if(data)
+				{
+					// There is enough transfer memory
+
+					memcpy(data, &surf.m_data[0], surf.m_data.getSize());
+
+					if(!cmdb)
+					{
+						cmdb = m_gr->newInstance<CommandBuffer>(
+							CommandBufferInitInfo());
+					}
+
+					cmdb->textureUpload(
+						m_tex, TextureSurfaceInfo(mip, depth, face), token);
+				}
+				else
+				{
+					// Not enough transfer memory. Move the work to the future
+
+					if(cmdb)
+					{
+						cmdb->flush();
+					}
+
+					m_ctx.m_depth = depth;
+					m_ctx.m_mip = mip;
+					m_ctx.m_face = face;
+
+					ctx.m_pause = true;
+					ctx.m_resubmitTask = true;
+
+					return ErrorCode::NONE;
+				}
 			}
 		}
 	}
 
-	// Finaly enque the GL job chain
-	// TODO This is probably a bad idea
-	cmdb->finish();
+	// Finally enqueue the command buffer
+	if(cmdb)
+	{
+		cmdb->flush();
+	}
 
 	return ErrorCode::NONE;
 }
@@ -86,34 +118,30 @@ TextureResource::~TextureResource()
 //==============================================================================
 Error TextureResource::load(const ResourceFilename& filename)
 {
-	GrManager& gr = getManager().getGrManager();
-	// Always first to avoid assertions (because of the check of the allocator)
-	CommandBufferPtr cmdb =
-		gr.newInstance<CommandBuffer>(CommandBufferInitInfo());
-
 	TextureInitInfo init;
 	U depth = 0;
 	U faces = 0;
 
 	// Load image
-	UniquePtr<ImageLoader> img;
-	img.reset(getAllocator().newInstance<ImageLoader>(getAllocator()));
+	TexUploadTask* task = getManager().getAsyncLoader().newTask<TexUploadTask>(
+		getManager().getAsyncLoader().getAllocator());
+	ImageLoader& loader = task->m_loader;
 
 	ResourceFilePtr file;
 	ANKI_CHECK(openFile(filename, file));
 
-	ANKI_CHECK(img->load(file, filename, getManager().getMaxTextureSize()));
+	ANKI_CHECK(loader.load(file, filename, getManager().getMaxTextureSize()));
 
 	// width + height
-	const auto& tmpSurf = img->getSurface(0, 0);
+	const auto& tmpSurf = loader.getSurface(0, 0);
 	init.m_width = tmpSurf.m_width;
 	init.m_height = tmpSurf.m_height;
 
 	// depth
-	if(img->getTextureType() == ImageLoader::TextureType::_2D_ARRAY
-		|| img->getTextureType() == ImageLoader::TextureType::_3D)
+	if(loader.getTextureType() == ImageLoader::TextureType::_2D_ARRAY
+		|| loader.getTextureType() == ImageLoader::TextureType::_3D)
 	{
-		init.m_depth = img->getDepth();
+		init.m_depth = loader.getDepth();
 	}
 	else
 	{
@@ -121,7 +149,7 @@ Error TextureResource::load(const ResourceFilename& filename)
 	}
 
 	// target
-	switch(img->getTextureType())
+	switch(loader.getTextureType())
 	{
 	case ImageLoader::TextureType::_2D:
 		init.m_type = TextureType::_2D;
@@ -151,9 +179,9 @@ Error TextureResource::load(const ResourceFilename& filename)
 	init.m_format.m_transform = TransformFormat::UNORM;
 	init.m_format.m_srgb = false;
 
-	if(img->getColorFormat() == ImageLoader::ColorFormat::RGB8)
+	if(loader.getColorFormat() == ImageLoader::ColorFormat::RGB8)
 	{
-		switch(img->getCompression())
+		switch(loader.getCompression())
 		{
 		case ImageLoader::DataCompression::RAW:
 			init.m_format.m_components = ComponentFormat::R8G8B8;
@@ -171,9 +199,9 @@ Error TextureResource::load(const ResourceFilename& filename)
 			ANKI_ASSERT(0);
 		}
 	}
-	else if(img->getColorFormat() == ImageLoader::ColorFormat::RGBA8)
+	else if(loader.getColorFormat() == ImageLoader::ColorFormat::RGBA8)
 	{
-		switch(img->getCompression())
+		switch(loader.getCompression())
 		{
 		case ImageLoader::DataCompression::RAW:
 			init.m_format.m_components = ComponentFormat::R8G8B8A8;
@@ -197,7 +225,7 @@ Error TextureResource::load(const ResourceFilename& filename)
 	}
 
 	// mipmapsCount
-	init.m_mipmapsCount = img->getMipLevelsCount();
+	init.m_mipmapsCount = loader.getMipLevelsCount();
 
 	// filteringType
 	init.m_sampling.m_minMagFilter = SamplingFilter::LINEAR;
@@ -210,14 +238,18 @@ Error TextureResource::load(const ResourceFilename& filename)
 	init.m_sampling.m_anisotropyLevel = getManager().getTextureAnisotropy();
 
 	// Create the texture
-	m_tex = gr.newInstance<Texture>(init);
+	m_tex = getManager().getGrManager().newInstance<Texture>(init);
 
 	// Upload the data asynchronously
-	getManager().getAsyncLoader().newTask<TexUploadTask>(
-		img, m_tex, &gr, depth, faces);
+	task->m_depth = depth;
+	task->m_faces = faces;
+	task->m_gr = &getManager().getGrManager();
+	task->m_tex = m_tex;
 
-	m_size = UVec3(init.m_width, init.m_height, init.m_depth);
+	getManager().getAsyncLoader().submitTask(task);
 
+	// Done
+	m_size = UVec3(init.m_width, init.m_height, init.m_depth);
 	return ErrorCode::NONE;
 }
 

+ 30 - 26
tests/resource/AsyncLoader.cpp

@@ -23,8 +23,12 @@ public:
 	Bool m_pause;
 	Bool m_resubmit;
 
-	Task(F32 time, Barrier* barrier, Atomic<U32>* count, I32 id = -1, 
-		Bool pause = false, Bool resubmit = false)
+	Task(F32 time,
+		Barrier* barrier,
+		Atomic<U32>* count,
+		I32 id = -1,
+		Bool pause = false,
+		Bool resubmit = false)
 		: m_sleepTime(time)
 		, m_barrier(barrier)
 		, m_count(count)
@@ -59,7 +63,7 @@ public:
 		{
 			m_barrier->wait();
 		}
-		
+
 		ctx.m_pause = m_pause;
 		ctx.m_resubmitTask = m_resubmit;
 		m_resubmit = false;
@@ -117,7 +121,7 @@ ANKI_TEST(Resource, AsyncLoader)
 		a.init(alloc);
 		Barrier barrier(2);
 
-		a.newTask<Task>(0.0, &barrier, nullptr);
+		a.submitNewTask<Task>(0.0, &barrier, nullptr);
 		barrier.wait();
 	}
 
@@ -138,7 +142,7 @@ ANKI_TEST(Resource, AsyncLoader)
 				pbarrier = &barrier;
 			}
 
-			a.newTask<Task>(0.01, pbarrier, &counter);
+			a.submitNewTask<Task>(0.01, pbarrier, &counter);
 		}
 
 		barrier.wait();
@@ -153,7 +157,7 @@ ANKI_TEST(Resource, AsyncLoader)
 
 		for(U i = 0; i < 100; i++)
 		{
-			a.newTask<Task>(0.0, nullptr, nullptr);
+			a.submitNewTask<Task>(0.0, nullptr, nullptr);
 		}
 	}
 
@@ -172,7 +176,7 @@ ANKI_TEST(Resource, AsyncLoader)
 				pbarrier = &barrier;
 			}
 
-			a.newTask<MemTask>(alloc, pbarrier);
+			a.submitNewTask<MemTask>(alloc, pbarrier);
 		}
 
 		barrier.wait();
@@ -185,7 +189,7 @@ ANKI_TEST(Resource, AsyncLoader)
 
 		for(U i = 0; i < 10; i++)
 		{
-			a.newTask<MemTask>(alloc, nullptr);
+			a.submitNewTask<MemTask>(alloc, nullptr);
 		}
 	}
 
@@ -195,25 +199,25 @@ ANKI_TEST(Resource, AsyncLoader)
 		a.init(alloc);
 		Atomic<U32> counter(0);
 		Barrier barrier(2);
-		
+
 		// Check if the pause will sync
-		a.newTask<Task>(0.5, nullptr, &counter, 0);
+		a.submitNewTask<Task>(0.5, nullptr, &counter, 0);
 		HighRezTimer::sleep(0.25); // Wait for the thread to pick the task...
 		a.pause(); /// ...and then sync
 		ANKI_TEST_EXPECT_EQ(counter.load(), 1);
-		
+
 		// Test resume
-		a.newTask<Task>(0.1, nullptr, &counter, 1);
+		a.submitNewTask<Task>(0.1, nullptr, &counter, 1);
 		HighRezTimer::sleep(1.0);
 		ANKI_TEST_EXPECT_EQ(counter.load(), 1);
 		a.resume();
-		
+
 		// Sync
-		a.newTask<Task>(0.1, &barrier, &counter, 2);
+		a.submitNewTask<Task>(0.1, &barrier, &counter, 2);
 		barrier.wait();
-		
+
 		ANKI_TEST_EXPECT_EQ(counter.load(), 3);
-	}	
+	}
 
 	// Pause/resume
 	{
@@ -221,27 +225,27 @@ ANKI_TEST(Resource, AsyncLoader)
 		a.init(alloc);
 		Atomic<U32> counter(0);
 		Barrier barrier(2);
-		
+
 		// Check task resubmit
-		a.newTask<Task>(0.0, &barrier, &counter, -1, false, true);
+		a.submitNewTask<Task>(0.0, &barrier, &counter, -1, false, true);
 		barrier.wait();
 		barrier.wait();
 		ANKI_TEST_EXPECT_EQ(counter.load(), 2);
-		
+
 		// Check task pause
-		a.newTask<Task>(0.0, nullptr, &counter, -1, true, false);
-		a.newTask<Task>(0.0, nullptr, &counter, -1, false, false);
+		a.submitNewTask<Task>(0.0, nullptr, &counter, -1, true, false);
+		a.submitNewTask<Task>(0.0, nullptr, &counter, -1, false, false);
 		HighRezTimer::sleep(1.0);
 		ANKI_TEST_EXPECT_EQ(counter.load(), 3);
 		a.resume();
 		HighRezTimer::sleep(1.0);
 		ANKI_TEST_EXPECT_EQ(counter.load(), 4);
-		
+
 		// Check both
 		counter.set(0);
-		a.newTask<Task>(0.0, nullptr, &counter, 0, false, false);
-		a.newTask<Task>(0.0, nullptr, &counter, -1, true, true);
-		a.newTask<Task>(0.0, nullptr, &counter, 2, false, false);
+		a.submitNewTask<Task>(0.0, nullptr, &counter, 0, false, false);
+		a.submitNewTask<Task>(0.0, nullptr, &counter, -1, true, true);
+		a.submitNewTask<Task>(0.0, nullptr, &counter, 2, false, false);
 		HighRezTimer::sleep(1.0);
 		ANKI_TEST_EXPECT_EQ(counter.load(), 2);
 		a.resume();
@@ -265,7 +269,7 @@ ANKI_TEST(Resource, AsyncLoader)
 				pbarrier = &barrier;
 			}
 
-			a.newTask<Task>(randRange(0.0, 0.5), pbarrier, &counter, i);
+			a.submitNewTask<Task>(randRange(0.0, 0.5), pbarrier, &counter, i);
 		}
 
 		barrier.wait();