Make MicroObjectDeleter a bit smarter. Remove initial usage from texture init info

Panagiotis Christopoulos Charitos, 3 years ago
parent
commit
1512e72b64
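
The change touches one mechanism in several places: MicroObjectRecycler<T> now remembers per object whether its fence has completed, adapts how many ready objects it keeps alive across trims, and calls a new onFenceDone() hook on T the first time it observes the fence as done. MicroCommandBuffer implements the hook as a full reset(), while the types in DeferredBarrierFactory.h, SemaphoreFactory.h and SwapchainFactory.h add it as a no-op. A minimal sketch of the interface the recycler relies on; the method names come from the diff, but the fence and refcount types here are simplified stand-ins, not the real MicroFencePtr or GrAllocator:

// Hedged sketch of what MicroObjectRecycler<T> expects from T after this change.
#include <atomic>
#include <cstdint>
#include <memory>

struct StubFence
{
	bool done() const { return m_signaled; }
	bool m_signaled = false;
};

class RecyclableStub
{
public:
	std::atomic<int32_t>& getRefcount() { return m_refcount; }

	// The recycler inspects the fence to decide whether the GPU still uses the object.
	std::shared_ptr<StubFence>& getFence() { return m_fence; }

	// Called by the recycler the first time it sees the fence as done.
	// MicroCommandBuffer uses this to reset() itself; the wrappers in
	// DeferredBarrierFactory.h, SemaphoreFactory.h and SwapchainFactory.h keep it a no-op.
	void onFenceDone() { /* release per-submission state here */ }

	// The real interface also exposes getAllocator(), which the recycler uses to delete
	// trimmed objects; it is omitted here to keep the stub self-contained.

private:
	std::atomic<int32_t> m_refcount{0};
	std::shared_ptr<StubFence> m_fence;
};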

+ 1 - 3
AnKi/Gr/Texture.h

@@ -24,7 +24,6 @@ public:
 	Format m_format = Format::NONE;
 
 	TextureUsageBit m_usage = TextureUsageBit::NONE; ///< How the texture will be used.
-	TextureUsageBit m_initialUsage = TextureUsageBit::NONE; ///< Its initial usage.
 	TextureType m_type = TextureType::_2D;
 
 	U8 m_mipmapCount = 1;
@@ -47,8 +46,7 @@ public:
 		const U size = U(last - first);
 		ANKI_ASSERT(size
 					== sizeof(m_width) + sizeof(m_height) + sizeof(m_depth) + sizeof(m_layerCount) + sizeof(m_format)
-						   + sizeof(m_usage) + sizeof(m_initialUsage) + sizeof(m_type) + sizeof(m_mipmapCount)
-						   + sizeof(m_samples));
+						   + sizeof(m_usage) + sizeof(m_type) + sizeof(m_mipmapCount) + sizeof(m_samples));
 		return anki::computeHash(first, size);
 	}
 

+ 45 - 126
AnKi/Gr/Vulkan/CommandBufferFactory.cpp

@@ -23,7 +23,7 @@ static VulkanQueueType getQueueTypeFromCommandBufferFlags(CommandBufferFlag flag
 	}
 }
 
-void MicroCommandBuffer::destroy()
+MicroCommandBuffer::~MicroCommandBuffer()
 {
 	reset();
 
@@ -31,6 +31,10 @@ void MicroCommandBuffer::destroy()
 	{
 		vkFreeCommandBuffers(m_threadAlloc->m_factory->m_dev, m_threadAlloc->m_pools[m_queue], 1, &m_handle);
 		m_handle = {};
+
+		const U32 count = m_threadAlloc->m_factory->m_createdCmdBufferCount.fetchSub(1);
+		ANKI_ASSERT(count > 0);
+		(void)count;
 	}
 }
 
@@ -39,7 +43,7 @@ void MicroCommandBuffer::reset()
 	ANKI_TRACE_SCOPED_EVENT(VK_COMMAND_BUFFER_RESET);
 
 	ANKI_ASSERT(m_refcount.load() == 0);
-	ANKI_ASSERT(!m_fence.isCreated() || m_fence->done());
+	ANKI_ASSERT(!m_fence.isCreated());
 
 	for(GrObjectType type : EnumIterable<GrObjectType>())
 	{
@@ -47,8 +51,6 @@ void MicroCommandBuffer::reset()
 	}
 
 	m_fastAlloc.getMemoryPool().reset();
-
-	m_fence = {};
 }
 
 Error CommandBufferThreadAllocator::init()
@@ -68,42 +70,35 @@ Error CommandBufferThreadAllocator::init()
 		ANKI_VK_CHECK(vkCreateCommandPool(m_factory->m_dev, &ci, nullptr, &m_pools[qtype]));
 	}
 
-	return Error::NONE;
-}
-
-void CommandBufferThreadAllocator::destroyList(IntrusiveList<MicroCommandBuffer>& list)
-{
-	while(!list.isEmpty())
+	for(U32 secondLevel = 0; secondLevel < 2; ++secondLevel)
 	{
-		MicroCommandBuffer* ptr = list.popFront();
-		ptr->destroy();
-		getAllocator().deleteInstance(ptr);
-#if ANKI_EXTRA_CHECKS
-		m_createdCmdbs.fetchSub(1);
-#endif
+		for(U32 smallBatch = 0; smallBatch < 2; ++smallBatch)
+		{
+			for(VulkanQueueType queue : EnumIterable<VulkanQueueType>())
+			{
+				MicroObjectRecycler<MicroCommandBuffer>& recycler = m_recyclers[secondLevel][smallBatch][queue];
+
+				recycler.init(m_factory->m_alloc);
+			}
+		}
 	}
+
+	return Error::NONE;
 }
 
-void CommandBufferThreadAllocator::destroyLists()
+void CommandBufferThreadAllocator::destroy()
 {
-	for(U i = 0; i < 2; ++i)
+	for(U32 secondLevel = 0; secondLevel < 2; ++secondLevel)
 	{
-		for(U j = 0; j < 2; ++j)
+		for(U32 smallBatch = 0; smallBatch < 2; ++smallBatch)
 		{
-			for(VulkanQueueType qtype : EnumIterable<VulkanQueueType>())
+			for(VulkanQueueType queue : EnumIterable<VulkanQueueType>())
 			{
-				CmdbType& type = m_types[i][j][qtype];
-
-				destroyList(type.m_deletedCmdbs);
-				destroyList(type.m_readyCmdbs);
-				destroyList(type.m_inUseCmdbs);
+				m_recyclers[secondLevel][smallBatch][queue].destroy();
 			}
 		}
 	}
-}
 
-void CommandBufferThreadAllocator::destroy()
-{
 	for(VkCommandPool& pool : m_pools)
 	{
 		if(pool)
@@ -112,94 +107,19 @@ void CommandBufferThreadAllocator::destroy()
 			pool = VK_NULL_HANDLE;
 		}
 	}
-
-	ANKI_ASSERT(m_createdCmdbs.load() == 0 && "Someone still holds references to command buffers");
 }
 
-Error CommandBufferThreadAllocator::newCommandBuffer(CommandBufferFlag cmdbFlags, MicroCommandBufferPtr& outPtr,
-													 Bool& createdNew)
+Error CommandBufferThreadAllocator::newCommandBuffer(CommandBufferFlag cmdbFlags, MicroCommandBufferPtr& outPtr)
 {
 	ANKI_ASSERT(!!(cmdbFlags & CommandBufferFlag::COMPUTE_WORK) ^ !!(cmdbFlags & CommandBufferFlag::GENERAL_WORK));
-	createdNew = false;
 
 	const Bool secondLevel = !!(cmdbFlags & CommandBufferFlag::SECOND_LEVEL);
 	const Bool smallBatch = !!(cmdbFlags & CommandBufferFlag::SMALL_BATCH);
 	const VulkanQueueType queue = getQueueTypeFromCommandBufferFlags(cmdbFlags, m_factory->m_queueFamilies);
 
-	CmdbType& type = m_types[secondLevel][smallBatch][queue];
+	MicroObjectRecycler<MicroCommandBuffer>& recycler = m_recyclers[secondLevel][smallBatch][queue];
 
-	// Move the deleted to (possibly) in-use or ready
-	{
-		LockGuard<Mutex> lock(type.m_deletedMtx);
-
-		while(!type.m_deletedCmdbs.isEmpty())
-		{
-			MicroCommandBuffer* ptr = type.m_deletedCmdbs.popFront();
-
-			if(secondLevel)
-			{
-				type.m_readyCmdbs.pushFront(ptr);
-				ptr->reset();
-			}
-			else
-			{
-				type.m_inUseCmdbs.pushFront(ptr);
-			}
-		}
-	}
-
-	// Reset the in-use command buffers and try to get one available
-	MicroCommandBuffer* out = nullptr;
-	if(!secondLevel)
-	{
-		// Primary
-
-		// Try to reuse a ready buffer
-		if(!type.m_readyCmdbs.isEmpty())
-		{
-			out = type.m_readyCmdbs.popFront();
-		}
-
-		// Do a sweep and move in-use buffers to ready
-		IntrusiveList<MicroCommandBuffer> inUseCmdbs; // Push to temporary because we are iterating
-		while(!type.m_inUseCmdbs.isEmpty())
-		{
-			MicroCommandBuffer* inUseCmdb = type.m_inUseCmdbs.popFront();
-
-			if(!inUseCmdb->m_fence.isCreated() || inUseCmdb->m_fence->done())
-			{
-				// It's ready
-
-				if(out)
-				{
-					type.m_readyCmdbs.pushFront(inUseCmdb);
-					inUseCmdb->reset();
-				}
-				else
-				{
-					out = inUseCmdb;
-				}
-			}
-			else
-			{
-				inUseCmdbs.pushBack(inUseCmdb);
-			}
-		}
-
-		ANKI_ASSERT(type.m_inUseCmdbs.isEmpty());
-		type.m_inUseCmdbs = std::move(inUseCmdbs);
-	}
-	else
-	{
-		// Secondary
-
-		ANKI_ASSERT(type.m_inUseCmdbs.isEmpty());
-
-		if(!type.m_readyCmdbs.isEmpty())
-		{
-			out = type.m_readyCmdbs.popFront();
-		}
-	}
+	MicroCommandBuffer* out = recycler.findToReuse();
 
 	if(ANKI_UNLIKELY(out == nullptr))
 	{
@@ -217,10 +137,6 @@ Error CommandBufferThreadAllocator::newCommandBuffer(CommandBufferFlag cmdbFlags
 
 		MicroCommandBuffer* newCmdb = getAllocator().newInstance<MicroCommandBuffer>(this);
 
-#if ANKI_EXTRA_CHECKS
-		m_createdCmdbs.fetchAdd(1);
-#endif
-
 		newCmdb->m_fastAlloc =
 			StackAllocator<U8>(m_factory->m_alloc.getMemoryPool().getAllocationCallback(),
 							   m_factory->m_alloc.getMemoryPool().getAllocationCallbackUserData(), 256_KB, 2.0f);
@@ -231,11 +147,15 @@ Error CommandBufferThreadAllocator::newCommandBuffer(CommandBufferFlag cmdbFlags
 
 		out = newCmdb;
 
-		createdNew = true;
+		m_factory->m_createdCmdBufferCount.fetchAdd(1);
 	}
 	else
 	{
-		out->reset();
+		for(GrObjectType type : EnumIterable<GrObjectType>())
+		{
+			(void)type;
+			ANKI_ASSERT(out->m_objectRefs[type].getSize() == 0);
+		}
 	}
 
 	ANKI_ASSERT(out && out->m_refcount.load() == 0);
@@ -251,10 +171,7 @@ void CommandBufferThreadAllocator::deleteCommandBuffer(MicroCommandBuffer* ptr)
 	const Bool secondLevel = !!(ptr->m_flags & CommandBufferFlag::SECOND_LEVEL);
 	const Bool smallBatch = !!(ptr->m_flags & CommandBufferFlag::SMALL_BATCH);
 
-	CmdbType& type = m_types[secondLevel][smallBatch][ptr->m_queue];
-
-	LockGuard<Mutex> lock(type.m_deletedMtx);
-	type.m_deletedCmdbs.pushBack(ptr);
+	m_recyclers[secondLevel][smallBatch][ptr->m_queue].recycle(ptr);
 }
 
 Error CommandBufferFactory::init(GrAllocator<U8> alloc, VkDevice dev, const VulkanQueueFamilies& queueFamilies)
@@ -269,12 +186,19 @@ Error CommandBufferFactory::init(GrAllocator<U8> alloc, VkDevice dev, const Vulk
 
 void CommandBufferFactory::destroy()
 {
-	// Run 2 times because destroyLists() populates other allocators' lists
-	for(U i = 0; i < 2; ++i)
+	// First trim the caches for all recyclers. This will release the primaries and populate the recyclers of
+	// secondaries
+	for(CommandBufferThreadAllocator* talloc : m_threadAllocs)
 	{
-		for(CommandBufferThreadAllocator* alloc : m_threadAllocs)
+		for(U32 secondLevel = 0; secondLevel < 2; ++secondLevel)
 		{
-			alloc->destroyLists();
+			for(U32 smallBatch = 0; smallBatch < 2; ++smallBatch)
+			{
+				for(VulkanQueueType queue : EnumIterable<VulkanQueueType>())
+				{
+					talloc->m_recyclers[secondLevel][smallBatch][queue].trimCache();
+				}
+			}
 		}
 	}
 
@@ -342,12 +266,7 @@ Error CommandBufferFactory::newCommandBuffer(ThreadId tid, CommandBufferFlag cmd
 
 	ANKI_ASSERT(alloc);
 	ANKI_ASSERT(alloc->m_tid == tid);
-	Bool createdNew;
-	ANKI_CHECK(alloc->newCommandBuffer(cmdbFlags, ptr, createdNew));
-	if(createdNew)
-	{
-		m_createdCmdBufferCount.fetchAdd(1);
-	}
+	ANKI_CHECK(alloc->newCommandBuffer(cmdbFlags, ptr));
 
 	return Error::NONE;
 }
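
With the hand-rolled ready/in-use/deleted lists gone, the command buffer life cycle becomes: setFence() at submit time, deleteCommandBuffer() hands the buffer to the recycler when the last reference drops, the recycler calls onFenceDone() (which runs reset()) once it observes the fence signalled, and findToReuse() returns a buffer that is already reset, hence the new assert on empty m_objectRefs where the old code called out->reset(). Below is a self-contained toy model of that ordering; ToyFence, ToyCmdb and ToyRecycler are invented stand-ins and only the call order mirrors the diff:

#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

struct ToyFence
{
	bool m_signaled = false;
	bool done() const { return m_signaled; }
};

struct ToyCmdb
{
	std::shared_ptr<ToyFence> m_fence;
	bool m_resetDone = true;

	void record() { m_resetDone = false; }
	void setFence(std::shared_ptr<ToyFence> f) { m_fence = std::move(f); }
	std::shared_ptr<ToyFence>& getFence() { return m_fence; }
	void onFenceDone() // The real class calls reset() here
	{
		m_fence.reset();
		m_resetDone = true;
	}
};

class ToyRecycler
{
public:
	void recycle(ToyCmdb* c) { m_cache.push_back(c); }

	ToyCmdb* findToReuse()
	{
		for(std::size_t i = 0; i < m_cache.size(); ++i)
		{
			ToyCmdb* c = m_cache[i];
			const bool gpuDone = !c->getFence() || c->getFence()->done();
			if(gpuDone)
			{
				if(c->getFence())
				{
					c->onFenceDone();
				}
				m_cache.erase(m_cache.begin() + static_cast<std::ptrdiff_t>(i));
				return c; // Already reset, unlike the old code path
			}
		}
		return nullptr; // Caller creates a fresh command buffer
	}

private:
	std::vector<ToyCmdb*> m_cache;
};

int main()
{
	ToyRecycler recycler;
	ToyCmdb cmdb;
	auto fence = std::make_shared<ToyFence>();

	cmdb.record();
	cmdb.setFence(fence); // Submit: the GPU owns the buffer now
	recycler.recycle(&cmdb); // Last reference dropped

	assert(recycler.findToReuse() == nullptr); // Fence not done yet

	fence->m_signaled = true; // GPU finished
	ToyCmdb* reused = recycler.findToReuse();
	assert(reused == &cmdb && reused->m_resetDone);
	return 0;
}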

+ 23 - 23
AnKi/Gr/Vulkan/CommandBufferFactory.h

@@ -7,6 +7,7 @@
 
 #include <AnKi/Gr/Vulkan/FenceFactory.h>
 #include <AnKi/Gr/CommandBuffer.h>
+#include <AnKi/Gr/Vulkan/MicroObjectRecycler.h>
 #include <AnKi/Util/List.h>
 
 namespace anki {
@@ -30,13 +31,33 @@ public:
 		ANKI_ASSERT(allocator);
 	}
 
+	~MicroCommandBuffer();
+
 	Atomic<I32>& getRefcount()
 	{
 		return m_refcount;
 	}
 
+	void setFence(MicroFencePtr& fence)
+	{
+		ANKI_ASSERT(!(m_flags & CommandBufferFlag::SECOND_LEVEL));
+		ANKI_ASSERT(!m_fence.isCreated());
+		m_fence = fence;
+	}
+
+	MicroFencePtr& getFence()
+	{
+		return m_fence;
+	}
+
 	GrAllocator<U8>& getAllocator();
 
+	/// Interface method.
+	void onFenceDone()
+	{
+		reset();
+	}
+
 	StackAllocator<U8>& getFastAllocator()
 	{
 		return m_fastAlloc;
@@ -54,13 +75,6 @@ public:
 		pushToArray(m_objectRefs[T::CLASS_TYPE], x.get());
 	}
 
-	void setFence(MicroFencePtr& fence)
-	{
-		ANKI_ASSERT(!(m_flags & CommandBufferFlag::SECOND_LEVEL));
-		ANKI_ASSERT(!m_fence.isCreated());
-		m_fence = fence;
-	}
-
 	CommandBufferFlag getFlags() const
 	{
 		return m_flags;
@@ -88,7 +102,6 @@ private:
 	CommandBufferFlag m_flags = CommandBufferFlag::NONE;
 	VulkanQueueType m_queue = VulkanQueueType::COUNT;
 
-	void destroy();
 	void reset();
 
 	void pushToArray(DynamicArray<GrObjectPtr>& arr, GrObject* grobj)
@@ -153,7 +166,7 @@ public:
 	GrAllocator<U8>& getAllocator();
 
 	/// Request a new command buffer.
-	ANKI_USE_RESULT Error newCommandBuffer(CommandBufferFlag cmdbFlags, MicroCommandBufferPtr& ptr, Bool& createdNew);
+	ANKI_USE_RESULT Error newCommandBuffer(CommandBufferFlag cmdbFlags, MicroCommandBufferPtr& ptr);
 
 	/// It will recycle it.
 	void deleteCommandBuffer(MicroCommandBuffer* ptr);
@@ -163,24 +176,11 @@ private:
 	ThreadId m_tid;
 	Array<VkCommandPool, U(VulkanQueueType::COUNT)> m_pools = {};
 
-	class CmdbType
-	{
-	public:
-		IntrusiveList<MicroCommandBuffer> m_readyCmdbs; ///< Buffers that are ready to be used.
-		IntrusiveList<MicroCommandBuffer> m_inUseCmdbs; ///< Buffer that got dereferenced and maybe in-use.
-
-		IntrusiveList<MicroCommandBuffer> m_deletedCmdbs;
-		Mutex m_deletedMtx; ///< Lock because the dallocations may happen anywhere.
-	};
-
 #if ANKI_EXTRA_CHECKS
 	Atomic<U32> m_createdCmdbs = {0};
 #endif
 
-	Array3d<CmdbType, 2, 2, U(VulkanQueueType::COUNT)> m_types;
-
-	void destroyList(IntrusiveList<MicroCommandBuffer>& list);
-	void destroyLists();
+	Array3d<MicroObjectRecycler<MicroCommandBuffer>, 2, 2, U(VulkanQueueType::COUNT)> m_recyclers;
 };
 
 /// Command buffer object recycler.

+ 6 - 0
AnKi/Gr/Vulkan/DeferredBarrierFactory.h

@@ -50,6 +50,12 @@ public:
 		return m_fence;
 	}
 
+	/// Interface method.
+	void onFenceDone()
+	{
+		// Do nothing
+	}
+
 private:
 	VkEvent m_handle = VK_NULL_HANDLE;
 	Atomic<U32> m_refcount = {0};

+ 34 - 6
AnKi/Gr/Vulkan/MicroObjectRecycler.h

@@ -13,7 +13,7 @@ namespace anki {
 /// @addtogroup vulkan
 /// @{
 
-/// Helper class for MicroXXX objects.
+/// Helper class for MicroXXX objects. It expects a specific interface for the T.
 template<typename T>
 class MicroObjectRecycler
 {
@@ -47,19 +47,47 @@ public:
 	void recycle(T* s);
 
 	/// Destroy those objects that their fence is done. It's thread-safe.
-	/// @param objectsToNotDestroy The number of objects to keep alive for future recycling.
-	void trimCache(U32 objectsToNotDestroy = 0);
+	void trimCache()
+	{
+		LockGuard<Mutex> lock(m_mtx);
+		checkDoneFences();
+		trimCacheInternal(0);
+	}
+
+	U32 getCacheSize() const
+	{
+		return m_objects.getSize();
+	}
 
 private:
+	class Object
+	{
+	public:
+		T* m_microObject;
+		Bool m_fenceDone;
+	};
+
 	GrAllocator<U8> m_alloc;
-	DynamicArray<T*> m_objects;
+	DynamicArray<Object> m_objects;
 	Mutex m_mtx;
+
+	// Begin trim cache adjustment vars
+	U32 m_readyObjectsAfterTrim = 1;
+	static constexpr U32 m_maxRequestsPerAdjustment = 128;
+	U32 m_cacheMisses = 0;
+	U32 m_requests = 0;
+	U32 m_minCacheSizePerRequest = MAX_U32;
+	// End trim cache adjustment vars
+
 #if ANKI_EXTRA_CHECKS
 	U32 m_createdAndNotRecycled = 0;
 #endif
 
-	/// @return The number of objects that could be deleted.
-	U32 releaseFences();
+	void trimCacheInternal(U32 aliveObjectCountAfterTrim);
+
+	void adjustAliveObjectCount();
+
+	void checkDoneFences();
 };
 /// @}
 

+ 106 - 52
AnKi/Gr/Vulkan/MicroObjectRecycler.inl.h

@@ -12,14 +12,16 @@ inline void MicroObjectRecycler<T>::destroy()
 {
 	LockGuard<Mutex> lock(m_mtx);
 
+	checkDoneFences();
+
 	for(U32 i = 0; i < m_objects.getSize(); ++i)
 	{
-		T* obj = m_objects[i];
-		ANKI_ASSERT(obj);
-		ANKI_ASSERT(!obj->getFence() || obj->getFence()->done());
+		T* mobj = m_objects[i].m_microObject;
+		ANKI_ASSERT(mobj);
+		ANKI_ASSERT(!mobj->getFence());
 
-		auto alloc = obj->getAllocator();
-		alloc.deleteInstance(obj);
+		auto alloc = mobj->getAllocator();
+		alloc.deleteInstance(mobj);
 #if ANKI_EXTRA_CHECKS
 		--m_createdAndNotRecycled;
 #endif
@@ -29,47 +31,34 @@ inline void MicroObjectRecycler<T>::destroy()
 	ANKI_ASSERT(m_createdAndNotRecycled == 0 && "Destroying the recycler while objects have not recycled yet");
 }
 
-template<typename T>
-inline U32 MicroObjectRecycler<T>::releaseFences()
-{
-	U32 objectsThatCanBeDestroyed = 0;
-	for(U32 i = 0; i < m_objects.getSize(); ++i)
-	{
-		T& obj = *m_objects[i];
-		if(obj.getFence() && obj.getFence()->done())
-		{
-			obj.getFence().reset(nullptr);
-			++objectsThatCanBeDestroyed;
-		}
-	}
-
-	return objectsThatCanBeDestroyed;
-}
-
 template<typename T>
 inline T* MicroObjectRecycler<T>::findToReuse()
 {
 	T* out = nullptr;
 	LockGuard<Mutex> lock(m_mtx);
 
-	if(m_objects.getSize() > 0)
-	{
-		releaseFences();
+	checkDoneFences();
+	adjustAliveObjectCount();
+
+	// Trim the cache but leave at least one object to be recycled
+	trimCacheInternal(max(m_readyObjectsAfterTrim, 1u));
 
-		for(U32 i = 0; i < m_objects.getSize(); ++i)
+	for(U32 i = 0; i < m_objects.getSize(); ++i)
+	{
+		if(m_objects[i].m_fenceDone)
 		{
-			if(!m_objects[i]->getFence())
-			{
-				out = m_objects[i];
-				m_objects[i] = m_objects[m_objects.getSize() - 1];
-				m_objects.popBack(m_alloc);
-				break;
-			}
+			out = m_objects[i].m_microObject;
+			m_objects[i] = m_objects[m_objects.getSize() - 1];
+			m_objects.popBack(m_alloc);
+
+			break;
 		}
 	}
 
 	ANKI_ASSERT(out == nullptr || out->getRefcount().getNonAtomically() == 0);
 
+	m_cacheMisses += (out == nullptr);
+
 #if ANKI_EXTRA_CHECKS
 	if(out == nullptr)
 	{
@@ -81,43 +70,73 @@ inline T* MicroObjectRecycler<T>::findToReuse()
 }
 
 template<typename T>
-inline void MicroObjectRecycler<T>::recycle(T* s)
+void MicroObjectRecycler<T>::recycle(T* mobj)
 {
-	ANKI_ASSERT(s);
-	ANKI_ASSERT(s->getRefcount().getNonAtomically() == 0);
+	ANKI_ASSERT(mobj);
+	ANKI_ASSERT(mobj->getRefcount().getNonAtomically() == 0);
 
 	LockGuard<Mutex> lock(m_mtx);
 
-	releaseFences();
+	Object obj;
+	obj.m_fenceDone = !mobj->getFence();
+	obj.m_microObject = mobj;
+
+	if(obj.m_fenceDone)
+	{
+		mobj->onFenceDone();
+	}
 
-	m_objects.emplaceBack(m_alloc, s);
+	m_objects.emplaceBack(m_alloc, obj);
+	checkDoneFences();
+	trimCacheInternal(m_readyObjectsAfterTrim);
 }
 
 template<typename T>
-inline void MicroObjectRecycler<T>::trimCache(U32 objectsToNotDestroy)
+void MicroObjectRecycler<T>::checkDoneFences()
 {
-	LockGuard<Mutex> lock(m_mtx);
+	for(Object& obj : m_objects)
+	{
+		T& mobj = *obj.m_microObject;
 
-	U32 objectsThatCoultBeDeletedCount = releaseFences();
+		if(obj.m_fenceDone)
+		{
+			ANKI_ASSERT(!mobj.getFence());
+		}
 
-	DynamicArray<T*> aliveObjects;
+		if(!obj.m_fenceDone && mobj.getFence() && mobj.getFence()->done())
+		{
+			mobj.getFence().reset(nullptr);
+			mobj.onFenceDone();
+			obj.m_fenceDone = true;
+		}
+	}
+}
 
-	for(U32 i = 0; i < m_objects.getSize(); ++i)
+template<typename T>
+void MicroObjectRecycler<T>::trimCacheInternal(U32 aliveObjectCountAfterTrim)
+{
+	DynamicArray<Object> aliveObjects;
+
+	for(Object& obj : m_objects)
 	{
-		T* obj = m_objects[i];
-		ANKI_ASSERT(obj);
-		ANKI_ASSERT(obj->getRefcount().getNonAtomically() == 0);
+		T& mobj = *obj.m_microObject;
+		const Bool inUseByTheGpu = !obj.m_fenceDone;
 
-		if(obj->getFence() || objectsThatCoultBeDeletedCount <= objectsToNotDestroy)
+		if(inUseByTheGpu)
 		{
-			// Can't delete it
+			// Can't delete it for sure
 			aliveObjects.emplaceBack(m_alloc, obj);
 		}
+		else if(aliveObjectCountAfterTrim > 0)
+		{
+			// Need to keep a few alive for recycling
+			aliveObjects.emplaceBack(m_alloc, obj);
+			--aliveObjectCountAfterTrim;
+		}
 		else
 		{
-			auto alloc = obj->getAllocator();
-			alloc.deleteInstance(obj);
-			--objectsThatCoultBeDeletedCount;
+			auto alloc = mobj.getAllocator();
+			alloc.deleteInstance(&mobj);
 #if ANKI_EXTRA_CHECKS
 			--m_createdAndNotRecycled;
 #endif
@@ -130,11 +149,46 @@ inline void MicroObjectRecycler<T>::trimCache(U32 objectsToNotDestroy)
 		m_objects.destroy(m_alloc);
 		m_objects = std::move(aliveObjects);
 	}
-	else if(aliveObjects.getSize() == 0 && m_objects.getSize() >= 0)
+	else if(aliveObjects.getSize() == 0 && m_objects.getSize() > 0)
 	{
 		// All dead, destroy the array
 		m_objects.destroy(m_alloc);
 	}
 }
 
+template<typename T>
+void MicroObjectRecycler<T>::adjustAliveObjectCount()
+{
+	U32 readyObjects = 0;
+	for(Object& obj : m_objects)
+	{
+		readyObjects += obj.m_fenceDone;
+	}
+
+	if(ANKI_LIKELY(m_requests < m_maxRequestsPerAdjustment))
+	{
+		// Not enough requests for a recycle
+		m_minCacheSizePerRequest = min(m_minCacheSizePerRequest, readyObjects);
+		++m_requests;
+	}
+	else
+	{
+		if(m_cacheMisses)
+		{
+			// Need more alive objects
+			m_readyObjectsAfterTrim += 4;
+		}
+		else if(m_minCacheSizePerRequest > 2 && m_readyObjectsAfterTrim > 0)
+		{
+			// Have more than enough alive objects per request, decrease alive objects
+			--m_readyObjectsAfterTrim;
+		}
+
+		// Start new cycle
+		m_cacheMisses = 0;
+		m_requests = 0;
+		m_minCacheSizePerRequest = readyObjects;
+	}
+}
+
 } // end namespace anki
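
The adjustment logic above works in windows of m_maxRequestsPerAdjustment (128) findToReuse() calls: if any request in the window missed the cache, the number of ready objects kept after a trim grows by 4; if the window's minimum ready count never dropped to 2 or below, it shrinks by one (findToReuse() still keeps at least one object alive via max(m_readyObjectsAfterTrim, 1u)). The sketch below replicates only that arithmetic; the constants are copied from the diff, while the driver loop and the simulated demand are invented for illustration:

#include <algorithm>
#include <cstdint>
#include <cstdio>

class TrimTuner
{
public:
	// Called once per findToReuse()-style request.
	void onRequest(uint32_t readyObjectsInCache, bool cacheMiss)
	{
		m_cacheMisses += cacheMiss;

		if(m_requests < kMaxRequestsPerAdjustment)
		{
			m_minCacheSizePerRequest = std::min(m_minCacheSizePerRequest, readyObjectsInCache);
			++m_requests;
			return;
		}

		if(m_cacheMisses)
		{
			m_readyObjectsAfterTrim += 4; // Demand exceeded the cache, keep more alive
		}
		else if(m_minCacheSizePerRequest > 2 && m_readyObjectsAfterTrim > 0)
		{
			--m_readyObjectsAfterTrim; // The cache never ran low, keep fewer alive
		}

		// Start a new observation window
		m_cacheMisses = 0;
		m_requests = 0;
		m_minCacheSizePerRequest = readyObjectsInCache;
	}

	uint32_t readyObjectsAfterTrim() const { return m_readyObjectsAfterTrim; }

private:
	static constexpr uint32_t kMaxRequestsPerAdjustment = 128;
	uint32_t m_readyObjectsAfterTrim = 1;
	uint32_t m_cacheMisses = 0;
	uint32_t m_requests = 0;
	uint32_t m_minCacheSizePerRequest = UINT32_MAX;
};

int main()
{
	TrimTuner tuner; // Starts out keeping 1 ready object after each trim

	// Window 1: every request finds at least 8 ready objects and never misses,
	// so the target drops by one (1 -> 0).
	for(uint32_t i = 0; i < 129; ++i)
	{
		tuner.onRequest(/*readyObjectsInCache=*/8, /*cacheMiss=*/false);
	}
	std::printf("after surplus window: keep %u ready\n", tuner.readyObjectsAfterTrim());

	// Window 2: a few requests miss the cache, so the target jumps by 4 (0 -> 4).
	for(uint32_t i = 0; i < 129; ++i)
	{
		tuner.onRequest(/*readyObjectsInCache=*/0, /*cacheMiss=*/i % 50 == 0);
	}
	std::printf("after miss window: keep %u ready\n", tuner.readyObjectsAfterTrim());
	return 0;
}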

+ 6 - 0
AnKi/Gr/Vulkan/SemaphoreFactory.h

@@ -47,6 +47,12 @@ public:
 		return m_fence;
 	}
 
+	/// Interface method.
+	void onFenceDone()
+	{
+		// Do nothing
+	}
+
 	void setFence(MicroFencePtr& fence)
 	{
 		m_fence = fence;

+ 6 - 0
AnKi/Gr/Vulkan/SwapchainFactory.h

@@ -49,6 +49,12 @@ public:
 		return m_fence;
 	}
 
+	/// Interface method.
+	void onFenceDone()
+	{
+		// Do nothing
+	}
+
 	VkRenderPass getRenderPass(VkAttachmentLoadOp loadOp) const
 	{
 		const U idx = (loadOp == VK_ATTACHMENT_LOAD_OP_DONT_CARE) ? RPASS_LOAD_DONT_CARE : RPASS_LOAD_CLEAR;

+ 0 - 24
AnKi/Gr/Vulkan/TextureImpl.cpp

@@ -176,30 +176,6 @@ Error TextureImpl::initInternal(VkImage externalImage, const TextureInitInfo& in
 		m_viewCreateInfoTemplate.pNext = &m_astcDecodeMode;
 	}
 
-	// Transition the image layout from undefined to something relevant
-	if(!!init.m_initialUsage)
-	{
-		ANKI_ASSERT(usageValid(init.m_initialUsage));
-		ANKI_ASSERT(!(init.m_initialUsage & TextureUsageBit::GENERATE_MIPMAPS) && "That doesn't make any sense");
-
-		CommandBufferInitInfo cmdbinit;
-		cmdbinit.m_flags = CommandBufferFlag::GENERAL_WORK | CommandBufferFlag::SMALL_BATCH;
-		CommandBufferPtr cmdb = getManager().newCommandBuffer(cmdbinit);
-
-		VkImageSubresourceRange range;
-		range.aspectMask = convertImageAspect(m_aspect);
-		range.baseArrayLayer = 0;
-		range.baseMipLevel = 0;
-		range.layerCount = m_layerCount;
-		range.levelCount = m_mipCount;
-
-		CommandBufferImpl& cmdbImpl = static_cast<CommandBufferImpl&>(*cmdb);
-		cmdbImpl.setTextureBarrierRange(TexturePtr(this), TextureUsageBit::NONE, init.m_initialUsage, range);
-
-		cmdbImpl.endRecording();
-		getGrManagerImpl().flushCommandBuffer(cmdbImpl.getMicroCommandBuffer(), false, {}, nullptr);
-	}
-
 	// Create a view if the texture is a single surface
 	if(m_texType == TextureType::_2D && m_mipCount == 1 && m_aspect == DepthStencilAspectBit::NONE)
 	{

+ 1 - 2
AnKi/Renderer/DepthDownscale.cpp

@@ -62,8 +62,7 @@ Error DepthDownscale::initInternal()
 
 		TextureInitInfo texInit = m_r->create2DRenderTargetInitInfo(width, height, Format::R32_SFLOAT, usage, "HiZ");
 		texInit.m_mipmapCount = U8(m_mipCount);
-		texInit.m_initialUsage = TextureUsageBit::SAMPLED_FRAGMENT;
-		m_hizTex = m_r->createAndClearRenderTarget(texInit);
+		m_hizTex = m_r->createAndClearRenderTarget(texInit, TextureUsageBit::SAMPLED_FRAGMENT);
 	}
 
 	// Progs

+ 1 - 2
AnKi/Renderer/DownscaleBlur.cpp

@@ -50,8 +50,7 @@ Error DownscaleBlur::initInternal()
 		texinit.m_usage |= TextureUsageBit::FRAMEBUFFER_ATTACHMENT_WRITE;
 	}
 	texinit.m_mipmapCount = U8(m_passCount);
-	texinit.m_initialUsage = TextureUsageBit::SAMPLED_COMPUTE;
-	m_rtTex = m_r->createAndClearRenderTarget(texinit);
+	m_rtTex = m_r->createAndClearRenderTarget(texinit, TextureUsageBit::SAMPLED_COMPUTE);
 
 	// FB descr
 	if(!preferCompute)

+ 1 - 3
AnKi/Renderer/GBuffer.cpp

@@ -41,9 +41,7 @@ Error GBuffer::initInternal()
 			m_r->getInternalResolution().x(), m_r->getInternalResolution().y(), m_r->getDepthNoStencilFormat(),
 			TextureUsageBit::ALL_SAMPLED | TextureUsageBit::ALL_FRAMEBUFFER_ATTACHMENT, depthRtNames[i]);
 
-		texinit.m_initialUsage = TextureUsageBit::SAMPLED_FRAGMENT;
-
-		m_depthRts[i] = m_r->createAndClearRenderTarget(texinit);
+		m_depthRts[i] = m_r->createAndClearRenderTarget(texinit, TextureUsageBit::SAMPLED_FRAGMENT);
 	}
 
 	static const Array<const char*, GBUFFER_COLOR_ATTACHMENT_COUNT> rtNames = {

+ 2 - 3
AnKi/Renderer/IndirectDiffuse.cpp

@@ -44,10 +44,9 @@ Error IndirectDiffuse::initInternal()
 	usage |= (preferCompute) ? TextureUsageBit::IMAGE_COMPUTE_WRITE : TextureUsageBit::FRAMEBUFFER_ATTACHMENT_WRITE;
 	TextureInitInfo texInit =
 		m_r->create2DRenderTargetInitInfo(size.x(), size.y(), m_r->getHdrFormat(), usage, "IndirectDiffuse #1");
-	texInit.m_initialUsage = TextureUsageBit::ALL_SAMPLED;
-	m_rts[0] = m_r->createAndClearRenderTarget(texInit);
+	m_rts[0] = m_r->createAndClearRenderTarget(texInit, TextureUsageBit::ALL_SAMPLED);
 	texInit.setName("IndirectDiffuse #2");
-	m_rts[1] = m_r->createAndClearRenderTarget(texInit);
+	m_rts[1] = m_r->createAndClearRenderTarget(texInit, TextureUsageBit::ALL_SAMPLED);
 
 	m_fbDescr.m_colorAttachmentCount = 1;
 	m_fbDescr.bake();

+ 1 - 2
AnKi/Renderer/IndirectDiffuseProbes.cpp

@@ -484,9 +484,8 @@ void IndirectDiffuseProbes::prepareProbes(InternalContext& giCtx)
 			texInit.m_height = probe.m_cellCounts.y();
 			texInit.m_depth = probe.m_cellCounts.z();
 			texInit.m_usage = TextureUsageBit::ALL_COMPUTE | TextureUsageBit::ALL_SAMPLED;
-			texInit.m_initialUsage = TextureUsageBit::SAMPLED_FRAGMENT;
 
-			entry.m_volumeTex = m_r->createAndClearRenderTarget(texInit);
+			entry.m_volumeTex = m_r->createAndClearRenderTarget(texInit, TextureUsageBit::SAMPLED_FRAGMENT);
 		}
 
 		// Compute the render position

+ 2 - 3
AnKi/Renderer/MotionVectors.cpp

@@ -58,10 +58,9 @@ Error MotionVectors::initInternal()
 	TextureInitInfo historyLengthTexInit =
 		m_r->create2DRenderTargetInitInfo(m_r->getInternalResolution().x(), m_r->getInternalResolution().y(),
 										  Format::R8_UNORM, historyLengthUsage, "MotionVectorsHistoryLen#1");
-	historyLengthTexInit.m_initialUsage = TextureUsageBit::ALL_SAMPLED;
-	m_historyLengthTextures[0] = m_r->createAndClearRenderTarget(historyLengthTexInit);
+	m_historyLengthTextures[0] = m_r->createAndClearRenderTarget(historyLengthTexInit, TextureUsageBit::ALL_SAMPLED);
 	historyLengthTexInit.setName("MotionVectorsHistoryLen#2");
-	m_historyLengthTextures[1] = m_r->createAndClearRenderTarget(historyLengthTexInit);
+	m_historyLengthTextures[1] = m_r->createAndClearRenderTarget(historyLengthTexInit, TextureUsageBit::ALL_SAMPLED);
 
 	m_fbDescr.m_colorAttachmentCount = 2;
 	m_fbDescr.bake();

+ 1 - 2
AnKi/Renderer/ProbeReflections.cpp

@@ -125,9 +125,8 @@ Error ProbeReflections::initLightShading()
 		texinit.m_mipmapCount = U8(m_lightShading.m_mipCount);
 		texinit.m_type = TextureType::CUBE_ARRAY;
 		texinit.m_layerCount = m_cacheEntries.getSize();
-		texinit.m_initialUsage = TextureUsageBit::SAMPLED_FRAGMENT;
 
-		m_lightShading.m_cubeArr = m_r->createAndClearRenderTarget(texinit);
+		m_lightShading.m_cubeArr = m_r->createAndClearRenderTarget(texinit, TextureUsageBit::SAMPLED_FRAGMENT);
 	}
 
 	// Init deferred

+ 7 - 8
AnKi/Renderer/Renderer.cpp

@@ -118,7 +118,6 @@ Error Renderer::initInternal(UVec2 swapchainResolution)
 		texinit.m_width = texinit.m_height = 4;
 		texinit.m_usage = TextureUsageBit::ALL_SAMPLED;
 		texinit.m_format = Format::R8G8B8A8_UNORM;
-		texinit.m_initialUsage = TextureUsageBit::ALL_SAMPLED;
 		TexturePtr tex = getGrManager().newTexture(texinit);
 
 		TextureViewInitInfo viewinit(tex);
@@ -433,7 +432,8 @@ RenderTargetDescription Renderer::create2DRenderTargetDescription(U32 w, U32 h,
 	return init;
 }
 
-TexturePtr Renderer::createAndClearRenderTarget(const TextureInitInfo& inf, const ClearValue& clearVal)
+TexturePtr Renderer::createAndClearRenderTarget(const TextureInitInfo& inf, TextureUsageBit initialUsage,
+												const ClearValue& clearVal)
 {
 	ANKI_ASSERT(!!(inf.m_usage & TextureUsageBit::FRAMEBUFFER_ATTACHMENT_WRITE)
 				|| !!(inf.m_usage & TextureUsageBit::IMAGE_COMPUTE_WRITE));
@@ -521,10 +521,10 @@ TexturePtr Renderer::createAndClearRenderTarget(const TextureInitInfo& inf, cons
 					cmdb->beginRenderPass(fb, colUsage, dsUsage);
 					cmdb->endRenderPass();
 
-					if(!!inf.m_initialUsage)
+					if(!!initialUsage)
 					{
-						cmdb->setTextureSurfaceBarrier(tex, TextureUsageBit::FRAMEBUFFER_ATTACHMENT_WRITE,
-													   inf.m_initialUsage, surf);
+						cmdb->setTextureSurfaceBarrier(tex, TextureUsageBit::FRAMEBUFFER_ATTACHMENT_WRITE, initialUsage,
+													   surf);
 					}
 				}
 				else
@@ -569,10 +569,9 @@ TexturePtr Renderer::createAndClearRenderTarget(const TextureInitInfo& inf, cons
 
 					cmdb->dispatchCompute(wgSize.x(), wgSize.y(), wgSize.z());
 
-					if(!!inf.m_initialUsage)
+					if(!!initialUsage)
 					{
-						cmdb->setTextureSurfaceBarrier(tex, TextureUsageBit::IMAGE_COMPUTE_WRITE, inf.m_initialUsage,
-													   surf);
+						cmdb->setTextureSurfaceBarrier(tex, TextureUsageBit::IMAGE_COMPUTE_WRITE, initialUsage, surf);
 					}
 				}
 			}

+ 1 - 1
AnKi/Renderer/Renderer.h

@@ -110,7 +110,7 @@ public:
 	ANKI_USE_RESULT RenderTargetDescription create2DRenderTargetDescription(U32 w, U32 h, Format format,
 																			CString name = {});
 
-	ANKI_USE_RESULT TexturePtr createAndClearRenderTarget(const TextureInitInfo& inf,
+	ANKI_USE_RESULT TexturePtr createAndClearRenderTarget(const TextureInitInfo& inf, TextureUsageBit initialUsage,
 														  const ClearValue& clearVal = ClearValue());
 
 	GrManager& getGrManager()
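
With this signature, the initial usage travels as an explicit argument to Renderer::createAndClearRenderTarget() instead of a TextureInitInfo field; when the argument is not NONE, the function ends with a barrier from the clear usage into the requested one. A short call-site fragment that mirrors the render-stage updates in this commit; it assumes the usual Renderer* m_r member of a render-stage class and is not a complete function:

	const U32 width = 128, height = 128;
	TextureInitInfo texInit = m_r->create2DRenderTargetInitInfo(
		width, height, Format::R32_SFLOAT,
		TextureUsageBit::ALL_SAMPLED | TextureUsageBit::IMAGE_COMPUTE_WRITE, "MyRt");

	// Clear the texture and leave it transitioned for sampling.
	TexturePtr rt = m_r->createAndClearRenderTarget(texInit, TextureUsageBit::SAMPLED_FRAGMENT);

	// Passing TextureUsageBit::NONE skips the trailing barrier; the texture then stays
	// in whatever usage the clear pass finished with.
	TexturePtr scratch = m_r->createAndClearRenderTarget(texInit, TextureUsageBit::NONE);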

+ 3 - 5
AnKi/Renderer/RtShadows.cpp

@@ -135,8 +135,7 @@ Error RtShadows::initInternal()
 			TextureUsageBit::ALL_SAMPLED | TextureUsageBit::IMAGE_TRACE_RAYS_WRITE
 				| TextureUsageBit::IMAGE_COMPUTE_WRITE,
 			"RtShadows History");
-		texinit.m_initialUsage = TextureUsageBit::SAMPLED_FRAGMENT;
-		m_historyRt = m_r->createAndClearRenderTarget(texinit);
+		m_historyRt = m_r->createAndClearRenderTarget(texinit, TextureUsageBit::SAMPLED_FRAGMENT);
 	}
 
 	// Temp shadow RT
@@ -154,11 +153,10 @@ Error RtShadows::initInternal()
 			TextureUsageBit::ALL_SAMPLED | TextureUsageBit::IMAGE_TRACE_RAYS_WRITE
 				| TextureUsageBit::IMAGE_COMPUTE_WRITE,
 			"RtShadows Moments #1");
-		texinit.m_initialUsage = TextureUsageBit::SAMPLED_FRAGMENT;
-		m_momentsRts[0] = m_r->createAndClearRenderTarget(texinit);
+		m_momentsRts[0] = m_r->createAndClearRenderTarget(texinit, TextureUsageBit::SAMPLED_FRAGMENT);
 
 		texinit.setName("RtShadows Moments #2");
-		m_momentsRts[1] = m_r->createAndClearRenderTarget(texinit);
+		m_momentsRts[1] = m_r->createAndClearRenderTarget(texinit, TextureUsageBit::SAMPLED_FRAGMENT);
 	}
 
 	// Variance RT

+ 1 - 2
AnKi/Renderer/ShadowMapping.cpp

@@ -109,10 +109,9 @@ Error ShadowMapping::initAtlas()
 		TextureInitInfo texinit = m_r->create2DRenderTargetInitInfo(
 			m_atlas.m_tileResolution * m_atlas.m_tileCountBothAxis,
 			m_atlas.m_tileResolution * m_atlas.m_tileCountBothAxis, texFormat, usage, "SM atlas");
-		texinit.m_initialUsage = TextureUsageBit::SAMPLED_FRAGMENT;
 		ClearValue clearVal;
 		clearVal.m_colorf[0] = 1.0f;
-		m_atlas.m_tex = m_r->createAndClearRenderTarget(texinit, clearVal);
+		m_atlas.m_tex = m_r->createAndClearRenderTarget(texinit, TextureUsageBit::SAMPLED_FRAGMENT, clearVal);
 	}
 
 	// Tiles

+ 1 - 3
AnKi/Renderer/TemporalAA.cpp

@@ -69,9 +69,7 @@ Error TemporalAA::initInternal()
 			m_r->create2DRenderTargetInitInfo(m_r->getInternalResolution().x(), m_r->getInternalResolution().y(),
 											  m_r->getHdrFormat(), usage, "TemporalAA");
 
-		texinit.m_initialUsage = TextureUsageBit::SAMPLED_FRAGMENT;
-
-		m_rtTextures[i] = m_r->createAndClearRenderTarget(texinit);
+		m_rtTextures[i] = m_r->createAndClearRenderTarget(texinit, TextureUsageBit::SAMPLED_FRAGMENT);
 	}
 
 	m_tonemappedRtDescr = m_r->create2DRenderTargetDescription(

+ 2 - 3
AnKi/Renderer/VolumetricLightingAccumulation.cpp

@@ -66,9 +66,8 @@ Error VolumetricLightingAccumulation::init()
 										  "VolLight");
 	texinit.m_depth = m_volumeSize[2];
 	texinit.m_type = TextureType::_3D;
-	texinit.m_initialUsage = TextureUsageBit::SAMPLED_FRAGMENT;
-	m_rtTextures[0] = m_r->createAndClearRenderTarget(texinit);
-	m_rtTextures[1] = m_r->createAndClearRenderTarget(texinit);
+	m_rtTextures[0] = m_r->createAndClearRenderTarget(texinit, TextureUsageBit::SAMPLED_FRAGMENT);
+	m_rtTextures[1] = m_r->createAndClearRenderTarget(texinit, TextureUsageBit::SAMPLED_FRAGMENT);
 
 	return Error::NONE;
 }

+ 1 - 2
AnKi/Renderer/VrsSriGeneration.cpp

@@ -46,8 +46,7 @@ Error VrsSriGeneration::initInternal()
 									 | TextureUsageBit::SAMPLED_FRAGMENT;
 	TextureInitInfo sriInitInfo =
 		m_r->create2DRenderTargetInitInfo(rez.x(), rez.y(), Format::R8_UINT, texUsage, "VRS SRI");
-	sriInitInfo.m_initialUsage = TextureUsageBit::FRAMEBUFFER_SHADING_RATE;
-	m_sriTex = m_r->createAndClearRenderTarget(sriInitInfo);
+	m_sriTex = m_r->createAndClearRenderTarget(sriInitInfo, TextureUsageBit::FRAMEBUFFER_SHADING_RATE);
 
 	// Descr
 	m_fbDescr.m_colorAttachmentCount = 1;

+ 18 - 1
AnKi/Resource/ImageResource.cpp

@@ -73,7 +73,6 @@ Error ImageResource::load(const ResourceFilename& filename, Bool async)
 
 	TextureInitInfo init(filenameExt);
 	init.m_usage = TextureUsageBit::ALL_SAMPLED | TextureUsageBit::TRANSFER_DESTINATION;
-	init.m_initialUsage = TextureUsageBit::ALL_SAMPLED;
 	U32 faces = 0;
 
 	ResourceFilePtr file;
@@ -207,6 +206,24 @@ Error ImageResource::load(const ResourceFilename& filename, Bool async)
 	// Create the texture
 	m_tex = getManager().getGrManager().newTexture(init);
 
+	// Transition it. TODO remove that eventually
+	{
+		CommandBufferInitInfo cmdbinit;
+		cmdbinit.m_flags = CommandBufferFlag::GENERAL_WORK | CommandBufferFlag::SMALL_BATCH;
+		CommandBufferPtr cmdb = getManager().getGrManager().newCommandBuffer(cmdbinit);
+
+		TextureSubresourceInfo subresource;
+		subresource.m_faceCount = textureTypeIsCube(init.m_type) ? 6 : 1;
+		subresource.m_layerCount = init.m_layerCount;
+		subresource.m_mipmapCount = init.m_mipmapCount;
+
+		cmdb->setTextureBarrier(m_tex, TextureUsageBit::NONE, TextureUsageBit::ALL_SAMPLED, subresource);
+
+		FencePtr outFence;
+		cmdb->flush({}, &outFence);
+		outFence->clientWait(60.0_sec);
+	}
+
 	// Set the context
 	ctx->m_faces = faces;
 	ctx->m_layerCount = init.m_layerCount;

+ 0 - 1
AnKi/ShaderCompiler/Glslang.h

@@ -8,7 +8,6 @@
 #include <AnKi/ShaderCompiler/Common.h>
 #include <AnKi/Util/String.h>
 #include <AnKi/Gr/Enums.h>
-#include <AnKi/Gr/Utils/Functions.h>
 
 namespace anki {
 

+ 0 - 1
AnKi/ShaderCompiler/ShaderProgramBinaryExtra.h

@@ -4,7 +4,6 @@
 // http://www.anki3d.org/LICENSE
 
 #include <AnKi/ShaderCompiler/Common.h>
-#include <AnKi/Gr/Utils/Functions.h>
 #include <AnKi/Util/Serializer.h>
 
 namespace anki {

+ 0 - 1
AnKi/ShaderCompiler/ShaderProgramParser.h

@@ -9,7 +9,6 @@
 #include <AnKi/Util/StringList.h>
 #include <AnKi/Util/WeakArray.h>
 #include <AnKi/Util/DynamicArray.h>
-#include <AnKi/Gr/Utils/Functions.h>
 
 namespace anki {
 

+ 4 - 4
AnKi/Shaders/IndirectDiffuse.glsl

@@ -26,12 +26,12 @@ ANKI_SPECIALIZATION_CONSTANT_U32(SAMPLE_COUNT, 6u);
 #include <AnKi/Shaders/ClusteredShadingCommon.glsl>
 
 layout(set = 0, binding = 4) uniform sampler u_linearAnyClampSampler;
-layout(set = 0, binding = 5) ANKI_RP uniform texture2D u_gbufferRt2;
+layout(set = 0, binding = 5) uniform ANKI_RP texture2D u_gbufferRt2;
 layout(set = 0, binding = 6) uniform texture2D u_depthRt;
-layout(set = 0, binding = 7) ANKI_RP uniform texture2D u_lightBufferRt;
-layout(set = 0, binding = 8) ANKI_RP uniform texture2D u_historyTex;
+layout(set = 0, binding = 7) uniform ANKI_RP texture2D u_lightBufferRt;
+layout(set = 0, binding = 8) uniform ANKI_RP texture2D u_historyTex;
 layout(set = 0, binding = 9) uniform texture2D u_motionVectorsTex;
-layout(set = 0, binding = 10) uniform texture2D u_historyLengthTex;
+layout(set = 0, binding = 10) uniform ANKI_RP texture2D u_historyLengthTex;
 
 #if defined(ANKI_COMPUTE_SHADER)
 const UVec2 WORKGROUP_SIZE = UVec2(8, 8);

+ 4 - 4
AnKi/Shaders/IndirectSpecular.glsl

@@ -18,13 +18,13 @@ layout(set = 0, binding = 0, row_major) uniform b_unis
 };
 
 layout(set = 0, binding = 1) uniform sampler u_trilinearClampSampler;
-layout(set = 0, binding = 2) uniform texture2D u_gbufferRt1;
-layout(set = 0, binding = 3) uniform texture2D u_gbufferRt2;
+layout(set = 0, binding = 2) uniform ANKI_RP texture2D u_gbufferRt1;
+layout(set = 0, binding = 3) uniform ANKI_RP texture2D u_gbufferRt2;
 layout(set = 0, binding = 4) uniform texture2D u_depthRt;
-layout(set = 0, binding = 5) uniform texture2D u_lightBufferRt;
+layout(set = 0, binding = 5) uniform ANKI_RP texture2D u_lightBufferRt;
 
 layout(set = 0, binding = 6) uniform sampler u_trilinearRepeatSampler;
-layout(set = 0, binding = 7) uniform texture2D u_noiseTex;
+layout(set = 0, binding = 7) uniform ANKI_RP texture2D u_noiseTex;
 const Vec2 NOISE_TEX_SIZE = Vec2(64.0);
 
 #define CLUSTERED_SHADING_SET 0

+ 4 - 4
AnKi/Shaders/LightShading.ankiprog

@@ -27,14 +27,14 @@ ANKI_SPECIALIZATION_CONSTANT_U32(TILE_SIZE, 3u);
 layout(set = 0, binding = 5) uniform sampler u_nearestAnyClampSampler;
 layout(set = 0, binding = 6) uniform sampler u_trilinearClampSampler;
 
-layout(set = 0, binding = 7) uniform texture2D u_gbuffer0Tex;
-layout(set = 0, binding = 8) uniform texture2D u_gbuffer1Tex;
-layout(set = 0, binding = 9) uniform texture2D u_gbuffer2Tex;
+layout(set = 0, binding = 7) uniform ANKI_RP texture2D u_gbuffer0Tex;
+layout(set = 0, binding = 8) uniform ANKI_RP texture2D u_gbuffer1Tex;
+layout(set = 0, binding = 9) uniform ANKI_RP texture2D u_gbuffer2Tex;
 layout(set = 0, binding = 10) uniform texture2D u_msDepthRt;
 #if USE_SHADOW_LAYERS
 layout(set = 0, binding = 11) uniform utexture2D u_shadowLayersTex;
 #else
-layout(set = 0, binding = 12) uniform texture2D u_resolvedSm;
+layout(set = 0, binding = 12) uniform ANKI_RP texture2D u_resolvedSm;
 #endif
 
 layout(location = 0) in Vec2 in_uv;

+ 2 - 2
AnKi/Shaders/TemporalAA.glsl

@@ -16,8 +16,8 @@ ANKI_SPECIALIZATION_CONSTANT_UVEC2(FB_SIZE, 2u);
 
 layout(set = 0, binding = 0) uniform sampler u_linearAnyClampSampler;
 layout(set = 0, binding = 1) uniform texture2D u_depthRt;
-layout(set = 0, binding = 2) uniform texture2D u_inputRt;
-layout(set = 0, binding = 3) uniform texture2D u_historyRt;
+layout(set = 0, binding = 2) uniform ANKI_RP texture2D u_inputRt;
+layout(set = 0, binding = 3) uniform ANKI_RP texture2D u_historyRt;
 layout(set = 0, binding = 4) uniform texture2D u_motionVectorsTex;
 
 const U32 TONEMAPPING_SET = 0u;

+ 1 - 1
AnKi/Shaders/TonemappingAverageLuminance.ankiprog

@@ -17,7 +17,7 @@ layout(local_size_x = WORKGROUP_SIZE.x, local_size_y = WORKGROUP_SIZE.y, local_s
 const UVec2 ALIGNED_INPUT_TEX_SIZE = WORKGROUP_SIZE * ((INPUT_TEX_SIZE + WORKGROUP_SIZE - 1u) / WORKGROUP_SIZE);
 const UVec2 PIXELS_PER_TILE = ALIGNED_INPUT_TEX_SIZE / WORKGROUP_SIZE;
 
-layout(set = 0, binding = 0) uniform texture2D u_tex;
+layout(set = 0, binding = 0) uniform ANKI_RP texture2D u_tex;
 
 #define TONEMAPPING_RESOURCE_AS_BUFFER 1
 #define TONEMAPPING_SET 0

+ 0 - 4
Tests/Gr/Gr.cpp

@@ -926,7 +926,6 @@ ANKI_TEST(Gr, DrawWithTexture)
 	init.m_depth = 1;
 	init.m_format = Format::R8G8B8_UNORM;
 	init.m_usage = TextureUsageBit::SAMPLED_FRAGMENT | TextureUsageBit::TRANSFER_DESTINATION;
-	init.m_initialUsage = TextureUsageBit::SAMPLED_FRAGMENT;
 	init.m_height = 2;
 	init.m_width = 2;
 	init.m_mipmapCount = 2;
@@ -947,7 +946,6 @@ ANKI_TEST(Gr, DrawWithTexture)
 	init.m_mipmapCount = 3;
 	init.m_usage =
 		TextureUsageBit::SAMPLED_FRAGMENT | TextureUsageBit::TRANSFER_DESTINATION | TextureUsageBit::GENERATE_MIPMAPS;
-	init.m_initialUsage = TextureUsageBit::NONE;
 
 	TexturePtr b = gr->newTexture(init);
 
@@ -1427,7 +1425,6 @@ ANKI_TEST(Gr, 3DTextures)
 	init.m_depth = 1;
 	init.m_format = Format::R8G8B8A8_UNORM;
 	init.m_usage = TextureUsageBit::SAMPLED_FRAGMENT | TextureUsageBit::TRANSFER_DESTINATION;
-	init.m_initialUsage = TextureUsageBit::TRANSFER_DESTINATION;
 	init.m_height = 2;
 	init.m_width = 2;
 	init.m_mipmapCount = 2;
@@ -2780,7 +2777,6 @@ ANKI_TEST(Gr, RayGen)
 		inf.m_format = Format::R8G8B8A8_UNORM;
 		inf.m_usage = TextureUsageBit::IMAGE_TRACE_RAYS_READ | TextureUsageBit::IMAGE_TRACE_RAYS_WRITE
 					  | TextureUsageBit::IMAGE_COMPUTE_READ;
-		inf.m_initialUsage = TextureUsageBit::IMAGE_COMPUTE_READ;
 
 		offscreenRts[0] = gr->newTexture(inf);