Bläddra i källkod

Add the notion of buffer views

Panagiotis Christopoulos Charitos 1 år sedan
förälder
incheckning
fd50601525
53 ändrade filer med 669 tillägg och 678 borttagningar
  1. 1 1
      AnKi/Core/GpuMemory/GpuSceneBuffer.cpp
  2. 2 2
      AnKi/Core/GpuMemory/GpuSceneBuffer.h
  3. 3 2
      AnKi/Core/GpuMemory/GpuVisibleTransientMemoryPool.h
  4. 2 2
      AnKi/Core/GpuMemory/RebarTransientMemoryPool.h
  5. 39 30
      AnKi/Core/GpuMemory/UnifiedGeometryBuffer.h
  6. 11 29
      AnKi/Gr/AccelerationStructure.h
  7. 113 0
      AnKi/Gr/Buffer.h
  8. 25 117
      AnKi/Gr/CommandBuffer.h
  9. 0 13
      AnKi/Gr/Common.h
  10. 2 3
      AnKi/Gr/RenderGraph.cpp
  11. 3 27
      AnKi/Gr/RenderGraph.h
  12. 7 18
      AnKi/Gr/RenderGraph.inl.h
  13. 3 3
      AnKi/Gr/Utils/SegregatedListsGpuMemoryPool.cpp
  14. 7 6
      AnKi/Gr/Vulkan/VkAccelerationStructure.cpp
  15. 134 99
      AnKi/Gr/Vulkan/VkCommandBuffer.cpp
  16. 3 4
      AnKi/Renderer/AccelerationStructureBuilder.cpp
  17. 2 2
      AnKi/Renderer/AccelerationStructureBuilder.h
  18. 25 24
      AnKi/Renderer/ClusterBinning.cpp
  19. 4 4
      AnKi/Renderer/ClusterBinning.h
  20. 1 1
      AnKi/Renderer/Common.h
  21. 7 7
      AnKi/Renderer/Dbg.cpp
  22. 2 2
      AnKi/Renderer/DepthDownscale.cpp
  23. 2 2
      AnKi/Renderer/ForwardShading.h
  24. 3 3
      AnKi/Renderer/GBuffer.h
  25. 2 3
      AnKi/Renderer/LensFlare.cpp
  26. 1 1
      AnKi/Renderer/LensFlare.h
  27. 2 4
      AnKi/Renderer/PrimaryNonRenderableVisibility.cpp
  28. 2 2
      AnKi/Renderer/PrimaryNonRenderableVisibility.h
  29. 3 3
      AnKi/Renderer/ProbeReflections.cpp
  30. 1 1
      AnKi/Renderer/Renderer.cpp
  31. 1 1
      AnKi/Renderer/RendererObject.cpp
  32. 17 16
      AnKi/Renderer/RtShadows.cpp
  33. 11 12
      AnKi/Renderer/ShadowMapping.cpp
  34. 4 4
      AnKi/Renderer/ShadowMapping.h
  35. 63 57
      AnKi/Renderer/Utils/Drawer.cpp
  36. 7 7
      AnKi/Renderer/Utils/Drawer.h
  37. 69 61
      AnKi/Renderer/Utils/GpuVisibility.cpp
  38. 20 20
      AnKi/Renderer/Utils/GpuVisibility.h
  39. 4 3
      AnKi/Renderer/Utils/HzbGenerator.cpp
  40. 2 3
      AnKi/Renderer/Utils/Readback.cpp
  41. 1 1
      AnKi/Renderer/Utils/Readback.h
  42. 2 2
      AnKi/Renderer/Utils/TraditionalDeferredShading.cpp
  43. 2 2
      AnKi/Renderer/Utils/TraditionalDeferredShading.h
  44. 1 1
      AnKi/Resource/ImageResource.cpp
  45. 20 33
      AnKi/Resource/MeshResource.cpp
  46. 1 1
      AnKi/Resource/TransferGpuAllocator.h
  47. 4 7
      AnKi/Scene/Components/ParticleEmitterComponent.cpp
  48. 1 1
      AnKi/Scene/GpuSceneArray.h
  49. 2 2
      AnKi/Ui/Canvas.cpp
  50. 1 1
      AnKi/Ui/Font.cpp
  51. 18 22
      Tests/Gr/Gr.cpp
  52. 4 4
      Tests/Gr/GrMeshShaders.cpp
  53. 2 2
      Tests/Gr/GrTextureBuffer.cpp

+ 1 - 1
AnKi/Core/GpuMemory/GpuSceneBuffer.cpp

@@ -139,7 +139,7 @@ void GpuSceneMicroPatcher::patchGpuScene(CommandBuffer& cmdb)
 
 	cmdb.bindStorageBuffer(0, 0, headersToken);
 	cmdb.bindStorageBuffer(0, 1, dataToken);
-	cmdb.bindStorageBuffer(0, 2, &GpuSceneBuffer::getSingleton().getBuffer(), 0, kMaxPtrSize);
+	cmdb.bindStorageBuffer(0, 2, BufferView(&GpuSceneBuffer::getSingleton().getBuffer()));
 
 	cmdb.bindShaderProgram(m_grProgram.get());
 

+ 2 - 2
AnKi/Core/GpuMemory/GpuSceneBuffer.h

@@ -101,9 +101,9 @@ public:
 		return m_pool.getGpuBuffer();
 	}
 
-	BufferOffsetRange getBufferOffsetRange() const
+	BufferView getBufferView() const
 	{
-		return {&m_pool.getGpuBuffer(), 0, kMaxPtrSize};
+		return BufferView(&m_pool.getGpuBuffer());
 	}
 
 private:

+ 3 - 2
AnKi/Core/GpuMemory/GpuVisibleTransientMemoryPool.h

@@ -8,6 +8,7 @@
 #include <AnKi/Core/Common.h>
 #include <AnKi/Gr/Utils/StackGpuMemoryPool.h>
 #include <AnKi/Gr/GrManager.h>
+#include <AnKi/Gr/Buffer.h>
 
 namespace anki {
 
@@ -43,7 +44,7 @@ public:
 		return m_buffer != nullptr;
 	}
 
-	operator BufferOffsetRange() const;
+	operator BufferView() const;
 
 private:
 	Buffer* m_buffer = nullptr;
@@ -91,7 +92,7 @@ private:
 	~GpuVisibleTransientMemoryPool() = default;
 };
 
-inline GpuVisibleTransientMemoryAllocation::operator BufferOffsetRange() const
+inline GpuVisibleTransientMemoryAllocation::operator BufferView() const
 {
 	ANKI_ASSERT(isValid());
 	return {m_buffer, m_offset, m_size};

+ 2 - 2
AnKi/Core/GpuMemory/RebarTransientMemoryPool.h

@@ -47,7 +47,7 @@ public:
 
 	Buffer& getBuffer() const;
 
-	operator BufferOffsetRange() const;
+	operator BufferView() const;
 
 private:
 	PtrSize m_offset = kMaxPtrSize;
@@ -121,7 +121,7 @@ inline Buffer& RebarAllocation::getBuffer() const
 	return RebarTransientMemoryPool::getSingleton().getBuffer();
 }
 
-inline RebarAllocation::operator BufferOffsetRange() const
+inline RebarAllocation::operator BufferView() const
 {
 	ANKI_ASSERT(isValid());
 	return {&RebarTransientMemoryPool::getSingleton().getBuffer(), m_offset, m_range};

+ 39 - 30
AnKi/Core/GpuMemory/UnifiedGeometryBuffer.h

@@ -36,15 +36,18 @@ public:
 	{
 		ANKI_ASSERT(!isValid() && "Forgot to delete");
 		m_token = b.m_token;
-		m_realOffset = b.m_realOffset;
-		m_realAllocatedSize = b.m_realAllocatedSize;
+		m_fakeOffset = b.m_fakeOffset;
+		m_fakeAllocatedSize = b.m_fakeAllocatedSize;
 		b.m_token = {};
-		b.m_realAllocatedSize = 0;
-		b.m_realOffset = kMaxU32;
+		b.m_fakeAllocatedSize = 0;
+		b.m_fakeOffset = kMaxU32;
 		return *this;
 	}
 
-	operator BufferOffsetRange() const;
+	operator BufferView() const;
+
+	/// Returns a larger view than the one above, padded so that the allocation is properly aligned.
+	BufferView getCompleteBufferView() const;
 
 	Bool isValid() const
 	{
@@ -55,19 +58,19 @@ public:
 	U32 getOffset() const
 	{
 		ANKI_ASSERT(isValid());
-		return m_realOffset;
+		return m_fakeOffset;
 	}
 
 	U32 getAllocatedSize() const
 	{
 		ANKI_ASSERT(isValid());
-		return m_realAllocatedSize;
+		return m_fakeAllocatedSize;
 	}
 
 private:
 	SegregatedListsGpuMemoryPoolToken m_token;
-	U32 m_realOffset = kMaxU32; ///< In some allocations with weird alignments we need a different offset.
-	U32 m_realAllocatedSize = 0;
+	U32 m_fakeOffset = kMaxU32; ///< In some allocations with weird alignments we need a different offset.
+	U32 m_fakeAllocatedSize = 0;
 };
 
 /// Manages vertex and index memory for the WHOLE application.
@@ -83,13 +86,25 @@ public:
 
 	void init();
 
+	/// Unlike other allocators, the alignment doesn't need to be a power of 2.
 	UnifiedGeometryBufferAllocation allocate(PtrSize size, U32 alignment)
 	{
+		ANKI_ASSERT(size && alignment);
+
+		const U32 fixedAlignment = max(4u, nextPowerOfTwo(alignment)); // Fix the alignment and make sure it's at least 4
+		const U32 fixedSize = getAlignedRoundUp(4u, U32(size) + alignment); // Over-allocate and align to 4 because some cmd buffer ops need it
+		ANKI_ASSERT(fixedSize >= size);
+
 		UnifiedGeometryBufferAllocation out;
-		m_pool.allocate(size, alignment, out.m_token);
-		ANKI_ASSERT(isAligned(alignment, out.m_token.m_offset));
-		out.m_realOffset = U32(out.m_token.m_offset);
-		out.m_realAllocatedSize = U32(size);
+		m_pool.allocate(fixedSize, fixedAlignment, out.m_token);
+
+		const U32 remainder = out.m_token.m_offset % alignment;
+		out.m_fakeOffset = U32(out.m_token.m_offset + (alignment - remainder));
+		ANKI_ASSERT(isAligned(alignment, out.m_fakeOffset));
+
+		out.m_fakeAllocatedSize = U32(size);
+		ANKI_ASSERT(PtrSize(out.m_fakeOffset) + out.m_fakeAllocatedSize <= out.m_token.m_offset + out.m_token.m_size);
+
 		return out;
 	}
 
@@ -97,25 +112,14 @@ public:
 	UnifiedGeometryBufferAllocation allocateFormat(Format format, U32 count)
 	{
 		const U32 texelSize = getFormatInfo(format).m_texelSize;
-		const U32 alignment = max(4u, nextPowerOfTwo(texelSize));
-		const U32 size = count * texelSize + alignment; // Over-allocate
-
-		UnifiedGeometryBufferAllocation out;
-		m_pool.allocate(size, alignment, out.m_token);
-
-		const U32 remainder = out.m_token.m_offset % texelSize;
-		out.m_realOffset = U32(out.m_token.m_offset + (texelSize - remainder));
-		out.m_realAllocatedSize = count * texelSize;
-		ANKI_ASSERT(isAligned(texelSize, out.m_realOffset));
-		ANKI_ASSERT(out.m_realOffset + out.m_realAllocatedSize <= out.m_token.m_offset + out.m_token.m_size);
-		return out;
+		return allocate(texelSize * count, texelSize);
 	}
 
 	void deferredFree(UnifiedGeometryBufferAllocation& alloc)
 	{
 		m_pool.deferredFree(alloc.m_token);
-		alloc.m_realAllocatedSize = 0;
-		alloc.m_realOffset = kMaxU32;
+		alloc.m_fakeAllocatedSize = 0;
+		alloc.m_fakeOffset = kMaxU32;
 	}
 
 	void endFrame()
@@ -131,9 +135,9 @@ public:
 		return m_pool.getGpuBuffer();
 	}
 
-	BufferOffsetRange getBufferOffsetRange() const
+	BufferView getBufferView() const
 	{
-		return {&m_pool.getGpuBuffer(), 0, kMaxPtrSize};
+		return BufferView(&m_pool.getGpuBuffer());
 	}
 
 private:
@@ -151,10 +155,15 @@ inline UnifiedGeometryBufferAllocation::~UnifiedGeometryBufferAllocation()
 	UnifiedGeometryBuffer::getSingleton().deferredFree(*this);
 }
 
-inline UnifiedGeometryBufferAllocation::operator BufferOffsetRange() const
+inline UnifiedGeometryBufferAllocation::operator BufferView() const
 {
 	return {&UnifiedGeometryBuffer::getSingleton().getBuffer(), getOffset(), getAllocatedSize()};
 }
+
+inline BufferView UnifiedGeometryBufferAllocation::getCompleteBufferView() const
+{
+	return {&UnifiedGeometryBuffer::getSingleton().getBuffer(), m_token.m_offset, m_token.m_size};
+}
 /// @}
 
 } // end namespace anki

+ 11 - 29
AnKi/Gr/AccelerationStructure.h

@@ -18,44 +18,26 @@ namespace anki {
 class BottomLevelAccelerationStructureInitInfo
 {
 public:
-	const Buffer* m_indexBuffer = nullptr;
-	PtrSize m_indexBufferOffset = 0;
+	BufferView m_indexBuffer;
 	U32 m_indexCount = 0;
 	IndexType m_indexType = IndexType::kCount;
 
-	const Buffer* m_positionBuffer = nullptr;
-	PtrSize m_positionBufferOffset = 0;
+	BufferView m_positionBuffer;
 	U32 m_positionStride = 0;
 	Format m_positionsFormat = Format::kNone;
 	U32 m_positionCount = 0;
 
 	Bool isValid() const
 	{
-		if(m_indexBuffer == nullptr || m_indexCount == 0 || m_indexType == IndexType::kCount || m_positionBuffer == nullptr || m_positionStride == 0
-		   || m_positionsFormat == Format::kNone || m_positionCount == 0)
-		{
-			return false;
-		}
+		Bool valid = true;
 
-		const PtrSize posRange = m_positionBufferOffset + PtrSize(m_positionStride) * m_positionCount;
-		const PtrSize formatSize = getFormatInfo(m_positionsFormat).m_texelSize;
-		if(m_positionStride < formatSize)
-		{
-			return false;
-		}
+		valid = valid && (m_indexBuffer.isValid() && m_indexCount * getIndexSize(m_indexType) == m_indexBuffer.getRange());
 
-		if(posRange > m_positionBuffer->getSize())
-		{
-			return false;
-		}
-
-		const PtrSize idxStride = (m_indexType == IndexType::kU16) ? 2 : 4;
-		if(m_indexBufferOffset + idxStride * m_indexCount > m_indexBuffer->getSize())
-		{
-			return false;
-		}
+		const U32 vertSize = getFormatInfo(m_positionsFormat).m_texelSize;
+		valid = valid
+				&& (m_positionBuffer.isValid() && m_positionStride >= vertSize && m_positionStride * m_positionCount == m_positionBuffer.getRange());
 
-		return true;
+		return valid;
 	}
 };
 
@@ -83,14 +65,14 @@ public:
 	{
 	public:
 		U32 m_maxInstanceCount = 0;
-		Buffer* m_instancesBuffer = nullptr;
-		PtrSize m_instancesBufferOffset = kMaxPtrSize;
+		BufferView m_instancesBuffer; ///< Filled with AccelerationStructureInstance structs.
 	} m_indirectArgs; ///< Pass the instances GPU buffer directly.
 
 	Bool isValid() const
 	{
 		return m_directArgs.m_instances.getSize() > 0
-			   || (m_indirectArgs.m_maxInstanceCount > 0 && m_indirectArgs.m_instancesBuffer && m_indirectArgs.m_instancesBufferOffset < kMaxPtrSize);
+			   || (m_indirectArgs.m_maxInstanceCount > 0 && m_indirectArgs.m_instancesBuffer.isValid()
+				   && m_indirectArgs.m_instancesBuffer.getRange() == sizeof(AccelerationStructureInstance) * m_indirectArgs.m_maxInstanceCount);
 	}
 };
 

+ 113 - 0
AnKi/Gr/Buffer.h

@@ -127,6 +127,119 @@ private:
 	/// Allocate and initialize a new instance.
 	[[nodiscard]] static Buffer* newInstance(const BufferInitInfo& init);
 };
+
+/// A part of a buffer.
+class BufferView
+{
+public:
+	BufferView() = default;
+
+	BufferView(const BufferView&) = default;
+
+	explicit BufferView(Buffer* buffer)
+		: m_buffer(buffer)
+		, m_offset(0)
+		, m_range(buffer->getSize())
+	{
+		check();
+	}
+
+	BufferView(Buffer* buffer, PtrSize offset, PtrSize range)
+		: m_buffer(buffer)
+		, m_offset(offset)
+		, m_range(range)
+	{
+		check();
+	}
+
+	BufferView& operator=(const BufferView&) = default;
+
+	[[nodiscard]] Buffer& getBuffer() const
+	{
+		check();
+		return *m_buffer;
+	}
+
+	[[nodiscard]] const PtrSize& getOffset() const
+	{
+		check();
+		return m_offset;
+	}
+
+	BufferView& setOffset(PtrSize offset)
+	{
+		check();
+		m_offset = offset;
+		check();
+		return *this;
+	}
+
+	BufferView& incrementOffset(PtrSize bytes)
+	{
+		check();
+		ANKI_ASSERT(m_range >= bytes);
+		m_range -= bytes;
+		m_offset += bytes;
+		if(m_range == 0)
+		{
+			*this = {};
+		}
+		else
+		{
+			check();
+		}
+		return *this;
+	}
+
+	[[nodiscard]] const PtrSize& getRange() const
+	{
+		check();
+		return m_range;
+	}
+
+	BufferView& setRange(PtrSize range)
+	{
+		check();
+		ANKI_ASSERT(range <= m_range);
+		m_range = range;
+		check();
+		return *this;
+	}
+
+	[[nodiscard]] Bool isValid() const
+	{
+		return m_buffer != nullptr;
+	}
+
+	[[nodiscard]] Bool overlaps(const BufferView& b) const
+	{
+		check();
+		b.check();
+		Bool overlaps = m_buffer == b.m_buffer;
+		if(m_offset <= b.m_offset)
+		{
+			overlaps = overlaps && (m_offset + m_range > b.m_offset);
+		}
+		else
+		{
+			overlaps = overlaps && (b.m_offset + b.m_range > m_offset);
+		}
+
+		return overlaps;
+	}
+
+private:
+	Buffer* m_buffer = nullptr;
+	PtrSize m_offset = kMaxPtrSize;
+	PtrSize m_range = 0;
+
+	void check() const
+	{
+		ANKI_ASSERT(m_buffer && m_range > 0);
+		ANKI_ASSERT(m_range <= m_buffer->getSize() && m_offset < m_buffer->getSize()); // Do that to ensure the next line won't overflow
+		ANKI_ASSERT(m_offset + m_range <= m_buffer->getSize());
+	}
+};
 /// @}
 
 } // end namespace anki

+ 25 - 117
AnKi/Gr/CommandBuffer.h

@@ -6,6 +6,7 @@
 #pragma once
 
 #include <AnKi/Gr/GrObject.h>
+#include <AnKi/Gr/Buffer.h>
 #include <AnKi/Util/Functions.h>
 #include <AnKi/Util/WeakArray.h>
 #include <AnKi/Math.h>
@@ -27,11 +28,9 @@ public:
 class BufferBarrierInfo
 {
 public:
-	Buffer* m_buffer = nullptr;
+	BufferView m_bufferView;
 	BufferUsageBit m_previousUsage = BufferUsageBit::kNone;
 	BufferUsageBit m_nextUsage = BufferUsageBit::kNone;
-	PtrSize m_offset = 0;
-	PtrSize m_range = 0;
 };
 
 class AccelerationStructureBarrierInfo
@@ -115,13 +114,13 @@ public:
 	/// @{
 
 	/// Bind vertex buffer.
-	void bindVertexBuffer(U32 binding, Buffer* buff, PtrSize offset, PtrSize stride, VertexStepRate stepRate = VertexStepRate::kVertex);
+	void bindVertexBuffer(U32 binding, const BufferView& buff, PtrSize stride, VertexStepRate stepRate = VertexStepRate::kVertex);
 
 	/// Setup a vertex attribute.
 	void setVertexAttribute(VertexAttribute attribute, U32 buffBinding, Format fmt, PtrSize relativeOffset);
 
 	/// Bind index buffer.
-	void bindIndexBuffer(Buffer* buff, PtrSize offset, IndexType type);
+	void bindIndexBuffer(const BufferView& buff, IndexType type);
 
 	/// Enable primitive restart.
 	void setPrimitiveRestart(Bool enable);
@@ -199,74 +198,22 @@ public:
 	void setLineWidth(F32 lineWidth);
 
 	/// Bind sampler.
-	/// @param set The set to bind to.
-	/// @param binding The binding to bind to.
-	/// @param sampler The sampler to override the default sampler of the tex.
-	/// @param arrayIdx The array index if the binding is an array.
 	void bindSampler(U32 set, U32 binding, Sampler* sampler, U32 arrayIdx = 0);
 
 	/// Bind a texture.
-	/// @param set The set to bind to.
-	/// @param binding The binding to bind to.
-	/// @param texView The texture view to bind.
-	/// @param arrayIdx The array index if the binding is an array.
 	void bindTexture(U32 set, U32 binding, TextureView* texView, U32 arrayIdx = 0);
 
 	/// Bind uniform buffer.
-	/// @param set The set to bind to.
-	/// @param binding The binding to bind to.
-	/// @param[in,out] buff The buffer to bind.
-	/// @param offset The base of the binding.
-	/// @param range The bytes to bind starting from the offset. If it's kMaxPtrSize then map from offset to the end of the buffer.
-	/// @param arrayIdx The array index if the binding is an array.
-	void bindUniformBuffer(U32 set, U32 binding, Buffer* buff, PtrSize offset, PtrSize range, U32 arrayIdx = 0);
-
-	/// Bind uniform buffer.
-	/// @param set The set to bind to.
-	/// @param binding The binding to bind to.
-	/// @param[in,out] buff The buffer to bind.
-	/// @param arrayIdx The array index if the binding is an array.
-	void bindUniformBuffer(U32 set, U32 binding, const BufferOffsetRange& buff, U32 arrayIdx = 0)
-	{
-		bindUniformBuffer(set, binding, buff.m_buffer, buff.m_offset, buff.m_range, arrayIdx);
-	}
-
-	/// Bind storage buffer.
-	/// @param set The set to bind to.
-	/// @param binding The binding to bind to.
-	/// @param[in,out] buff The buffer to bind.
-	/// @param offset The base of the binding.
-	/// @param range The bytes to bind starting from the offset. If it's kMaxPtrSize then map from offset to the end
-	///              of the buffer.
-	/// @param arrayIdx The array index if the binding is an array.
-	void bindStorageBuffer(U32 set, U32 binding, Buffer* buff, PtrSize offset, PtrSize range, U32 arrayIdx = 0);
+	void bindUniformBuffer(U32 set, U32 binding, const BufferView& buff, U32 arrayIdx = 0);
 
 	/// Bind storage buffer.
-	/// @param set The set to bind to.
-	/// @param binding The binding to bind to.
-	/// @param[in,out] buff The buffer to bind.
-	/// @param arrayIdx The array index if the binding is an array.
-	void bindStorageBuffer(U32 set, U32 binding, const BufferOffsetRange& buff, U32 arrayIdx = 0)
-	{
-		bindStorageBuffer(set, binding, buff.m_buffer, buff.m_offset, buff.m_range, arrayIdx);
-	}
+	void bindStorageBuffer(U32 set, U32 binding, const BufferView& buff, U32 arrayIdx = 0);
 
 	/// Bind load/store image.
-	/// @param set The set to bind to.
-	/// @param binding The binding to bind to.
-	/// @param img The view to bind.
-	/// @param arrayIdx The array index if the binding is an array.
 	void bindStorageTexture(U32 set, U32 binding, TextureView* img, U32 arrayIdx = 0);
 
 	/// Bind texture buffer.
-	/// @param set The set to bind to.
-	/// @param binding The binding to bind to.
-	/// @param[in,out] buff The buffer to bind.
-	/// @param offset The base of the binding.
-	/// @param range The bytes to bind starting from the offset. If it's kMaxPtrSize then map from offset to the end of the buffer.
-	/// @param fmt The format of the buffer.
-	/// @param arrayIdx The array index if the binding is an array.
-	void bindReadOnlyTexelBuffer(U32 set, U32 binding, Buffer* buff, PtrSize offset, PtrSize range, Format fmt, U32 arrayIdx = 0);
+	void bindReadOnlyTexelBuffer(U32 set, U32 binding, const BufferView& buff, Format fmt, U32 arrayIdx = 0);
 
 	/// Bind an acceleration structure.
 	/// @param set The set to bind to.
@@ -311,23 +258,23 @@ public:
 
 	void draw(PrimitiveTopology topology, U32 count, U32 instanceCount = 1, U32 first = 0, U32 baseInstance = 0);
 
-	void drawIndexedIndirect(PrimitiveTopology topology, U32 drawCount, PtrSize offset, Buffer* indirectBuff);
+	void drawIndexedIndirect(PrimitiveTopology topology, const BufferView& indirectBuff, U32 drawCount = 1);
 
-	void drawIndirect(PrimitiveTopology topology, U32 drawCount, PtrSize offset, Buffer* indirectBuff);
+	void drawIndirect(PrimitiveTopology topology, const BufferView& indirectBuff, U32 drawCount = 1);
 
-	void drawIndexedIndirectCount(PrimitiveTopology topology, Buffer* argBuffer, PtrSize argBufferOffset, U32 argBufferStride, Buffer* countBuffer,
-								  PtrSize countBufferOffset, U32 maxDrawCount);
+	void drawIndexedIndirectCount(PrimitiveTopology topology, const BufferView& argBuffer, U32 argBufferStride, const BufferView& countBuffer,
+								  U32 maxDrawCount);
 
-	void drawIndirectCount(PrimitiveTopology topology, Buffer* argBuffer, PtrSize argBufferOffset, U32 argBufferStride, Buffer* countBuffer,
-						   PtrSize countBufferOffset, U32 maxDrawCount);
+	void drawIndirectCount(PrimitiveTopology topology, const BufferView& argBuffer, U32 argBufferStride, const BufferView& countBuffer,
+						   U32 maxDrawCount);
 
 	void drawMeshTasks(U32 groupCountX, U32 groupCountY, U32 groupCountZ);
 
-	void drawMeshTasksIndirect(Buffer* argBuffer, PtrSize argBufferOffset);
+	void drawMeshTasksIndirect(const BufferView& argBuffer, U32 drawCount = 1);
 
 	void dispatchCompute(U32 groupCountX, U32 groupCountY, U32 groupCountZ);
 
-	void dispatchComputeIndirect(Buffer* argBuffer, PtrSize argBufferOffset);
+	void dispatchComputeIndirect(const BufferView& argBuffer);
 
 	/// Trace rays.
 	///
@@ -350,15 +297,13 @@ public:
 	/// The I_offset is the AccelerationStructureInstance::m_hitgroupSbtRecordIndex.
 	///
 	/// @param[in] sbtBuffer The SBT buffer.
-	/// @param sbtBufferOffset Offset inside the sbtBuffer where SBT records start.
-	/// @param hitGroupSbtRecordCount The number of SBT records that contain hit groups.
 	/// @param sbtRecordSize The size of an SBT record
+	/// @param hitGroupSbtRecordCount The number of SBT records that contain hit groups.
 	/// @param rayTypeCount The number of ray types hosted in the pipeline. See above on how it's used.
 	/// @param width Width.
 	/// @param height Height.
 	/// @param depth Depth.
-	void traceRays(Buffer* sbtBuffer, PtrSize sbtBufferOffset, U32 sbtRecordSize, U32 hitGroupSbtRecordCount, U32 rayTypeCount, U32 width, U32 height,
-				   U32 depth);
+	void traceRays(const BufferView& sbtBuffer, U32 sbtRecordSize, U32 hitGroupSbtRecordCount, U32 rayTypeCount, U32 width, U32 height, U32 depth);
 
 	/// Generate mipmaps for non-3D textures. You have to transition all the mip levels of this face and layer to
 	/// TextureUsageBit::kGenerateMipmaps before calling this method.
@@ -366,71 +311,34 @@ public:
 	///                mip chain and only one face and one layer.
 	void generateMipmaps2d(TextureView* texView);
 
-	/// Generate mipmaps only for 3D textures.
-	/// @param texView The texture view to generate mips.
-	void generateMipmaps3d(TextureView* tex);
-
 	/// Blit from surface to surface.
-	/// @param srcView The source view that points to a surface.
-	/// @param dstView The destination view that points to a surface.
 	void blitTextureViews(TextureView* srcView, TextureView* destView);
 
 	/// Clear a single texture surface. Can be used for all textures except 3D.
-	/// @param[in,out] texView The texture view to clear.
-	/// @param[in] clearValue The value to clear it with.
 	void clearTextureView(TextureView* texView, const ClearValue& clearValue);
 
 	/// Copy a buffer to a texture surface or volume.
-	/// @param buff The source buffer to copy from.
-	/// @param offset The offset in the buffer to start reading from.
-	/// @param range The size of the buffer to read.
-	/// @param texView The texture view that points to a surface or volume to write to.
-	void copyBufferToTextureView(Buffer* buff, PtrSize offset, PtrSize range, TextureView* texView);
-
-	/// Fill a buffer with some value.
-	/// @param[in,out] buff The buffer to fill.
-	/// @param offset From where to start filling. Must be multiple of 4.
-	/// @param size The bytes to fill. Must be multiple of 4 or kMaxPtrSize to indicate the whole buffer.
-	/// @param value The value to fill the buffer with.
-	void fillBuffer(Buffer* buff, PtrSize offset, PtrSize size, U32 value);
+	void copyBufferToTexture(const BufferView& buff, TextureView* texView);
 
 	/// Fill a buffer with some value.
-	/// @param[in,out] buff The buffer to fill.
-	/// @param value The value to fill the buffer with.
-	void fillBuffer(const BufferOffsetRange& buff, U32 value)
-	{
-		fillBuffer(buff.m_buffer, buff.m_offset, buff.m_range, value);
-	}
+	void fillBuffer(const BufferView& buff, U32 value);
 
 	/// Write the occlusion result to buffer.
-	/// @param[in] queries The queries to write the result of.
-	/// @param offset The offset inside the buffer to write the result.
-	/// @param buff The buffer to update.
-	void writeOcclusionQueriesResultToBuffer(ConstWeakArray<OcclusionQuery*> queries, PtrSize offset, Buffer* buff);
+	void writeOcclusionQueriesResultToBuffer(ConstWeakArray<OcclusionQuery*> queries, const BufferView& buff);
 
 	/// Copy buffer to buffer.
-	/// @param[in] src Source buffer.
-	/// @param srcOffset Offset in the src buffer.
-	/// @param[out] dst Destination buffer.
-	/// @param dstOffset Offset in the destination buffer.
-	/// @param range Size to copy.
-	void copyBufferToBuffer(Buffer* src, PtrSize srcOffset, Buffer* dst, PtrSize dstOffset, PtrSize range)
+	void copyBufferToBuffer(const BufferView& src, const BufferView& dst)
 	{
-		Array<CopyBufferToBufferInfo, 1> copies = {{{srcOffset, dstOffset, range}}};
-		copyBufferToBuffer(src, dst, copies);
+		ANKI_ASSERT(src.getRange() == dst.getRange());
+		Array<CopyBufferToBufferInfo, 1> copies = {{{src.getOffset(), dst.getOffset(), src.getRange()}}};
+		copyBufferToBuffer(&src.getBuffer(), &dst.getBuffer(), copies);
 	}
 
 	/// Copy buffer to buffer.
-	/// @param[in] src Source buffer.
-	/// @param[out] dst Destination buffer.
-	/// @param copies Info on the copies.
 	void copyBufferToBuffer(Buffer* src, Buffer* dst, ConstWeakArray<CopyBufferToBufferInfo> copies);
 
 	/// Build the acceleration structure.
-	/// @param as The AS to build.
-	/// @param scratchBuffer A scratch buffer. Ask the AS for size.
-	/// @param scratchBufferOffset Scratch buffer offset.
-	void buildAccelerationStructure(AccelerationStructure* as, Buffer* scratchBuffer, PtrSize scratchBufferOffset);
+	void buildAccelerationStructure(AccelerationStructure* as, const BufferView& scratchBuffer);
 
 	/// Do upscaling by an external upscaler
 	/// @param[in] upscaler the upscaler to use for upscaling

+ 0 - 13
AnKi/Gr/Common.h

@@ -1093,19 +1093,6 @@ public:
 	}
 };
 
-class BufferOffsetRange
-{
-public:
-	Buffer* m_buffer = nullptr;
-	PtrSize m_offset = kMaxPtrSize;
-	PtrSize m_range = 0;
-
-	Bool isValid() const
-	{
-		return m_buffer != nullptr && m_offset < kMaxPtrSize && m_range > 0;
-	}
-};
-
 /// Compute max number of mipmaps for a 2D texture.
 U32 computeMaxMipmapCount2d(U32 w, U32 h, U32 minSizeOfLastMip = 1);
 

+ 2 - 3
AnKi/Gr/RenderGraph.cpp

@@ -1192,9 +1192,8 @@ void RenderGraph::recordAndSubmitCommandBuffers(FencePtr* optionalFence)
 						BufferBarrierInfo& inf = *buffBarriers.emplaceBack();
 						inf.m_previousUsage = barrier.m_usageBefore;
 						inf.m_nextUsage = barrier.m_usageAfter;
-						inf.m_offset = m_ctx->m_buffers[barrier.m_idx].m_offset;
-						inf.m_range = m_ctx->m_buffers[barrier.m_idx].m_range;
-						inf.m_buffer = m_ctx->m_buffers[barrier.m_idx].m_buffer.get();
+						inf.m_bufferView = BufferView(m_ctx->m_buffers[barrier.m_idx].m_buffer.get(), m_ctx->m_buffers[barrier.m_idx].m_offset,
+													  m_ctx->m_buffers[barrier.m_idx].m_range);
 					}
 					DynamicArray<AccelerationStructureBarrierInfo, MemoryPoolPtrWrapper<StackMemoryPool>> asBarriers(pool);
 					for(const ASBarrier& barrier : batch.m_asBarriersBefore)

+ 3 - 27
AnKi/Gr/RenderGraph.h

@@ -186,7 +186,7 @@ public:
 		Buffer* buff;
 		PtrSize offset, range;
 		getBufferState(handle, buff, offset, range);
-		m_commandBuffer->bindStorageBuffer(set, binding, buff, offset, range);
+		m_commandBuffer->bindStorageBuffer(set, binding, BufferView(buff, offset, range));
 	}
 
 	/// Convenience method.
@@ -195,7 +195,7 @@ public:
 		Buffer* buff;
 		PtrSize offset, range;
 		getBufferState(handle, buff, offset, range);
-		m_commandBuffer->bindUniformBuffer(set, binding, buff, offset, range);
+		m_commandBuffer->bindUniformBuffer(set, binding, BufferView(buff, offset, range));
 	}
 
 	/// Convenience method.
@@ -481,13 +481,7 @@ public:
 	RenderTargetHandle newRenderTarget(const RenderTargetDescription& initInf);
 
 	/// Import a buffer.
-	BufferHandle importBuffer(Buffer* buff, BufferUsageBit usage, PtrSize offset = 0, PtrSize range = kMaxPtrSize);
-
-	/// Import a buffer.
-	BufferHandle importBuffer(BufferUsageBit usage, const BufferOffsetRange& buff)
-	{
-		return importBuffer(buff.m_buffer, usage, buff.m_offset, buff.m_range);
-	}
+	BufferHandle importBuffer(const BufferView& buff, BufferUsageBit usage);
 
 	/// Import an AS.
 	AccelerationStructureHandle importAccelerationStructure(AccelerationStructure* as, AccelerationStructureUsageBit usage);
@@ -546,24 +540,6 @@ private:
 	DynamicArray<BufferRsrc, MemoryPoolPtrWrapper<StackMemoryPool>> m_buffers{m_pool};
 	DynamicArray<AS, MemoryPoolPtrWrapper<StackMemoryPool>> m_as{m_pool};
 	Bool m_gatherStatistics = false;
-
-	/// Return true if 2 buffer ranges overlap.
-	static Bool bufferRangeOverlaps(PtrSize offsetA, PtrSize rangeA, PtrSize offsetB, PtrSize rangeB)
-	{
-		ANKI_ASSERT(rangeA > 0 && rangeB > 0);
-		if(rangeA == kMaxPtrSize || rangeB == kMaxPtrSize)
-		{
-			return true;
-		}
-		else if(offsetA <= offsetB)
-		{
-			return offsetA + rangeA > offsetB;
-		}
-		else
-		{
-			return offsetB + rangeB > offsetA;
-		}
-	}
 };
 
 /// Statistics.

+ 7 - 18
AnKi/Gr/RenderGraph.inl.h

@@ -294,32 +294,21 @@ inline RenderTargetHandle RenderGraphDescription::newRenderTarget(const RenderTa
 	return out;
 }
 
-inline BufferHandle RenderGraphDescription::importBuffer(Buffer* buff, BufferUsageBit usage, PtrSize offset, PtrSize range)
+inline BufferHandle RenderGraphDescription::importBuffer(const BufferView& buff, BufferUsageBit usage)
 {
-	// Checks
-	ANKI_ASSERT(buff);
-	if(range == kMaxPtrSize)
-	{
-		ANKI_ASSERT(offset < buff->getSize());
-	}
-	else
-	{
-		ANKI_ASSERT((offset + range) <= buff->getSize());
-	}
-
-	ANKI_ASSERT(range > 0);
+	ANKI_ASSERT(buff.isValid());
 
 	for([[maybe_unused]] const BufferRsrc& bb : m_buffers)
 	{
-		ANKI_ASSERT((bb.m_importedBuff.get() != buff || !bufferRangeOverlaps(bb.m_offset, bb.m_range, offset, range)) && "Range already imported");
+		ANKI_ASSERT(!buff.overlaps(BufferView(bb.m_importedBuff.get(), bb.m_offset, bb.m_range)) && "Range already imported");
 	}
 
 	BufferRsrc& b = *m_buffers.emplaceBack();
-	b.setName(buff->getName());
+	b.setName(buff.getBuffer().getName());
 	b.m_usage = usage;
-	b.m_importedBuff.reset(buff);
-	b.m_offset = offset;
-	b.m_range = range;
+	b.m_importedBuff.reset(&buff.getBuffer());
+	b.m_offset = buff.getOffset();
+	b.m_range = buff.getRange();
 
 	BufferHandle out;
 	out.m_idx = m_buffers.getSize() - 1;

+ 3 - 3
AnKi/Gr/Utils/SegregatedListsGpuMemoryPool.cpp

@@ -157,15 +157,15 @@ Error SegregatedListsGpuMemoryPool::allocateChunk(Chunk*& newChunk, PtrSize& chu
 		CommandBufferPtr cmdb = GrManager::getSingleton().newCommandBuffer(cmdbInit);
 
 		Array<BufferBarrierInfo, 2> barriers;
-		barriers[0].m_buffer = m_gpuBuffer.get();
+		barriers[0].m_bufferView = BufferView(m_gpuBuffer.get());
 		barriers[0].m_previousUsage = m_bufferUsage;
 		barriers[0].m_nextUsage = BufferUsageBit::kTransferSource;
-		barriers[1].m_buffer = newBuffer.get();
+		barriers[1].m_bufferView = BufferView(newBuffer.get());
 		barriers[1].m_previousUsage = BufferUsageBit::kNone;
 		barriers[1].m_nextUsage = BufferUsageBit::kTransferDestination;
 		cmdb->setPipelineBarrier({}, barriers, {});
 
-		cmdb->copyBufferToBuffer(m_gpuBuffer.get(), 0, newBuffer.get(), 0, m_gpuBuffer->getSize());
+		cmdb->copyBufferToBuffer(BufferView(m_gpuBuffer.get()), BufferView(newBuffer.get(), 0, m_gpuBuffer->getSize()));
 
 		barriers[1].m_previousUsage = BufferUsageBit::kTransferDestination;
 		barriers[1].m_nextUsage = m_bufferUsage;

+ 7 - 6
AnKi/Gr/Vulkan/VkAccelerationStructure.cpp

@@ -55,11 +55,12 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 		geom.geometry.triangles.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR;
 		geom.geometry.triangles.vertexFormat = convertFormat(inf.m_bottomLevel.m_positionsFormat);
 		geom.geometry.triangles.vertexData.deviceAddress =
-			inf.m_bottomLevel.m_positionBuffer->getGpuAddress() + inf.m_bottomLevel.m_positionBufferOffset;
+			inf.m_bottomLevel.m_positionBuffer.getBuffer().getGpuAddress() + inf.m_bottomLevel.m_positionBuffer.getOffset();
 		geom.geometry.triangles.vertexStride = inf.m_bottomLevel.m_positionStride;
 		geom.geometry.triangles.maxVertex = inf.m_bottomLevel.m_positionCount - 1;
 		geom.geometry.triangles.indexType = convertIndexType(inf.m_bottomLevel.m_indexType);
-		geom.geometry.triangles.indexData.deviceAddress = inf.m_bottomLevel.m_indexBuffer->getGpuAddress() + inf.m_bottomLevel.m_indexBufferOffset;
+		geom.geometry.triangles.indexData.deviceAddress =
+			inf.m_bottomLevel.m_indexBuffer.getBuffer().getGpuAddress() + inf.m_bottomLevel.m_indexBuffer.getOffset();
 		geom.flags = 0; // VK_GEOMETRY_OPAQUE_BIT_KHR; // TODO
 
 		// Geom build info
@@ -147,10 +148,10 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 		else
 		{
 			// Instances buffer already created
-			ANKI_ASSERT(inf.m_topLevel.m_indirectArgs.m_instancesBufferOffset
+			ANKI_ASSERT(inf.m_topLevel.m_indirectArgs.m_instancesBuffer.getOffset()
 							+ sizeof(VkAccelerationStructureInstanceKHR) * inf.m_topLevel.m_indirectArgs.m_maxInstanceCount
-						<= inf.m_topLevel.m_indirectArgs.m_instancesBuffer->getSize());
-			m_topLevelInfo.m_instancesBuffer.reset(inf.m_topLevel.m_indirectArgs.m_instancesBuffer);
+						<= inf.m_topLevel.m_indirectArgs.m_instancesBuffer.getRange());
+			m_topLevelInfo.m_instancesBuffer.reset(&inf.m_topLevel.m_indirectArgs.m_instancesBuffer.getBuffer());
 
 			m_topLevelInfo.m_maxInstanceCount = inf.m_topLevel.m_indirectArgs.m_maxInstanceCount;
 		}
@@ -163,7 +164,7 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 		geom.geometry.instances.data.deviceAddress = m_topLevelInfo.m_instancesBuffer->getGpuAddress();
 		if(isIndirect)
 		{
-			geom.geometry.instances.data.deviceAddress += inf.m_topLevel.m_indirectArgs.m_instancesBufferOffset;
+			geom.geometry.instances.data.deviceAddress += inf.m_topLevel.m_indirectArgs.m_instancesBuffer.getRange();
 		}
 		geom.geometry.instances.arrayOfPointers = false;
 		geom.flags = VK_GEOMETRY_OPAQUE_BIT_KHR; // TODO

+ 134 - 99
AnKi/Gr/Vulkan/VkCommandBuffer.cpp

@@ -39,13 +39,15 @@ void CommandBuffer::endRecording()
 	self.endRecording();
 }
 
-void CommandBuffer::bindVertexBuffer(U32 binding, Buffer* buff, PtrSize offset, PtrSize stride, VertexStepRate stepRate)
+void CommandBuffer::bindVertexBuffer(U32 binding, const BufferView& buff, PtrSize stride, VertexStepRate stepRate)
 {
+	ANKI_ASSERT(buff.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.commandCommon();
 	self.m_state.bindVertexBuffer(binding, stride, stepRate);
-	const VkBuffer vkbuff = static_cast<const BufferImpl&>(*buff).getHandle();
-	vkCmdBindVertexBuffers(self.m_handle, binding, 1, &vkbuff, &offset);
+	const VkBuffer vkbuff = static_cast<const BufferImpl&>(buff.getBuffer()).getHandle();
+	vkCmdBindVertexBuffers(self.m_handle, binding, 1, &vkbuff, &buff.getOffset());
 }
 
 void CommandBuffer::setVertexAttribute(VertexAttribute attribute, U32 buffBinding, Format fmt, PtrSize relativeOffset)
@@ -55,13 +57,15 @@ void CommandBuffer::setVertexAttribute(VertexAttribute attribute, U32 buffBindin
 	self.m_state.setVertexAttribute(attribute, buffBinding, fmt, relativeOffset);
 }
 
-void CommandBuffer::bindIndexBuffer(Buffer* buff, PtrSize offset, IndexType type)
+void CommandBuffer::bindIndexBuffer(const BufferView& buff, IndexType type)
 {
+	ANKI_ASSERT(buff.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.commandCommon();
-	const BufferImpl& buffi = static_cast<const BufferImpl&>(*buff);
+	const BufferImpl& buffi = static_cast<const BufferImpl&>(buff.getBuffer());
 	ANKI_ASSERT(!!(buffi.getBufferUsage() & BufferUsageBit::kIndex));
-	vkCmdBindIndexBuffer(self.m_handle, buffi.getHandle(), offset, convertIndexType(type));
+	vkCmdBindIndexBuffer(self.m_handle, buffi.getHandle(), buff.getOffset(), convertIndexType(type));
 }
 
 void CommandBuffer::setPrimitiveRestart(Bool enable)
@@ -279,18 +283,22 @@ void CommandBuffer::bindSampler(U32 set, U32 binding, Sampler* sampler, U32 arra
 	self.m_microCmdb->pushObjectRef(sampler);
 }
 
-void CommandBuffer::bindUniformBuffer(U32 set, U32 binding, Buffer* buff, PtrSize offset, PtrSize range, U32 arrayIdx)
+void CommandBuffer::bindUniformBuffer(U32 set, U32 binding, const BufferView& buff, U32 arrayIdx)
 {
+	ANKI_ASSERT(buff.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.commandCommon();
-	self.m_dsetState[set].bindUniformBuffer(binding, arrayIdx, buff, offset, range);
+	self.m_dsetState[set].bindUniformBuffer(binding, arrayIdx, &buff.getBuffer(), buff.getOffset(), buff.getRange());
 }
 
-void CommandBuffer::bindStorageBuffer(U32 set, U32 binding, Buffer* buff, PtrSize offset, PtrSize range, U32 arrayIdx)
+void CommandBuffer::bindStorageBuffer(U32 set, U32 binding, const BufferView& buff, U32 arrayIdx)
 {
+	ANKI_ASSERT(buff.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.commandCommon();
-	self.m_dsetState[set].bindStorageBuffer(binding, arrayIdx, buff, offset, range);
+	self.m_dsetState[set].bindStorageBuffer(binding, arrayIdx, &buff.getBuffer(), buff.getOffset(), buff.getRange());
 }
 
 void CommandBuffer::bindStorageTexture(U32 set, U32 binding, TextureView* img, U32 arrayIdx)
@@ -314,11 +322,13 @@ void CommandBuffer::bindAccelerationStructure(U32 set, U32 binding, Acceleration
 	self.m_microCmdb->pushObjectRef(as);
 }
 
-void CommandBuffer::bindReadOnlyTexelBuffer(U32 set, U32 binding, Buffer* buff, PtrSize offset, PtrSize range, Format fmt, U32 arrayIdx)
+void CommandBuffer::bindReadOnlyTexelBuffer(U32 set, U32 binding, const BufferView& buff, Format fmt, U32 arrayIdx)
 {
+	ANKI_ASSERT(buff.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.commandCommon();
-	self.m_dsetState[set].bindReadOnlyTexelBuffer(binding, arrayIdx, buff, offset, range, fmt);
+	self.m_dsetState[set].bindReadOnlyTexelBuffer(binding, arrayIdx, &buff.getBuffer(), buff.getOffset(), buff.getRange(), fmt);
 }
 
 void CommandBuffer::bindAllBindless(U32 set)
@@ -586,76 +596,96 @@ void CommandBuffer::draw(PrimitiveTopology topology, U32 count, U32 instanceCoun
 	vkCmdDraw(self.m_handle, count, instanceCount, first, baseInstance);
 }
 
-void CommandBuffer::drawIndirect(PrimitiveTopology topology, U32 drawCount, PtrSize offset, Buffer* buff)
+void CommandBuffer::drawIndirect(PrimitiveTopology topology, const BufferView& buff, U32 drawCount)
 {
+	ANKI_ASSERT(buff.isValid());
+	ANKI_ASSERT(drawCount > 0);
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.m_state.setPrimitiveTopology(topology);
 	self.drawcallCommon();
-	const BufferImpl& impl = static_cast<const BufferImpl&>(*buff);
+
+	const BufferImpl& impl = static_cast<const BufferImpl&>(buff.getBuffer());
 	ANKI_ASSERT(impl.usageValid(BufferUsageBit::kIndirectDraw));
-	ANKI_ASSERT((offset % 4) == 0);
-	ANKI_ASSERT((offset + sizeof(DrawIndirectArgs) * drawCount) <= impl.getSize());
+	ANKI_ASSERT((buff.getOffset() % 4) == 0);
+	ANKI_ASSERT((buff.getRange() % sizeof(DrawIndirectArgs)) == 0);
+	ANKI_ASSERT(sizeof(DrawIndirectArgs) * drawCount == buff.getRange());
 
-	vkCmdDrawIndirect(self.m_handle, impl.getHandle(), offset, drawCount, sizeof(DrawIndirectArgs));
+	vkCmdDrawIndirect(self.m_handle, impl.getHandle(), buff.getOffset(), drawCount, sizeof(DrawIndirectArgs));
 }
 
-void CommandBuffer::drawIndexedIndirectCount(PrimitiveTopology topology, Buffer* argBuffer, PtrSize argBufferOffset, U32 argBufferStride,
-											 Buffer* countBuffer, PtrSize countBufferOffset, U32 maxDrawCount)
+void CommandBuffer::drawIndexedIndirect(PrimitiveTopology topology, const BufferView& buff, U32 drawCount)
 {
+	ANKI_ASSERT(buff.isValid());
+	ANKI_ASSERT(drawCount > 0);
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.m_state.setPrimitiveTopology(topology);
 	self.drawcallCommon();
-	const BufferImpl& argBufferImpl = static_cast<const BufferImpl&>(*argBuffer);
-	ANKI_ASSERT(argBufferImpl.usageValid(BufferUsageBit::kIndirectDraw));
-	ANKI_ASSERT((argBufferOffset % 4) == 0);
-	ANKI_ASSERT(argBufferStride >= sizeof(DrawIndexedIndirectArgs));
-	ANKI_ASSERT(argBufferOffset + maxDrawCount * argBufferStride <= argBuffer->getSize());
-
-	const BufferImpl& countBufferImpl = static_cast<const BufferImpl&>(*countBuffer);
-	ANKI_ASSERT(countBufferImpl.usageValid(BufferUsageBit::kIndirectDraw));
-	ANKI_ASSERT((countBufferOffset % 4) == 0);
-	ANKI_ASSERT(countBufferOffset + sizeof(U32) <= countBuffer->getSize());
 
-	ANKI_ASSERT(maxDrawCount > 0 && maxDrawCount <= getGrManagerImpl().getDeviceCapabilities().m_maxDrawIndirectCount);
+	const BufferImpl& impl = static_cast<const BufferImpl&>(buff.getBuffer());
+	ANKI_ASSERT(impl.usageValid(BufferUsageBit::kIndirectDraw));
+	ANKI_ASSERT((buff.getOffset() % 4) == 0);
+	ANKI_ASSERT(sizeof(DrawIndexedIndirectArgs) * drawCount == buff.getRange());
 
-	vkCmdDrawIndexedIndirectCountKHR(self.m_handle, argBufferImpl.getHandle(), argBufferOffset, countBufferImpl.getHandle(), countBufferOffset,
-									 maxDrawCount, argBufferStride);
+	vkCmdDrawIndexedIndirect(self.m_handle, impl.getHandle(), buff.getOffset(), drawCount, sizeof(DrawIndexedIndirectArgs));
 }
 
-void CommandBuffer::drawIndirectCount(PrimitiveTopology topology, Buffer* argBuffer, PtrSize argBufferOffset, U32 argBufferStride,
-									  Buffer* countBuffer, PtrSize countBufferOffset, U32 maxDrawCount)
+void CommandBuffer::drawIndexedIndirectCount(PrimitiveTopology topology, const BufferView& argBuffer, U32 argBufferStride,
+											 const BufferView& countBuffer, U32 maxDrawCount)
 {
+	ANKI_ASSERT(argBuffer.isValid());
+	ANKI_ASSERT(countBuffer.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.m_state.setPrimitiveTopology(topology);
 	self.drawcallCommon();
-	const BufferImpl& argBufferImpl = static_cast<const BufferImpl&>(*argBuffer);
+
+	ANKI_ASSERT(argBufferStride >= sizeof(DrawIndexedIndirectArgs));
+
+	const BufferImpl& argBufferImpl = static_cast<const BufferImpl&>(argBuffer.getBuffer());
 	ANKI_ASSERT(argBufferImpl.usageValid(BufferUsageBit::kIndirectDraw));
-	ANKI_ASSERT((argBufferOffset % 4) == 0);
-	ANKI_ASSERT(argBufferStride >= sizeof(DrawIndirectArgs));
-	ANKI_ASSERT(argBufferOffset + maxDrawCount * argBufferStride <= argBuffer->getSize());
+	ANKI_ASSERT((argBuffer.getOffset() % 4) == 0);
+	ANKI_ASSERT((argBuffer.getRange() % argBufferStride) == 0);
+	ANKI_ASSERT(argBufferStride * maxDrawCount == argBuffer.getRange());
 
-	const BufferImpl& countBufferImpl = static_cast<const BufferImpl&>(*countBuffer);
+	const BufferImpl& countBufferImpl = static_cast<const BufferImpl&>(countBuffer.getBuffer());
 	ANKI_ASSERT(countBufferImpl.usageValid(BufferUsageBit::kIndirectDraw));
-	ANKI_ASSERT((countBufferOffset % 4) == 0);
-	ANKI_ASSERT(countBufferOffset + maxDrawCount * sizeof(U32) <= countBuffer->getSize());
+	ANKI_ASSERT((countBuffer.getOffset() % 4) == 0);
+	ANKI_ASSERT(countBuffer.getRange() == sizeof(U32));
 
 	ANKI_ASSERT(maxDrawCount > 0 && maxDrawCount <= getGrManagerImpl().getDeviceCapabilities().m_maxDrawIndirectCount);
 
-	vkCmdDrawIndirectCountKHR(self.m_handle, argBufferImpl.getHandle(), argBufferOffset, countBufferImpl.getHandle(), countBufferOffset, maxDrawCount,
-							  argBufferStride);
+	vkCmdDrawIndexedIndirectCountKHR(self.m_handle, argBufferImpl.getHandle(), argBuffer.getOffset(), countBufferImpl.getHandle(),
+									 countBuffer.getOffset(), maxDrawCount, argBufferStride);
 }
 
-void CommandBuffer::drawIndexedIndirect(PrimitiveTopology topology, U32 drawCount, PtrSize offset, Buffer* buff)
+void CommandBuffer::drawIndirectCount(PrimitiveTopology topology, const BufferView& argBuffer, U32 argBufferStride, const BufferView& countBuffer,
+									  U32 maxDrawCount)
 {
+	ANKI_ASSERT(argBuffer.isValid());
+	ANKI_ASSERT(countBuffer.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.m_state.setPrimitiveTopology(topology);
 	self.drawcallCommon();
-	const BufferImpl& impl = static_cast<const BufferImpl&>(*buff);
-	ANKI_ASSERT(impl.usageValid(BufferUsageBit::kIndirectDraw));
-	ANKI_ASSERT((offset % 4) == 0);
-	ANKI_ASSERT((offset + sizeof(DrawIndexedIndirectArgs) * drawCount) <= impl.getSize());
 
-	vkCmdDrawIndexedIndirect(self.m_handle, impl.getHandle(), offset, drawCount, sizeof(DrawIndexedIndirectArgs));
+	ANKI_ASSERT(argBufferStride >= sizeof(DrawIndirectArgs));
+
+	const BufferImpl& argBufferImpl = static_cast<const BufferImpl&>(argBuffer.getBuffer());
+	ANKI_ASSERT(argBufferImpl.usageValid(BufferUsageBit::kIndirectDraw));
+	ANKI_ASSERT((argBuffer.getOffset() % 4) == 0);
+	ANKI_ASSERT(maxDrawCount * argBufferStride == argBuffer.getRange());
+
+	const BufferImpl& countBufferImpl = static_cast<const BufferImpl&>(countBuffer.getBuffer());
+	ANKI_ASSERT(countBufferImpl.usageValid(BufferUsageBit::kIndirectDraw));
+	ANKI_ASSERT((countBuffer.getOffset() % 4) == 0);
+	ANKI_ASSERT(countBuffer.getRange() == sizeof(U32));
+
+	ANKI_ASSERT(maxDrawCount > 0 && maxDrawCount <= getGrManagerImpl().getDeviceCapabilities().m_maxDrawIndirectCount);
+
+	vkCmdDrawIndirectCountKHR(self.m_handle, argBufferImpl.getHandle(), argBuffer.getOffset(), countBufferImpl.getHandle(), countBuffer.getOffset(),
+							  maxDrawCount, argBufferStride);
 }
 
 void CommandBuffer::drawMeshTasks(U32 groupCountX, U32 groupCountY, U32 groupCountZ)
@@ -666,18 +696,22 @@ void CommandBuffer::drawMeshTasks(U32 groupCountX, U32 groupCountY, U32 groupCou
 	vkCmdDrawMeshTasksEXT(self.m_handle, groupCountX, groupCountY, groupCountZ);
 }
 
-void CommandBuffer::drawMeshTasksIndirect(Buffer* argBuffer, PtrSize argBufferOffset)
+void CommandBuffer::drawMeshTasksIndirect(const BufferView& argBuffer, U32 drawCount)
 {
-	ANKI_VK_SELF(CommandBufferImpl);
+	ANKI_ASSERT(argBuffer.isValid());
+	ANKI_ASSERT(drawCount > 0);
 	ANKI_ASSERT(!!(getGrManagerImpl().getExtensions() & VulkanExtensions::kEXT_mesh_shader));
-	ANKI_ASSERT((argBufferOffset % 4) == 0);
-	const BufferImpl& impl = static_cast<const BufferImpl&>(*argBuffer);
+
+	ANKI_ASSERT((argBuffer.getOffset() % 4) == 0);
+	ANKI_ASSERT(drawCount * sizeof(DispatchIndirectArgs) == argBuffer.getRange());
+	const BufferImpl& impl = static_cast<const BufferImpl&>(argBuffer.getBuffer());
 	ANKI_ASSERT(impl.usageValid(BufferUsageBit::kIndirectDraw));
-	ANKI_ASSERT((argBufferOffset + sizeof(DispatchIndirectArgs)) <= impl.getSize());
+
+	ANKI_VK_SELF(CommandBufferImpl);
 
 	self.m_state.setPrimitiveTopology(PrimitiveTopology::kTriangles); // Not sure if that's needed
 	self.drawcallCommon();
-	vkCmdDrawMeshTasksIndirectEXT(self.m_handle, impl.getHandle(), argBufferOffset, 1, sizeof(DispatchIndirectArgs));
+	vkCmdDrawMeshTasksIndirectEXT(self.m_handle, impl.getHandle(), argBuffer.getOffset(), drawCount, sizeof(DispatchIndirectArgs));
 }
 
 void CommandBuffer::dispatchCompute(U32 groupCountX, U32 groupCountY, U32 groupCountZ)
@@ -688,19 +722,23 @@ void CommandBuffer::dispatchCompute(U32 groupCountX, U32 groupCountY, U32 groupC
 	vkCmdDispatch(self.m_handle, groupCountX, groupCountY, groupCountZ);
 }
 
-void CommandBuffer::dispatchComputeIndirect(Buffer* argBuffer, PtrSize argBufferOffset)
+void CommandBuffer::dispatchComputeIndirect(const BufferView& argBuffer)
 {
+	ANKI_ASSERT(argBuffer.isValid());
+
+	ANKI_ASSERT(sizeof(DispatchIndirectArgs) == argBuffer.getRange());
+	ANKI_ASSERT(argBuffer.getOffset() % 4 == 0);
+
 	ANKI_VK_SELF(CommandBufferImpl);
-	ANKI_ASSERT(argBuffer);
-	ANKI_ASSERT(argBufferOffset + sizeof(U32) * 2 < argBuffer->getSize());
-	ANKI_ASSERT(argBufferOffset % 4 == 0);
 	self.dispatchCommon();
-	vkCmdDispatchIndirect(self.m_handle, static_cast<BufferImpl*>(argBuffer)->getHandle(), argBufferOffset);
+	vkCmdDispatchIndirect(self.m_handle, static_cast<BufferImpl&>(argBuffer.getBuffer()).getHandle(), argBuffer.getOffset());
 }
 
-void CommandBuffer::traceRays(Buffer* sbtBuffer, PtrSize sbtBufferOffset, U32 sbtRecordSize32, U32 hitGroupSbtRecordCount, U32 rayTypeCount,
-							  U32 width, U32 height, U32 depth)
+void CommandBuffer::traceRays(const BufferView& sbtBuffer, U32 sbtRecordSize32, U32 hitGroupSbtRecordCount, U32 rayTypeCount, U32 width, U32 height,
+							  U32 depth)
 {
+	ANKI_ASSERT(sbtBuffer.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	const PtrSize sbtRecordSize = sbtRecordSize32;
 	ANKI_ASSERT(hitGroupSbtRecordCount > 0);
@@ -713,8 +751,8 @@ void CommandBuffer::traceRays(Buffer* sbtBuffer, PtrSize sbtBufferOffset, U32 sb
 	ANKI_ASSERT((hitGroupSbtRecordCount % rayTypeCount) == 0);
 	const PtrSize sbtRecordCount = 1 + rayTypeCount + hitGroupSbtRecordCount;
 	[[maybe_unused]] const PtrSize sbtBufferSize = sbtRecordCount * sbtRecordSize;
-	ANKI_ASSERT(sbtBufferSize + sbtBufferOffset <= sbtBuffer->getSize());
-	ANKI_ASSERT(isAligned(getGrManagerImpl().getDeviceCapabilities().m_sbtRecordAlignment, sbtBufferOffset));
+	ANKI_ASSERT(sbtBufferSize <= sbtBuffer.getRange());
+	ANKI_ASSERT(isAligned(getGrManagerImpl().getDeviceCapabilities().m_sbtRecordAlignment, sbtBuffer.getOffset()));
 
 	self.commandCommon();
 
@@ -733,7 +771,7 @@ void CommandBuffer::traceRays(Buffer* sbtBuffer, PtrSize sbtBufferOffset, U32 sb
 	}
 
 	Array<VkStridedDeviceAddressRegionKHR, 4> regions;
-	const U64 stbBufferAddress = sbtBuffer->getGpuAddress() + sbtBufferOffset;
+	const U64 stbBufferAddress = sbtBuffer.getBuffer().getGpuAddress() + sbtBuffer.getOffset();
 	ANKI_ASSERT(isAligned(getGrManagerImpl().getDeviceCapabilities().m_sbtRecordAlignment, stbBufferAddress));
 
 	// Rgen
@@ -847,11 +885,6 @@ void CommandBuffer::generateMipmaps2d(TextureView* texView)
 	}
 }
 
-void CommandBuffer::generateMipmaps3d([[maybe_unused]] TextureView* texView)
-{
-	ANKI_ASSERT(!"TODO");
-}
-
 void CommandBuffer::blitTextureViews([[maybe_unused]] TextureView* srcView, [[maybe_unused]] TextureView* destView)
 {
 	ANKI_ASSERT(!"TODO");
@@ -880,8 +913,10 @@ void CommandBuffer::clearTextureView(TextureView* texView, const ClearValue& cle
 	}
 }
 
-void CommandBuffer::copyBufferToTextureView(Buffer* buff, PtrSize offset, [[maybe_unused]] PtrSize range, TextureView* texView)
+void CommandBuffer::copyBufferToTexture(const BufferView& buff, TextureView* texView)
 {
+	ANKI_ASSERT(buff.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.commandCommon();
 
@@ -904,11 +939,11 @@ void CommandBuffer::copyBufferToTextureView(Buffer* buff, PtrSize offset, [[mayb
 
 	if(!is3D)
 	{
-		ANKI_ASSERT(range == computeSurfaceSize(width, height, tex.getFormat()));
+		ANKI_ASSERT(buff.getRange() == computeSurfaceSize(width, height, tex.getFormat()));
 	}
 	else
 	{
-		ANKI_ASSERT(range == computeVolumeSize(width, height, depth, tex.getFormat()));
+		ANKI_ASSERT(buff.getRange() == computeVolumeSize(width, height, depth, tex.getFormat()));
 	}
 
 	// Copy
@@ -921,54 +956,53 @@ void CommandBuffer::copyBufferToTextureView(Buffer* buff, PtrSize offset, [[mayb
 	region.imageExtent.width = width;
 	region.imageExtent.height = height;
 	region.imageExtent.depth = depth;
-	region.bufferOffset = offset;
+	region.bufferOffset = buff.getOffset();
 	region.bufferImageHeight = 0;
 	region.bufferRowLength = 0;
 
-	vkCmdCopyBufferToImage(self.m_handle, static_cast<const BufferImpl&>(*buff).getHandle(), tex.m_imageHandle, layout, 1, &region);
+	vkCmdCopyBufferToImage(self.m_handle, static_cast<const BufferImpl&>(buff.getBuffer()).getHandle(), tex.m_imageHandle, layout, 1, &region);
 }
 
-void CommandBuffer::fillBuffer(Buffer* buff, PtrSize offset, PtrSize size, U32 value)
+void CommandBuffer::fillBuffer(const BufferView& buff, U32 value)
 {
+	ANKI_ASSERT(buff.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.commandCommon();
 	ANKI_ASSERT(!self.m_insideRenderpass);
-	const BufferImpl& impl = static_cast<const BufferImpl&>(*buff);
+	const BufferImpl& impl = static_cast<const BufferImpl&>(buff.getBuffer());
 	ANKI_ASSERT(impl.usageValid(BufferUsageBit::kTransferDestination));
 
-	ANKI_ASSERT(offset < impl.getSize());
-	ANKI_ASSERT((offset % 4) == 0 && "Should be multiple of 4");
+	ANKI_ASSERT((buff.getOffset() % 4) == 0 && "Should be multiple of 4");
+	ANKI_ASSERT((buff.getRange() % 4) == 0 && "Should be multiple of 4");
 
-	size = (size == kMaxPtrSize) ? (impl.getActualSize() - offset) : size;
-	alignRoundUp(4, size); // Needs to be multiple of 4
-	ANKI_ASSERT(offset + size <= impl.getActualSize());
-	ANKI_ASSERT((size % 4) == 0 && "Should be multiple of 4");
-
-	vkCmdFillBuffer(self.m_handle, impl.getHandle(), offset, size, value);
+	vkCmdFillBuffer(self.m_handle, impl.getHandle(), buff.getOffset(), buff.getRange(), value);
 }
 
-void CommandBuffer::writeOcclusionQueriesResultToBuffer(ConstWeakArray<OcclusionQuery*> queries, PtrSize offset, Buffer* buff)
+void CommandBuffer::writeOcclusionQueriesResultToBuffer(ConstWeakArray<OcclusionQuery*> queries, const BufferView& buff)
 {
+	ANKI_ASSERT(buff.isValid());
+
 	ANKI_VK_SELF(CommandBufferImpl);
 	ANKI_ASSERT(queries.getSize() > 0);
 	self.commandCommon();
 	ANKI_ASSERT(!self.m_insideRenderpass);
 
-	const BufferImpl& impl = static_cast<const BufferImpl&>(*buff);
+	ANKI_ASSERT(sizeof(U32) * queries.getSize() <= buff.getRange());
+	ANKI_ASSERT((buff.getOffset() % 4) == 0);
+
+	const BufferImpl& impl = static_cast<const BufferImpl&>(buff.getBuffer());
 	ANKI_ASSERT(impl.usageValid(BufferUsageBit::kTransferDestination));
 
 	for(U32 i = 0; i < queries.getSize(); ++i)
 	{
 		ANKI_ASSERT(queries[i]);
-		ANKI_ASSERT((offset % 4) == 0);
-		ANKI_ASSERT((offset + sizeof(U32)) <= impl.getSize());
 
 		OcclusionQueryImpl* q = static_cast<OcclusionQueryImpl*>(queries[i]);
 
-		vkCmdCopyQueryPoolResults(self.m_handle, q->m_handle.getQueryPool(), q->m_handle.getQueryIndex(), 1, impl.getHandle(), offset, sizeof(U32),
-								  VK_QUERY_RESULT_PARTIAL_BIT);
+		vkCmdCopyQueryPoolResults(self.m_handle, q->m_handle.getQueryPool(), q->m_handle.getQueryIndex(), 1, impl.getHandle(),
+								  buff.getOffset() * sizeof(U32) * i, sizeof(U32), VK_QUERY_RESULT_PARTIAL_BIT);
 
-		offset += sizeof(U32);
 		self.m_microCmdb->pushObjectRef(q);
 	}
 }
@@ -989,12 +1023,13 @@ void CommandBuffer::copyBufferToBuffer(Buffer* src, Buffer* dst, ConstWeakArray<
 					copies.getSize(), &vkCopies[0]);
 }
 
-void CommandBuffer::buildAccelerationStructure(AccelerationStructure* as, Buffer* scratchBuffer, PtrSize scratchBufferOffset)
+void CommandBuffer::buildAccelerationStructure(AccelerationStructure* as, const BufferView& scratchBuffer)
 {
-	ANKI_VK_SELF(CommandBufferImpl);
-	ANKI_ASSERT(as && scratchBuffer);
-	ANKI_ASSERT(as->getBuildScratchBufferSize() + scratchBufferOffset <= scratchBuffer->getSize());
+	ANKI_ASSERT(scratchBuffer.isValid());
+	ANKI_ASSERT(as);
+	ANKI_ASSERT(as->getBuildScratchBufferSize() <= scratchBuffer.getRange());
 
+	ANKI_VK_SELF(CommandBufferImpl);
 	self.commandCommon();
 
 	// Get objects
@@ -1003,7 +1038,7 @@ void CommandBuffer::buildAccelerationStructure(AccelerationStructure* as, Buffer
 	// Create the build info
 	VkAccelerationStructureBuildGeometryInfoKHR buildInfo;
 	VkAccelerationStructureBuildRangeInfoKHR rangeInfo;
-	asImpl.generateBuildInfo(scratchBuffer->getGpuAddress() + scratchBufferOffset, buildInfo, rangeInfo);
+	asImpl.generateBuildInfo(scratchBuffer.getBuffer().getGpuAddress() + scratchBuffer.getOffset(), buildInfo, rangeInfo);
 
 	// Run the command
 	Array<const VkAccelerationStructureBuildRangeInfoKHR*, 1> pRangeInfos = {&rangeInfo};
@@ -1158,8 +1193,8 @@ void CommandBuffer::setPipelineBarrier(ConstWeakArray<TextureBarrierInfo> textur
 
 	for(const BufferBarrierInfo& barrier : buffers)
 	{
-		ANKI_ASSERT(barrier.m_buffer);
-		const BufferImpl& impl = static_cast<const BufferImpl&>(*barrier.m_buffer);
+		ANKI_ASSERT(barrier.m_bufferView.isValid());
+		const BufferImpl& impl = static_cast<const BufferImpl&>(barrier.m_bufferView.getBuffer());
 
 		const VkBuffer handle = impl.getHandle();
 		VkPipelineStageFlags srcStage;

+ 3 - 4
AnKi/Renderer/AccelerationStructureBuilder.cpp

@@ -43,15 +43,14 @@ void AccelerationStructureBuilder::populateRenderGraph(RenderingContext& ctx)
 	AccelerationStructureInitInfo initInf("Main TLAS");
 	initInf.m_type = AccelerationStructureType::kTopLevel;
 	initInf.m_topLevel.m_indirectArgs.m_maxInstanceCount = GpuSceneArrays::RenderableBoundingVolumeRt::getSingleton().getElementCount();
-	initInf.m_topLevel.m_indirectArgs.m_instancesBuffer = visOut.m_instancesBuffer.m_buffer;
-	initInf.m_topLevel.m_indirectArgs.m_instancesBufferOffset = visOut.m_instancesBuffer.m_offset;
+	initInf.m_topLevel.m_indirectArgs.m_instancesBuffer = visOut.m_instancesBuffer;
 	m_runCtx.m_tlas = GrManager::getSingleton().newAccelerationStructure(initInf);
 
 	// Build the AS
 	{
 		RenderGraphDescription& rgraph = ctx.m_renderGraphDescr;
 
-		const BufferOffsetRange scratchBuff = GpuVisibleTransientMemoryPool::getSingleton().allocate(m_runCtx.m_tlas->getBuildScratchBufferSize());
+		const BufferView scratchBuff = GpuVisibleTransientMemoryPool::getSingleton().allocate(m_runCtx.m_tlas->getBuildScratchBufferSize());
 
 		m_runCtx.m_tlasHandle = rgraph.importAccelerationStructure(m_runCtx.m_tlas.get(), AccelerationStructureUsageBit::kNone);
 
@@ -61,7 +60,7 @@ void AccelerationStructureBuilder::populateRenderGraph(RenderingContext& ctx)
 
 		rpass.setWork([this, scratchBuff](RenderPassWorkContext& rgraphCtx) {
 			ANKI_TRACE_SCOPED_EVENT(ASBuilder);
-			rgraphCtx.m_commandBuffer->buildAccelerationStructure(m_runCtx.m_tlas.get(), scratchBuff.m_buffer, scratchBuff.m_offset);
+			rgraphCtx.m_commandBuffer->buildAccelerationStructure(m_runCtx.m_tlas.get(), scratchBuff);
 		});
 	}
 }

+ 2 - 2
AnKi/Renderer/AccelerationStructureBuilder.h

@@ -28,7 +28,7 @@ public:
 		return m_runCtx.m_tlasHandle;
 	}
 
-	void getVisibilityInfo(BufferHandle& handle, BufferOffsetRange& buffer) const
+	void getVisibilityInfo(BufferHandle& handle, BufferView& buffer) const
 	{
 		handle = m_runCtx.m_visibilityHandle;
 		buffer = m_runCtx.m_visibleRenderableIndicesBuff;
@@ -42,7 +42,7 @@ public:
 		AccelerationStructureHandle m_tlasHandle;
 
 		BufferHandle m_visibilityHandle;
-		BufferOffsetRange m_visibleRenderableIndicesBuff;
+		BufferView m_visibleRenderableIndicesBuff;
 	} m_runCtx;
 };
 /// @}

+ 25 - 24
AnKi/Renderer/ClusterBinning.cpp

@@ -53,17 +53,17 @@ void ClusterBinning::populateRenderGraph(RenderingContext& ctx)
 	{
 		const U32 clusterCount = getRenderer().getTileCounts().x() * getRenderer().getTileCounts().y() + getRenderer().getZSplitCount();
 		m_runCtx.m_clustersBuffer = GpuVisibleTransientMemoryPool::getSingleton().allocate(sizeof(Cluster) * clusterCount);
-		m_runCtx.m_clustersHandle = rgraph.importBuffer(BufferUsageBit::kNone, m_runCtx.m_clustersBuffer);
+		m_runCtx.m_clustersHandle = rgraph.importBuffer(m_runCtx.m_clustersBuffer, BufferUsageBit::kNone);
 	}
 
 	// Setup the indirect dispatches and zero the clusters buffer
-	BufferOffsetRange indirectArgsBuff;
+	BufferView indirectArgsBuff;
 	BufferHandle indirectArgsHandle;
 	{
 		// Allocate memory for the indirect args
 		constexpr U32 dispatchCount = U32(GpuSceneNonRenderableObjectType::kCount) * 2;
 		indirectArgsBuff = GpuVisibleTransientMemoryPool::getSingleton().allocate(sizeof(DispatchIndirectArgs) * dispatchCount);
-		indirectArgsHandle = rgraph.importBuffer(BufferUsageBit::kNone, indirectArgsBuff);
+		indirectArgsHandle = rgraph.importBuffer(indirectArgsBuff, BufferUsageBit::kNone);
 
 		// Create the pass
 		ComputeRenderPassDescription& rpass = rgraph.newComputeRenderPass("Cluster binning setup");
@@ -86,16 +86,16 @@ void ClusterBinning::populateRenderGraph(RenderingContext& ctx)
 
 			for(GpuSceneNonRenderableObjectType type : EnumIterable<GpuSceneNonRenderableObjectType>())
 			{
-				const BufferOffsetRange& buff = getRenderer().getPrimaryNonRenderableVisibility().getVisibleIndicesBuffer(type);
-				cmdb.bindStorageBuffer(0, 0, buff.m_buffer, buff.m_offset, buff.m_range, U32(type));
+				const BufferView& buff = getRenderer().getPrimaryNonRenderableVisibility().getVisibleIndicesBuffer(type);
+				cmdb.bindStorageBuffer(0, 0, buff, U32(type));
 			}
 
-			cmdb.bindStorageBuffer(0, 1, indirectArgsBuff.m_buffer, indirectArgsBuff.m_offset, indirectArgsBuff.m_range);
+			cmdb.bindStorageBuffer(0, 1, indirectArgsBuff);
 
 			cmdb.dispatchCompute(1, 1, 1);
 
 			// Now zero the clusters buffer
-			cmdb.fillBuffer(m_runCtx.m_clustersBuffer.m_buffer, m_runCtx.m_clustersBuffer.m_offset, m_runCtx.m_clustersBuffer.m_range, 0);
+			cmdb.fillBuffer(m_runCtx.m_clustersBuffer, 0);
 		});
 	}
 
@@ -110,13 +110,13 @@ void ClusterBinning::populateRenderGraph(RenderingContext& ctx)
 		rpass.setWork([this, &ctx, indirectArgsBuff](RenderPassWorkContext& rgraphCtx) {
 			CommandBuffer& cmdb = *rgraphCtx.m_commandBuffer;
 
-			PtrSize indirectArgsBuffOffset = indirectArgsBuff.m_offset;
+			PtrSize indirectArgsBuffOffset = indirectArgsBuff.getOffset();
 			for(GpuSceneNonRenderableObjectType type : EnumIterable<GpuSceneNonRenderableObjectType>())
 			{
 				cmdb.bindShaderProgram(m_binningGrProgs[type].get());
 
-				const BufferOffsetRange& idsBuff = getRenderer().getPrimaryNonRenderableVisibility().getVisibleIndicesBuffer(type);
-				cmdb.bindStorageBuffer(0, 0, idsBuff.m_buffer, idsBuff.m_offset, idsBuff.m_range);
+				const BufferView& idsBuff = getRenderer().getPrimaryNonRenderableVisibility().getVisibleIndicesBuffer(type);
+				cmdb.bindStorageBuffer(0, 0, idsBuff);
 
 				PtrSize objBufferOffset = 0;
 				PtrSize objBufferRange = 0;
@@ -149,12 +149,11 @@ void ClusterBinning::populateRenderGraph(RenderingContext& ctx)
 				if(objBufferRange == 0)
 				{
 					objBufferOffset = 0;
-					objBufferRange = kMaxPtrSize;
+					objBufferRange = GpuSceneBuffer::getSingleton().getBufferView().getRange();
 				}
 
-				cmdb.bindStorageBuffer(0, 1, &GpuSceneBuffer::getSingleton().getBuffer(), objBufferOffset, objBufferRange);
-				cmdb.bindStorageBuffer(0, 2, m_runCtx.m_clustersBuffer.m_buffer, m_runCtx.m_clustersBuffer.m_offset,
-									   m_runCtx.m_clustersBuffer.m_range);
+				cmdb.bindStorageBuffer(0, 1, BufferView(&GpuSceneBuffer::getSingleton().getBuffer(), objBufferOffset, objBufferRange));
+				cmdb.bindStorageBuffer(0, 2, m_runCtx.m_clustersBuffer);
 
 				struct ClusterBinningUniforms
 				{
@@ -191,7 +190,8 @@ void ClusterBinning::populateRenderGraph(RenderingContext& ctx)
 
 				cmdb.setPushConstants(&unis, sizeof(unis));
 
-				cmdb.dispatchComputeIndirect(indirectArgsBuff.m_buffer, indirectArgsBuffOffset);
+				cmdb.dispatchComputeIndirect(BufferView(indirectArgsBuff).setOffset(indirectArgsBuffOffset).setRange(sizeof(DispatchIndirectArgs)));
+
 				indirectArgsBuffOffset += sizeof(DispatchIndirectArgs);
 			}
 		});
@@ -204,7 +204,7 @@ void ClusterBinning::populateRenderGraph(RenderingContext& ctx)
 		{
 			m_runCtx.m_packedObjectsBuffers[type] =
 				GpuVisibleTransientMemoryPool::getSingleton().allocate(kClusteredObjectSizes[type] * kMaxVisibleClusteredObjects[type]);
-			m_runCtx.m_packedObjectsHandles[type] = rgraph.importBuffer(BufferUsageBit::kNone, m_runCtx.m_packedObjectsBuffers[type]);
+			m_runCtx.m_packedObjectsHandles[type] = rgraph.importBuffer(m_runCtx.m_packedObjectsBuffers[type], BufferUsageBit::kNone);
 		}
 
 		// Create the pass
@@ -218,7 +218,8 @@ void ClusterBinning::populateRenderGraph(RenderingContext& ctx)
 		rpass.setWork([this, indirectArgsBuff](RenderPassWorkContext& rgraphCtx) {
 			CommandBuffer& cmdb = *rgraphCtx.m_commandBuffer;
 
-			PtrSize indirectArgsBuffOffset = indirectArgsBuff.m_offset + sizeof(DispatchIndirectArgs) * U32(GpuSceneNonRenderableObjectType::kCount);
+			PtrSize indirectArgsBuffOffset =
+				indirectArgsBuff.getOffset() + sizeof(DispatchIndirectArgs) * U32(GpuSceneNonRenderableObjectType::kCount);
 			for(GpuSceneNonRenderableObjectType type : EnumIterable<GpuSceneNonRenderableObjectType>())
 			{
 				cmdb.bindShaderProgram(m_packingGrProgs[type].get());
@@ -254,17 +255,17 @@ void ClusterBinning::populateRenderGraph(RenderingContext& ctx)
 				if(objBufferRange == 0)
 				{
 					objBufferOffset = 0;
-					objBufferRange = kMaxPtrSize;
+					objBufferRange = GpuSceneBuffer::getSingleton().getBufferView().getRange();
 				}
 
-				cmdb.bindStorageBuffer(0, 0, &GpuSceneBuffer::getSingleton().getBuffer(), objBufferOffset, objBufferRange);
-				cmdb.bindStorageBuffer(0, 1, m_runCtx.m_packedObjectsBuffers[type].m_buffer, m_runCtx.m_packedObjectsBuffers[type].m_offset,
-									   m_runCtx.m_packedObjectsBuffers[type].m_range);
+				cmdb.bindStorageBuffer(0, 0, BufferView(&GpuSceneBuffer::getSingleton().getBuffer(), objBufferOffset, objBufferRange));
+				cmdb.bindStorageBuffer(0, 1, m_runCtx.m_packedObjectsBuffers[type]);
+
+				const BufferView& idsBuff = getRenderer().getPrimaryNonRenderableVisibility().getVisibleIndicesBuffer(type);
+				cmdb.bindStorageBuffer(0, 2, idsBuff);
 
-				const BufferOffsetRange& idsBuff = getRenderer().getPrimaryNonRenderableVisibility().getVisibleIndicesBuffer(type);
-				cmdb.bindStorageBuffer(0, 2, idsBuff.m_buffer, idsBuff.m_offset, idsBuff.m_range);
+				cmdb.dispatchComputeIndirect(BufferView(indirectArgsBuff).setOffset(indirectArgsBuffOffset).setRange(sizeof(DispatchIndirectArgs)));
 
-				cmdb.dispatchComputeIndirect(indirectArgsBuff.m_buffer, indirectArgsBuffOffset);
 				indirectArgsBuffOffset += sizeof(DispatchIndirectArgs);
 			}
 		});

+ 4 - 4
AnKi/Renderer/ClusterBinning.h

@@ -25,7 +25,7 @@ public:
 	/// Populate the rendergraph.
 	void populateRenderGraph(RenderingContext& ctx);
 
-	const BufferOffsetRange& getPackedObjectsBuffer(GpuSceneNonRenderableObjectType type) const
+	const BufferView& getPackedObjectsBuffer(GpuSceneNonRenderableObjectType type) const
 	{
 		return m_runCtx.m_packedObjectsBuffers[type];
 	}
@@ -35,7 +35,7 @@ public:
 		return m_runCtx.m_packedObjectsHandles[type];
 	}
 
-	const BufferOffsetRange& getClustersBuffer() const
+	const BufferView& getClustersBuffer() const
 	{
 		return m_runCtx.m_clustersBuffer;
 	}
@@ -59,10 +59,10 @@ private:
 	{
 	public:
 		BufferHandle m_clustersHandle;
-		BufferOffsetRange m_clustersBuffer;
+		BufferView m_clustersBuffer;
 
 		Array<BufferHandle, U32(GpuSceneNonRenderableObjectType::kCount)> m_packedObjectsHandles;
-		Array<BufferOffsetRange, U32(GpuSceneNonRenderableObjectType::kCount)> m_packedObjectsBuffers;
+		Array<BufferView, U32(GpuSceneNonRenderableObjectType::kCount)> m_packedObjectsBuffers;
 
 		RenderingContext* m_rctx = nullptr;
 	} m_runCtx;

+ 1 - 1
AnKi/Renderer/Common.h

@@ -87,7 +87,7 @@ public:
 
 	Array<Mat4, kMaxShadowCascades> m_dirLightTextureMatrices;
 
-	BufferOffsetRange m_globalRenderingUniformsBuffer;
+	BufferView m_globalRenderingUniformsBuffer;
 
 	RenderingContext(StackMemoryPool* pool)
 		: m_renderGraphDescr(pool)

+ 7 - 7
AnKi/Renderer/Dbg.cpp

@@ -183,13 +183,13 @@ void Dbg::run(RenderPassWorkContext& rgraphCtx, const RenderingContext& ctx)
 		unis.m_viewProjMat = ctx.m_matrices.m_viewProjection;
 
 		cmdb.setPushConstants(&unis, sizeof(unis));
-		cmdb.bindVertexBuffer(0, m_cubeVertsBuffer.get(), 0, sizeof(Vec3));
+		cmdb.bindVertexBuffer(0, BufferView(m_cubeVertsBuffer.get()), sizeof(Vec3));
 		cmdb.setVertexAttribute(VertexAttribute::kPosition, 0, Format::kR32G32B32_Sfloat, 0);
-		cmdb.bindIndexBuffer(m_cubeIndicesBuffer.get(), 0, IndexType::kU16);
+		cmdb.bindIndexBuffer(BufferView(m_cubeIndicesBuffer.get()), IndexType::kU16);
 
-		cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::RenderableBoundingVolumeGBuffer::getSingleton().getBufferOffsetRange());
+		cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::RenderableBoundingVolumeGBuffer::getSingleton().getBufferView());
 
-		BufferOffsetRange indicesBuff;
+		BufferView indicesBuff;
 		BufferHandle dep;
 		getRenderer().getGBuffer().getVisibleAabbsBuffer(indicesBuff, dep);
 		cmdb.bindStorageBuffer(0, 3, indicesBuff);
@@ -203,9 +203,9 @@ void Dbg::run(RenderPassWorkContext& rgraphCtx, const RenderingContext& ctx)
 
 		if(allAabbCount)
 		{
-			cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::RenderableBoundingVolumeForward::getSingleton().getBufferOffsetRange());
+			cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::RenderableBoundingVolumeForward::getSingleton().getBufferView());
 
-			BufferOffsetRange indicesBuff;
+			BufferView indicesBuff;
 			BufferHandle dep;
 			getRenderer().getForwardShading().getVisibleAabbsBuffer(indicesBuff, dep);
 			cmdb.bindStorageBuffer(0, 3, indicesBuff);
@@ -257,7 +257,7 @@ void Dbg::populateRenderGraph(RenderingContext& ctx)
 	pass.newTextureDependency(m_runCtx.m_rt, TextureUsageBit::kFramebufferWrite);
 	pass.newTextureDependency(getRenderer().getGBuffer().getDepthRt(), TextureUsageBit::kSampledFragment | TextureUsageBit::kFramebufferRead);
 
-	BufferOffsetRange indicesBuff;
+	BufferView indicesBuff;
 	BufferHandle dep;
 	getRenderer().getGBuffer().getVisibleAabbsBuffer(indicesBuff, dep);
 	pass.newBufferDependency(dep, BufferUsageBit::kStorageGeometryRead);

+ 2 - 2
AnKi/Renderer/DepthDownscale.cpp

@@ -68,7 +68,7 @@ Error DepthDownscale::initInternal()
 		cmdbInit.m_flags |= CommandBufferFlag::kSmallBatch;
 		CommandBufferPtr cmdb = GrManager::getSingleton().newCommandBuffer(cmdbInit);
 
-		cmdb->fillBuffer(m_counterBuffer.get(), 0, kMaxPtrSize, 0);
+		cmdb->fillBuffer(BufferView(m_counterBuffer.get()), 0);
 
 		FencePtr fence;
 		cmdb->endRecording();
@@ -147,7 +147,7 @@ void DepthDownscale::populateRenderGraph(RenderingContext& ctx)
 				rgraphCtx.bindStorageTexture(0, 0, m_runCtx.m_rt, subresource, mip);
 			}
 
-			cmdb.bindStorageBuffer(0, 1, m_counterBuffer.get(), 0, sizeof(U32));
+			cmdb.bindStorageBuffer(0, 1, BufferView(m_counterBuffer.get(), 0, sizeof(U32)));
 
 			cmdb.bindSampler(0, 2, getRenderer().getSamplers().m_trilinearClamp.get());
 			rgraphCtx.bindTexture(0, 3, getRenderer().getGBuffer().getDepthRt(), TextureSubresourceInfo(DepthStencilAspectBit::kDepth));

+ 2 - 2
AnKi/Renderer/ForwardShading.h

@@ -33,11 +33,11 @@ public:
 	void run(const RenderingContext& ctx, RenderPassWorkContext& rgraphCtx);
 
 	/// Returns a buffer with indices of the visible AABBs. Used in debug drawing.
-	void getVisibleAabbsBuffer(BufferOffsetRange& visibleAaabbIndicesBuffer, BufferHandle& dep) const
+	void getVisibleAabbsBuffer(BufferView& visibleAaabbIndicesBuffer, BufferHandle& dep) const
 	{
 		visibleAaabbIndicesBuffer = m_runCtx.m_visOut.m_visibleAaabbIndicesBuffer;
 		dep = m_runCtx.m_visOut.m_dependency;
-		ANKI_ASSERT(visibleAaabbIndicesBuffer.m_buffer != nullptr && dep.isValid());
+		ANKI_ASSERT(visibleAaabbIndicesBuffer.isValid() && dep.isValid());
 	}
 
 private:

+ 3 - 3
AnKi/Renderer/GBuffer.h

@@ -60,11 +60,11 @@ public:
 							  [[maybe_unused]] ShaderProgramPtr& optionalShaderProgram) const override;
 
 	/// Returns a buffer with indices of the visible AABBs. Used in debug drawing.
-	void getVisibleAabbsBuffer(BufferOffsetRange& visibleAaabbIndicesBuffer, BufferHandle& dep) const
+	void getVisibleAabbsBuffer(BufferView& visibleAaabbIndicesBuffer, BufferHandle& dep) const
 	{
 		visibleAaabbIndicesBuffer = m_runCtx.m_visibleAaabbIndicesBuffer;
 		dep = m_runCtx.m_visibleAaabbIndicesBufferDepedency;
-		ANKI_ASSERT(visibleAaabbIndicesBuffer.m_buffer != nullptr && dep.isValid());
+		ANKI_ASSERT(visibleAaabbIndicesBuffer.isValid() && dep.isValid());
 	}
 
 private:
@@ -83,7 +83,7 @@ private:
 		RenderTargetHandle m_prevFrameDepthRt;
 		RenderTargetHandle m_hzbRt;
 
-		BufferOffsetRange m_visibleAaabbIndicesBuffer; ///< Optional
+		BufferView m_visibleAaabbIndicesBuffer; ///< Optional
 		BufferHandle m_visibleAaabbIndicesBufferDepedency;
 	} m_runCtx;
 

+ 2 - 3
AnKi/Renderer/LensFlare.cpp

@@ -55,7 +55,7 @@ void LensFlare::populateRenderGraph(RenderingContext& ctx)
 
 	// Create indirect buffer
 	m_runCtx.m_indirectBuff = GpuVisibleTransientMemoryPool::getSingleton().allocate(sizeof(DrawIndirectArgs) * flareCount);
-	m_runCtx.m_indirectBuffHandle = rgraph.importBuffer(BufferUsageBit::kNone, m_runCtx.m_indirectBuff);
+	m_runCtx.m_indirectBuffHandle = rgraph.importBuffer(m_runCtx.m_indirectBuff, BufferUsageBit::kNone);
 
 	// Create the pass
 	ComputeRenderPassDescription& rpass = rgraph.newComputeRenderPass("Lens flare indirect");
@@ -141,8 +141,7 @@ void LensFlare::runDrawFlares(const RenderingContext& ctx, CommandBuffer& cmdb)
 		cmdb.bindSampler(0, 1, getRenderer().getSamplers().m_trilinearRepeat.get());
 		cmdb.bindTexture(0, 2, &comp.getImage().getTextureView());
 
-		cmdb.drawIndirect(PrimitiveTopology::kTriangleStrip, 1, count * sizeof(DrawIndirectArgs) + m_runCtx.m_indirectBuff.m_offset,
-						  m_runCtx.m_indirectBuff.m_buffer);
+		cmdb.drawIndirect(PrimitiveTopology::kTriangleStrip, BufferView(m_runCtx.m_indirectBuff).incrementOffset(count * sizeof(DrawIndirectArgs)));
 
 		++count;
 	}

+ 1 - 1
AnKi/Renderer/LensFlare.h

@@ -43,7 +43,7 @@ private:
 	class
 	{
 	public:
-		BufferOffsetRange m_indirectBuff;
+		BufferView m_indirectBuff;
 		BufferHandle m_indirectBuffHandle;
 	} m_runCtx;
 

+ 2 - 4
AnKi/Renderer/PrimaryNonRenderableVisibility.cpp

@@ -85,7 +85,7 @@ void PrimaryNonRenderableVisibility::populateRenderGraph(RenderingContext& ctx)
 			memset(mem, 0, sizeof(U32));
 
 			m_runCtx.m_visibleIndicesBuffers[type] = alloc;
-			m_runCtx.m_visibleIndicesHandles[type] = rgraph.importBuffer(BufferUsageBit::kNone, m_runCtx.m_visibleIndicesBuffers[type]);
+			m_runCtx.m_visibleIndicesHandles[type] = rgraph.importBuffer(m_runCtx.m_visibleIndicesBuffers[type], BufferUsageBit::kNone);
 		}
 		else
 		{
@@ -134,9 +134,7 @@ void PrimaryNonRenderableVisibility::populateRenderGraph(RenderingContext& ctx)
 				}
 
 				// Allocate feedback buffer for this frame
-				in.m_cpuFeedbackBuffer.m_range = (objCount * 2 + 1) * sizeof(U32);
-				getRenderer().getReadbackManager().allocateData(m_readbacks[feedbackType], in.m_cpuFeedbackBuffer.m_range,
-																in.m_cpuFeedbackBuffer.m_buffer, in.m_cpuFeedbackBuffer.m_offset);
+				getRenderer().getReadbackManager().allocateData(m_readbacks[feedbackType], (objCount * 2 + 1) * sizeof(U32), in.m_cpuFeedbackBuffer);
 			}
 
 			GpuVisibilityNonRenderablesOutput out;

+ 2 - 2
AnKi/Renderer/PrimaryNonRenderableVisibility.h

@@ -46,7 +46,7 @@ public:
 		return m_runCtx.m_visibleIndicesHandles[type];
 	}
 
-	const BufferOffsetRange& getVisibleIndicesBuffer(GpuSceneNonRenderableObjectType type) const
+	const BufferView& getVisibleIndicesBuffer(GpuSceneNonRenderableObjectType type) const
 	{
 		return m_runCtx.m_visibleIndicesBuffers[type];
 	}
@@ -58,7 +58,7 @@ private:
 	{
 	public:
 		Array<BufferHandle, U32(GpuSceneNonRenderableObjectType::kCount)> m_visibleIndicesHandles;
-		Array<BufferOffsetRange, U32(GpuSceneNonRenderableObjectType::kCount)> m_visibleIndicesBuffers;
+		Array<BufferView, U32(GpuSceneNonRenderableObjectType::kCount)> m_visibleIndicesBuffers;
 
 		/// Feedback from the GPU
 		InterestingVisibleComponents m_interestingComponents;

+ 3 - 3
AnKi/Renderer/ProbeReflections.cpp

@@ -181,7 +181,7 @@ void ProbeReflections::populateRenderGraph(RenderingContext& rctx)
 	// Create render targets now to save memory
 	const RenderTargetHandle probeTexture = rgraph.importRenderTarget(&probeToRefresh->getReflectionTexture(), TextureUsageBit::kNone);
 	m_runCtx.m_probeTex = probeTexture;
-	const BufferHandle irradianceDiceValuesBuffHandle = rgraph.importBuffer(m_irradiance.m_diceValuesBuff.get(), BufferUsageBit::kNone);
+	const BufferHandle irradianceDiceValuesBuffHandle = rgraph.importBuffer(BufferView(m_irradiance.m_diceValuesBuff.get()), BufferUsageBit::kNone);
 	const RenderTargetHandle gbufferDepthRt = rgraph.newRenderTarget(m_gbuffer.m_depthRtDescr);
 	const RenderTargetHandle shadowMapRt = (doShadows) ? rgraph.newRenderTarget(m_shadowMapping.m_rtDescr) : RenderTargetHandle();
 
@@ -465,7 +465,7 @@ void ProbeReflections::populateRenderGraph(RenderingContext& rctx)
 
 			rgraphCtx.bindColorTexture(0, 1, probeTexture);
 
-			cmdb.bindStorageBuffer(0, 3, m_irradiance.m_diceValuesBuff.get(), 0, m_irradiance.m_diceValuesBuff->getSize());
+			cmdb.bindStorageBuffer(0, 3, BufferView(m_irradiance.m_diceValuesBuff.get()));
 
 			cmdb.dispatchCompute(1, 1, 1);
 		});
@@ -499,7 +499,7 @@ void ProbeReflections::populateRenderGraph(RenderingContext& rctx)
 				rgraphCtx.bindColorTexture(0, 1, gbufferColorRts[i], i);
 			}
 
-			cmdb.bindStorageBuffer(0, 2, m_irradiance.m_diceValuesBuff.get(), 0, m_irradiance.m_diceValuesBuff->getSize());
+			cmdb.bindStorageBuffer(0, 2, BufferView(m_irradiance.m_diceValuesBuff.get()));
 
 			for(U8 f = 0; f < 6; ++f)
 			{

+ 1 - 1
AnKi/Renderer/Renderer.cpp

@@ -688,7 +688,7 @@ void Renderer::gpuSceneCopy(RenderingContext& ctx)
 	RenderGraphDescription& rgraph = ctx.m_renderGraphDescr;
 
 	m_runCtx.m_gpuSceneHandle =
-		rgraph.importBuffer(&GpuSceneBuffer::getSingleton().getBuffer(), GpuSceneBuffer::getSingleton().getBuffer().getBufferUsage());
+		rgraph.importBuffer(GpuSceneBuffer::getSingleton().getBufferView(), GpuSceneBuffer::getSingleton().getBuffer().getBufferUsage());
 
 	if(GpuSceneMicroPatcher::getSingleton().patchingIsNeeded())
 	{

+ 1 - 1
AnKi/Renderer/RendererObject.cpp

@@ -101,7 +101,7 @@ void RendererObject::zeroBuffer(Buffer* buff)
 	cmdbInit.m_flags |= CommandBufferFlag::kSmallBatch;
 	CommandBufferPtr cmdb = GrManager::getSingleton().newCommandBuffer(cmdbInit);
 
-	cmdb->fillBuffer(buff, 0, kMaxPtrSize, 0);
+	cmdb->fillBuffer(BufferView(buff), 0);
 
 	FencePtr fence;
 	cmdb->endRecording();

+ 17 - 16
AnKi/Renderer/RtShadows.cpp

@@ -209,10 +209,10 @@ void RtShadows::populateRenderGraph(RenderingContext& ctx)
 
 	// Setup build SBT dispatch
 	BufferHandle sbtBuildIndirectArgsHandle;
-	BufferOffsetRange sbtBuildIndirectArgsBuffer;
+	BufferView sbtBuildIndirectArgsBuffer;
 	{
 		sbtBuildIndirectArgsBuffer = GpuVisibleTransientMemoryPool::getSingleton().allocate(sizeof(DispatchIndirectArgs));
-		sbtBuildIndirectArgsHandle = rgraph.importBuffer(BufferUsageBit::kStorageComputeWrite, sbtBuildIndirectArgsBuffer);
+		sbtBuildIndirectArgsHandle = rgraph.importBuffer(sbtBuildIndirectArgsBuffer, BufferUsageBit::kStorageComputeWrite);
 
 		ComputeRenderPassDescription& rpass = rgraph.newComputeRenderPass("RtShadows setup build SBT");
 
@@ -224,7 +224,7 @@ void RtShadows::populateRenderGraph(RenderingContext& ctx)
 
 			cmdb.bindShaderProgram(m_setupBuildSbtGrProg.get());
 
-			cmdb.bindStorageBuffer(0, 0, GpuSceneArrays::RenderableBoundingVolumeRt::getSingleton().getBufferOffsetRange());
+			cmdb.bindStorageBuffer(0, 0, GpuSceneArrays::RenderableBoundingVolumeRt::getSingleton().getBufferView());
 			cmdb.bindStorageBuffer(0, 1, sbtBuildIndirectArgsBuffer);
 
 			cmdb.dispatchCompute(1, 1, 1);
@@ -233,13 +233,13 @@ void RtShadows::populateRenderGraph(RenderingContext& ctx)
 
 	// Build the SBT
 	BufferHandle sbtHandle;
-	BufferOffsetRange sbtBuffer;
+	BufferView sbtBuffer;
 	{
 		// Allocate SBT
 		U8* sbtMem;
 		sbtBuffer = RebarTransientMemoryPool::getSingleton().allocateFrame(
 			(GpuSceneArrays::RenderableBoundingVolumeRt::getSingleton().getElementCount() + 2) * m_sbtRecordSize, sbtMem);
-		sbtHandle = rgraph.importBuffer(BufferUsageBit::kStorageComputeWrite, sbtBuffer);
+		sbtHandle = rgraph.importBuffer(sbtBuffer, BufferUsageBit::kStorageComputeWrite);
 
 		// Write the first 2 entries of the SBT
 		ConstWeakArray<U8> shaderGroupHandles = m_rtLibraryGrProg->getShaderGroupHandles();
@@ -251,7 +251,7 @@ void RtShadows::populateRenderGraph(RenderingContext& ctx)
 		ComputeRenderPassDescription& rpass = rgraph.newComputeRenderPass("RtShadows build SBT");
 
 		BufferHandle visibilityHandle;
-		BufferOffsetRange visibleRenderableIndicesBuff;
+		BufferView visibleRenderableIndicesBuff;
 		getRenderer().getAccelerationStructureBuilder().getVisibilityInfo(visibilityHandle, visibleRenderableIndicesBuff);
 
 		rpass.newBufferDependency(visibilityHandle, BufferUsageBit::kStorageComputeRead);
@@ -263,10 +263,10 @@ void RtShadows::populateRenderGraph(RenderingContext& ctx)
 
 			cmdb.bindShaderProgram(m_buildSbtGrProg.get());
 
-			cmdb.bindStorageBuffer(0, 0, GpuSceneArrays::Renderable::getSingleton().getBufferOffsetRange());
-			cmdb.bindStorageBuffer(0, 1, &GpuSceneBuffer::getSingleton().getBuffer(), 0, kMaxPtrSize);
+			cmdb.bindStorageBuffer(0, 0, GpuSceneArrays::Renderable::getSingleton().getBufferView());
+			cmdb.bindStorageBuffer(0, 1, BufferView(&GpuSceneBuffer::getSingleton().getBuffer()));
 			cmdb.bindStorageBuffer(0, 2, visibleRenderableIndicesBuff);
-			cmdb.bindStorageBuffer(0, 3, &m_rtLibraryGrProg->getShaderGroupHandlesGpuBuffer(), 0, kMaxPtrSize);
+			cmdb.bindStorageBuffer(0, 3, BufferView(&m_rtLibraryGrProg->getShaderGroupHandlesGpuBuffer()));
 			cmdb.bindStorageBuffer(0, 4, sbtBuffer);
 
 			RtShadowsSbtBuildUniforms unis = {};
@@ -277,7 +277,7 @@ void RtShadows::populateRenderGraph(RenderingContext& ctx)
 			unis.m_shaderHandleDwordSize = shaderHandleSize / 4;
 			cmdb.setPushConstants(&unis, sizeof(unis));
 
-			cmdb.dispatchComputeIndirect(sbtBuildIndirectArgsBuffer.m_buffer, sbtBuildIndirectArgsBuffer.m_offset);
+			cmdb.dispatchComputeIndirect(sbtBuildIndirectArgsBuffer);
 		});
 	}
 
@@ -318,12 +318,14 @@ void RtShadows::populateRenderGraph(RenderingContext& ctx)
 			cmdb.bindAllBindless(U32(MaterialSet::kBindless));
 			cmdb.bindSampler(U32(MaterialSet::kGlobal), U32(MaterialBinding::kTrilinearRepeatSampler),
 							 getRenderer().getSamplers().m_trilinearRepeat.get());
-			cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kGpuScene), &GpuSceneBuffer::getSingleton().getBuffer(), 0,
-								   kMaxPtrSize);
+			cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kGpuScene), GpuSceneBuffer::getSingleton().getBufferView());
 
 #define ANKI_UNIFIED_GEOM_FORMAT(fmt, shaderType) \
-	cmdb.bindReadOnlyTexelBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kUnifiedGeometry_##fmt), \
-								 &UnifiedGeometryBuffer::getSingleton().getBuffer(), 0, kMaxPtrSize, Format::k##fmt);
+	cmdb.bindReadOnlyTexelBuffer( \
+		U32(MaterialSet::kGlobal), U32(MaterialBinding::kUnifiedGeometry_##fmt), \
+		BufferView(&UnifiedGeometryBuffer::getSingleton().getBuffer(), 0, \
+				   getAlignedRoundDown(getFormatInfo(Format::k##fmt).m_texelSize, UnifiedGeometryBuffer::getSingleton().getBuffer().getSize())), \
+		Format::k##fmt);
 #include <AnKi/Shaders/Include/UnifiedGeometryTypes.def.h>
 
 			constexpr U32 kSet = 2;
@@ -345,8 +347,7 @@ void RtShadows::populateRenderGraph(RenderingContext& ctx)
 			rgraphCtx.bindStorageTexture(kSet, 12, m_runCtx.m_currentMomentsRt);
 			cmdb.bindTexture(kSet, 13, &m_blueNoiseImage->getTextureView());
 
-			cmdb.traceRays(sbtBuffer.m_buffer, sbtBuffer.m_offset, m_sbtRecordSize,
-						   GpuSceneArrays::RenderableBoundingVolumeRt::getSingleton().getElementCount(), 1,
+			cmdb.traceRays(sbtBuffer, m_sbtRecordSize, GpuSceneArrays::RenderableBoundingVolumeRt::getSingleton().getElementCount(), 1,
 						   getRenderer().getInternalResolution().x() / 2, getRenderer().getInternalResolution().y() / 2, 1);
 		});
 	}

+ 11 - 12
AnKi/Renderer/ShadowMapping.cpp

@@ -365,7 +365,7 @@ void ShadowMapping::processLights(RenderingContext& ctx)
 
 			// Vet visibility
 			const Bool renderAllways = !(result & TileAllocatorResult2::kTileCached);
-			BufferOffsetRange clearTileIndirectArgs;
+			BufferView clearTileIndirectArgs;
 			if(!renderAllways)
 			{
 				clearTileIndirectArgs = createVetVisibilityPass(generateTempPassName("Shadows: Vet point light", lightIdx), *lightc, visOut, rgraph);
@@ -455,7 +455,7 @@ void ShadowMapping::processLights(RenderingContext& ctx)
 
 			// Vet visibility
 			const Bool renderAllways = !(result & TileAllocatorResult2::kTileCached);
-			BufferOffsetRange clearTileIndirectArgs;
+			BufferView clearTileIndirectArgs;
 			if(!renderAllways)
 			{
 				clearTileIndirectArgs = createVetVisibilityPass(generateTempPassName("Shadows: Vet spot light", lightIdx), *lightc, visOut, rgraph);
@@ -573,10 +573,10 @@ void ShadowMapping::processLights(RenderingContext& ctx)
 	}
 }
 
-BufferOffsetRange ShadowMapping::createVetVisibilityPass(CString passName, const LightComponent& lightc, const GpuVisibilityOutput& visOut,
-														 RenderGraphDescription& rgraph) const
+BufferView ShadowMapping::createVetVisibilityPass(CString passName, const LightComponent& lightc, const GpuVisibilityOutput& visOut,
+												  RenderGraphDescription& rgraph) const
 {
-	BufferOffsetRange clearTileIndirectArgs;
+	BufferView clearTileIndirectArgs;
 
 	clearTileIndirectArgs = GpuVisibleTransientMemoryPool::getSingleton().allocate(sizeof(DrawIndirectArgs));
 
@@ -596,8 +596,8 @@ BufferOffsetRange ShadowMapping::createVetVisibilityPass(CString passName, const
 
 		cmdb.bindStorageBuffer(0, 0, hashBuff);
 		cmdb.bindStorageBuffer(0, 1, mdiBuff);
-		cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::Light::getSingleton().getBufferOffsetRange());
-		cmdb.bindStorageBuffer(0, 3, GpuSceneArrays::LightVisibleRenderablesHash::getSingleton().getBufferOffsetRange());
+		cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::Light::getSingleton().getBufferView());
+		cmdb.bindStorageBuffer(0, 3, GpuSceneArrays::LightVisibleRenderablesHash::getSingleton().getBufferView());
 		cmdb.bindStorageBuffer(0, 4, clearTileIndirectArgs);
 		cmdb.bindStorageBuffer(0, 5, taskShadersIndirectArgs);
 
@@ -609,7 +609,7 @@ BufferOffsetRange ShadowMapping::createVetVisibilityPass(CString passName, const
 }
 
 void ShadowMapping::createDrawShadowsPass(const UVec4& viewport, const Mat4& viewProjMat, const Mat3x4& viewMat, const GpuVisibilityOutput& visOut,
-										  const GpuMeshletVisibilityOutput& meshletVisOut, const BufferOffsetRange& clearTileIndirectArgs,
+										  const GpuMeshletVisibilityOutput& meshletVisOut, const BufferView& clearTileIndirectArgs,
 										  const RenderTargetHandle hzbRt, CString passName, RenderGraphDescription& rgraph)
 {
 	ShadowSubpassInfo spass;
@@ -652,7 +652,7 @@ void ShadowMapping::createDrawShadowsPass(ConstWeakArray<ShadowSubpassInfo> subp
 	// Create the pass
 	GraphicsRenderPassDescription& pass = rgraph.newGraphicsRenderPass(passName);
 
-	const Bool loadFb = !(subpasses.getSize() == 1 && subpasses[0].m_clearTileIndirectArgs.m_buffer == nullptr);
+	const Bool loadFb = !(subpasses.getSize() == 1 && subpasses[0].m_clearTileIndirectArgs.isValid());
 
 	RenderTargetInfo smRti(m_runCtx.m_rt);
 	smRti.m_loadOperation = (loadFb) ? RenderTargetLoadOperation::kLoad : RenderTargetLoadOperation::kClear;
@@ -679,11 +679,10 @@ void ShadowMapping::createDrawShadowsPass(ConstWeakArray<ShadowSubpassInfo> subp
 				cmdb.bindShaderProgram(m_clearDepthGrProg.get());
 				cmdb.setDepthCompareOperation(CompareOperation::kAlways);
 
-				if(spass.m_clearTileIndirectArgs.m_buffer)
+				if(spass.m_clearTileIndirectArgs.isValid())
 				{
 
-					cmdb.drawIndirect(PrimitiveTopology::kTriangles, 1, spass.m_clearTileIndirectArgs.m_offset,
-									  spass.m_clearTileIndirectArgs.m_buffer);
+					cmdb.drawIndirect(PrimitiveTopology::kTriangles, spass.m_clearTileIndirectArgs);
 				}
 				else
 				{

+ 4 - 4
AnKi/Renderer/ShadowMapping.h

@@ -41,7 +41,7 @@ private:
 		UVec4 m_viewport;
 		Mat4 m_viewProjMat;
 		Mat3x4 m_viewMat;
-		BufferOffsetRange m_clearTileIndirectArgs;
+		BufferView m_clearTileIndirectArgs;
 		RenderTargetHandle m_hzbRt;
 	};
 
@@ -80,11 +80,11 @@ private:
 
 	void chooseDetail(const Vec3& cameraOrigin, const LightComponent& lightc, Vec2 lodDistances, U32& tileAllocatorHierarchy) const;
 
-	BufferOffsetRange createVetVisibilityPass(CString passName, const LightComponent& lightc, const GpuVisibilityOutput& visOut,
-											  RenderGraphDescription& rgraph) const;
+	BufferView createVetVisibilityPass(CString passName, const LightComponent& lightc, const GpuVisibilityOutput& visOut,
+									   RenderGraphDescription& rgraph) const;
 
 	void createDrawShadowsPass(const UVec4& viewport, const Mat4& viewProjMat, const Mat3x4& viewMat, const GpuVisibilityOutput& visOut,
-							   const GpuMeshletVisibilityOutput& meshletVisOut, const BufferOffsetRange& clearTileIndirectArgs,
+							   const GpuMeshletVisibilityOutput& meshletVisOut, const BufferView& clearTileIndirectArgs,
 							   const RenderTargetHandle hzbRt, CString passName, RenderGraphDescription& rgraph);
 
 	void createDrawShadowsPass(ConstWeakArray<ShadowSubpassInfo> subPasses, const GpuVisibilityOutput& visOut,

+ 63 - 57
AnKi/Renderer/Utils/Drawer.cpp

@@ -52,33 +52,33 @@ void RenderableDrawer::setState(const RenderableDrawerArguments& args, CommandBu
 	// More globals
 	cmdb.bindAllBindless(U32(MaterialSet::kBindless));
 	cmdb.bindSampler(U32(MaterialSet::kGlobal), U32(MaterialBinding::kTrilinearRepeatSampler), args.m_sampler);
-	cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kGpuScene), &GpuSceneBuffer::getSingleton().getBuffer(), 0, kMaxPtrSize);
+	cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kGpuScene), GpuSceneBuffer::getSingleton().getBufferView());
 
 #define ANKI_UNIFIED_GEOM_FORMAT(fmt, shaderType) \
-	cmdb.bindReadOnlyTexelBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kUnifiedGeometry_##fmt), \
-								 &UnifiedGeometryBuffer::getSingleton().getBuffer(), 0, kMaxPtrSize, Format::k##fmt);
+	cmdb.bindReadOnlyTexelBuffer( \
+		U32(MaterialSet::kGlobal), U32(MaterialBinding::kUnifiedGeometry_##fmt), \
+		BufferView(&UnifiedGeometryBuffer::getSingleton().getBuffer(), 0, \
+				   getAlignedRoundDown(getFormatInfo(Format::k##fmt).m_texelSize, UnifiedGeometryBuffer::getSingleton().getBuffer().getSize())), \
+		Format::k##fmt);
 #include <AnKi/Shaders/Include/UnifiedGeometryTypes.def.h>
 
 	cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kMeshletBoundingVolumes),
-						   UnifiedGeometryBuffer::getSingleton().getBufferOffsetRange());
+						   UnifiedGeometryBuffer::getSingleton().getBufferView());
 	cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kMeshletGeometryDescriptors),
-						   UnifiedGeometryBuffer::getSingleton().getBufferOffsetRange());
-	if(args.m_mesh.m_meshletGroupInstancesBuffer.m_range)
+						   UnifiedGeometryBuffer::getSingleton().getBufferView());
+	if(args.m_mesh.m_meshletGroupInstancesBuffer.isValid())
 	{
 		cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kMeshletGroups), args.m_mesh.m_meshletGroupInstancesBuffer);
 	}
-	cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kRenderables),
-						   GpuSceneArrays::Renderable::getSingleton().getBufferOffsetRange());
-	cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kMeshLods),
-						   GpuSceneArrays::MeshLod::getSingleton().getBufferOffsetRange());
-	cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kTransforms),
-						   GpuSceneArrays::Transform::getSingleton().getBufferOffsetRange());
+	cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kRenderables), GpuSceneArrays::Renderable::getSingleton().getBufferView());
+	cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kMeshLods), GpuSceneArrays::MeshLod::getSingleton().getBufferView());
+	cmdb.bindStorageBuffer(U32(MaterialSet::kGlobal), U32(MaterialBinding::kTransforms), GpuSceneArrays::Transform::getSingleton().getBufferView());
 	cmdb.bindTexture(U32(MaterialSet::kGlobal), U32(MaterialBinding::kHzbTexture),
 					 (args.m_hzbTexture) ? args.m_hzbTexture : &getRenderer().getDummyTextureView2d());
 	cmdb.bindSampler(U32(MaterialSet::kGlobal), U32(MaterialBinding::kNearestClampSampler), getRenderer().getSamplers().m_nearestNearestClamp.get());
 
 	// Misc
-	cmdb.bindIndexBuffer(&UnifiedGeometryBuffer::getSingleton().getBuffer(), 0, IndexType::kU16);
+	cmdb.bindIndexBuffer(UnifiedGeometryBuffer::getSingleton().getBufferView(), IndexType::kU16);
 }
 
 void RenderableDrawer::drawMdi(const RenderableDrawerArguments& args, CommandBuffer& cmdb)
@@ -127,56 +127,62 @@ void RenderableDrawer::drawMdi(const RenderableDrawerArguments& args, CommandBuf
 				const UVec4 firstPayload(args.m_mesh.m_bucketMeshletGroupInstanceRanges[bucketIdx].getFirstInstance());
 				cmdb.setPushConstants(&firstPayload, sizeof(firstPayload));
 
-				cmdb.drawMeshTasksIndirect(args.m_mesh.m_taskShaderIndirectArgsBuffer.m_buffer,
-										   args.m_mesh.m_taskShaderIndirectArgsBuffer.m_offset + sizeof(DispatchIndirectArgs) * bucketIdx);
+				cmdb.drawMeshTasksIndirect(BufferView(
+					&args.m_mesh.m_taskShaderIndirectArgsBuffer.getBuffer(),
+					args.m_mesh.m_taskShaderIndirectArgsBuffer.getOffset() + sizeof(DispatchIndirectArgs) * bucketIdx, sizeof(DispatchIndirectArgs)));
 			}
 			else if(meshlets)
 			{
-				cmdb.bindVertexBuffer(0, args.m_softwareMesh.m_meshletInstancesBuffer.m_buffer,
-									  args.m_softwareMesh.m_meshletInstancesBuffer.m_offset
-										  + args.m_softwareMesh.m_bucketMeshletInstanceRanges[bucketIdx].getFirstInstance()
-												* sizeof(GpuSceneMeshletInstance),
-									  sizeof(GpuSceneMeshletInstance), VertexStepRate::kInstance);
-
-				cmdb.drawIndirect(PrimitiveTopology::kTriangles, 1,
-								  args.m_softwareMesh.m_drawIndirectArgsBuffer.m_offset + sizeof(DrawIndirectArgs) * bucketIdx,
-								  args.m_softwareMesh.m_drawIndirectArgsBuffer.m_buffer);
+				const InstanceRange& instanceRange = args.m_softwareMesh.m_bucketMeshletInstanceRanges[bucketIdx];
+				const BufferView vertBufferView = BufferView(args.m_softwareMesh.m_meshletInstancesBuffer)
+													  .incrementOffset(instanceRange.getFirstInstance() * sizeof(GpuSceneMeshletInstance))
+													  .setRange(instanceRange.getInstanceCount() * sizeof(GpuSceneMeshletInstance));
+				cmdb.bindVertexBuffer(0, vertBufferView, sizeof(GpuSceneMeshletInstance), VertexStepRate::kInstance);
+
+				const BufferView indirectArgsBuffView = BufferView(args.m_softwareMesh.m_drawIndirectArgsBuffer)
+															.incrementOffset(sizeof(DrawIndirectArgs) * bucketIdx)
+															.setRange(sizeof(DrawIndirectArgs));
+				cmdb.drawIndirect(PrimitiveTopology::kTriangles, indirectArgsBuffView);
+			}
+			else if(state.m_indexedDrawcall)
+			{
+				// Legacy
+
+				const InstanceRange& instanceRange = args.m_legacy.m_bucketRenderableInstanceRanges[bucketIdx];
+				const U32 maxDrawCount = instanceRange.getInstanceCount();
+
+				const BufferView vertBufferView = BufferView(args.m_legacy.m_renderableInstancesBuffer)
+													  .incrementOffset(instanceRange.getFirstInstance() * sizeof(GpuSceneRenderableInstance))
+													  .setRange(instanceRange.getInstanceCount() * sizeof(GpuSceneRenderableInstance));
+				cmdb.bindVertexBuffer(0, vertBufferView, sizeof(GpuSceneRenderableInstance), VertexStepRate::kInstance);
+
+				const BufferView indirectArgsBuffView = BufferView(args.m_legacy.m_drawIndexedIndirectArgsBuffer)
+															.incrementOffset(instanceRange.getFirstInstance() * sizeof(DrawIndexedIndirectArgs))
+															.setRange(instanceRange.getInstanceCount() * sizeof(DrawIndexedIndirectArgs));
+				const BufferView mdiCountBuffView =
+					BufferView(args.m_legacy.m_mdiDrawCountsBuffer).incrementOffset(sizeof(U32) * bucketIdx).setRange(sizeof(U32));
+				cmdb.drawIndexedIndirectCount(state.m_primitiveTopology, indirectArgsBuffView, sizeof(DrawIndexedIndirectArgs), mdiCountBuffView,
+											  maxDrawCount);
 			}
 			else
 			{
-				const U32 maxDrawCount = args.m_legacy.m_bucketRenderableInstanceRanges[bucketIdx].getInstanceCount();
-
-				if(state.m_indexedDrawcall)
-				{
-					cmdb.bindVertexBuffer(0, args.m_legacy.m_renderableInstancesBuffer.m_buffer,
-										  args.m_legacy.m_renderableInstancesBuffer.m_offset
-											  + args.m_legacy.m_bucketRenderableInstanceRanges[bucketIdx].getFirstInstance()
-													* sizeof(GpuSceneRenderableInstance),
-										  sizeof(GpuSceneRenderableInstance), VertexStepRate::kInstance);
-
-					cmdb.drawIndexedIndirectCount(state.m_primitiveTopology, args.m_legacy.m_drawIndexedIndirectArgsBuffer.m_buffer,
-												  args.m_legacy.m_drawIndexedIndirectArgsBuffer.m_offset
-													  + args.m_legacy.m_bucketRenderableInstanceRanges[bucketIdx].getFirstInstance()
-															* sizeof(DrawIndexedIndirectArgs),
-												  sizeof(DrawIndexedIndirectArgs), args.m_legacy.m_mdiDrawCountsBuffer.m_buffer,
-												  args.m_legacy.m_mdiDrawCountsBuffer.m_offset + sizeof(U32) * bucketIdx, maxDrawCount);
-				}
-				else
-				{
-					cmdb.bindVertexBuffer(0, args.m_legacy.m_renderableInstancesBuffer.m_buffer,
-										  args.m_legacy.m_renderableInstancesBuffer.m_offset
-											  + args.m_legacy.m_bucketRenderableInstanceRanges[bucketIdx].getFirstInstance()
-													* sizeof(GpuSceneRenderableInstance),
-										  sizeof(GpuSceneRenderableInstance), VertexStepRate::kInstance);
-
-					// Yes, the DrawIndexedIndirectArgs is intentional
-					cmdb.drawIndirectCount(state.m_primitiveTopology, args.m_legacy.m_drawIndexedIndirectArgsBuffer.m_buffer,
-										   args.m_legacy.m_drawIndexedIndirectArgsBuffer.m_offset
-											   + args.m_legacy.m_bucketRenderableInstanceRanges[bucketIdx].getFirstInstance()
-													 * sizeof(DrawIndexedIndirectArgs),
-										   sizeof(DrawIndexedIndirectArgs), args.m_legacy.m_mdiDrawCountsBuffer.m_buffer,
-										   args.m_legacy.m_mdiDrawCountsBuffer.m_offset + sizeof(U32) * bucketIdx, maxDrawCount);
-				}
+				// Legacy
+
+				const InstanceRange& instanceRange = args.m_legacy.m_bucketRenderableInstanceRanges[bucketIdx];
+				const U32 maxDrawCount = instanceRange.getInstanceCount();
+
+				const BufferView vertBufferView = BufferView(args.m_legacy.m_renderableInstancesBuffer)
+													  .incrementOffset(instanceRange.getFirstInstance() * sizeof(GpuSceneRenderableInstance))
+													  .setRange(instanceRange.getInstanceCount() * sizeof(GpuSceneRenderableInstance));
+				cmdb.bindVertexBuffer(0, vertBufferView, sizeof(GpuSceneRenderableInstance), VertexStepRate::kInstance);
+
+				// Yes, the DrawIndexedIndirectArgs is intentional
+				const BufferView indirectArgsBuffView = BufferView(args.m_legacy.m_drawIndexedIndirectArgsBuffer)
+															.incrementOffset(instanceRange.getFirstInstance() * sizeof(DrawIndexedIndirectArgs))
+															.setRange(instanceRange.getInstanceCount() * sizeof(DrawIndexedIndirectArgs));
+				const BufferView countBuffView =
+					BufferView(args.m_legacy.m_mdiDrawCountsBuffer).incrementOffset(sizeof(U32) * bucketIdx).setRange(sizeof(U32));
+				cmdb.drawIndirectCount(state.m_primitiveTopology, indirectArgsBuffView, sizeof(DrawIndexedIndirectArgs), countBuffView, maxDrawCount);
 			}
 		});
 

+ 7 - 7
AnKi/Renderer/Utils/Drawer.h

@@ -35,9 +35,9 @@ public:
 	class
 	{
 	public:
-		BufferOffsetRange m_mdiDrawCountsBuffer;
-		BufferOffsetRange m_renderableInstancesBuffer;
-		BufferOffsetRange m_drawIndexedIndirectArgsBuffer;
+		BufferView m_mdiDrawCountsBuffer;
+		BufferView m_renderableInstancesBuffer;
+		BufferView m_drawIndexedIndirectArgsBuffer;
 
 		ConstWeakArray<InstanceRange> m_bucketRenderableInstanceRanges;
 	} m_legacy; ///< Legacy vertex flow
@@ -45,8 +45,8 @@ public:
 	class
 	{
 	public:
-		BufferOffsetRange m_taskShaderIndirectArgsBuffer;
-		BufferOffsetRange m_meshletGroupInstancesBuffer;
+		BufferView m_taskShaderIndirectArgsBuffer;
+		BufferView m_meshletGroupInstancesBuffer;
 
 		ConstWeakArray<InstanceRange> m_bucketMeshletGroupInstanceRanges;
 	} m_mesh;
@@ -54,8 +54,8 @@ public:
 	class
 	{
 	public:
-		BufferOffsetRange m_meshletInstancesBuffer;
-		BufferOffsetRange m_drawIndirectArgsBuffer;
+		BufferView m_meshletInstancesBuffer;
+		BufferView m_drawIndirectArgsBuffer;
 
 		ConstWeakArray<InstanceRange> m_bucketMeshletInstanceRanges;
 	} m_softwareMesh;

+ 69 - 61
AnKi/Renderer/Utils/GpuVisibility.cpp

@@ -36,9 +36,9 @@ static NumericCVar<PtrSize> g_maxMeshletGroupMemoryPerTest(CVarSubsystem::kRende
 static StatCounter g_gpuVisMemoryAllocatedStatVar(StatCategory::kRenderer, "GPU visibility mem",
 												  StatFlag::kBytes | StatFlag::kMainThreadUpdates | StatFlag::kZeroEveryFrame);
 
-static BufferOffsetRange allocateTransientGpuMem(PtrSize size)
+static BufferView allocateTransientGpuMem(PtrSize size)
 {
-	BufferOffsetRange out = {};
+	BufferView out = {};
 
 	if(size)
 	{
@@ -260,9 +260,9 @@ void GpuVisibility::populateRenderGraphInternal(Bool distanceBased, BaseGpuVisib
 			mem.m_meshletGroupsInstancesBuffer =
 				allocateTransientGpuMem(maxTotalMemReq.m_meshletGroupInstanceCount * sizeof(GpuSceneMeshletGroupInstance));
 
-			mem.m_bufferDepedency =
-				rgraph.importBuffer(BufferUsageBit::kNone, (mem.m_drawIndexedIndirectArgsBuffer.m_buffer) ? mem.m_drawIndexedIndirectArgsBuffer
-																										  : mem.m_meshletGroupsInstancesBuffer);
+			mem.m_bufferDepedency = rgraph.importBuffer((mem.m_drawIndexedIndirectArgsBuffer.isValid()) ? mem.m_drawIndexedIndirectArgsBuffer
+																										: mem.m_meshletGroupsInstancesBuffer,
+														BufferUsageBit::kNone);
 		}
 
 		if(getRenderer().runSoftwareMeshletRendering())
@@ -275,7 +275,7 @@ void GpuVisibility::populateRenderGraphInternal(Bool distanceBased, BaseGpuVisib
 
 				mem.m_meshletInstancesBuffer = allocateTransientGpuMem(maxTotalMemReq.m_meshletInstanceCount * sizeof(GpuSceneMeshletInstance));
 
-				mem.m_bufferDepedency = rgraph.importBuffer(BufferUsageBit::kNone, mem.m_meshletInstancesBuffer);
+				mem.m_bufferDepedency = rgraph.importBuffer(mem.m_meshletInstancesBuffer, BufferUsageBit::kNone);
 			}
 		}
 	}
@@ -284,16 +284,22 @@ void GpuVisibility::populateRenderGraphInternal(Bool distanceBased, BaseGpuVisib
 	const MemoryRequirements& req = m_runCtx.m_totalMemRequirements[in.m_technique];
 	const PersistentMemory& mem = m_runCtx.m_persistentMem[m_runCtx.m_populateRenderGraphCallCount++ % m_runCtx.m_persistentMem.getSize()];
 
-	out.m_legacy.m_drawIndexedIndirectArgsBuffer = mem.m_drawIndexedIndirectArgsBuffer;
-	out.m_legacy.m_drawIndexedIndirectArgsBuffer.m_range = req.m_renderableInstanceCount * sizeof(DrawIndexedIndirectArgs);
+	out.m_legacy.m_drawIndexedIndirectArgsBuffer =
+		(req.m_renderableInstanceCount)
+			? BufferView(mem.m_drawIndexedIndirectArgsBuffer).setRange(req.m_renderableInstanceCount * sizeof(DrawIndexedIndirectArgs))
+			: BufferView();
 
-	out.m_legacy.m_renderableInstancesBuffer = mem.m_renderableInstancesBuffer;
-	out.m_legacy.m_renderableInstancesBuffer.m_range = req.m_renderableInstanceCount * sizeof(GpuSceneRenderableInstance);
+	out.m_legacy.m_renderableInstancesBuffer =
+		(req.m_renderableInstanceCount)
+			? BufferView(mem.m_renderableInstancesBuffer).setRange(req.m_renderableInstanceCount * sizeof(GpuSceneRenderableInstance))
+			: BufferView();
 
 	out.m_legacy.m_mdiDrawCountsBuffer = allocateTransientGpuMem(sizeof(U32) * bucketCount);
 
-	out.m_mesh.m_meshletGroupInstancesBuffer = mem.m_meshletGroupsInstancesBuffer;
-	out.m_mesh.m_meshletGroupInstancesBuffer.m_range = req.m_meshletGroupInstanceCount * sizeof(GpuSceneMeshletGroupInstance);
+	out.m_mesh.m_meshletGroupInstancesBuffer =
+		(req.m_meshletGroupInstanceCount)
+			? BufferView(mem.m_meshletGroupsInstancesBuffer).setRange(req.m_meshletGroupInstanceCount * sizeof(GpuSceneMeshletGroupInstance))
+			: BufferView();
 
 	out.m_mesh.m_taskShaderIndirectArgsBuffer = allocateTransientGpuMem(bucketCount * sizeof(DispatchIndirectArgs));
 
@@ -313,7 +319,7 @@ void GpuVisibility::populateRenderGraphInternal(Bool distanceBased, BaseGpuVisib
 	out.m_mesh.m_bucketMeshletGroupInstanceRanges = m_runCtx.m_meshletGroupInstanceRanges[in.m_technique];
 
 	// Zero some stuff
-	const BufferHandle zeroStuffDependency = rgraph.importBuffer(BufferUsageBit::kNone, out.m_legacy.m_mdiDrawCountsBuffer);
+	const BufferHandle zeroStuffDependency = rgraph.importBuffer(out.m_legacy.m_mdiDrawCountsBuffer, BufferUsageBit::kNone);
 	{
 		Array<Char, 128> passName;
 		snprintf(passName.getBegin(), passName.getSizeInBytes(), "GPU vis zero: %s", in.m_passesName.cstr());
@@ -328,24 +334,24 @@ void GpuVisibility::populateRenderGraphInternal(Bool distanceBased, BaseGpuVisib
 			cmdb.fillBuffer(out.m_legacy.m_mdiDrawCountsBuffer, 0);
 			cmdb.popDebugMarker();
 
-			if(out.m_mesh.m_taskShaderIndirectArgsBuffer.m_buffer)
+			if(out.m_mesh.m_taskShaderIndirectArgsBuffer.isValid())
 			{
 				cmdb.pushDebugMarker("Task shader indirect args", Vec3(1.0f, 1.0f, 1.0f));
 				cmdb.fillBuffer(out.m_mesh.m_taskShaderIndirectArgsBuffer, 0);
 				cmdb.popDebugMarker();
 			}
 
-			if(out.m_visiblesHashBuffer.m_buffer)
+			if(out.m_visiblesHashBuffer.isValid())
 			{
 				cmdb.pushDebugMarker("Visibles hash", Vec3(1.0f, 1.0f, 1.0f));
 				cmdb.fillBuffer(out.m_visiblesHashBuffer, 0);
 				cmdb.popDebugMarker();
 			}
 
-			if(out.m_visibleAaabbIndicesBuffer.m_buffer)
+			if(out.m_visibleAaabbIndicesBuffer.isValid())
 			{
 				cmdb.pushDebugMarker("Visible AABB indices", Vec3(1.0f, 1.0f, 1.0f));
-				cmdb.fillBuffer(out.m_visibleAaabbIndicesBuffer.m_buffer, out.m_visibleAaabbIndicesBuffer.m_offset, sizeof(U32), 0);
+				cmdb.fillBuffer(BufferView(out.m_visibleAaabbIndicesBuffer).setRange(sizeof(U32)), 0);
 				cmdb.popDebugMarker();
 			}
 		});
@@ -374,16 +380,16 @@ void GpuVisibility::populateRenderGraphInternal(Bool distanceBased, BaseGpuVisib
 				  technique = in.m_technique, out](RenderPassWorkContext& rpass) {
 		CommandBuffer& cmdb = *rpass.m_commandBuffer;
 
-		const Bool gatherAabbIndices = out.m_visibleAaabbIndicesBuffer.m_buffer != nullptr;
-		const Bool genHash = out.m_visiblesHashBuffer.m_buffer != nullptr;
+		const Bool gatherAabbIndices = out.m_visibleAaabbIndicesBuffer.isValid();
+		const Bool genHash = out.m_visiblesHashBuffer.isValid();
 
 		U32 gatherType = 0;
-		if(out.m_mesh.m_meshletGroupInstancesBuffer.m_range > 0)
+		if(out.m_mesh.m_meshletGroupInstancesBuffer.isValid())
 		{
 			gatherType |= 2u;
 		}
 
-		if(out.m_legacy.m_renderableInstancesBuffer.m_range > 0)
+		if(out.m_legacy.m_renderableInstancesBuffer.isValid())
 		{
 			gatherType |= 1u;
 		}
@@ -398,20 +404,20 @@ void GpuVisibility::populateRenderGraphInternal(Bool distanceBased, BaseGpuVisib
 			cmdb.bindShaderProgram(m_distGrProgs[gatherAabbIndices][genHash][gatherType - 1u].get());
 		}
 
-		BufferOffsetRange aabbsBuffer;
+		BufferView aabbsBuffer;
 		U32 aabbCount = 0;
 		switch(technique)
 		{
 		case RenderingTechnique::kGBuffer:
-			aabbsBuffer = GpuSceneArrays::RenderableBoundingVolumeGBuffer::getSingleton().getBufferOffsetRange();
+			aabbsBuffer = GpuSceneArrays::RenderableBoundingVolumeGBuffer::getSingleton().getBufferView();
 			aabbCount = GpuSceneArrays::RenderableBoundingVolumeGBuffer::getSingleton().getElementCount();
 			break;
 		case RenderingTechnique::kDepth:
-			aabbsBuffer = GpuSceneArrays::RenderableBoundingVolumeDepth::getSingleton().getBufferOffsetRange();
+			aabbsBuffer = GpuSceneArrays::RenderableBoundingVolumeDepth::getSingleton().getBufferView();
 			aabbCount = GpuSceneArrays::RenderableBoundingVolumeDepth::getSingleton().getElementCount();
 			break;
 		case RenderingTechnique::kForward:
-			aabbsBuffer = GpuSceneArrays::RenderableBoundingVolumeForward::getSingleton().getBufferOffsetRange();
+			aabbsBuffer = GpuSceneArrays::RenderableBoundingVolumeForward::getSingleton().getBufferView();
 			aabbCount = GpuSceneArrays::RenderableBoundingVolumeForward::getSingleton().getElementCount();
 			break;
 		default:
@@ -419,10 +425,10 @@ void GpuVisibility::populateRenderGraphInternal(Bool distanceBased, BaseGpuVisib
 		}
 
 		cmdb.bindStorageBuffer(0, 0, aabbsBuffer);
-		cmdb.bindStorageBuffer(0, 1, GpuSceneArrays::Renderable::getSingleton().getBufferOffsetRange());
-		cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::MeshLod::getSingleton().getBufferOffsetRange());
-		cmdb.bindStorageBuffer(0, 3, GpuSceneArrays::Transform::getSingleton().getBufferOffsetRange());
-		cmdb.bindStorageBuffer(0, 4, GpuSceneBuffer::getSingleton().getBufferOffsetRange());
+		cmdb.bindStorageBuffer(0, 1, GpuSceneArrays::Renderable::getSingleton().getBufferView());
+		cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::MeshLod::getSingleton().getBufferView());
+		cmdb.bindStorageBuffer(0, 3, GpuSceneArrays::Transform::getSingleton().getBufferView());
+		cmdb.bindStorageBuffer(0, 4, GpuSceneBuffer::getSingleton().getBufferView());
 		if(gatherType & 1u)
 		{
 			cmdb.bindStorageBuffer(0, 5, out.m_legacy.m_renderableInstancesBuffer);
@@ -514,7 +520,7 @@ void GpuVisibility::populateRenderGraphMeshletInternal(Bool passthrough, BaseGpu
 {
 	RenderGraphDescription& rgraph = *in.m_rgraph;
 
-	if(in.m_taskShaderIndirectArgsBuffer.m_buffer == nullptr) [[unlikely]]
+	if(!in.m_taskShaderIndirectArgsBuffer.isValid()) [[unlikely]]
 	{
 		// Early exit
 		return;
@@ -551,13 +557,14 @@ void GpuVisibility::populateRenderGraphMeshletInternal(Bool passthrough, BaseGpu
 
 	out.m_drawIndirectArgsBuffer = allocateTransientGpuMem(sizeof(DrawIndirectArgs) * bucketCount);
 
-	out.m_meshletInstancesBuffer = mem.m_meshletInstancesBuffer;
-	out.m_meshletInstancesBuffer.m_range = m_runCtx.m_totalMemRequirements[in.m_technique].m_meshletInstanceCount * sizeof(GpuSceneMeshletInstance);
+	out.m_meshletInstancesBuffer =
+		BufferView(mem.m_meshletInstancesBuffer)
+			.setRange(m_runCtx.m_totalMemRequirements[in.m_technique].m_meshletInstanceCount * sizeof(GpuSceneMeshletInstance));
 
 	out.m_bucketMeshletInstanceRanges = m_runCtx.m_meshletInstanceRanges[in.m_technique];
 
 	// Zero some stuff
-	const BufferHandle indirectArgsDep = rgraph.importBuffer(BufferUsageBit::kNone, out.m_drawIndirectArgsBuffer);
+	const BufferHandle indirectArgsDep = rgraph.importBuffer(out.m_drawIndirectArgsBuffer, BufferUsageBit::kNone);
 	{
 		Array<Char, 128> passName;
 		snprintf(passName.getBegin(), passName.getSizeInBytes(), "GPU meshlet vis zero: %s", in.m_passesName.cstr());
@@ -606,10 +613,10 @@ void GpuVisibility::populateRenderGraphMeshletInternal(Bool passthrough, BaseGpu
 			cmdb.bindShaderProgram(m_meshletCullingGrProgs[hasHzb][isPassthrough].get());
 
 			cmdb.bindStorageBuffer(0, 0, meshletGroupInstancesBuffer);
-			cmdb.bindStorageBuffer(0, 1, GpuSceneArrays::Renderable::getSingleton().getBufferOffsetRange());
-			cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::MeshLod::getSingleton().getBufferOffsetRange());
-			cmdb.bindStorageBuffer(0, 3, GpuSceneArrays::Transform::getSingleton().getBufferOffsetRange());
-			cmdb.bindStorageBuffer(0, 4, UnifiedGeometryBuffer::getSingleton().getBufferOffsetRange());
+			cmdb.bindStorageBuffer(0, 1, GpuSceneArrays::Renderable::getSingleton().getBufferView());
+			cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::MeshLod::getSingleton().getBufferView());
+			cmdb.bindStorageBuffer(0, 3, GpuSceneArrays::Transform::getSingleton().getBufferView());
+			cmdb.bindStorageBuffer(0, 4, UnifiedGeometryBuffer::getSingleton().getBufferView());
 			cmdb.bindStorageBuffer(0, 5, out.m_drawIndirectArgsBuffer);
 			cmdb.bindStorageBuffer(0, 6, out.m_meshletInstancesBuffer);
 			if(hasHzb)
@@ -644,7 +651,8 @@ void GpuVisibility::populateRenderGraphMeshletInternal(Bool passthrough, BaseGpu
 			consts.m_meshletCount = out.m_bucketMeshletInstanceRanges[i].getInstanceCount();
 			cmdb.setPushConstants(&consts, sizeof(consts));
 
-			cmdb.dispatchComputeIndirect(computeIndirectArgs.m_buffer, computeIndirectArgs.m_offset + i * sizeof(DispatchIndirectArgs));
+			cmdb.dispatchComputeIndirect(
+				BufferView(computeIndirectArgs).incrementOffset(i * sizeof(DispatchIndirectArgs)).setRange(sizeof(DispatchIndirectArgs)));
 		};
 	});
 }
@@ -701,14 +709,14 @@ void GpuVisibilityNonRenderables::populateRenderGraph(GpuVisibilityNonRenderable
 		U32* count;
 		out.m_visiblesBuffer = RebarTransientMemoryPool::getSingleton().allocateFrame(sizeof(U32), count);
 		*count = 0;
-		out.m_visiblesBufferHandle = rgraph.importBuffer(BufferUsageBit::kNone, out.m_visiblesBuffer);
+		out.m_visiblesBufferHandle = rgraph.importBuffer(out.m_visiblesBuffer, BufferUsageBit::kNone);
 
 		return;
 	}
 
-	if(in.m_cpuFeedbackBuffer.m_buffer)
+	if(in.m_cpuFeedbackBuffer.isValid())
 	{
-		ANKI_ASSERT(in.m_cpuFeedbackBuffer.m_range == sizeof(U32) * (objCount * 2 + 1));
+		ANKI_ASSERT(in.m_cpuFeedbackBuffer.getRange() == sizeof(U32) * (objCount * 2 + 1));
 	}
 
 	const Bool firstRunInFrame = m_lastFrameIdx != getRenderer().getFrameCount();
@@ -733,14 +741,14 @@ void GpuVisibilityNonRenderables::populateRenderGraph(GpuVisibilityNonRenderable
 		buffInit.m_usage = BufferUsageBit::kStorageComputeWrite | BufferUsageBit::kStorageComputeRead | BufferUsageBit::kTransferDestination;
 		m_counterBuffer = GrManager::getSingleton().newBuffer(buffInit);
 
-		m_counterBufferZeroingHandle = rgraph.importBuffer(m_counterBuffer.get(), buffInit.m_usage, 0, kMaxPtrSize);
+		m_counterBufferZeroingHandle = rgraph.importBuffer(BufferView(m_counterBuffer.get()), buffInit.m_usage);
 
 		ComputeRenderPassDescription& pass = rgraph.newComputeRenderPass("GpuVisibilityNonRenderablesClearCounterBuffer");
 
 		pass.newBufferDependency(m_counterBufferZeroingHandle, BufferUsageBit::kTransferDestination);
 
 		pass.setWork([counterBuffer = m_counterBuffer](RenderPassWorkContext& rgraph) {
-			rgraph.m_commandBuffer->fillBuffer(counterBuffer.get(), 0, kMaxPtrSize, 0);
+			rgraph.m_commandBuffer->fillBuffer(BufferView(counterBuffer.get()), 0);
 		});
 
 		m_counterBufferOffset = 0;
@@ -752,7 +760,7 @@ void GpuVisibilityNonRenderables::populateRenderGraph(GpuVisibilityNonRenderable
 
 	// Allocate memory for the result
 	out.m_visiblesBuffer = allocateTransientGpuMem((objCount + 1) * sizeof(U32));
-	out.m_visiblesBufferHandle = rgraph.importBuffer(BufferUsageBit::kNone, out.m_visiblesBuffer);
+	out.m_visiblesBufferHandle = rgraph.importBuffer(out.m_visiblesBuffer, BufferUsageBit::kNone);
 
 	// Create the renderpass
 	ComputeRenderPassDescription& pass = rgraph.newComputeRenderPass(in.m_passesName);
@@ -775,27 +783,27 @@ void GpuVisibilityNonRenderables::populateRenderGraph(GpuVisibilityNonRenderable
 				  objCount](RenderPassWorkContext& rgraph) {
 		CommandBuffer& cmdb = *rgraph.m_commandBuffer;
 
-		const Bool needsFeedback = feedbackBuffer.m_buffer != nullptr;
+		const Bool needsFeedback = feedbackBuffer.isValid();
 
 		cmdb.bindShaderProgram(m_grProgs[0][objType][needsFeedback].get());
 
-		BufferOffsetRange objBuffer;
+		BufferView objBuffer;
 		switch(objType)
 		{
 		case GpuSceneNonRenderableObjectType::kLight:
-			objBuffer = GpuSceneArrays::Light::getSingleton().getBufferOffsetRange();
+			objBuffer = GpuSceneArrays::Light::getSingleton().getBufferView();
 			break;
 		case GpuSceneNonRenderableObjectType::kDecal:
-			objBuffer = GpuSceneArrays::Decal::getSingleton().getBufferOffsetRange();
+			objBuffer = GpuSceneArrays::Decal::getSingleton().getBufferView();
 			break;
 		case GpuSceneNonRenderableObjectType::kFogDensityVolume:
-			objBuffer = GpuSceneArrays::FogDensityVolume::getSingleton().getBufferOffsetRange();
+			objBuffer = GpuSceneArrays::FogDensityVolume::getSingleton().getBufferView();
 			break;
 		case GpuSceneNonRenderableObjectType::kGlobalIlluminationProbe:
-			objBuffer = GpuSceneArrays::GlobalIlluminationProbe::getSingleton().getBufferOffsetRange();
+			objBuffer = GpuSceneArrays::GlobalIlluminationProbe::getSingleton().getBufferView();
 			break;
 		case GpuSceneNonRenderableObjectType::kReflectionProbe:
-			objBuffer = GpuSceneArrays::ReflectionProbe::getSingleton().getBufferOffsetRange();
+			objBuffer = GpuSceneArrays::ReflectionProbe::getSingleton().getBufferView();
 			break;
 		default:
 			ANKI_ASSERT(0);
@@ -812,11 +820,11 @@ void GpuVisibilityNonRenderables::populateRenderGraph(GpuVisibilityNonRenderable
 		cmdb.setPushConstants(&unis, sizeof(unis));
 
 		rgraph.bindStorageBuffer(0, 1, visibleIndicesBuffHandle);
-		cmdb.bindStorageBuffer(0, 2, counterBuffer.get(), counterBufferOffset, sizeof(U32) * kCountersPerDispatch);
+		cmdb.bindStorageBuffer(0, 2, BufferView(counterBuffer.get(), counterBufferOffset, sizeof(U32) * kCountersPerDispatch));
 
 		if(needsFeedback)
 		{
-			cmdb.bindStorageBuffer(0, 3, feedbackBuffer.m_buffer, feedbackBuffer.m_offset, feedbackBuffer.m_range);
+			cmdb.bindStorageBuffer(0, 3, feedbackBuffer);
 		}
 
 		dispatchPPCompute(cmdb, 64, 1, objCount, 1);
@@ -854,11 +862,11 @@ void GpuVisibilityAccelerationStructures::pupulateRenderGraph(GpuVisibilityAccel
 	const U32 aabbCount = GpuSceneArrays::RenderableBoundingVolumeRt::getSingleton().getElementCount();
 
 	out.m_instancesBuffer = allocateTransientGpuMem(aabbCount * sizeof(AccelerationStructureInstance));
-	out.m_someBufferHandle = rgraph.importBuffer(BufferUsageBit::kStorageComputeWrite, out.m_instancesBuffer);
+	out.m_someBufferHandle = rgraph.importBuffer(out.m_instancesBuffer, BufferUsageBit::kStorageComputeWrite);
 
 	out.m_renderableIndicesBuffer = allocateTransientGpuMem((aabbCount + 1) * sizeof(U32));
 
-	const BufferOffsetRange zeroInstancesDispatchArgsBuff = allocateTransientGpuMem(sizeof(DispatchIndirectArgs));
+	const BufferView zeroInstancesDispatchArgsBuff = allocateTransientGpuMem(sizeof(DispatchIndirectArgs));
 
 	// Create vis pass
 	{
@@ -893,13 +901,13 @@ void GpuVisibilityAccelerationStructures::pupulateRenderGraph(GpuVisibilityAccel
 
 			cmdb.setPushConstants(&unis, sizeof(unis));
 
-			cmdb.bindStorageBuffer(0, 0, GpuSceneArrays::RenderableBoundingVolumeRt::getSingleton().getBufferOffsetRange());
-			cmdb.bindStorageBuffer(0, 1, GpuSceneArrays::Renderable::getSingleton().getBufferOffsetRange());
-			cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::MeshLod::getSingleton().getBufferOffsetRange());
-			cmdb.bindStorageBuffer(0, 3, GpuSceneArrays::Transform::getSingleton().getBufferOffsetRange());
+			cmdb.bindStorageBuffer(0, 0, GpuSceneArrays::RenderableBoundingVolumeRt::getSingleton().getBufferView());
+			cmdb.bindStorageBuffer(0, 1, GpuSceneArrays::Renderable::getSingleton().getBufferView());
+			cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::MeshLod::getSingleton().getBufferView());
+			cmdb.bindStorageBuffer(0, 3, GpuSceneArrays::Transform::getSingleton().getBufferView());
 			cmdb.bindStorageBuffer(0, 4, instancesBuff);
 			cmdb.bindStorageBuffer(0, 5, indicesBuff);
-			cmdb.bindStorageBuffer(0, 6, m_counterBuffer.get(), 0, sizeof(U32) * 2);
+			cmdb.bindStorageBuffer(0, 6, BufferView(m_counterBuffer.get(), 0, sizeof(U32) * 2));
 			cmdb.bindStorageBuffer(0, 7, zeroInstancesDispatchArgsBuff);
 
 			const U32 aabbCount = GpuSceneArrays::RenderableBoundingVolumeRt::getSingleton().getElementCount();
@@ -925,7 +933,7 @@ void GpuVisibilityAccelerationStructures::pupulateRenderGraph(GpuVisibilityAccel
 			cmdb.bindStorageBuffer(0, 0, indicesBuff);
 			cmdb.bindStorageBuffer(0, 1, instancesBuff);
 
-			cmdb.dispatchComputeIndirect(zeroInstancesDispatchArgsBuff.m_buffer, zeroInstancesDispatchArgsBuff.m_offset);
+			cmdb.dispatchComputeIndirect(zeroInstancesDispatchArgsBuff);
 		});
 	}
 }

+ 20 - 20
AnKi/Renderer/Utils/GpuVisibility.h

@@ -87,9 +87,9 @@ public:
 	class
 	{
 	public:
-		BufferOffsetRange m_renderableInstancesBuffer; ///< An array of GpuSceneRenderableInstance.
-		BufferOffsetRange m_mdiDrawCountsBuffer; ///< An array of U32, one for each render state bucket (even those that use task/mesh flow).
-		BufferOffsetRange m_drawIndexedIndirectArgsBuffer; ///< Array of DrawIndexedIndirectArgs or DrawIndirectArgs.
+		BufferView m_renderableInstancesBuffer; ///< An array of GpuSceneRenderableInstance.
+		BufferView m_mdiDrawCountsBuffer; ///< An array of U32, one for each render state bucket (even those that use task/mesh flow).
+		BufferView m_drawIndexedIndirectArgsBuffer; ///< Array of DrawIndexedIndirectArgs or DrawIndirectArgs.
 
 		/// Defines the element sub-ranges in the m_renderableInstancesBuffer an m_drawIndexedIndirectArgsBuffer per render state bucket.
 		ConstWeakArray<InstanceRange> m_bucketRenderableInstanceRanges;
@@ -98,16 +98,16 @@ public:
 	class
 	{
 	public:
-		BufferOffsetRange m_taskShaderIndirectArgsBuffer; ///< An array of DispatchIndirectArgs, one for each render state bucket.
-		BufferOffsetRange m_meshletGroupInstancesBuffer; ///< Array with GpuSceneMeshletGroupInstance.
+		BufferView m_taskShaderIndirectArgsBuffer; ///< An array of DispatchIndirectArgs, one for each render state bucket.
+		BufferView m_meshletGroupInstancesBuffer; ///< Array with GpuSceneMeshletGroupInstance.
 
 		/// Defines the element sub-ranges in the m_meshletGroupInstancesBuffer per render state bucket.
 		ConstWeakArray<InstanceRange> m_bucketMeshletGroupInstanceRanges;
 	} m_mesh; ///< S/W meshlets or H/W mesh shading.
 
-	BufferOffsetRange m_visibleAaabbIndicesBuffer; ///< [Optional] Indices to the AABB buffer. The 1st element is the count.
+	BufferView m_visibleAaabbIndicesBuffer; ///< [Optional] Indices to the AABB buffer. The 1st element is the count.
 
-	BufferOffsetRange m_visiblesHashBuffer; ///< [Optional] A hash of the visible objects. Used to conditionaly not perform shadow randering.
+	BufferView m_visiblesHashBuffer; ///< [Optional] A hash of the visible objects. Used to conditionally not perform shadow rendering.
 
 	Bool containsDrawcalls() const
 	{
@@ -123,8 +123,8 @@ public:
 
 	RenderingTechnique m_technique = RenderingTechnique::kCount;
 
-	BufferOffsetRange m_taskShaderIndirectArgsBuffer; ///< Taken from GpuVisibilityOutput.
-	BufferOffsetRange m_meshletGroupInstancesBuffer; ///< Taken from GpuVisibilityOutput.
+	BufferView m_taskShaderIndirectArgsBuffer; ///< Taken from GpuVisibilityOutput.
+	BufferView m_meshletGroupInstancesBuffer; ///< Taken from GpuVisibilityOutput.
 	ConstWeakArray<InstanceRange> m_bucketMeshletGroupInstanceRanges; ///< Taken from GpuVisibilityOutput.
 
 	BufferHandle m_dependency;
@@ -162,8 +162,8 @@ class PassthroughGpuMeshletVisibilityInput : public BaseGpuMeshletVisibilityInpu
 class GpuMeshletVisibilityOutput
 {
 public:
-	BufferOffsetRange m_drawIndirectArgsBuffer; ///< Array of DrawIndirectArgs. One for every render state bucket (even those that use that flow).
-	BufferOffsetRange m_meshletInstancesBuffer; ///< Array of GpuSceneMeshletInstance.
+	BufferView m_drawIndirectArgsBuffer; ///< Array of DrawIndirectArgs. One for every render state bucket (even those that use that flow).
+	BufferView m_meshletInstancesBuffer; ///< Array of GpuSceneMeshletInstance.
 
 	/// Defines the element sub-ranges in the m_meshletInstancesBuffer per render state bucket.
 	ConstWeakArray<InstanceRange> m_bucketMeshletInstanceRanges;
@@ -225,14 +225,14 @@ private:
 	{
 	public:
 		// Legacy
-		BufferOffsetRange m_drawIndexedIndirectArgsBuffer;
-		BufferOffsetRange m_renderableInstancesBuffer; ///< Instance rate vertex buffer.
+		BufferView m_drawIndexedIndirectArgsBuffer;
+		BufferView m_renderableInstancesBuffer; ///< Instance rate vertex buffer.
 
 		// HW & SW Meshlet rendering
-		BufferOffsetRange m_meshletGroupsInstancesBuffer;
+		BufferView m_meshletGroupsInstancesBuffer;
 
 		// SW meshlet rendering
-		BufferOffsetRange m_meshletInstancesBuffer; ///< Instance rate vertex buffer.
+		BufferView m_meshletInstancesBuffer; ///< Instance rate vertex buffer.
 
 		BufferHandle m_bufferDepedency;
 	};
@@ -241,7 +241,7 @@ private:
 	{
 	public:
 		// SW meshlet rendering
-		BufferOffsetRange m_meshletInstancesBuffer; ///< Instance rate vertex buffer.
+		BufferView m_meshletInstancesBuffer; ///< Instance rate vertex buffer.
 
 		BufferHandle m_bufferDepedency;
 	};
@@ -300,7 +300,7 @@ public:
 	RenderGraphDescription* m_rgraph = nullptr;
 
 	const RenderTargetHandle* m_hzbRt = nullptr; ///< Optional.
-	BufferOffsetRange m_cpuFeedbackBuffer; ///< Optional.
+	BufferView m_cpuFeedbackBuffer; ///< Optional.
 };
 
 /// @memberof GpuVisibilityNonRenderables
@@ -308,7 +308,7 @@ class GpuVisibilityNonRenderablesOutput
 {
 public:
 	BufferHandle m_visiblesBufferHandle; ///< Buffer handle holding the visible objects. Used for tracking. No need to track all buffers.
-	BufferOffsetRange m_visiblesBuffer;
+	BufferView m_visiblesBuffer;
 };
 
 /// GPU visibility of lights, probes etc.
@@ -364,8 +364,8 @@ class GpuVisibilityAccelerationStructuresOutput
 public:
 	BufferHandle m_someBufferHandle; ///< Some handle to track dependencies. No need to track every buffer.
 
-	BufferOffsetRange m_instancesBuffer; ///< Points to AccelerationStructureBuildRangeInfo::m_primitiveCount number of AccelerationStructureInstance.
-	BufferOffsetRange m_renderableIndicesBuffer; ///< AccelerationStructureBuildRangeInfo::m_primitiveCount number of indices to renderables.
+	BufferView m_instancesBuffer; ///< Points to AccelerationStructureBuildRangeInfo::m_primitiveCount number of AccelerationStructureInstance.
+	BufferView m_renderableIndicesBuffer; ///< AccelerationStructureBuildRangeInfo::m_primitiveCount number of indices to renderables.
 };
 
 /// Performs visibility to gather bottom-level acceleration structures in a buffer that can be used to build a TLAS.

+ 4 - 3
AnKi/Renderer/Utils/HzbGenerator.cpp

@@ -62,7 +62,7 @@ Error HzbGenerator::init()
 		cmdbInit.m_flags |= CommandBufferFlag::kSmallBatch;
 		CommandBufferPtr cmdb = GrManager::getSingleton().newCommandBuffer(cmdbInit);
 
-		cmdb->fillBuffer(m_counterBuffer.get(), 0, kMaxPtrSize, 0);
+		cmdb->fillBuffer(BufferView(m_counterBuffer.get()), 0);
 
 		FencePtr fence;
 		cmdb->endRecording();
@@ -164,7 +164,8 @@ void HzbGenerator::populateRenderGraphInternal(ConstWeakArray<DispatchInput> dis
 				rgraphCtx.bindStorageTexture(0, 0, in.m_dstHzbRt, subresource, mip);
 			}
 
-			cmdb.bindStorageBuffer(0, 1, m_counterBuffer.get(), (firstCounterBufferElement + dispatch) * m_counterBufferElementSize, sizeof(U32));
+			cmdb.bindStorageBuffer(
+				0, 1, BufferView(m_counterBuffer.get(), (firstCounterBufferElement + dispatch) * m_counterBufferElementSize, sizeof(U32)));
 			rgraphCtx.bindTexture(0, 2, in.m_srcDepthRt, TextureSubresourceInfo(DepthStencilAspectBit::kDepth));
 
 			cmdb.dispatchCompute(dispatchThreadGroupCountXY[0], dispatchThreadGroupCountXY[1], 1);
@@ -300,7 +301,7 @@ void HzbGenerator::populateRenderGraphDirectionalLight(const HzbDirectionalLight
 
 			cmdb.setPushConstants(&unis, sizeof(unis));
 
-			cmdb.bindIndexBuffer(m_boxIndexBuffer.get(), 0, IndexType::kU16);
+			cmdb.bindIndexBuffer(BufferView(m_boxIndexBuffer.get()), IndexType::kU16);
 
 			cmdb.drawIndexed(PrimitiveTopology::kTriangles, sizeof(kBoxIndices) / sizeof(kBoxIndices[0]), maxDepthRtSize.x() * maxDepthRtSize.y());
 

+ 2 - 3
AnKi/Renderer/Utils/Readback.cpp

@@ -7,7 +7,7 @@
 
 namespace anki {
 
-void ReadbackManager::allocateData(MultiframeReadbackToken& token, PtrSize size, Buffer*& buffer, PtrSize& bufferOffset) const
+void ReadbackManager::allocateData(MultiframeReadbackToken& token, PtrSize size, BufferView& buffer) const
 {
 	for([[maybe_unused]] U64 frame : token.m_frameIds)
 	{
@@ -27,8 +27,7 @@ void ReadbackManager::allocateData(MultiframeReadbackToken& token, PtrSize size,
 	}
 	token.m_frameIds[token.m_slot] = m_frameId;
 
-	buffer = &allocation.getBuffer();
-	bufferOffset = allocation.getOffset();
+	buffer = BufferView(&allocation.getBuffer(), allocation.getOffset(), size);
 
 	token.m_slot = (token.m_slot + 1) % kMaxFramesInFlight;
 }

+ 1 - 1
AnKi/Renderer/Utils/Readback.h

@@ -50,7 +50,7 @@ public:
 	}
 
 	/// Allocate new data for the following frame. 2nd thing to call in a frame.
-	void allocateData(MultiframeReadbackToken& token, PtrSize size, Buffer*& buffer, PtrSize& bufferOffset) const;
+	void allocateData(MultiframeReadbackToken& token, PtrSize size, BufferView& buffer) const;
 
 	/// Last thing to call in a frame.
 	void endFrame(Fence* fence);

+ 2 - 2
AnKi/Renderer/Utils/TraditionalDeferredShading.cpp

@@ -106,12 +106,12 @@ void TraditionalDeferredLightShading::drawLights(TraditionalDeferredLightShading
 		cmdb.bindStorageBuffer(0, 1, info.m_visibleLightsBuffer);
 		if(GpuSceneArrays::Light::getSingleton().getElementCount() > 0)
 		{
-			cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::Light::getSingleton().getBufferOffsetRange());
+			cmdb.bindStorageBuffer(0, 2, GpuSceneArrays::Light::getSingleton().getBufferView());
 		}
 		else
 		{
 			// Set something random
-			cmdb.bindStorageBuffer(0, 2, GpuSceneBuffer::getSingleton().getBufferOffsetRange());
+			cmdb.bindStorageBuffer(0, 2, GpuSceneBuffer::getSingleton().getBufferView());
 		}
 
 		// NOTE: Use nearest sampler because we don't want the result to sample the near tiles

+ 2 - 2
AnKi/Renderer/Utils/TraditionalDeferredShading.h

@@ -23,7 +23,7 @@ public:
 	F32 m_effectiveShadowDistance = -1.0f; // TODO rm
 	Mat4 m_dirLightMatrix; // TODO rm
 
-	BufferOffsetRange m_visibleLightsBuffer;
+	BufferView m_visibleLightsBuffer;
 
 	Bool m_computeSpecular = false;
 
@@ -38,7 +38,7 @@ public:
 	TextureSubresourceInfo m_directionalLightShadowmapRenderTargetSubresourceInfo = {DepthStencilAspectBit::kDepth};
 
 	RenderTargetHandle m_skyLutRenderTarget;
-	BufferOffsetRange m_globalRendererConsts;
+	BufferView m_globalRendererConsts;
 
 	RenderPassWorkContext* m_renderpassContext = nullptr;
 };

+ 1 - 1
AnKi/Resource/ImageResource.cpp

@@ -329,7 +329,7 @@ Error ImageResource::load(LoadingContext& ctx)
 
 			TextureViewPtr tmpView = GrManager::getSingleton().newTextureView(TextureViewInitInfo(ctx.m_tex.get(), subresource, "RsrcTmp"));
 
-			cmdb->copyBufferToTextureView(&handle.getBuffer(), handle.getOffset(), handle.getRange(), tmpView.get());
+			cmdb->copyBufferToTexture(handle, tmpView.get());
 		}
 
 		// Set the barriers of the batch

+ 20 - 33
AnKi/Resource/MeshResource.cpp

@@ -166,12 +166,10 @@ Error MeshResource::load(const ResourceFilename& filename, Bool async)
 			AccelerationStructureInitInfo inf(ResourceString().sprintf("%s_%s", "Blas", basename.cstr()));
 			inf.m_type = AccelerationStructureType::kBottomLevel;
 
-			inf.m_bottomLevel.m_indexBuffer = &UnifiedGeometryBuffer::getSingleton().getBuffer();
-			inf.m_bottomLevel.m_indexBufferOffset = lod.m_indexBufferAllocationToken.getOffset();
+			inf.m_bottomLevel.m_indexBuffer = lod.m_indexBufferAllocationToken;
 			inf.m_bottomLevel.m_indexCount = lod.m_indexCount;
 			inf.m_bottomLevel.m_indexType = m_indexType;
-			inf.m_bottomLevel.m_positionBuffer = &UnifiedGeometryBuffer::getSingleton().getBuffer();
-			inf.m_bottomLevel.m_positionBufferOffset = lod.m_vertexBuffersAllocationToken[VertexStreamId::kPosition].getOffset();
+			inf.m_bottomLevel.m_positionBuffer = lod.m_vertexBuffersAllocationToken[VertexStreamId::kPosition];
 			inf.m_bottomLevel.m_positionStride = getFormatInfo(kMeshRelatedVertexStreamFormats[VertexStreamId::kPosition]).m_texelSize;
 			inf.m_bottomLevel.m_positionsFormat = kMeshRelatedVertexStreamFormats[VertexStreamId::kPosition];
 			inf.m_bottomLevel.m_positionCount = lod.m_vertexCount;
@@ -189,13 +187,13 @@ Error MeshResource::load(const ResourceFilename& filename, Bool async)
 
 		for(const Lod& lod : m_lods)
 		{
-			cmdb->fillBuffer(lod.m_indexBufferAllocationToken, 0);
+			cmdb->fillBuffer(lod.m_indexBufferAllocationToken.getCompleteBufferView(), 0);
 
 			for(VertexStreamId stream : EnumIterable(VertexStreamId::kMeshRelatedFirst, VertexStreamId::kMeshRelatedCount))
 			{
 				if(header.m_vertexAttributes[stream].m_format != Format::kNone)
 				{
-					cmdb->fillBuffer(lod.m_vertexBuffersAllocationToken[stream], 0);
+					cmdb->fillBuffer(lod.m_vertexBuffersAllocationToken[stream].getCompleteBufferView(), 0);
 				}
 			}
 
@@ -207,8 +205,8 @@ Error MeshResource::load(const ResourceFilename& filename, Bool async)
 			}
 		}
 
-		const BufferBarrierInfo barrier = {&UnifiedGeometryBuffer::getSingleton().getBuffer(), BufferUsageBit::kTransferDestination,
-										   BufferUsageBit::kVertex, 0, kMaxPtrSize};
+		const BufferBarrierInfo barrier = {UnifiedGeometryBuffer::getSingleton().getBufferView(), BufferUsageBit::kTransferDestination,
+										   BufferUsageBit::kVertex};
 
 		cmdb->setPipelineBarrier({}, {&barrier, 1}, {});
 
@@ -247,8 +245,8 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 	CommandBufferPtr cmdb = gr.newCommandBuffer(cmdbinit);
 
 	// Set transfer to transfer barrier because of the clear that happened while sync loading
-	const BufferBarrierInfo barrier = {unifiedGeometryBuffer, unifiedGeometryBufferNonTransferUsage, BufferUsageBit::kTransferDestination, 0,
-									   kMaxPtrSize};
+	const BufferBarrierInfo barrier = {UnifiedGeometryBuffer::getSingleton().getBufferView(), unifiedGeometryBufferNonTransferUsage,
+									   BufferUsageBit::kTransferDestination};
 	cmdb->setPipelineBarrier({}, {&barrier, 1}, {});
 
 	// Upload index and vertex buffers
@@ -259,16 +257,14 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 		// Upload index buffer
 		{
 			TransferGpuAllocatorHandle& handle = handles[handleCount++];
-			const PtrSize indexBufferSize = PtrSize(lod.m_indexCount) * getIndexSize(m_indexType);
 
-			ANKI_CHECK(transferAlloc.allocate(indexBufferSize, handle));
+			ANKI_CHECK(transferAlloc.allocate(lod.m_indexBufferAllocationToken.getAllocatedSize(), handle));
 			void* data = handle.getMappedMemory();
 			ANKI_ASSERT(data);
 
-			ANKI_CHECK(loader.storeIndexBuffer(lodIdx, data, indexBufferSize));
+			ANKI_CHECK(loader.storeIndexBuffer(lodIdx, data, handle.getRange()));
 
-			cmdb->copyBufferToBuffer(&handle.getBuffer(), handle.getOffset(), unifiedGeometryBuffer, lod.m_indexBufferAllocationToken.getOffset(),
-									 handle.getRange());
+			cmdb->copyBufferToBuffer(handle, lod.m_indexBufferAllocationToken);
 		}
 
 		// Upload vert buffers
@@ -280,18 +276,16 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 			}
 
 			TransferGpuAllocatorHandle& handle = handles[handleCount++];
-			const PtrSize vertexBufferSize = PtrSize(lod.m_vertexCount) * getFormatInfo(kMeshRelatedVertexStreamFormats[stream]).m_texelSize;
 
-			ANKI_CHECK(transferAlloc.allocate(vertexBufferSize, handle));
+			ANKI_CHECK(transferAlloc.allocate(lod.m_vertexBuffersAllocationToken[stream].getAllocatedSize(), handle));
 			U8* data = static_cast<U8*>(handle.getMappedMemory());
 			ANKI_ASSERT(data);
 
 			// Load to staging
-			ANKI_CHECK(loader.storeVertexBuffer(lodIdx, U32(stream), data, vertexBufferSize));
+			ANKI_CHECK(loader.storeVertexBuffer(lodIdx, U32(stream), data, handle.getRange()));
 
 			// Copy
-			cmdb->copyBufferToBuffer(&handle.getBuffer(), handle.getOffset(), unifiedGeometryBuffer,
-									 lod.m_vertexBuffersAllocationToken[stream].getOffset(), handle.getRange());
+			cmdb->copyBufferToBuffer(handle, lod.m_vertexBuffersAllocationToken[stream]);
 		}
 
 		if(lod.m_meshletBoundingVolumes.isValid())
@@ -302,8 +296,7 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 			ANKI_CHECK(transferAlloc.allocate(primitivesSize, handle));
 			ANKI_CHECK(loader.storeMeshletIndicesBuffer(lodIdx, handle.getMappedMemory(), primitivesSize));
 
-			cmdb->copyBufferToBuffer(&handle.getBuffer(), handle.getOffset(), unifiedGeometryBuffer, lod.m_meshletIndices.getOffset(),
-									 handle.getRange());
+			cmdb->copyBufferToBuffer(handle, lod.m_meshletIndices);
 
 			// Meshlets
 			ResourceDynamicArray<MeshBinaryMeshlet> binaryMeshlets;
@@ -357,10 +350,8 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 				outMeshletBoundingVolume.m_primitiveCount = inMeshlet.m_primitiveCount;
 			}
 
-			cmdb->copyBufferToBuffer(&handle2.getBuffer(), handle2.getOffset(), unifiedGeometryBuffer, lod.m_meshletBoundingVolumes.getOffset(),
-									 handle2.getRange());
-			cmdb->copyBufferToBuffer(&handle3.getBuffer(), handle3.getOffset(), unifiedGeometryBuffer, lod.m_meshletGeometryDescriptors.getOffset(),
-									 handle3.getRange());
+			cmdb->copyBufferToBuffer(handle2, lod.m_meshletBoundingVolumes);
+			cmdb->copyBufferToBuffer(handle3, lod.m_meshletGeometryDescriptors);
 		}
 	}
 
@@ -370,9 +361,7 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 
 		// Set the barriers
 		BufferBarrierInfo bufferBarrier;
-		bufferBarrier.m_buffer = unifiedGeometryBuffer;
-		bufferBarrier.m_offset = 0;
-		bufferBarrier.m_range = kMaxPtrSize;
+		bufferBarrier.m_bufferView = UnifiedGeometryBuffer::getSingleton().getBufferView();
 		bufferBarrier.m_previousUsage = BufferUsageBit::kTransferDestination;
 		bufferBarrier.m_nextUsage = unifiedGeometryBufferNonTransferUsage;
 
@@ -395,7 +384,7 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 			buffInit.m_usage = BufferUsageBit::kAccelerationStructureBuildScratch;
 			BufferPtr scratchBuff = GrManager::getSingleton().newBuffer(buffInit);
 
-			cmdb->buildAccelerationStructure(m_lods[lodIdx].m_blas.get(), scratchBuff.get(), 0);
+			cmdb->buildAccelerationStructure(m_lods[lodIdx].m_blas.get(), BufferView(scratchBuff.get()));
 		}
 
 		// Barriers again
@@ -412,9 +401,7 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 	{
 		// Only set a barrier
 		BufferBarrierInfo bufferBarrier;
-		bufferBarrier.m_buffer = unifiedGeometryBuffer;
-		bufferBarrier.m_offset = 0;
-		bufferBarrier.m_range = kMaxPtrSize;
+		bufferBarrier.m_bufferView = UnifiedGeometryBuffer::getSingleton().getBufferView();
 		bufferBarrier.m_previousUsage = BufferUsageBit::kTransferDestination;
 		bufferBarrier.m_nextUsage = unifiedGeometryBufferNonTransferUsage;
 

+ 1 - 1
AnKi/Resource/TransferGpuAllocator.h

@@ -48,7 +48,7 @@ public:
 		return *this;
 	}
 
-	operator BufferOffsetRange() const
+	operator BufferView() const
 	{
 		return {m_buffer.get(), m_offsetInBuffer, m_range};
 	}

+ 4 - 7
AnKi/Scene/Components/ParticleEmitterComponent.cpp

@@ -232,15 +232,12 @@ ParticleEmitterComponent::ParticleEmitterComponent(SceneNode* node)
 	CommandBufferInitInfo cmdbInit("Particle quad upload");
 	cmdbInit.m_flags |= CommandBufferFlag::kSmallBatch;
 	CommandBufferPtr cmdb = GrManager::getSingleton().newCommandBuffer(cmdbInit);
-	Buffer* srcBuff = &RebarTransientMemoryPool::getSingleton().getBuffer();
 	Buffer* dstBuff = &UnifiedGeometryBuffer::getSingleton().getBuffer();
-	cmdb->copyBufferToBuffer(srcBuff, positionsAlloc.getOffset(), dstBuff, m_quadPositions.getOffset(), positionsAlloc.getRange());
-	cmdb->copyBufferToBuffer(srcBuff, uvsAlloc.getOffset(), dstBuff, m_quadUvs.getOffset(), uvsAlloc.getRange());
-	cmdb->copyBufferToBuffer(srcBuff, indicesAlloc.getOffset(), dstBuff, m_quadIndices.getOffset(), indicesAlloc.getRange());
+	cmdb->copyBufferToBuffer(positionsAlloc, m_quadPositions);
+	cmdb->copyBufferToBuffer(uvsAlloc, m_quadUvs);
+	cmdb->copyBufferToBuffer(indicesAlloc, m_quadIndices);
 	BufferBarrierInfo barrier;
-	barrier.m_buffer = dstBuff;
-	barrier.m_offset = 0;
-	barrier.m_range = kMaxPtrSize;
+	barrier.m_bufferView = BufferView(dstBuff);
 	barrier.m_previousUsage = BufferUsageBit::kTransferDestination;
 	barrier.m_nextUsage = dstBuff->getBufferUsage();
 	cmdb->setPipelineBarrier({}, {&barrier, 1}, {});

+ 1 - 1
AnKi/Scene/GpuSceneArray.h

@@ -122,7 +122,7 @@ public:
 	}
 
 	/// @note Thread-safe
-	BufferOffsetRange getBufferOffsetRange() const
+	BufferView getBufferView() const
 	{
 		return {&GpuSceneBuffer::getSingleton().getBuffer(), getGpuSceneOffsetOfArrayBase(), getBufferRange()};
 	}

+ 2 - 2
AnKi/Ui/Canvas.cpp

@@ -219,12 +219,12 @@ void Canvas::appendToCommandBufferInternal(CommandBuffer& cmdb)
 	const F32 fbHeight = drawData.DisplaySize.y * drawData.FramebufferScale.y;
 	cmdb.setViewport(0, 0, U32(fbWidth), U32(fbHeight));
 
-	cmdb.bindVertexBuffer(0, &vertsToken.getBuffer(), vertsToken.getOffset(), sizeof(ImDrawVert));
+	cmdb.bindVertexBuffer(0, vertsToken, sizeof(ImDrawVert));
 	cmdb.setVertexAttribute(VertexAttribute::kPosition, 0, Format::kR32G32_Sfloat, 0);
 	cmdb.setVertexAttribute(VertexAttribute::kColor, 0, Format::kR8G8B8A8_Unorm, sizeof(Vec2) * 2);
 	cmdb.setVertexAttribute(VertexAttribute::kTexCoord, 0, Format::kR32G32_Sfloat, sizeof(Vec2));
 
-	cmdb.bindIndexBuffer(&indicesToken.getBuffer(), indicesToken.getOffset(), IndexType::kU16);
+	cmdb.bindIndexBuffer(indicesToken, IndexType::kU16);
 
 	// Will project scissor/clipping rectangles into framebuffer space
 	const Vec2 clipOff = drawData.DisplayPos; // (0,0) unless using multi-viewports

+ 1 - 1
AnKi/Ui/Font.cpp

@@ -94,7 +94,7 @@ void Font::createTexture(const void* data, U32 width, U32 height)
 	TextureBarrierInfo barrier = {m_tex.get(), TextureUsageBit::kNone, TextureUsageBit::kTransferDestination, surf};
 	cmdb->setPipelineBarrier({&barrier, 1}, {}, {});
 
-	cmdb->copyBufferToTextureView(buff.get(), 0, buffSize, tmpView.get());
+	cmdb->copyBufferToTexture(BufferView(buff.get()), tmpView.get());
 
 	barrier.m_previousUsage = TextureUsageBit::kTransferDestination;
 	barrier.m_nextUsage = TextureUsageBit::kGenerateMipmaps;

+ 18 - 22
Tests/Gr/Gr.cpp

@@ -396,9 +396,7 @@ static void setBufferBarrier(CommandBufferPtr cmdb, BufferPtr buffer, BufferUsag
 	BufferBarrierInfo barrier;
 	barrier.m_previousUsage = before;
 	barrier.m_nextUsage = after;
-	barrier.m_offset = offset;
-	barrier.m_range = range;
-	barrier.m_buffer = buffer.get();
+	barrier.m_bufferView = BufferView(buffer.get(), offset, range);
 
 	cmdb->setPipelineBarrier({}, {&barrier, 1}, {});
 }
@@ -1261,10 +1259,10 @@ static void drawOffscreenDrawcalls([[maybe_unused]] GrManager& gr, ShaderProgram
 	*color++ = Vec4(1.0, 0.0, 0.0, 0.0);
 	*color = Vec4(0.0, 1.0, 0.0, 0.0);
 
-	cmdb->bindVertexBuffer(0, vertBuff.get(), 0, sizeof(Vec3));
+	cmdb->bindVertexBuffer(0, BufferView(vertBuff.get()), sizeof(Vec3));
 	cmdb->setVertexAttribute(VertexAttribute::kPosition, 0, Format::kR32G32B32_Sfloat, 0);
 	cmdb->bindShaderProgram(prog.get());
-	cmdb->bindIndexBuffer(indexBuff.get(), 0, IndexType::kU16);
+	cmdb->bindIndexBuffer(BufferView(indexBuff.get()), IndexType::kU16);
 	cmdb->setViewport(0, 0, viewPortSize, viewPortSize);
 	cmdb->drawIndexed(PrimitiveTopology::kTriangles, 6 * 2 * 3);
 
@@ -1930,15 +1928,13 @@ void main()
 	TextureSubresourceInfo subresource;
 	subresource.m_mipmapCount = texInit.m_mipmapCount;
 	setTextureBarrier(cmdb, tex, TextureUsageBit::kNone, TextureUsageBit::kTransferDestination, subresource);
-	cmdb->copyBufferToTextureView(uploadBuff.get(), 0, uploadBuff->getSize(),
-								  g_gr->newTextureView(TextureViewInitInfo(tex.get(), TextureSurfaceInfo(0, 0, 0))).get());
-	cmdb->copyBufferToTextureView(uploadBuff2.get(), 0, uploadBuff2->getSize(),
-								  g_gr->newTextureView(TextureViewInitInfo(tex.get(), TextureSurfaceInfo(1, 0, 0))).get());
+	cmdb->copyBufferToTexture(BufferView(uploadBuff.get()), g_gr->newTextureView(TextureViewInitInfo(tex.get(), TextureSurfaceInfo(0, 0, 0))).get());
+	cmdb->copyBufferToTexture(BufferView(uploadBuff2.get()), g_gr->newTextureView(TextureViewInitInfo(tex.get(), TextureSurfaceInfo(1, 0, 0))).get());
 
 	setTextureBarrier(cmdb, tex, TextureUsageBit::kTransferDestination, TextureUsageBit::kSampledCompute, subresource);
 	cmdb->bindShaderProgram(prog.get());
 	// cmdb->bindTextureAndSampler(0, 0, texView.get(), sampler.get());
-	cmdb->bindStorageBuffer(0, 1, resultBuff.get(), 0, resultBuff->getSize());
+	cmdb->bindStorageBuffer(0, 1, BufferView(resultBuff.get()));
 	cmdb->dispatchCompute(1, 1, 1);
 
 	setBufferBarrier(cmdb, resultBuff, BufferUsageBit::kStorageComputeWrite, BufferUsageBit::kStorageComputeWrite, 0, resultBuff->getSize());
@@ -2137,10 +2133,10 @@ void main()
 
 	for(U32 i = 0; i < uniformBuffers.getSize(); ++i)
 	{
-		cmdb->bindUniformBuffer(0, 0, uniformBuffers[i].get(), 0, kMaxPtrSize, i);
+		cmdb->bindUniformBuffer(0, 0, BufferView(uniformBuffers[i].get()), i);
 	}
 
-	cmdb->bindStorageBuffer(0, 1, resBuff.get(), 0, kMaxPtrSize);
+	cmdb->bindStorageBuffer(0, 1, BufferView(resBuff.get()));
 
 	cmdb->bindShaderProgram(prog.get());
 	cmdb->dispatchCompute(1, 1, 1);
@@ -2869,10 +2865,10 @@ void main()
 		{
 			AccelerationStructureInitInfo inf;
 			inf.m_type = AccelerationStructureType::kBottomLevel;
-			inf.m_bottomLevel.m_indexBuffer = g.m_indexBuffer.get();
+			inf.m_bottomLevel.m_indexBuffer = BufferView(g.m_indexBuffer.get());
 			inf.m_bottomLevel.m_indexType = IndexType::kU16;
 			inf.m_bottomLevel.m_indexCount = g.m_indexCount;
-			inf.m_bottomLevel.m_positionBuffer = g.m_vertexBuffer.get();
+			inf.m_bottomLevel.m_positionBuffer = BufferView(g.m_vertexBuffer.get());
 			inf.m_bottomLevel.m_positionCount = 8;
 			inf.m_bottomLevel.m_positionsFormat = Format::kR32G32B32_Sfloat;
 			inf.m_bottomLevel.m_positionStride = sizeof(Vec3);
@@ -3403,7 +3399,7 @@ void main()
 				scratchInit.m_size = g.m_blas->getBuildScratchBufferSize();
 				scratchInit.m_usage = BufferUsageBit::kAccelerationStructureBuildScratch;
 				BufferPtr scratchBuff = GrManager::getSingleton().newBuffer(scratchInit);
-				cmdb->buildAccelerationStructure(g.m_blas.get(), scratchBuff.get(), 0);
+				cmdb->buildAccelerationStructure(g.m_blas.get(), BufferView(scratchBuff.get()));
 			}
 
 			for(const Geom& g : geometries)
@@ -3416,7 +3412,7 @@ void main()
 			scratchInit.m_size = tlas->getBuildScratchBufferSize();
 			scratchInit.m_usage = BufferUsageBit::kAccelerationStructureBuildScratch;
 			BufferPtr scratchBuff = GrManager::getSingleton().newBuffer(scratchInit);
-			cmdb->buildAccelerationStructure(tlas.get(), scratchBuff.get(), 0);
+			cmdb->buildAccelerationStructure(tlas.get(), BufferView(scratchBuff.get()));
 			setAccelerationStructureBarrier(cmdb, tlas, AccelerationStructureUsageBit::kBuild, AccelerationStructureUsageBit::kTraceRaysRead);
 		}
 
@@ -3444,8 +3440,8 @@ void main()
 		setTextureBarrier(cmdb, offscreenRts[(i + 1) & 1], TextureUsageBit::kStorageComputeRead, TextureUsageBit::kStorageTraceRaysRead,
 						  TextureSubresourceInfo());
 
-		cmdb->bindStorageBuffer(0, 0, modelBuffer.get(), 0, kMaxPtrSize);
-		cmdb->bindStorageBuffer(0, 1, lightBuffer.get(), 0, kMaxPtrSize);
+		cmdb->bindStorageBuffer(0, 0, BufferView(modelBuffer.get()));
+		cmdb->bindStorageBuffer(0, 1, BufferView(lightBuffer.get()));
 		cmdb->bindAccelerationStructure(1, 0, tlas.get());
 		cmdb->bindStorageTexture(1, 1, offscreenHistoryView.get());
 		cmdb->bindStorageTexture(1, 2, offscreenView.get());
@@ -3461,7 +3457,7 @@ void main()
 		cmdb->setPushConstants(&pc, sizeof(pc));
 
 		const U32 sbtRecordSize = g_gr->getDeviceCapabilities().m_sbtRecordAlignment;
-		cmdb->traceRays(sbt.get(), 0, sbtRecordSize, U32(GeomWhat::kCount) * 2, 2, WIDTH, HEIGHT, 1);
+		cmdb->traceRays(BufferView(sbt.get()), sbtRecordSize, U32(GeomWhat::kCount) * 2, 2, WIDTH, HEIGHT, 1);
 
 		// Copy to present
 		setTextureBarrier(cmdb, offscreenRts[i & 1], TextureUsageBit::kStorageTraceRaysWrite, TextureUsageBit::kStorageComputeRead,
@@ -3580,21 +3576,21 @@ void main()
 	cinit.m_flags = CommandBufferFlag::kComputeWork | CommandBufferFlag::kSmallBatch;
 	CommandBufferPtr incrementCmdb = g_gr->newCommandBuffer(cinit);
 	incrementCmdb->bindShaderProgram(incrementProg.get());
-	incrementCmdb->bindStorageBuffer(0, 0, atomicsBuffer.get(), 0, kMaxPtrSize);
+	incrementCmdb->bindStorageBuffer(0, 0, BufferView(atomicsBuffer.get()));
 	incrementCmdb->dispatchCompute(ARRAY_SIZE / 8, 1, 1);
 
 	// Create the 2nd command buffer
 	cinit.m_flags = CommandBufferFlag::kGeneralWork | CommandBufferFlag::kSmallBatch;
 	CommandBufferPtr checkCmdb = g_gr->newCommandBuffer(cinit);
 	checkCmdb->bindShaderProgram(checkProg.get());
-	checkCmdb->bindStorageBuffer(0, 0, atomicsBuffer.get(), 0, kMaxPtrSize);
+	checkCmdb->bindStorageBuffer(0, 0, BufferView(atomicsBuffer.get()));
 	checkCmdb->dispatchCompute(ARRAY_SIZE / 8, 1, 1);
 
 	// Create the 3rd command buffer
 	cinit.m_flags = CommandBufferFlag::kComputeWork | CommandBufferFlag::kSmallBatch;
 	CommandBufferPtr incrementCmdb2 = g_gr->newCommandBuffer(cinit);
 	incrementCmdb2->bindShaderProgram(incrementProg.get());
-	incrementCmdb2->bindStorageBuffer(0, 0, atomicsBuffer.get(), 0, kMaxPtrSize);
+	incrementCmdb2->bindStorageBuffer(0, 0, BufferView(atomicsBuffer.get()));
 	incrementCmdb2->dispatchCompute(ARRAY_SIZE / 8, 1, 1);
 
 	// Submit

+ 4 - 4
Tests/Gr/GrMeshShaders.cpp

@@ -242,10 +242,10 @@ float3 main(VertOut input) : SV_TARGET0
 			rt.m_clearValue.m_colorf = {1.0f, 0.0f, 1.0f, 0.0f};
 			cmdb->beginRenderPass({rt});
 
-			cmdb->bindStorageBuffer(0, 0, indexBuff.get(), 0, kMaxPtrSize);
-			cmdb->bindStorageBuffer(0, 1, positionsBuff.get(), 0, kMaxPtrSize);
-			cmdb->bindStorageBuffer(0, 2, colorsBuff.get(), 0, kMaxPtrSize);
-			cmdb->bindStorageBuffer(0, 3, meshletsBuff.get(), 0, kMaxPtrSize);
+			cmdb->bindStorageBuffer(0, 0, BufferView(indexBuff.get()));
+			cmdb->bindStorageBuffer(0, 1, BufferView(positionsBuff.get()));
+			cmdb->bindStorageBuffer(0, 2, BufferView(colorsBuff.get()));
+			cmdb->bindStorageBuffer(0, 3, BufferView(meshletsBuff.get()));
 
 			cmdb->bindShaderProgram(prog.get());
 

+ 2 - 2
Tests/Gr/GrTextureBuffer.cpp

@@ -58,8 +58,8 @@ void main()
 		cmdbInit.m_flags = CommandBufferFlag::kSmallBatch | CommandBufferFlag::kGeneralWork;
 		CommandBufferPtr cmdb = gr->newCommandBuffer(cmdbInit);
 
-		cmdb->bindReadOnlyTexelBuffer(0, 0, texBuff.get(), 0, kMaxPtrSize, Format::kR8G8B8A8_Snorm);
-		cmdb->bindStorageBuffer(0, 1, storageBuff.get(), 0, kMaxPtrSize);
+		cmdb->bindReadOnlyTexelBuffer(0, 0, BufferView(texBuff.get()), Format::kR8G8B8A8_Snorm);
+		cmdb->bindStorageBuffer(0, 1, BufferView(storageBuff.get()));
 		cmdb->bindShaderProgram(prog.get());
 		cmdb->dispatchCompute(1, 1, 1);
 		cmdb->endRecording();