瀏覽代碼

Change the code style. Increase the column size to 150

Panagiotis Christopoulos Charitos 2 年之前
父節點
當前提交
2f699aa49b
共有 100 個文件被更改,包括 839 次插入、1346 次刪除
  1. 2 2
      .clang-format
  2. 2 2
      .clang-format-hlsl
  3. 1 2
      AnKi/Collision/Aabb.cpp
  4. 2 4
      AnKi/Collision/FunctionsTestPlane.cpp
  5. 1 2
      AnKi/Collision/GjkEpa.cpp
  6. 1 2
      AnKi/Collision/GjkEpa.h
  7. 1 2
      AnKi/Collision/Obb.cpp
  8. 1 2
      AnKi/Collision/Ray.h
  9. 1 2
      AnKi/Collision/Sphere.cpp
  10. 11 19
      AnKi/Core/App.cpp
  11. 2 4
      AnKi/Core/ConfigSet.cpp
  12. 1 2
      AnKi/Core/ConfigVars.defs.h
  13. 3 6
      AnKi/Core/CoreTracer.cpp
  14. 3 5
      AnKi/Core/DeveloperConsole.cpp
  15. 5 10
      AnKi/Core/GpuMemory/GpuSceneBuffer.cpp
  16. 1 2
      AnKi/Core/GpuMemory/GpuSceneBuffer.h
  17. 4 6
      AnKi/Core/GpuMemory/GpuVisibleTransientMemoryPool.h
  18. 3 4
      AnKi/Core/GpuMemory/RebarTransientMemoryPool.cpp
  19. 10 6
      AnKi/Core/GpuMemory/RebarTransientMemoryPool.h
  20. 1 2
      AnKi/Core/MaliHwCounters.cpp
  21. 2 4
      AnKi/Core/StatsUi.cpp
  22. 2 3
      AnKi/Gr/AccelerationStructure.h
  23. 16 26
      AnKi/Gr/CommandBuffer.h
  24. 3 11
      AnKi/Gr/Common.cpp
  25. 30 41
      AnKi/Gr/Common.h
  26. 33 60
      AnKi/Gr/Gl/CommandBuffer.cpp
  27. 5 6
      AnKi/Gr/Gl/CommandBufferImpl.cpp
  28. 1 2
      AnKi/Gr/Gl/Common.cpp
  29. 1 2
      AnKi/Gr/Gl/Common.h
  30. 2 4
      AnKi/Gr/Gl/Fence.cpp
  31. 1 2
      AnKi/Gr/Gl/Framebuffer.cpp
  32. 9 12
      AnKi/Gr/Gl/FramebufferImpl.cpp
  33. 2 4
      AnKi/Gr/Gl/FramebufferImpl.h
  34. 2 4
      AnKi/Gr/Gl/GlState.cpp
  35. 2 4
      AnKi/Gr/Gl/GlState.h
  36. 1 2
      AnKi/Gr/Gl/GrManagerImpl.cpp
  37. 1 2
      AnKi/Gr/Gl/GrManagerImplSdl.cpp
  38. 2 4
      AnKi/Gr/Gl/RenderingThread.cpp
  39. 2 4
      AnKi/Gr/Gl/Shader.cpp
  40. 2 2
      AnKi/Gr/Gl/ShaderImpl.cpp
  41. 3 4
      AnKi/Gr/Gl/ShaderProgram.cpp
  42. 1 2
      AnKi/Gr/Gl/ShaderProgramImpl.h
  43. 8 13
      AnKi/Gr/Gl/StateTracker.h
  44. 11 19
      AnKi/Gr/Gl/TextureImpl.cpp
  45. 63 86
      AnKi/Gr/RenderGraph.cpp
  46. 17 30
      AnKi/Gr/RenderGraph.h
  47. 19 23
      AnKi/Gr/RenderGraph.inl.h
  48. 1 3
      AnKi/Gr/Sampler.h
  49. 1 3
      AnKi/Gr/ShaderProgram.cpp
  50. 4 5
      AnKi/Gr/Texture.h
  51. 5 7
      AnKi/Gr/TextureView.h
  52. 9 12
      AnKi/Gr/Utils/Functions.cpp
  53. 8 9
      AnKi/Gr/Utils/Functions.h
  54. 4 6
      AnKi/Gr/Utils/SegregatedListsGpuMemoryPool.cpp
  55. 3 4
      AnKi/Gr/Utils/SegregatedListsGpuMemoryPool.h
  56. 2 3
      AnKi/Gr/Utils/StackGpuMemoryPool.cpp
  57. 2 2
      AnKi/Gr/Utils/StackGpuMemoryPool.h
  58. 1 2
      AnKi/Gr/Vulkan/AccelerationStructure.cpp
  59. 9 15
      AnKi/Gr/Vulkan/AccelerationStructureImpl.cpp
  60. 2 3
      AnKi/Gr/Vulkan/AccelerationStructureImpl.h
  61. 16 25
      AnKi/Gr/Vulkan/BufferImpl.cpp
  62. 2 3
      AnKi/Gr/Vulkan/BufferImpl.h
  63. 27 46
      AnKi/Gr/Vulkan/CommandBuffer.cpp
  64. 3 4
      AnKi/Gr/Vulkan/CommandBufferFactory.cpp
  65. 28 46
      AnKi/Gr/Vulkan/CommandBufferImpl.cpp
  66. 25 43
      AnKi/Gr/Vulkan/CommandBufferImpl.h
  67. 35 49
      AnKi/Gr/Vulkan/CommandBufferImpl.inl.h
  68. 1 2
      AnKi/Gr/Vulkan/Common.cpp
  69. 27 49
      AnKi/Gr/Vulkan/DescriptorSet.cpp
  70. 4 8
      AnKi/Gr/Vulkan/DescriptorSet.h
  71. 7 12
      AnKi/Gr/Vulkan/FramebufferImpl.cpp
  72. 3 4
      AnKi/Gr/Vulkan/FramebufferImpl.h
  73. 15 29
      AnKi/Gr/Vulkan/GpuMemoryManager.cpp
  74. 2 4
      AnKi/Gr/Vulkan/GpuMemoryManager.h
  75. 49 89
      AnKi/Gr/Vulkan/GrManagerImpl.cpp
  76. 5 9
      AnKi/Gr/Vulkan/GrManagerImpl.h
  77. 1 2
      AnKi/Gr/Vulkan/GrManagerImplSdl.cpp
  78. 11 15
      AnKi/Gr/Vulkan/GrUpscalerImpl.cpp
  79. 2 4
      AnKi/Gr/Vulkan/OcclusionQueryImpl.cpp
  80. 25 34
      AnKi/Gr/Vulkan/Pipeline.cpp
  81. 4 6
      AnKi/Gr/Vulkan/Pipeline.h
  82. 1 2
      AnKi/Gr/Vulkan/PipelineLayout.cpp
  83. 1 2
      AnKi/Gr/Vulkan/PipelineLayout.h
  84. 4 8
      AnKi/Gr/Vulkan/ShaderImpl.cpp
  85. 1 2
      AnKi/Gr/Vulkan/ShaderImpl.h
  86. 15 26
      AnKi/Gr/Vulkan/ShaderProgramImpl.cpp
  87. 2 4
      AnKi/Gr/Vulkan/ShaderProgramImpl.h
  88. 11 17
      AnKi/Gr/Vulkan/SwapchainFactory.cpp
  89. 12 18
      AnKi/Gr/Vulkan/TextureImpl.cpp
  90. 4 7
      AnKi/Gr/Vulkan/TextureImpl.h
  91. 1 2
      AnKi/Gr/Vulkan/TextureView.cpp
  92. 2 2
      AnKi/Gr/Vulkan/TimestampQueryImpl.cpp
  93. 25 42
      AnKi/Importer/GltfImporter.cpp
  94. 2 4
      AnKi/Importer/GltfImporter.h
  95. 11 16
      AnKi/Importer/GltfImporterAnimation.cpp
  96. 24 38
      AnKi/Importer/GltfImporterMaterial.cpp
  97. 20 27
      AnKi/Importer/GltfImporterMesh.cpp
  98. 61 105
      AnKi/Importer/ImageImporter.cpp
  99. 1 3
      AnKi/Math/Axisang.h
  100. 1 2
      AnKi/Math/Functions.h

+ 2 - 2
.clang-format

@@ -57,7 +57,7 @@ BreakConstructorInitializersBeforeComma: true
 BreakConstructorInitializers: BeforeColon
 BreakAfterJavaFieldAnnotations: false
 BreakStringLiterals: true
-ColumnLimit:     120
+ColumnLimit:     150
 CommentPragmas:  '^ IWYU pragma:'
 QualifierAlignment: Leave
 CompactNamespaces: false
@@ -122,7 +122,7 @@ ObjCSpaceBeforeProtocolList: true
 PenaltyBreakAssignment: 2
 PenaltyBreakBeforeFirstCallParameter: 19
 PenaltyBreakComment: 300
-PenaltyBreakFirstLessLess: 120
+PenaltyBreakFirstLessLess: 150
 PenaltyBreakOpenParenthesis: 0
 PenaltyBreakString: 1000
 PenaltyBreakTemplateDeclaration: 10

+ 2 - 2
.clang-format-hlsl

@@ -57,7 +57,7 @@ BreakBeforeTernaryOperators: true
 BreakConstructorInitializers: BeforeColon
 BreakAfterJavaFieldAnnotations: false
 BreakStringLiterals: true
-ColumnLimit:     120
+ColumnLimit:     150
 CommentPragmas:  '^ IWYU pragma:'
 QualifierAlignment: Leave
 CompactNamespaces: false
@@ -122,7 +122,7 @@ ObjCSpaceBeforeProtocolList: true
 PenaltyBreakAssignment: 2
 PenaltyBreakBeforeFirstCallParameter: 19
 PenaltyBreakComment: 300
-PenaltyBreakFirstLessLess: 120
+PenaltyBreakFirstLessLess: 150
 PenaltyBreakOpenParenthesis: 0
 PenaltyBreakString: 1000
 PenaltyBreakTemplateDeclaration: 10

+ 1 - 2
AnKi/Collision/Aabb.cpp

@@ -38,8 +38,7 @@ Aabb Aabb::getCompoundShape(const Aabb& b) const
 	return out;
 }
 
-void Aabb::setFromPointCloud(const Vec3* pointBuffer, U pointCount, PtrSize pointStride,
-							 [[maybe_unused]] PtrSize buffSize)
+void Aabb::setFromPointCloud(const Vec3* pointBuffer, U pointCount, PtrSize pointStride, [[maybe_unused]] PtrSize buffSize)
 {
 	// Preconditions
 	ANKI_ASSERT(pointBuffer);

+ 2 - 4
AnKi/Collision/FunctionsTestPlane.cpp

@@ -13,8 +13,7 @@ F32 testPlane(const Plane& plane, const Aabb& aabb)
 	__m128 gezero = _mm_cmpge_ps(plane.getNormal().getSimd(), _mm_setzero_ps());
 
 	Vec4 diagMin;
-	diagMin.getSimd() =
-		_mm_or_ps(_mm_and_ps(gezero, aabb.getMin().getSimd()), _mm_andnot_ps(gezero, aabb.getMax().getSimd()));
+	diagMin.getSimd() = _mm_or_ps(_mm_and_ps(gezero, aabb.getMin().getSimd()), _mm_andnot_ps(gezero, aabb.getMax().getSimd()));
 #else
 	Vec4 diagMin(0.0f), diagMax(0.0f);
 	// set min/max values for x,y,z direction
@@ -43,8 +42,7 @@ F32 testPlane(const Plane& plane, const Aabb& aabb)
 
 #if ANKI_SIMD_SSE
 	Vec4 diagMax;
-	diagMax.getSimd() =
-		_mm_or_ps(_mm_and_ps(gezero, aabb.getMax().getSimd()), _mm_andnot_ps(gezero, aabb.getMin().getSimd()));
+	diagMax.getSimd() = _mm_or_ps(_mm_and_ps(gezero, aabb.getMax().getSimd()), _mm_andnot_ps(gezero, aabb.getMin().getSimd()));
 #endif
 
 	ANKI_ASSERT(diagMax.w() == 0.0f);

+ 1 - 2
AnKi/Collision/GjkEpa.cpp

@@ -209,8 +209,7 @@ static Bool update(GjkContext& ctx, const GjkSupport& a)
 	return true;
 }
 
-Bool gjkIntersection(const void* shape0, GjkSupportCallback shape0Callback, const void* shape1,
-					 GjkSupportCallback shape1Callback)
+Bool gjkIntersection(const void* shape0, GjkSupportCallback shape0Callback, const void* shape1, GjkSupportCallback shape1Callback)
 {
 	ANKI_ASSERT(shape0 && shape0Callback && shape1 && shape1Callback);
 

+ 1 - 2
AnKi/Collision/GjkEpa.h

@@ -16,8 +16,7 @@ namespace anki {
 using GjkSupportCallback = Vec4 (*)(const void* shape, const Vec4& dir);
 
 /// Return true if the two convex shapes intersect.
-Bool gjkIntersection(const void* shape0, GjkSupportCallback shape0Callback, const void* shape1,
-					 GjkSupportCallback shape1Callback);
+Bool gjkIntersection(const void* shape0, GjkSupportCallback shape0Callback, const void* shape1, GjkSupportCallback shape1Callback);
 /// @}
 
 } // end namespace anki

+ 1 - 2
AnKi/Collision/Obb.cpp

@@ -74,8 +74,7 @@ void Obb::getExtremePoints(Array<Vec4, 8>& points) const
 	}
 }
 
-void Obb::setFromPointCloud(const Vec3* pointBuffer, U pointCount, PtrSize pointStride,
-							[[maybe_unused]] PtrSize buffSize)
+void Obb::setFromPointCloud(const Vec3* pointBuffer, U pointCount, PtrSize pointStride, [[maybe_unused]] PtrSize buffSize)
 {
 	// Preconditions
 	ANKI_ASSERT(pointBuffer);

+ 1 - 2
AnKi/Collision/Ray.h

@@ -108,8 +108,7 @@ private:
 
 	void check() const
 	{
-		ANKI_ASSERT(m_origin.w() == 0.0f && m_dir.w() == 0.0f
-					&& isZero(m_dir.getLengthSquared() - 1.0f, kEpsilonf * 100.0f));
+		ANKI_ASSERT(m_origin.w() == 0.0f && m_dir.w() == 0.0f && isZero(m_dir.getLengthSquared() - 1.0f, kEpsilonf * 100.0f));
 	}
 };
 /// @}

+ 1 - 2
AnKi/Collision/Sphere.cpp

@@ -31,8 +31,7 @@ Sphere Sphere::getCompoundShape(const Sphere& b) const
 	return Sphere((ca + cb) / 2.0f, (ca - cb).getLength() / 2.0f);
 }
 
-void Sphere::setFromPointCloud(const Vec3* pointBuffer, U pointCount, PtrSize pointStride,
-							   [[maybe_unused]] PtrSize buffSize)
+void Sphere::setFromPointCloud(const Vec3* pointBuffer, U pointCount, PtrSize pointStride, [[maybe_unused]] PtrSize buffSize)
 {
 	// Calc center
 	{

+ 11 - 19
AnKi/Core/App.cpp

@@ -72,8 +72,7 @@ void* App::MemStats::allocCallback(void* userData, void* ptr, PtrSize size, [[ma
 
 		// Allocate
 		App* self = static_cast<App*>(userData);
-		Header* allocation = static_cast<Header*>(
-			self->m_originalAllocCallback(self->m_originalAllocUserData, nullptr, newSize, newAlignment));
+		Header* allocation = static_cast<Header*>(self->m_originalAllocCallback(self->m_originalAllocUserData, nullptr, newSize, newAlignment));
 		allocation->m_allocatedSize = size;
 		++allocation;
 		out = static_cast<void*>(allocation);
@@ -216,8 +215,7 @@ Error App::initInternal()
 #if ANKI_SIMD_SSE && ANKI_COMPILER_GCC_COMPATIBLE
 	if(!__builtin_cpu_supports("sse4.2"))
 	{
-		ANKI_CORE_LOGF(
-			"AnKi is built with sse4.2 support but your CPU doesn't support it. Try bulding without SSE support");
+		ANKI_CORE_LOGF("AnKi is built with sse4.2 support but your CPU doesn't support it. Try bulding without SSE support");
 	}
 #endif
 
@@ -276,8 +274,7 @@ Error App::initInternal()
 	//
 	// Mali HW counters
 	//
-	if(GrManager::getSingleton().getDeviceCapabilities().m_gpuVendor == GpuVendor::kArm
-	   && ConfigSet::getSingleton().getCoreMaliHwCounters())
+	if(GrManager::getSingleton().getDeviceCapabilities().m_gpuVendor == GpuVendor::kArm && ConfigSet::getSingleton().getCoreMaliHwCounters())
 	{
 		MaliHwCounters::allocateSingleton();
 	}
@@ -327,8 +324,7 @@ Error App::initInternal()
 	// Renderer
 	//
 	MainRendererInitInfo renderInit;
-	renderInit.m_swapchainSize =
-		UVec2(NativeWindow::getSingleton().getWidth(), NativeWindow::getSingleton().getHeight());
+	renderInit.m_swapchainSize = UVec2(NativeWindow::getSingleton().getWidth(), NativeWindow::getSingleton().getHeight());
 	renderInit.m_allocCallback = allocCb;
 	renderInit.m_allocCallbackUserData = allocCbUserData;
 	ANKI_CHECK(MainRenderer::allocateSingleton().init(renderInit));
@@ -444,8 +440,7 @@ Error App::mainLoop()
 
 			// Render
 			TexturePtr presentableTex = GrManager::getSingleton().acquireNextPresentableTexture();
-			MainRenderer::getSingleton().setStatsEnabled(ConfigSet::getSingleton().getCoreDisplayStats() > 0
-														 || benchmarkMode
+			MainRenderer::getSingleton().setStatsEnabled(ConfigSet::getSingleton().getCoreDisplayStats() > 0 || benchmarkMode
 #if ANKI_ENABLE_TRACE
 														 || Tracer::getSingleton().getEnabled()
 #endif
@@ -539,10 +534,9 @@ Error App::mainLoop()
 				in.m_cpuFreeCount = m_memStats.m_freeCount.load();
 
 				const GrManagerStats grStats = GrManager::getSingleton().getStats();
-				UnifiedGeometryBuffer::getSingleton().getStats(
-					in.m_unifiedGometryExternalFragmentation, in.m_unifiedGeometryAllocated, in.m_unifiedGeometryTotal);
-				GpuSceneBuffer::getSingleton().getStats(in.m_gpuSceneExternalFragmentation, in.m_gpuSceneAllocated,
-														in.m_gpuSceneTotal);
+				UnifiedGeometryBuffer::getSingleton().getStats(in.m_unifiedGometryExternalFragmentation, in.m_unifiedGeometryAllocated,
+															   in.m_unifiedGeometryTotal);
+				GpuSceneBuffer::getSingleton().getStats(in.m_gpuSceneExternalFragmentation, in.m_gpuSceneAllocated, in.m_gpuSceneTotal);
 				in.m_gpuDeviceMemoryAllocated = grStats.m_deviceMemoryAllocated;
 				in.m_gpuDeviceMemoryInUse = grStats.m_deviceMemoryInUse;
 				in.m_reBar = rebarMemUsed;
@@ -551,9 +545,8 @@ Error App::mainLoop()
 				in.m_vkCommandBufferCount = grStats.m_commandBufferCount;
 
 				StatsUi& statsUi = *static_cast<StatsUi*>(m_statsUi.get());
-				const StatsUiDetail detail = (ConfigSet::getSingleton().getCoreDisplayStats() == 1)
-												 ? StatsUiDetail::kFpsOnly
-												 : StatsUiDetail::kDetailed;
+				const StatsUiDetail detail =
+					(ConfigSet::getSingleton().getCoreDisplayStats() == 1) ? StatsUiDetail::kFpsOnly : StatsUiDetail::kDetailed;
 				statsUi.setStats(in, detail);
 			}
 
@@ -569,8 +562,7 @@ Error App::mainLoop()
 
 			if(benchmarkMode) [[unlikely]]
 			{
-				if(GlobalFrameIndex::getSingleton().m_value
-				   >= ConfigSet::getSingleton().getCoreBenchmarkModeFrameCount())
+				if(GlobalFrameIndex::getSingleton().m_value >= ConfigSet::getSingleton().getCoreBenchmarkModeFrameCount())
 				{
 					quit = true;
 				}

+ 2 - 4
AnKi/Core/ConfigSet.cpp

@@ -174,8 +174,7 @@ Error ConfigSet::saveToFile(CString filename) const
 
 #define ANKI_NUMERIC_UINT(name) \
 	ANKI_CHECK(file.writeTextf("\t<!-- %s -->\n", m_##name.m_description.cstr())); \
-	ANKI_CHECK(file.writeTextf("\t<%s>%" PRIu64 "</%s>\n", m_##name.m_name.cstr(), U64(m_##name.m_value), \
-							   m_##name.m_name.cstr()));
+	ANKI_CHECK(file.writeTextf("\t<%s>%" PRIu64 "</%s>\n", m_##name.m_name.cstr(), U64(m_##name.m_value), m_##name.m_name.cstr()));
 
 #define ANKI_CONFIG_VAR_U8(name, defaultValue, minValue, maxValue, description) ANKI_NUMERIC_UINT(name)
 #define ANKI_CONFIG_VAR_U32(name, defaultValue, minValue, maxValue, description) ANKI_NUMERIC_UINT(name)
@@ -183,8 +182,7 @@ Error ConfigSet::saveToFile(CString filename) const
 #define ANKI_CONFIG_VAR_F32(name, defaultValue, minValue, maxValue, description) ANKI_NUMERIC_UINT(name)
 #define ANKI_CONFIG_VAR_BOOL(name, defaultValue, description) \
 	ANKI_CHECK(file.writeTextf("\t<!-- %s -->\n", m_##name.m_description.cstr())); \
-	ANKI_CHECK(file.writeTextf("\t<%s>%s</%s>\n", m_##name.m_name.cstr(), (m_##name.m_value) ? "true" : "false", \
-							   m_##name.m_name.cstr()));
+	ANKI_CHECK(file.writeTextf("\t<%s>%s</%s>\n", m_##name.m_name.cstr(), (m_##name.m_value) ? "true" : "false", m_##name.m_name.cstr()));
 #define ANKI_CONFIG_VAR_STRING(name, defaultValue, description) \
 	ANKI_CHECK(file.writeTextf("\t<!-- %s -->\n", m_##name.m_description.cstr())); \
 	ANKI_CHECK(file.writeTextf("\t<%s>%s</%s>\n", m_##name.m_name.cstr(), m_##name.m_value, m_##name.m_name.cstr()));

+ 1 - 2
AnKi/Core/ConfigVars.defs.h

@@ -21,5 +21,4 @@ ANKI_CONFIG_VAR_U32(CoreDisplayStats, 0, 0, 2, "Display stats, 0: None, 1: Simpl
 ANKI_CONFIG_VAR_BOOL(CoreClearCaches, false, "Clear all caches")
 ANKI_CONFIG_VAR_BOOL(CoreVerboseLog, false, "Verbose logging")
 ANKI_CONFIG_VAR_BOOL(CoreBenchmarkMode, false, "Run in a benchmark mode. Fixed timestep, unlimited target FPS")
-ANKI_CONFIG_VAR_U32(CoreBenchmarkModeFrameCount, 60 * 60 * 2, 1, kMaxU32,
-					"How many frames the benchmark will run before it quits")
+ANKI_CONFIG_VAR_U32(CoreBenchmarkModeFrameCount, 60 * 60 * 2, 1, kMaxU32, "How many frames the benchmark will run before it quits")

+ 3 - 6
AnKi/Core/CoreTracer.cpp

@@ -100,8 +100,7 @@ Error CoreTracer::init(CString directory)
 
 	std::tm tm = getLocalTime();
 	CoreString fname;
-	fname.sprintf("%s/%d%02d%02d-%02d%02d_", directory.cstr(), tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
-				  tm.tm_min);
+	fname.sprintf("%s/%d%02d%02d-%02d%02d_", directory.cstr(), tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min);
 
 	ANKI_CHECK(m_traceJsonFile.open(CoreString().sprintf("%strace.json", fname.cstr()), FileOpenFlag::kWrite));
 	ANKI_CHECK(m_traceJsonFile.writeText("[\n"));
@@ -174,8 +173,7 @@ Error CoreTracer::writeEvents(ThreadWorkItem& item)
 		const ThreadId tid = (event.m_name == "GPU_TIME") ? 1 : item.m_tid;
 
 		ANKI_CHECK(m_traceJsonFile.writeTextf("{\"name\": \"%s\", \"cat\": \"PERF\", \"ph\": \"X\", "
-											  "\"pid\": 1, \"tid\": %" PRIu64 ", \"ts\": %" PRIi64 ", \"dur\": %" PRIi64
-											  "},\n",
+											  "\"pid\": 1, \"tid\": %" PRIu64 ", \"ts\": %" PRIi64 ", \"dur\": %" PRIi64 "},\n",
 											  event.m_name.cstr(), tid, startMicroSec, durMicroSec));
 	}
 
@@ -360,8 +358,7 @@ Error CoreTracer::writeCountersForReal()
 		{
 			Array<char, 3> columnName;
 			getSpreadsheetColumnName(i + 1, columnName);
-			ANKI_CHECK(m_countersCsvFile.writeTextf(",=%s(%s2:%s%zu)", func, &columnName[0], &columnName[0],
-													m_frameCounters.getSize() + 1));
+			ANKI_CHECK(m_countersCsvFile.writeTextf(",=%s(%s2:%s%zu)", func, &columnName[0], &columnName[0], m_frameCounters.getSize() + 1));
 		}
 
 		ANKI_CHECK(m_countersCsvFile.writeText("\n"));

+ 3 - 5
AnKi/Core/DeveloperConsole.cpp

@@ -68,9 +68,8 @@ void DeveloperConsole::build(CanvasPtr ctx)
 		}
 
 		constexpr Array<const Char*, U(LoggerMessageType::kCount)> kMsgText = {"I", "E", "W", "F"};
-		ImGui::TextWrapped("[%s][%s] %s [%s:%d][%s][%s]", kMsgText[item.m_type],
-						   (item.m_subsystem) ? item.m_subsystem : "N/A ", item.m_msg.cstr(), item.m_file, item.m_line,
-						   item.m_func, item.m_threadName.cstr());
+		ImGui::TextWrapped("[%s][%s] %s [%s:%d][%s][%s]", kMsgText[item.m_type], (item.m_subsystem) ? item.m_subsystem : "N/A ", item.m_msg.cstr(),
+						   item.m_file, item.m_line, item.m_func, item.m_threadName.cstr());
 
 		ImGui::PopStyleColor();
 	}
@@ -89,8 +88,7 @@ void DeveloperConsole::build(CanvasPtr ctx)
 	// Commands
 	ImGui::Separator();
 	ImGui::PushItemWidth(-1.0f); // Use the whole size
-	if(ImGui::InputText("##noname", &m_inputText[0], m_inputText.getSizeInBytes(), ImGuiInputTextFlags_EnterReturnsTrue,
-						nullptr, nullptr))
+	if(ImGui::InputText("##noname", &m_inputText[0], m_inputText.getSizeInBytes(), ImGuiInputTextFlags_EnterReturnsTrue, nullptr, nullptr))
 	{
 		const Error err = m_scriptEnv.evalString(&m_inputText[0]);
 		if(!err)

+ 5 - 10
AnKi/Core/GpuMemory/GpuSceneBuffer.cpp

@@ -47,8 +47,7 @@ GpuSceneMicroPatcher::~GpuSceneMicroPatcher()
 
 Error GpuSceneMicroPatcher::init()
 {
-	ANKI_CHECK(ResourceManager::getSingleton().loadResource("ShaderBinaries/GpuSceneMicroPatching.ankiprogbin",
-															m_copyProgram));
+	ANKI_CHECK(ResourceManager::getSingleton().loadResource("ShaderBinaries/GpuSceneMicroPatching.ankiprogbin", m_copyProgram));
 	const ShaderProgramResourceVariant* variant;
 	m_copyProgram->getOrCreateVariant(variant);
 	m_grProgram = variant->getProgram();
@@ -56,8 +55,7 @@ Error GpuSceneMicroPatcher::init()
 	return Error::kNone;
 }
 
-void GpuSceneMicroPatcher::newCopy(StackMemoryPool& frameCpuPool, PtrSize gpuSceneDestOffset, PtrSize dataSize,
-								   const void* data)
+void GpuSceneMicroPatcher::newCopy(StackMemoryPool& frameCpuPool, PtrSize gpuSceneDestOffset, PtrSize dataSize, const void* data)
 {
 	ANKI_ASSERT(dataSize > 0 && (dataSize % 4) == 0);
 	ANKI_ASSERT((ptrToNumber(data) % 4) == 0);
@@ -112,18 +110,15 @@ void GpuSceneMicroPatcher::patchGpuScene(CommandBuffer& cmdb)
 	ANKI_TRACE_INC_COUNTER(GpuSceneMicroPatchUploadData, m_crntFramePatchData.getSizeInBytes());
 
 	RebarAllocation headersToken;
-	void* mapped =
-		RebarTransientMemoryPool::getSingleton().allocateFrame(m_crntFramePatchHeaders.getSizeInBytes(), headersToken);
+	void* mapped = RebarTransientMemoryPool::getSingleton().allocateFrame(m_crntFramePatchHeaders.getSizeInBytes(), headersToken);
 	memcpy(mapped, &m_crntFramePatchHeaders[0], m_crntFramePatchHeaders.getSizeInBytes());
 
 	RebarAllocation dataToken;
 	mapped = RebarTransientMemoryPool::getSingleton().allocateFrame(m_crntFramePatchData.getSizeInBytes(), dataToken);
 	memcpy(mapped, &m_crntFramePatchData[0], m_crntFramePatchData.getSizeInBytes());
 
-	cmdb.bindStorageBuffer(0, 0, RebarTransientMemoryPool::getSingleton().getBuffer(), headersToken.m_offset,
-						   headersToken.m_range);
-	cmdb.bindStorageBuffer(0, 1, RebarTransientMemoryPool::getSingleton().getBuffer(), dataToken.m_offset,
-						   dataToken.m_range);
+	cmdb.bindStorageBuffer(0, 0, RebarTransientMemoryPool::getSingleton().getBuffer(), headersToken.m_offset, headersToken.m_range);
+	cmdb.bindStorageBuffer(0, 1, RebarTransientMemoryPool::getSingleton().getBuffer(), dataToken.m_offset, dataToken.m_range);
 	cmdb.bindStorageBuffer(0, 2, GpuSceneBuffer::getSingleton().getBuffer(), 0, kMaxPtrSize);
 
 	cmdb.bindShaderProgram(m_grProgram);

+ 1 - 2
AnKi/Core/GpuMemory/GpuSceneBuffer.h

@@ -138,8 +138,7 @@ public:
 	}
 
 	/// @see newCopy
-	void newCopy(StackMemoryPool& frameCpuPool, const GpuSceneBufferAllocation& dest, PtrSize dataSize,
-				 const void* data)
+	void newCopy(StackMemoryPool& frameCpuPool, const GpuSceneBufferAllocation& dest, PtrSize dataSize, const void* data)
 	{
 		ANKI_ASSERT(dataSize <= dest.getAllocatedSize());
 		newCopy(frameCpuPool, dest.getOffset(), dataSize, data);

+ 4 - 6
AnKi/Core/GpuMemory/GpuVisibleTransientMemoryPool.h

@@ -55,14 +55,12 @@ private:
 	GpuVisibleTransientMemoryPool()
 	{
 		U32 alignment = GrManager::getSingleton().getDeviceCapabilities().m_uniformBufferBindOffsetAlignment;
-		alignment =
-			max(alignment, GrManager::getSingleton().getDeviceCapabilities().m_storageBufferBindOffsetAlignment);
+		alignment = max(alignment, GrManager::getSingleton().getDeviceCapabilities().m_storageBufferBindOffsetAlignment);
 		alignment = max(alignment, GrManager::getSingleton().getDeviceCapabilities().m_sbtRecordAlignment);
 
-		const BufferUsageBit buffUsage = BufferUsageBit::kAllUniform | BufferUsageBit::kAllStorage
-										 | BufferUsageBit::kIndirectDraw | BufferUsageBit::kVertex;
-		m_pool.init(10_MB, 2.0, 0, alignment, buffUsage, BufferMapAccessBit::kNone, true,
-					"GpuVisibleTransientMemoryPool");
+		const BufferUsageBit buffUsage =
+			BufferUsageBit::kAllUniform | BufferUsageBit::kAllStorage | BufferUsageBit::kIndirectDraw | BufferUsageBit::kVertex;
+		m_pool.init(10_MB, 2.0, 0, alignment, buffUsage, BufferMapAccessBit::kNone, true, "GpuVisibleTransientMemoryPool");
 	}
 
 	~GpuVisibleTransientMemoryPool() = default;

+ 3 - 4
AnKi/Core/GpuMemory/RebarTransientMemoryPool.cpp

@@ -24,15 +24,14 @@ void RebarTransientMemoryPool::init()
 	BufferInitInfo buffInit("ReBar");
 	buffInit.m_mapAccess = BufferMapAccessBit::kWrite;
 	buffInit.m_size = ConfigSet::getSingleton().getCoreRebarGpuMemorySize();
-	buffInit.m_usage = BufferUsageBit::kAllUniform | BufferUsageBit::kAllStorage | BufferUsageBit::kVertex
-					   | BufferUsageBit::kIndex | BufferUsageBit::kShaderBindingTable;
+	buffInit.m_usage = BufferUsageBit::kAllUniform | BufferUsageBit::kAllStorage | BufferUsageBit::kVertex | BufferUsageBit::kIndex
+					   | BufferUsageBit::kShaderBindingTable;
 	m_buffer = GrManager::getSingleton().newBuffer(buffInit);
 
 	m_bufferSize = buffInit.m_size;
 
 	m_alignment = GrManager::getSingleton().getDeviceCapabilities().m_uniformBufferBindOffsetAlignment;
-	m_alignment =
-		max(m_alignment, GrManager::getSingleton().getDeviceCapabilities().m_storageBufferBindOffsetAlignment);
+	m_alignment = max(m_alignment, GrManager::getSingleton().getDeviceCapabilities().m_storageBufferBindOffsetAlignment);
 	m_alignment = max(m_alignment, GrManager::getSingleton().getDeviceCapabilities().m_sbtRecordAlignment);
 
 	m_mappedMem = static_cast<U8*>(m_buffer->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));

+ 10 - 6
AnKi/Core/GpuMemory/RebarTransientMemoryPool.h

@@ -49,20 +49,22 @@ class RebarTransientMemoryPool : public MakeSingleton<RebarTransientMemoryPool>
 public:
 	RebarTransientMemoryPool(const RebarTransientMemoryPool&) = delete; // Non-copyable
 
-	~RebarTransientMemoryPool();
-
 	RebarTransientMemoryPool& operator=(const RebarTransientMemoryPool&) = delete; // Non-copyable
 
 	void init();
 
 	PtrSize endFrame();
 
-	/// Allocate staging memory for various operations. The memory will be reclaimed at the begining of the
-	/// N-(kMaxFramesInFlight-1) frame.
+	/// Allocate staging memory for various operations. The memory will be reclaimed at the begining of the N-(kMaxFramesInFlight-1) frame.
 	void* allocateFrame(PtrSize size, RebarAllocation& token);
 
-	/// Allocate staging memory for various operations. The memory will be reclaimed at the begining of the
-	/// N-(kMaxFramesInFlight-1) frame.
+	template<typename T>
+	T* allocateFrame(U32 count, RebarAllocation& token)
+	{
+		return static_cast<T*>(allocateFrame(count * sizeof(T), token));
+	}
+
+	/// Allocate staging memory for various operations. The memory will be reclaimed at the begining of the N-(kMaxFramesInFlight-1) frame.
 	void* tryAllocateFrame(PtrSize size, RebarAllocation& token);
 
 	ANKI_PURE const BufferPtr& getBuffer() const
@@ -84,6 +86,8 @@ private:
 	U32 m_alignment = 0;
 
 	RebarTransientMemoryPool() = default;
+
+	~RebarTransientMemoryPool();
 };
 /// @}
 

+ 1 - 2
AnKi/Core/MaliHwCounters.cpp

@@ -17,8 +17,7 @@ MaliHwCounters::MaliHwCounters()
 {
 #if ANKI_HWCPIPE_ENABLE
 	const hwcpipe::CpuCounterSet cpuCounters;
-	const hwcpipe::GpuCounterSet gpuCounters = {hwcpipe::GpuCounter::GpuCycles,
-												hwcpipe::GpuCounter::ExternalMemoryWriteBytes,
+	const hwcpipe::GpuCounterSet gpuCounters = {hwcpipe::GpuCounter::GpuCycles, hwcpipe::GpuCounter::ExternalMemoryWriteBytes,
 												hwcpipe::GpuCounter::ExternalMemoryReadBytes};
 	hwcpipe::HWCPipe* hwc = newInstance<hwcpipe::HWCPipe>(CoreMemoryPool::getSingleton(), cpuCounters, gpuCounters);
 

+ 2 - 4
AnKi/Core/StatsUi.cpp

@@ -128,8 +128,7 @@ void StatsUi::build(CanvasPtr canvas)
 	ImGui::Text("----"); \
 	ImGui::Text(x); \
 	ImGui::Text("----");
-#define ANKI_STATS_UI_VALUE(type, name, text, flags) \
-	writeText(m_##name, text, flags, std::is_floating_point<type>::value);
+#define ANKI_STATS_UI_VALUE(type, name, text, flags) writeText(m_##name, text, flags, std::is_floating_point<type>::value);
 #include <AnKi/Core/StatsUi.defs.h>
 #undef ANKI_STATS_UI_BEGIN_GROUP
 #undef ANKI_STATS_UI_VALUE
@@ -139,8 +138,7 @@ void StatsUi::build(CanvasPtr canvas)
 			const Second maxTime = max(m_cpuFrameTime.m_float, m_gpuFrameTime.m_float);
 			const F32 fps = F32(1.0 / maxTime);
 			const Bool cpuBound = m_cpuFrameTime.m_float > m_gpuFrameTime.m_float;
-			ImGui::TextColored((cpuBound) ? Vec4(1.0f, 0.5f, 0.5f, 1.0f) : Vec4(0.5f, 1.0f, 0.5f, 1.0f), "FPS %.1f",
-							   fps);
+			ImGui::TextColored((cpuBound) ? Vec4(1.0f, 0.5f, 0.5f, 1.0f) : Vec4(0.5f, 1.0f, 0.5f, 1.0f), "FPS %.1f", fps);
 		}
 	}
 

+ 2 - 3
AnKi/Gr/AccelerationStructure.h

@@ -31,9 +31,8 @@ public:
 
 	Bool isValid() const
 	{
-		if(m_indexBuffer.get() == nullptr || m_indexCount == 0 || m_indexType == IndexType::kCount
-		   || m_positionBuffer.get() == nullptr || m_positionStride == 0 || m_positionsFormat == Format::kNone
-		   || m_positionCount == 0)
+		if(m_indexBuffer.get() == nullptr || m_indexCount == 0 || m_indexType == IndexType::kCount || m_positionBuffer.get() == nullptr
+		   || m_positionStride == 0 || m_positionsFormat == Format::kNone || m_positionCount == 0)
 		{
 			return false;
 		}

+ 16 - 26
AnKi/Gr/CommandBuffer.h

@@ -102,8 +102,7 @@ public:
 	/// @{
 
 	/// Bind vertex buffer.
-	void bindVertexBuffer(U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize stride,
-						  VertexStepRate stepRate = VertexStepRate::kVertex);
+	void bindVertexBuffer(U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize stride, VertexStepRate stepRate = VertexStepRate::kVertex);
 
 	/// Setup a vertex attribute.
 	void setVertexAttribute(U32 location, U32 buffBinding, Format fmt, PtrSize relativeOffset);
@@ -131,8 +130,8 @@ public:
 	void setPolygonOffset(F32 factor, F32 units);
 
 	/// Set stencil operations. To disable stencil test put StencilOperation::KEEP to all operations.
-	void setStencilOperations(FaceSelectionBit face, StencilOperation stencilFail,
-							  StencilOperation stencilPassDepthFail, StencilOperation stencilPassDepthPass);
+	void setStencilOperations(FaceSelectionBit face, StencilOperation stencilFail, StencilOperation stencilPassDepthFail,
+							  StencilOperation stencilPassDepthPass);
 
 	/// Set stencil compare operation.
 	void setStencilCompareOperation(FaceSelectionBit face, CompareOperation comp);
@@ -192,8 +191,7 @@ public:
 	/// @param texView The texture view to bind.
 	/// @param sampler The sampler to override the default sampler of the tex.
 	/// @param arrayIdx The array index if the binding is an array.
-	void bindTextureAndSampler(U32 set, U32 binding, const TextureViewPtr& texView, const SamplerPtr& sampler,
-							   U32 arrayIdx = 0);
+	void bindTextureAndSampler(U32 set, U32 binding, const TextureViewPtr& texView, const SamplerPtr& sampler, U32 arrayIdx = 0);
 
 	/// Bind sampler.
 	/// @param set The set to bind to.
@@ -217,8 +215,7 @@ public:
 	/// @param range The bytes to bind starting from the offset. If it's kMaxPtrSize then map from offset to the end
 	///              of the buffer.
 	/// @param arrayIdx The array index if the binding is an array.
-	void bindUniformBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range,
-						   U32 arrayIdx = 0);
+	void bindUniformBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range, U32 arrayIdx = 0);
 
 	/// Bind storage buffer.
 	/// @param set The set to bind to.
@@ -228,8 +225,7 @@ public:
 	/// @param range The bytes to bind starting from the offset. If it's kMaxPtrSize then map from offset to the end
 	///              of the buffer.
 	/// @param arrayIdx The array index if the binding is an array.
-	void bindStorageBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range,
-						   U32 arrayIdx = 0);
+	void bindStorageBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range, U32 arrayIdx = 0);
 
 	/// Bind load/store image.
 	/// @param set The set to bind to.
@@ -247,8 +243,7 @@ public:
 	///              of the buffer.
 	/// @param fmt The format of the buffer.
 	/// @param arrayIdx The array index if the binding is an array.
-	void bindReadOnlyTextureBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range,
-								   Format fmt, U32 arrayIdx = 0);
+	void bindReadOnlyTextureBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range, Format fmt, U32 arrayIdx = 0);
 
 	/// Bind an acceleration structure.
 	/// @param set The set to bind to.
@@ -269,10 +264,8 @@ public:
 	/// Begin renderpass.
 	/// The minx, miny, width, height control the area that the load and store operations will happen. If the scissor is
 	/// bigger than the render area the results are undefined.
-	void beginRenderPass(const FramebufferPtr& fb,
-						 const Array<TextureUsageBit, kMaxColorRenderTargets>& colorAttachmentUsages,
-						 TextureUsageBit depthStencilAttachmentUsage, U32 minx = 0, U32 miny = 0, U32 width = kMaxU32,
-						 U32 height = kMaxU32);
+	void beginRenderPass(const FramebufferPtr& fb, const Array<TextureUsageBit, kMaxColorRenderTargets>& colorAttachmentUsages,
+						 TextureUsageBit depthStencilAttachmentUsage, U32 minx = 0, U32 miny = 0, U32 width = kMaxU32, U32 height = kMaxU32);
 
 	/// End renderpass.
 	void endRenderPass();
@@ -283,8 +276,7 @@ public:
 
 	/// @name Jobs
 	/// @{
-	void drawElements(PrimitiveTopology topology, U32 count, U32 instanceCount = 1, U32 firstIndex = 0,
-					  U32 baseVertex = 0, U32 baseInstance = 0);
+	void drawElements(PrimitiveTopology topology, U32 count, U32 instanceCount = 1, U32 firstIndex = 0, U32 baseVertex = 0, U32 baseInstance = 0);
 
 	void drawArrays(PrimitiveTopology topology, U32 count, U32 instanceCount = 1, U32 first = 0, U32 baseInstance = 0);
 
@@ -323,8 +315,8 @@ public:
 	/// @param width Width.
 	/// @param height Height.
 	/// @param depth Depth.
-	void traceRays(const BufferPtr& sbtBuffer, PtrSize sbtBufferOffset, U32 sbtRecordSize, U32 hitGroupSbtRecordCount,
-				   U32 rayTypeCount, U32 width, U32 height, U32 depth);
+	void traceRays(const BufferPtr& sbtBuffer, PtrSize sbtBufferOffset, U32 sbtRecordSize, U32 hitGroupSbtRecordCount, U32 rayTypeCount, U32 width,
+				   U32 height, U32 depth);
 
 	/// Generate mipmaps for non-3D textures. You have to transition all the mip levels of this face and layer to
 	/// TextureUsageBit::kGenerateMipmaps before calling this method.
@@ -364,8 +356,7 @@ public:
 	/// @param[in] queries The queries to write the result of.
 	/// @param offset The offset inside the buffer to write the result.
 	/// @param buff The buffer to update.
-	void writeOcclusionQueriesResultToBuffer(ConstWeakArray<OcclusionQuery*> queries, PtrSize offset,
-											 const BufferPtr& buff);
+	void writeOcclusionQueriesResultToBuffer(ConstWeakArray<OcclusionQuery*> queries, PtrSize offset, const BufferPtr& buff);
 
 	/// Copy buffer to buffer.
 	/// @param[in] src Source buffer.
@@ -373,8 +364,7 @@ public:
 	/// @param[out] dst Destination buffer.
 	/// @param dstOffset Offset in the destination buffer.
 	/// @param range Size to copy.
-	void copyBufferToBuffer(const BufferPtr& src, PtrSize srcOffset, const BufferPtr& dst, PtrSize dstOffset,
-							PtrSize range)
+	void copyBufferToBuffer(const BufferPtr& src, PtrSize srcOffset, const BufferPtr& dst, PtrSize dstOffset, PtrSize range)
 	{
 		Array<CopyBufferToBufferInfo, 1> copies = {{{srcOffset, dstOffset, range}}};
 		copyBufferToBuffer(src, dst, copies);
@@ -401,8 +391,8 @@ public:
 	/// @param[in] motionVectorsScale Any scale factor that might need to be applied to the motionVectorsTexture (i.e UV
 	///                               space to Pixel space conversion)
 	void upscale(const GrUpscalerPtr& upscaler, const TextureViewPtr& inColor, const TextureViewPtr& outUpscaledColor,
-				 const TextureViewPtr& motionVectors, const TextureViewPtr& depth, const TextureViewPtr& exposure,
-				 const Bool resetAccumulation, const Vec2& jitterOffset, const Vec2& motionVectorsScale);
+				 const TextureViewPtr& motionVectors, const TextureViewPtr& depth, const TextureViewPtr& exposure, const Bool resetAccumulation,
+				 const Vec2& jitterOffset, const Vec2& motionVectorsScale);
 	/// @}
 
 	/// @name Sync

+ 3 - 11
AnKi/Gr/Common.cpp

@@ -11,8 +11,7 @@ namespace anki {
 
 /// @warning Don't use Array because the compilers can't handle it for some reason.
 inline constexpr ShaderVariableDataTypeInfo kShaderVariableDataTypeInfos[] = {
-#define ANKI_SVDT_MACRO(type, baseType, rowCount, columnCount, isIntagralType) \
-	{ANKI_STRINGIZE(type), sizeof(type), false, isIntagralType},
+#define ANKI_SVDT_MACRO(type, baseType, rowCount, columnCount, isIntagralType) {ANKI_STRINGIZE(type), sizeof(type), false, isIntagralType},
 #define ANKI_SVDT_MACRO_OPAQUE(constant, type) {ANKI_STRINGIZE(type), kMaxU32, true, false},
 #include <AnKi/Gr/ShaderVariableDataType.defs.h>
 #undef ANKI_SVDT_MACRO
@@ -35,16 +34,9 @@ FormatInfo getFormatInfo(Format fmt)
 	FormatInfo out = {};
 	switch(fmt)
 	{
-#define ANKI_FORMAT_DEF(type, id, componentCount, texelSize, blockWidth, blockHeight, blockSize, shaderType, \
-						depthStencil) \
+#define ANKI_FORMAT_DEF(type, id, componentCount, texelSize, blockWidth, blockHeight, blockSize, shaderType, depthStencil) \
 	case Format::k##type: \
-		out = {componentCount, \
-			   texelSize, \
-			   blockWidth, \
-			   blockHeight, \
-			   blockSize, \
-			   shaderType, \
-			   DepthStencilAspectBit::k##depthStencil, \
+		out = {componentCount,      texelSize, blockWidth, blockHeight, blockSize, shaderType, DepthStencilAspectBit::k##depthStencil, \
 			   ANKI_STRINGIZE(type)}; \
 		break;
 #include <AnKi/Gr/Format.defs.h>

+ 30 - 41
AnKi/Gr/Common.h

@@ -136,8 +136,7 @@ enum class GpuVendor : U8
 	kCount
 };
 
-inline constexpr Array<CString, U(GpuVendor::kCount)> kGPUVendorStrings = {"unknown", "ARM",   "nVidia",
-																		   "AMD",     "Intel", "Qualcomm"};
+inline constexpr Array<CString, U(GpuVendor::kCount)> kGPUVendorStrings = {"unknown", "ARM", "nVidia", "AMD", "Intel", "Qualcomm"};
 
 /// Device capabilities.
 ANKI_BEGIN_PACKED_STRUCT
@@ -389,9 +388,7 @@ enum class Format : U32
 {
 	kNone = 0,
 
-#define ANKI_FORMAT_DEF(type, id, componentCount, texelSize, blockWidth, blockHeight, blockSize, shaderType, \
-						depthStencil) \
-	k##type = id,
+#define ANKI_FORMAT_DEF(type, id, componentCount, texelSize, blockWidth, blockHeight, blockSize, shaderType, depthStencil) k##type = id,
 #include <AnKi/Gr/Format.defs.h>
 #undef ANKI_FORMAT_DEF
 };
@@ -487,19 +484,19 @@ enum class TextureUsageBit : U32
 
 	// Derived
 	kAllSampled = kSampledGeometry | kSampledFragment | kSampledCompute | kSampledTraceRays,
-	kAllImage = kImageGeometryRead | kImageGeometryWrite | kImageFragmentRead | kImageFragmentWrite | kImageComputeRead
-				| kImageComputeWrite | kImageTraceRaysRead | kImageTraceRaysWrite,
+	kAllImage = kImageGeometryRead | kImageGeometryWrite | kImageFragmentRead | kImageFragmentWrite | kImageComputeRead | kImageComputeWrite
+				| kImageTraceRaysRead | kImageTraceRaysWrite,
 	kAllFramebuffer = kFramebufferRead | kFramebufferWrite,
 
-	kAllGraphics = kSampledGeometry | kSampledFragment | kImageGeometryRead | kImageGeometryWrite | kImageFragmentRead
-				   | kImageFragmentWrite | kFramebufferRead | kFramebufferWrite | kFramebufferShadingRate,
+	kAllGraphics = kSampledGeometry | kSampledFragment | kImageGeometryRead | kImageGeometryWrite | kImageFragmentRead | kImageFragmentWrite
+				   | kFramebufferRead | kFramebufferWrite | kFramebufferShadingRate,
 	kAllCompute = kSampledCompute | kImageComputeRead | kImageComputeWrite,
 	kAllTransfer = kTransferDestination | kGenerateMipmaps,
 
-	kAllRead = kAllSampled | kImageGeometryRead | kImageFragmentRead | kImageComputeRead | kImageTraceRaysRead
-			   | kFramebufferRead | kFramebufferShadingRate | kPresent | kGenerateMipmaps,
-	kAllWrite = kImageGeometryWrite | kImageFragmentWrite | kImageComputeWrite | kImageTraceRaysWrite
-				| kFramebufferWrite | kTransferDestination | kGenerateMipmaps,
+	kAllRead = kAllSampled | kImageGeometryRead | kImageFragmentRead | kImageComputeRead | kImageTraceRaysRead | kFramebufferRead
+			   | kFramebufferShadingRate | kPresent | kGenerateMipmaps,
+	kAllWrite = kImageGeometryWrite | kImageFragmentWrite | kImageComputeWrite | kImageTraceRaysWrite | kFramebufferWrite | kTransferDestination
+				| kGenerateMipmaps,
 };
 ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS(TextureUsageBit)
 
@@ -658,8 +655,7 @@ enum class AttachmentStoreOperation : U8
 };
 
 /// Buffer usage modes.
-/// The graphics work consists of the following pipes: indirect, geometry (all programmable and fixed function geometry
-/// stages) and finaly fragment.
+/// The graphics work consists of the following pipes: indirect, geometry (all programmable and fixed function geometry stages) and finaly fragment.
 /// The compute from the consists of the following: indirect and compute.
 /// The trace rays from the: indirect and trace_rays
 /// !!WARNING!! If you change this remember to change PrivateBufferUsageBit.
@@ -705,31 +701,26 @@ enum class BufferUsageBit : U64
 
 	// Derived
 	kAllUniform = kUniformGeometry | kUniformFragment | kUniformCompute | kUniformTraceRays,
-	kAllStorage = kStorageGeometryRead | kStorageGeometryWrite | kStorageFragmentRead | kStorageFragmentWrite
-				  | kStorageComputeRead | kStorageComputeWrite | kStorageTraceRaysRead | kStorageTraceRaysWrite,
-	kAllTexture = kTextureGeometryRead | kTextureGeometryWrite | kTextureFragmentRead | kTextureFragmentWrite
-				  | kTextureComputeRead | kTextureComputeWrite | kTextureTraceRaysRead | kTextureTraceRaysWrite,
+	kAllStorage = kStorageGeometryRead | kStorageGeometryWrite | kStorageFragmentRead | kStorageFragmentWrite | kStorageComputeRead
+				  | kStorageComputeWrite | kStorageTraceRaysRead | kStorageTraceRaysWrite,
+	kAllTexture = kTextureGeometryRead | kTextureGeometryWrite | kTextureFragmentRead | kTextureFragmentWrite | kTextureComputeRead
+				  | kTextureComputeWrite | kTextureTraceRaysRead | kTextureTraceRaysWrite,
 	kAllIndirect = kIndirectCompute | kIndirectDraw | kIndirectTraceRays,
 	kAllTransfer = kTransferSource | kTransferDestination,
 
-	kAllGeometry = kUniformGeometry | kStorageGeometryRead | kStorageGeometryWrite | kTextureGeometryRead
-				   | kTextureGeometryWrite | kIndex | kVertex,
-	kAllFragment =
-		kUniformFragment | kStorageFragmentRead | kStorageFragmentWrite | kTextureFragmentRead | kTextureFragmentWrite,
+	kAllGeometry = kUniformGeometry | kStorageGeometryRead | kStorageGeometryWrite | kTextureGeometryRead | kTextureGeometryWrite | kIndex | kVertex,
+	kAllFragment = kUniformFragment | kStorageFragmentRead | kStorageFragmentWrite | kTextureFragmentRead | kTextureFragmentWrite,
 	kAllGraphics = kAllGeometry | kAllFragment | kIndirectDraw,
-	kAllCompute = kUniformCompute | kStorageComputeRead | kStorageComputeWrite | kTextureComputeRead
-				  | kTextureComputeWrite | kIndirectCompute,
-	kAllTraceRays = kUniformTraceRays | kStorageTraceRaysRead | kStorageTraceRaysWrite | kTextureTraceRaysRead
-					| kTextureTraceRaysWrite | kIndirectTraceRays | kShaderBindingTable,
+	kAllCompute = kUniformCompute | kStorageComputeRead | kStorageComputeWrite | kTextureComputeRead | kTextureComputeWrite | kIndirectCompute,
+	kAllTraceRays = kUniformTraceRays | kStorageTraceRaysRead | kStorageTraceRaysWrite | kTextureTraceRaysRead | kTextureTraceRaysWrite
+					| kIndirectTraceRays | kShaderBindingTable,
 
 	kAllRayTracing = kAllTraceRays | kAccelerationStructureBuild,
-	kAllRead = kAllUniform | kStorageGeometryRead | kStorageFragmentRead | kStorageComputeRead | kStorageTraceRaysRead
-			   | kTextureGeometryRead | kTextureFragmentRead | kTextureComputeRead | kTextureTraceRaysRead | kIndex
-			   | kVertex | kIndirectCompute | kIndirectDraw | kIndirectTraceRays | kTransferSource
-			   | kAccelerationStructureBuild | kShaderBindingTable,
-	kAllWrite = kStorageGeometryWrite | kStorageFragmentWrite | kStorageComputeWrite | kStorageTraceRaysWrite
-				| kTextureGeometryWrite | kTextureFragmentWrite | kTextureComputeWrite | kTextureTraceRaysWrite
-				| kTransferDestination,
+	kAllRead = kAllUniform | kStorageGeometryRead | kStorageFragmentRead | kStorageComputeRead | kStorageTraceRaysRead | kTextureGeometryRead
+			   | kTextureFragmentRead | kTextureComputeRead | kTextureTraceRaysRead | kIndex | kVertex | kIndirectCompute | kIndirectDraw
+			   | kIndirectTraceRays | kTransferSource | kAccelerationStructureBuild | kShaderBindingTable,
+	kAllWrite = kStorageGeometryWrite | kStorageFragmentWrite | kStorageComputeWrite | kStorageTraceRaysWrite | kTextureGeometryWrite
+				| kTextureFragmentWrite | kTextureComputeWrite | kTextureTraceRaysWrite | kTransferDestination,
 	kAll = kAllRead | kAllWrite,
 };
 ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS(BufferUsageBit)
@@ -929,8 +920,7 @@ public:
 
 	TextureSubresourceInfo(const TextureSubresourceInfo&) = default;
 
-	constexpr TextureSubresourceInfo(const TextureSurfaceInfo& surf,
-									 DepthStencilAspectBit aspect = DepthStencilAspectBit::kNone)
+	constexpr TextureSubresourceInfo(const TextureSurfaceInfo& surf, DepthStencilAspectBit aspect = DepthStencilAspectBit::kNone)
 		: m_firstMipmap(surf.m_level)
 		, m_mipmapCount(1)
 		, m_firstLayer(surf.m_layer)
@@ -941,8 +931,7 @@ public:
 	{
 	}
 
-	constexpr TextureSubresourceInfo(const TextureVolumeInfo& vol,
-									 DepthStencilAspectBit aspect = DepthStencilAspectBit::kNone)
+	constexpr TextureSubresourceInfo(const TextureVolumeInfo& vol, DepthStencilAspectBit aspect = DepthStencilAspectBit::kNone)
 		: m_firstMipmap(vol.m_level)
 		, m_mipmapCount(1)
 		, m_firstLayer(0)
@@ -979,9 +968,9 @@ public:
 			return (beginA < beginB) ? (beginA + countA > beginB) : (beginB + countB > beginA);
 		};
 
-		const Bool depthStencilOverlaps = (m_depthStencilAspect == DepthStencilAspectBit::kNone
-										   && b.m_depthStencilAspect == DepthStencilAspectBit::kNone)
-										  || !!(m_depthStencilAspect & b.m_depthStencilAspect);
+		const Bool depthStencilOverlaps =
+			(m_depthStencilAspect == DepthStencilAspectBit::kNone && b.m_depthStencilAspect == DepthStencilAspectBit::kNone)
+			|| !!(m_depthStencilAspect & b.m_depthStencilAspect);
 
 		return overlaps(m_firstMipmap, m_mipmapCount, b.m_firstMipmap, b.m_mipmapCount)
 			   && overlaps(m_firstLayer, m_layerCount, b.m_firstLayer, b.m_layerCount)

+ 33 - 60
AnKi/Gr/Gl/CommandBuffer.cpp

@@ -44,14 +44,11 @@ void CommandBuffer::flush(FencePtr* fence)
 
 	if(!self.isSecondLevel())
 	{
-		static_cast<GrManagerImpl&>(getManager())
-			.getRenderingThread()
-			.flushCommandBuffer(CommandBufferPtr(this), fence);
+		static_cast<GrManagerImpl&>(getManager()).getRenderingThread().flushCommandBuffer(CommandBufferPtr(this), fence);
 	}
 }
 
-void CommandBuffer::bindVertexBuffer(U32 binding, BufferPtr buff, PtrSize offset, PtrSize stride,
-									 VertexStepRate stepRate)
+void CommandBuffer::bindVertexBuffer(U32 binding, BufferPtr buff, PtrSize offset, PtrSize stride, VertexStepRate stepRate)
 {
 	class Cmd final : public GlCommand
 	{
@@ -350,8 +347,8 @@ void CommandBuffer::setPolygonOffset(F32 factor, F32 units)
 	}
 }
 
-void CommandBuffer::setStencilOperations(FaceSelectionBit face, StencilOperation stencilFail,
-										 StencilOperation stencilPassDepthFail, StencilOperation stencilPassDepthPass)
+void CommandBuffer::setStencilOperations(FaceSelectionBit face, StencilOperation stencilFail, StencilOperation stencilPassDepthFail,
+										 StencilOperation stencilPassDepthPass)
 {
 	class Cmd final : public GlCommand
 	{
@@ -379,8 +376,7 @@ void CommandBuffer::setStencilOperations(FaceSelectionBit face, StencilOperation
 	ANKI_GL_SELF(CommandBufferImpl);
 	if(self.m_state.setStencilOperations(face, stencilFail, stencilPassDepthFail, stencilPassDepthPass))
 	{
-		self.pushBackNewCommand<Cmd>(convertFaceMode(face), convertStencilOperation(stencilFail),
-									 convertStencilOperation(stencilPassDepthFail),
+		self.pushBackNewCommand<Cmd>(convertFaceMode(face), convertStencilOperation(stencilFail), convertStencilOperation(stencilPassDepthFail),
 									 convertStencilOperation(stencilPassDepthPass));
 	}
 }
@@ -540,8 +536,7 @@ void CommandBuffer::setColorChannelWriteMask(U32 attachment, ColorBit mask)
 	}
 }
 
-void CommandBuffer::setBlendFactors(U32 attachment, BlendFactor srcRgb, BlendFactor dstRgb, BlendFactor srcA,
-									BlendFactor dstA)
+void CommandBuffer::setBlendFactors(U32 attachment, BlendFactor srcRgb, BlendFactor dstRgb, BlendFactor srcA, BlendFactor dstA)
 {
 	class Cmd final : public GlCommand
 	{
@@ -571,8 +566,8 @@ void CommandBuffer::setBlendFactors(U32 attachment, BlendFactor srcRgb, BlendFac
 	ANKI_GL_SELF(CommandBufferImpl);
 	if(self.m_state.setBlendFactors(attachment, srcRgb, dstRgb, srcA, dstA))
 	{
-		self.pushBackNewCommand<Cmd>(attachment, convertBlendFactor(srcRgb), convertBlendFactor(dstRgb),
-									 convertBlendFactor(srcA), convertBlendFactor(dstA));
+		self.pushBackNewCommand<Cmd>(attachment, convertBlendFactor(srcRgb), convertBlendFactor(dstRgb), convertBlendFactor(srcA),
+									 convertBlendFactor(dstA));
 	}
 }
 
@@ -606,8 +601,7 @@ void CommandBuffer::setBlendOperation(U32 attachment, BlendOperation funcRgb, Bl
 	}
 }
 
-void CommandBuffer::bindTextureAndSampler(U32 set, U32 binding, TextureViewPtr texView, SamplerPtr sampler,
-										  TextureUsageBit usage)
+void CommandBuffer::bindTextureAndSampler(U32 set, U32 binding, TextureViewPtr texView, SamplerPtr sampler, TextureUsageBit usage)
 {
 	class Cmd final : public GlCommand
 	{
@@ -831,10 +825,8 @@ void CommandBuffer::bindShaderProgram(ShaderProgramPtr prog)
 	}
 }
 
-void CommandBuffer::beginRenderPass(FramebufferPtr fb,
-									const Array<TextureUsageBit, kMaxColorRenderTargets>& colorAttachmentUsages,
-									TextureUsageBit depthStencilAttachmentUsage, U32 minx, U32 miny, U32 width,
-									U32 height)
+void CommandBuffer::beginRenderPass(FramebufferPtr fb, const Array<TextureUsageBit, kMaxColorRenderTargets>& colorAttachmentUsages,
+									TextureUsageBit depthStencilAttachmentUsage, U32 minx, U32 miny, U32 width, U32 height)
 {
 	class BindFramebufferCommand final : public GlCommand
 	{
@@ -850,8 +842,7 @@ void CommandBuffer::beginRenderPass(FramebufferPtr fb,
 
 		Error operator()(GlState& state)
 		{
-			static_cast<const FramebufferImpl&>(*m_fb).bind(state, m_renderArea[0], m_renderArea[1], m_renderArea[2],
-															m_renderArea[3]);
+			static_cast<const FramebufferImpl&>(*m_fb).bind(state, m_renderArea[0], m_renderArea[1], m_renderArea[2], m_renderArea[3]);
 			return Error::kNone;
 		}
 	};
@@ -888,8 +879,7 @@ void CommandBuffer::endRenderPass()
 	self.m_state.endRenderPass();
 }
 
-void CommandBuffer::drawElements(PrimitiveTopology topology, U32 count, U32 instanceCount, U32 firstIndex,
-								 U32 baseVertex, U32 baseInstance)
+void CommandBuffer::drawElements(PrimitiveTopology topology, U32 count, U32 instanceCount, U32 firstIndex, U32 baseVertex, U32 baseInstance)
 {
 	class Cmd final : public GlCommand
 	{
@@ -907,9 +897,8 @@ void CommandBuffer::drawElements(PrimitiveTopology topology, U32 count, U32 inst
 
 		Error operator()(GlState&)
 		{
-			glDrawElementsInstancedBaseVertexBaseInstance(
-				m_topology, m_info.m_count, m_indexType, numberToPtr<void*>(m_info.m_firstIndex),
-				m_info.m_instanceCount, m_info.m_baseVertex, m_info.m_baseInstance);
+			glDrawElementsInstancedBaseVertexBaseInstance(m_topology, m_info.m_count, m_indexType, numberToPtr<void*>(m_info.m_firstIndex),
+														  m_info.m_instanceCount, m_info.m_baseVertex, m_info.m_baseInstance);
 
 			ANKI_TRACE_INC_COUNTER(GR_DRAWCALLS, 1);
 			ANKI_TRACE_INC_COUNTER(GR_VERTICES, m_info.m_instanceCount * m_info.m_count);
@@ -955,8 +944,7 @@ void CommandBuffer::drawArrays(PrimitiveTopology topology, U32 count, U32 instan
 
 		Error operator()(GlState& state)
 		{
-			glDrawArraysInstancedBaseInstance(m_topology, m_info.m_first, m_info.m_count, m_info.m_instanceCount,
-											  m_info.m_baseInstance);
+			glDrawArraysInstancedBaseInstance(m_topology, m_info.m_first, m_info.m_count, m_info.m_instanceCount, m_info.m_baseInstance);
 
 			ANKI_TRACE_INC_COUNTER(GR_DRAWCALLS, 1);
 			ANKI_TRACE_INC_COUNTER(GR_VERTICES, m_info.m_instanceCount * m_info.m_count);
@@ -973,8 +961,7 @@ void CommandBuffer::drawArrays(PrimitiveTopology topology, U32 count, U32 instan
 	self.pushBackNewCommand<DrawArraysCommand>(convertPrimitiveTopology(topology), info);
 }
 
-void CommandBuffer::drawElementsIndirect(PrimitiveTopology topology, U32 drawCount, PtrSize offset,
-										 BufferPtr indirectBuff)
+void CommandBuffer::drawElementsIndirect(PrimitiveTopology topology, U32 drawCount, PtrSize offset, BufferPtr indirectBuff)
 {
 	class DrawElementsIndirectCommand final : public GlCommand
 	{
@@ -1004,8 +991,7 @@ void CommandBuffer::drawElementsIndirect(PrimitiveTopology topology, U32 drawCou
 
 			glBindBuffer(GL_DRAW_INDIRECT_BUFFER, buff.getGlName());
 
-			glMultiDrawElementsIndirect(m_topology, m_indexType, numberToPtr<void*>(m_offset), m_drawCount,
-										sizeof(DrawElementsIndirectInfo));
+			glMultiDrawElementsIndirect(m_topology, m_indexType, numberToPtr<void*>(m_offset), m_drawCount, sizeof(DrawElementsIndirectInfo));
 
 			glBindBuffer(GL_DRAW_INDIRECT_BUFFER, 0);
 			return Error::kNone;
@@ -1016,12 +1002,11 @@ void CommandBuffer::drawElementsIndirect(PrimitiveTopology topology, U32 drawCou
 
 	self.m_state.checkIndexedDracall();
 	self.flushDrawcall(*this);
-	self.pushBackNewCommand<DrawElementsIndirectCommand>(
-		convertPrimitiveTopology(topology), self.m_state.m_idx.m_indexType, drawCount, offset, indirectBuff);
+	self.pushBackNewCommand<DrawElementsIndirectCommand>(convertPrimitiveTopology(topology), self.m_state.m_idx.m_indexType, drawCount, offset,
+														 indirectBuff);
 }
 
-void CommandBuffer::drawArraysIndirect(PrimitiveTopology topology, U32 drawCount, PtrSize offset,
-									   BufferPtr indirectBuff)
+void CommandBuffer::drawArraysIndirect(PrimitiveTopology topology, U32 drawCount, PtrSize offset, BufferPtr indirectBuff)
 {
 	class DrawArraysIndirectCommand final : public GlCommand
 	{
@@ -1049,8 +1034,7 @@ void CommandBuffer::drawArraysIndirect(PrimitiveTopology topology, U32 drawCount
 
 			glBindBuffer(GL_DRAW_INDIRECT_BUFFER, buff.getGlName());
 
-			glMultiDrawArraysIndirect(m_topology, numberToPtr<void*>(m_offset), m_drawCount,
-									  sizeof(DrawArraysIndirectInfo));
+			glMultiDrawArraysIndirect(m_topology, numberToPtr<void*>(m_offset), m_drawCount, sizeof(DrawArraysIndirectInfo));
 
 			glBindBuffer(GL_DRAW_INDIRECT_BUFFER, 0);
 			return Error::kNone;
@@ -1060,8 +1044,7 @@ void CommandBuffer::drawArraysIndirect(PrimitiveTopology topology, U32 drawCount
 	ANKI_GL_SELF(CommandBufferImpl);
 	self.m_state.checkNonIndexedDrawcall();
 	self.flushDrawcall(*this);
-	self.pushBackNewCommand<DrawArraysIndirectCommand>(convertPrimitiveTopology(topology), drawCount, offset,
-													   indirectBuff);
+	self.pushBackNewCommand<DrawArraysIndirectCommand>(convertPrimitiveTopology(topology), drawCount, offset, indirectBuff);
 }
 
 void CommandBuffer::dispatchCompute(U32 groupCountX, U32 groupCountY, U32 groupCountZ)
@@ -1164,8 +1147,7 @@ void CommandBuffer::copyBufferToTextureView(BufferPtr buff, PtrSize offset, PtrS
 			const TextureViewImpl& viewImpl = static_cast<TextureViewImpl&>(*m_texView);
 			const TextureImpl& texImpl = static_cast<TextureImpl&>(*viewImpl.m_tex);
 
-			texImpl.copyFromBuffer(viewImpl.getSubresource(), static_cast<const BufferImpl&>(*m_buff).getGlName(),
-								   m_offset, m_range);
+			texImpl.copyFromBuffer(viewImpl.getSubresource(), static_cast<const BufferImpl&>(*m_buff).getGlName(), m_offset, m_range);
 			return Error::kNone;
 		}
 	};
@@ -1179,8 +1161,7 @@ void CommandBuffer::copyBufferToTextureView(BufferPtr buff, PtrSize offset, PtrS
 	self.pushBackNewCommand<TexSurfUploadCommand>(buff, offset, range, texView);
 }
 
-void CommandBuffer::copyBufferToBuffer(BufferPtr src, PtrSize srcOffset, BufferPtr dst, PtrSize dstOffset,
-									   PtrSize range)
+void CommandBuffer::copyBufferToBuffer(BufferPtr src, PtrSize srcOffset, BufferPtr dst, PtrSize dstOffset, PtrSize range)
 {
 	class Cmd final : public GlCommand
 	{
@@ -1202,8 +1183,7 @@ void CommandBuffer::copyBufferToBuffer(BufferPtr src, PtrSize srcOffset, BufferP
 
 		Error operator()(GlState& state)
 		{
-			static_cast<BufferImpl&>(*m_dst).write(static_cast<const BufferImpl&>(*m_src).getGlName(), m_srcOffset,
-												   m_dstOffset, m_range);
+			static_cast<BufferImpl&>(*m_dst).write(static_cast<const BufferImpl&>(*m_src).getGlName(), m_srcOffset, m_dstOffset, m_range);
 			return Error::kNone;
 		}
 	};
@@ -1284,8 +1264,7 @@ void CommandBuffer::blitTextureViews(TextureViewPtr srcView, TextureViewPtr dest
 	ANKI_ASSERT(!"TODO");
 }
 
-void CommandBuffer::setBufferBarrier(BufferPtr buff, BufferUsageBit prevUsage, BufferUsageBit nextUsage, PtrSize offset,
-									 PtrSize size)
+void CommandBuffer::setBufferBarrier(BufferPtr buff, BufferUsageBit prevUsage, BufferUsageBit nextUsage, PtrSize offset, PtrSize size)
 {
 	class SetBufferMemBarrierCommand final : public GlCommand
 	{
@@ -1332,8 +1311,7 @@ void CommandBuffer::setBufferBarrier(BufferPtr buff, BufferUsageBit prevUsage, B
 		d |= GL_COMMAND_BARRIER_BIT;
 	}
 
-	if(!!(all
-		  & (BufferUsageBit::FILL | BufferUsageBit::BUFFER_UPLOAD_SOURCE | BufferUsageBit::BUFFER_UPLOAD_DESTINATION)))
+	if(!!(all & (BufferUsageBit::FILL | BufferUsageBit::BUFFER_UPLOAD_SOURCE | BufferUsageBit::BUFFER_UPLOAD_DESTINATION)))
 	{
 		d |= GL_BUFFER_UPDATE_BARRIER_BIT;
 	}
@@ -1348,22 +1326,19 @@ void CommandBuffer::setBufferBarrier(BufferPtr buff, BufferUsageBit prevUsage, B
 	self.pushBackNewCommand<SetBufferMemBarrierCommand>(d);
 }
 
-void CommandBuffer::setTextureSurfaceBarrier(TexturePtr tex, TextureUsageBit prevUsage, TextureUsageBit nextUsage,
-											 const TextureSurfaceInfo& surf)
+void CommandBuffer::setTextureSurfaceBarrier(TexturePtr tex, TextureUsageBit prevUsage, TextureUsageBit nextUsage, const TextureSurfaceInfo& surf)
 {
 	TextureSubresourceInfo subresource;
 	setTextureBarrier(tex, prevUsage, nextUsage, subresource);
 }
 
-void CommandBuffer::setTextureVolumeBarrier(TexturePtr tex, TextureUsageBit prevUsage, TextureUsageBit nextUsage,
-											const TextureVolumeInfo& vol)
+void CommandBuffer::setTextureVolumeBarrier(TexturePtr tex, TextureUsageBit prevUsage, TextureUsageBit nextUsage, const TextureVolumeInfo& vol)
 {
 	TextureSubresourceInfo subresource;
 	setTextureBarrier(tex, prevUsage, nextUsage, subresource);
 }
 
-void CommandBuffer::setTextureBarrier(TexturePtr tex, TextureUsageBit prevUsage, TextureUsageBit nextUsage,
-									  const TextureSubresourceInfo& subresource)
+void CommandBuffer::setTextureBarrier(TexturePtr tex, TextureUsageBit prevUsage, TextureUsageBit nextUsage, const TextureSubresourceInfo& subresource)
 {
 	class Cmd final : public GlCommand
 	{
@@ -1505,8 +1480,7 @@ void CommandBuffer::writeOcclusionQueryResultToBuffer(OcclusionQueryPtr query, P
 			ANKI_ASSERT(m_offset + 4 <= buff.getSize());
 
 			glBindBuffer(GL_QUERY_BUFFER, buff.getGlName());
-			glGetQueryObjectuiv(static_cast<const OcclusionQueryImpl&>(*m_query).getGlName(), GL_QUERY_RESULT,
-								numberToPtr<GLuint*>(m_offset));
+			glGetQueryObjectuiv(static_cast<const OcclusionQueryImpl&>(*m_query).getGlName(), GL_QUERY_RESULT, numberToPtr<GLuint*>(m_offset));
 			glBindBuffer(GL_QUERY_BUFFER, 0);
 
 			return Error::kNone;
@@ -1534,8 +1508,7 @@ void CommandBuffer::setPushConstants(const void* data, U32 dataSize)
 
 		Error operator()(GlState& state)
 		{
-			const ShaderProgramImplReflection& refl =
-				static_cast<ShaderProgramImpl&>(*state.m_crntProg).getReflection();
+			const ShaderProgramImplReflection& refl = static_cast<ShaderProgramImpl&>(*state.m_crntProg).getReflection();
 			ANKI_ASSERT(refl.m_uniformDataSize == m_data.getSizeInBytes());
 
 			const Bool transpose = true;

+ 5 - 6
AnKi/Gr/Gl/CommandBufferImpl.cpp

@@ -24,8 +24,8 @@ void CommandBufferImpl::init(const CommandBufferInitInfo& init)
 {
 	auto& pool = getManager().getAllocator().getMemoryPool();
 
-	m_alloc = CommandBufferAllocator<GlCommand*>(pool.getAllocationCallback(), pool.getAllocationCallbackUserData(),
-												 init.m_hints.m_chunkSize, 1.0, 0, false);
+	m_alloc = CommandBufferAllocator<GlCommand*>(pool.getAllocationCallback(), pool.getAllocationCallbackUserData(), init.m_hints.m_chunkSize, 1.0, 0,
+												 false);
 
 	m_flags = init.m_flags;
 
@@ -59,8 +59,7 @@ void CommandBufferImpl::destroy()
 		command = next;
 	}
 
-	ANKI_ASSERT(m_alloc.getMemoryPool().getUsersCount() == 1
-				&& "Someone is holding a reference to the command buffer's allocator");
+	ANKI_ASSERT(m_alloc.getMemoryPool().getUsersCount() == 1 && "Someone is holding a reference to the command buffer's allocator");
 
 	m_alloc = CommandBufferAllocator<U8>();
 }
@@ -215,8 +214,8 @@ void CommandBufferImpl::flushDrawcall(CommandBuffer& cmdb)
 	{
 		if(m_state.m_glStencilFuncSeparateDirty[i])
 		{
-			pushBackNewCommand<StencilCmd>(GL_FRONT + i, convertCompareOperation(m_state.m_stencilCompare[i]),
-										   m_state.m_stencilRef[i], m_state.m_stencilCompareMask[i]);
+			pushBackNewCommand<StencilCmd>(GL_FRONT + i, convertCompareOperation(m_state.m_stencilCompare[i]), m_state.m_stencilRef[i],
+										   m_state.m_stencilCompareMask[i]);
 
 			m_state.m_glStencilFuncSeparateDirty[i] = false;
 		}

+ 1 - 2
AnKi/Gr/Gl/Common.cpp

@@ -270,8 +270,7 @@ GLenum convertBlendFactor(BlendFactor in)
 	return out;
 }
 
-void convertTextureInformation(Format pf, Bool& compressed, GLenum& format, GLenum& internalFormat, GLenum& type,
-							   DepthStencilAspectBit& dsAspect)
+void convertTextureInformation(Format pf, Bool& compressed, GLenum& format, GLenum& internalFormat, GLenum& type, DepthStencilAspectBit& dsAspect)
 {
 	compressed = formatIsCompressed(pf);
 	dsAspect = computeFormatAspect(pf);

+ 1 - 2
AnKi/Gr/Gl/Common.h

@@ -177,8 +177,7 @@ inline GLenum convertPrimitiveTopology(PrimitiveTopology ak)
 	return out;
 }
 
-void convertTextureInformation(Format pf, Bool& compressed, GLenum& format, GLenum& internalFormat, GLenum& type,
-							   DepthStencilAspectBit& dsAspect);
+void convertTextureInformation(Format pf, Bool& compressed, GLenum& format, GLenum& internalFormat, GLenum& type, DepthStencilAspectBit& dsAspect);
 /// @}
 
 } // end namespace anki

+ 2 - 4
AnKi/Gr/Gl/Fence.cpp

@@ -82,8 +82,7 @@ Bool Fence::clientWait(Second seconds)
 	{
 		// Send a cmd that will update the fence's status in case someone calls clientWait with seconds==0.0 all the
 		// time
-		static_cast<CommandBufferImpl&>(*cmdb).pushBackNewCommand<CheckFenceCommand>(FencePtr(this), seconds, 0.0,
-																					 nullptr);
+		static_cast<CommandBufferImpl&>(*cmdb).pushBackNewCommand<CheckFenceCommand>(FencePtr(this), seconds, 0.0, nullptr);
 		static_cast<CommandBufferImpl&>(*cmdb).flush();
 
 		return false;
@@ -94,8 +93,7 @@ Bool Fence::clientWait(Second seconds)
 
 		Second flushTime = HighRezTimer::getCurrentTime();
 
-		static_cast<CommandBufferImpl&>(*cmdb).pushBackNewCommand<CheckFenceCommand>(FencePtr(this), seconds, flushTime,
-																					 &barrier);
+		static_cast<CommandBufferImpl&>(*cmdb).pushBackNewCommand<CheckFenceCommand>(FencePtr(this), seconds, flushTime, &barrier);
 		static_cast<CommandBufferImpl&>(*cmdb).flush();
 
 		barrier.wait();

+ 1 - 2
AnKi/Gr/Gl/Framebuffer.cpp

@@ -29,8 +29,7 @@ Framebuffer* Framebuffer::newInstance(GrManager* manager, const FramebufferInitI
 			FramebufferImpl& impl = static_cast<FramebufferImpl&>(*m_fb);
 			Error err = impl.init(m_init);
 
-			GlObject::State oldState =
-				impl.setStateAtomically((err) ? GlObject::State::ERROR : GlObject::State::CREATED);
+			GlObject::State oldState = impl.setStateAtomically((err) ? GlObject::State::ERROR : GlObject::State::CREATED);
 			ANKI_ASSERT(oldState == GlObject::State::TO_BE_CREATED);
 			(void)oldState;
 

+ 9 - 12
AnKi/Gr/Gl/FramebufferImpl.cpp

@@ -114,8 +114,7 @@ Error FramebufferImpl::init(const FramebufferInitInfo& init)
 	return Error::kNone;
 }
 
-void FramebufferImpl::attachTextureInternal(GLenum attachment, const TextureViewImpl& view,
-											const FramebufferAttachmentInfo& info)
+void FramebufferImpl::attachTextureInternal(GLenum attachment, const TextureViewImpl& view, const FramebufferAttachmentInfo& info)
 {
 	const GLenum target = GL_FRAMEBUFFER;
 	const TextureImpl& tex = static_cast<const TextureImpl&>(*view.m_tex);
@@ -129,12 +128,11 @@ void FramebufferImpl::attachTextureInternal(GLenum attachment, const TextureView
 		glFramebufferTexture2D(target, attachment, tex.m_target, tex.getGlName(), view.getSubresource().m_firstMipmap);
 		break;
 	case GL_TEXTURE_CUBE_MAP:
-		glFramebufferTexture2D(target, attachment, GL_TEXTURE_CUBE_MAP_POSITIVE_X + view.getSubresource().m_firstFace,
-							   tex.getGlName(), view.getSubresource().m_firstMipmap);
+		glFramebufferTexture2D(target, attachment, GL_TEXTURE_CUBE_MAP_POSITIVE_X + view.getSubresource().m_firstFace, tex.getGlName(),
+							   view.getSubresource().m_firstMipmap);
 		break;
 	case GL_TEXTURE_2D_ARRAY:
-		glFramebufferTextureLayer(target, attachment, tex.getGlName(), view.getSubresource().m_firstMipmap,
-								  view.getSubresource().m_firstLayer);
+		glFramebufferTextureLayer(target, attachment, tex.getGlName(), view.getSubresource().m_firstMipmap, view.getSubresource().m_firstLayer);
 		break;
 	case GL_TEXTURE_3D:
 		ANKI_ASSERT(!"TODO");
@@ -178,8 +176,7 @@ void FramebufferImpl::bind(const GlState& state, U32 minx, U32 miny, U32 width,
 	// Invalidate
 	if(m_invalidateBuffersCount)
 	{
-		glInvalidateSubFramebuffer(GL_FRAMEBUFFER, m_invalidateBuffersCount, &m_invalidateBuffers[0], minx, miny, width,
-								   height);
+		glInvalidateSubFramebuffer(GL_FRAMEBUFFER, m_invalidateBuffersCount, &m_invalidateBuffers[0], minx, miny, width, height);
 	}
 
 	// Clear buffers
@@ -191,8 +188,8 @@ void FramebufferImpl::bind(const GlState& state, U32 minx, U32 miny, U32 width,
 		{
 			// Enable write mask in case a pipeline changed it (else no clear will happen) and then restore state
 			Bool restore = false;
-			if(state.m_colorWriteMasks[i][0] != true || state.m_colorWriteMasks[i][1] != true
-			   || state.m_colorWriteMasks[i][2] != true || state.m_colorWriteMasks[i][3] != true)
+			if(state.m_colorWriteMasks[i][0] != true || state.m_colorWriteMasks[i][1] != true || state.m_colorWriteMasks[i][2] != true
+			   || state.m_colorWriteMasks[i][3] != true)
 			{
 				glColorMaski(i, true, true, true, true);
 				restore = true;
@@ -202,8 +199,8 @@ void FramebufferImpl::bind(const GlState& state, U32 minx, U32 miny, U32 width,
 
 			if(restore)
 			{
-				glColorMaski(i, state.m_colorWriteMasks[i][0], state.m_colorWriteMasks[i][1],
-							 state.m_colorWriteMasks[i][2], state.m_colorWriteMasks[i][3]);
+				glColorMaski(i, state.m_colorWriteMasks[i][0], state.m_colorWriteMasks[i][1], state.m_colorWriteMasks[i][2],
+							 state.m_colorWriteMasks[i][3]);
 			}
 		}
 	}

+ 2 - 4
AnKi/Gr/Gl/FramebufferImpl.h

@@ -52,12 +52,10 @@ private:
 	Bool m_clearStencil = false;
 
 	/// Attach a texture
-	static void attachTextureInternal(GLenum attachment, const TextureViewImpl& view,
-									  const FramebufferAttachmentInfo& info);
+	static void attachTextureInternal(GLenum attachment, const TextureViewImpl& view, const FramebufferAttachmentInfo& info);
 
 	/// Create the FBO
-	ANKI_USE_RESULT Error createFbo(const Array<U, kMaxColorRenderTargets + 1>& layers,
-									GLenum depthStencilBindingPoint);
+	ANKI_USE_RESULT Error createFbo(const Array<U, kMaxColorRenderTargets + 1>& layers, GLenum depthStencilBindingPoint);
 };
 /// @}
 

+ 2 - 4
AnKi/Gr/Gl/GlState.cpp

@@ -43,8 +43,7 @@ static const GlDbg gldbgseverity[] = {{GL_DEBUG_SEVERITY_LOW, "GL_DEBUG_SEVERITY
 __stdcall
 #	endif
 	void
-	oglMessagesCallback(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const char* message,
-						const GLvoid* userParam)
+	oglMessagesCallback(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const char* message, const GLvoid* userParam)
 {
 	using namespace anki;
 
@@ -122,8 +121,7 @@ void GlState::initRenderThread()
 			{
 				for(U sv = 0; sv < sizeof(gldbgseverity) / sizeof(GlDbg); sv++)
 				{
-					glDebugMessageControl(gldbgsource[s].token, gldbgtype[t].token, gldbgseverity[sv].token, 0, nullptr,
-										  GL_TRUE);
+					glDebugMessageControl(gldbgsource[s].token, gldbgtype[t].token, gldbgseverity[sv].token, 0, nullptr, GL_TRUE);
 				}
 			}
 		}

+ 2 - 4
AnKi/Gr/Gl/GlState.h

@@ -42,10 +42,8 @@ public:
 
 	/// @name FB
 	/// @{
-	Array2d<Bool, kMaxColorRenderTargets, 4> m_colorWriteMasks = {{{{true, true, true, true}},
-																   {{true, true, true, true}},
-																   {{true, true, true, true}},
-																   {{true, true, true, true}}}};
+	Array2d<Bool, kMaxColorRenderTargets, 4> m_colorWriteMasks = {
+		{{{true, true, true, true}}, {{true, true, true, true}}, {{true, true, true, true}}, {{true, true, true, true}}}};
 
 	Bool m_depthWriteMask = true;
 

+ 1 - 2
AnKi/Gr/Gl/GrManagerImpl.cpp

@@ -82,8 +82,7 @@ void GrManagerImpl::initFakeDefaultFb(GrManagerInitInfo& init)
 	texinit.m_width = defaultFbWidth;
 	texinit.m_height = defaultFbHeight;
 	texinit.m_format = Format::kR8G8B8A8_Unorm;
-	texinit.m_usage =
-		TextureUsageBit::kFramebufferWrite | TextureUsageBit::kImageComputeWrite | TextureUsageBit::kPresent;
+	texinit.m_usage = TextureUsageBit::kFramebufferWrite | TextureUsageBit::kImageComputeWrite | TextureUsageBit::kPresent;
 	m_fakeFbTex = newTexture(texinit);
 
 	TextureViewPtr view = newTextureView(TextureViewInitInfo(m_fakeFbTex, "FB view"));

+ 1 - 2
AnKi/Gr/Gl/GrManagerImplSdl.cpp

@@ -34,8 +34,7 @@ public:
 	{
 		m_window = init.m_window->getNative().m_window;
 
-		ANKI_GL_LOGI("Creating GL %u.%u context...", U(init.m_config->getNumber("gr.glmajor")),
-					 U(init.m_config->getNumber("gr.glminor")));
+		ANKI_GL_LOGI("Creating GL %u.%u context...", U(init.m_config->getNumber("gr.glmajor")), U(init.m_config->getNumber("gr.glminor")));
 
 		if(init.m_config->getNumber("gr_debugContext"))
 		{

+ 2 - 4
AnKi/Gr/Gl/RenderingThread.cpp

@@ -55,8 +55,7 @@ public:
 		const FramebufferImpl& fb = static_cast<FramebufferImpl&>(*gr.m_fakeDefaultFb);
 		const U width = gr.m_fakeFbTex->getWidth();
 		const U height = gr.m_fakeFbTex->getHeight();
-		glBlitNamedFramebuffer(fb.getGlName(), 0, 0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT,
-							   GL_NEAREST);
+		glBlitNamedFramebuffer(fb.getGlName(), 0, 0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT, GL_NEAREST);
 
 		// Swap buffers
 		m_renderingThread->swapBuffersInternal();
@@ -184,8 +183,7 @@ void RenderingThread::prepare()
 	// Ignore the first error
 	glGetError();
 
-	ANKI_GL_LOGI("OpenGL async thread started: OpenGL version \"%s\", GLSL version \"%s\"",
-				 reinterpret_cast<const char*>(glGetString(GL_VERSION)),
+	ANKI_GL_LOGI("OpenGL async thread started: OpenGL version \"%s\", GLSL version \"%s\"", reinterpret_cast<const char*>(glGetString(GL_VERSION)),
 				 reinterpret_cast<const char*>(glGetString(GL_SHADING_LANGUAGE_VERSION)));
 
 	// Get thread id

+ 2 - 4
AnKi/Gr/Gl/Shader.cpp

@@ -19,8 +19,7 @@ Shader* Shader::newInstance(GrManager* manager, const ShaderInitInfo& init)
 		StringRaii m_source;
 		DynamicArrayRaii<ShaderSpecializationConstValue> m_constValues;
 
-		ShaderCreateCommand(Shader* shader, ConstWeakArray<U8> bin,
-							ConstWeakArray<ShaderSpecializationConstValue> constValues,
+		ShaderCreateCommand(Shader* shader, ConstWeakArray<U8> bin, ConstWeakArray<ShaderSpecializationConstValue> constValues,
 							const CommandBufferAllocator<U8>& alloc)
 			: m_shader(shader)
 			, m_source(alloc)
@@ -41,8 +40,7 @@ Shader* Shader::newInstance(GrManager* manager, const ShaderInitInfo& init)
 
 			Error err = impl.init(m_source.toCString(), m_constValues);
 
-			GlObject::State oldState =
-				impl.setStateAtomically((err) ? GlObject::State::ERROR : GlObject::State::CREATED);
+			GlObject::State oldState = impl.setStateAtomically((err) ? GlObject::State::ERROR : GlObject::State::CREATED);
 			ANKI_ASSERT(oldState == GlObject::State::TO_BE_CREATED);
 			(void)oldState;
 

+ 2 - 2
AnKi/Gr/Gl/ShaderImpl.cpp

@@ -37,8 +37,8 @@ Error ShaderImpl::init(CString source, ConstWeakArray<ShaderSpecializationConstV
 	ANKI_ASSERT(source);
 	ANKI_ASSERT(!isCreated());
 
-	static const Array<GLenum, 6> gltype = {{GL_VERTEX_SHADER, GL_TESS_CONTROL_SHADER, GL_TESS_EVALUATION_SHADER,
-											 GL_GEOMETRY_SHADER, GL_FRAGMENT_SHADER, GL_COMPUTE_SHADER}};
+	static const Array<GLenum, 6> gltype = {
+		{GL_VERTEX_SHADER, GL_TESS_CONTROL_SHADER, GL_TESS_EVALUATION_SHADER, GL_GEOMETRY_SHADER, GL_FRAGMENT_SHADER, GL_COMPUTE_SHADER}};
 
 	m_glType = gltype[U(m_shaderType)];
 

+ 3 - 4
AnKi/Gr/Gl/ShaderProgram.cpp

@@ -24,8 +24,7 @@ ShaderProgram* ShaderProgram::newInstance(GrManager* manager, const ShaderProgra
 		ShaderPtr m_frag;
 		ShaderPtr m_comp;
 
-		CreateCommand(ShaderProgram* prog, ShaderPtr vert, ShaderPtr tessc, ShaderPtr tesse, ShaderPtr geom,
-					  ShaderPtr frag, ShaderPtr comp)
+		CreateCommand(ShaderProgram* prog, ShaderPtr vert, ShaderPtr tessc, ShaderPtr tesse, ShaderPtr geom, ShaderPtr frag, ShaderPtr comp)
 			: m_prog(prog)
 			, m_vert(vert)
 			, m_tessc(tessc)
@@ -57,8 +56,8 @@ ShaderProgram* ShaderProgram::newInstance(GrManager* manager, const ShaderProgra
 	CommandBufferPtr cmdb = manager->newCommandBuffer(CommandBufferInitInfo());
 	static_cast<CommandBufferImpl&>(*cmdb).pushBackNewCommand<CreateCommand>(
 		impl, init.m_shaders[ShaderType::kVertex], init.m_shaders[ShaderType::kTessellationControl],
-		init.m_shaders[ShaderType::kTessellationEvaluation], init.m_shaders[ShaderType::kGeometry],
-		init.m_shaders[ShaderType::kFragment], init.m_shaders[ShaderType::kCompute]);
+		init.m_shaders[ShaderType::kTessellationEvaluation], init.m_shaders[ShaderType::kGeometry], init.m_shaders[ShaderType::kFragment],
+		init.m_shaders[ShaderType::kCompute]);
 	static_cast<CommandBufferImpl&>(*cmdb).flush();
 
 	return impl;

+ 1 - 2
AnKi/Gr/Gl/ShaderProgramImpl.h

@@ -39,8 +39,7 @@ public:
 
 	~ShaderProgramImpl();
 
-	ANKI_USE_RESULT Error initGraphics(ShaderPtr vert, ShaderPtr tessc, ShaderPtr tesse, ShaderPtr geom,
-									   ShaderPtr frag);
+	ANKI_USE_RESULT Error initGraphics(ShaderPtr vert, ShaderPtr tessc, ShaderPtr tesse, ShaderPtr geom, ShaderPtr frag);
 	ANKI_USE_RESULT Error initCompute(ShaderPtr comp);
 
 	// Do that only when is needed to avoid serializing the thread the driver is using for compilation.

+ 8 - 13
AnKi/Gr/Gl/StateTracker.h

@@ -139,8 +139,7 @@ public:
 
 	Bool setScissor(GLsizei minx, GLsizei miny, GLsizei width, GLsizei height)
 	{
-		if(!m_scissorSet
-		   || (m_scissor[0] != minx || m_scissor[1] != miny || m_scissor[2] != width || m_scissor[3] != height))
+		if(!m_scissorSet || (m_scissor[0] != minx || m_scissor[1] != miny || m_scissor[2] != width || m_scissor[3] != height))
 		{
 			m_scissor = {{minx, miny, width, height}};
 			m_scissorSet = true;
@@ -197,11 +196,8 @@ public:
 
 	Bool maybeEnableStencilTest()
 	{
-		Bool enable = !stencilTestDisabled(m_stencilFail[0], m_stencilPassDepthFail[0], m_stencilPassDepthPass[0],
-										   m_stencilCompare[0]);
-		enable = enable
-				 || !stencilTestDisabled(m_stencilFail[1], m_stencilPassDepthFail[1], m_stencilPassDepthPass[1],
-										 m_stencilCompare[1]);
+		Bool enable = !stencilTestDisabled(m_stencilFail[0], m_stencilPassDepthFail[0], m_stencilPassDepthPass[0], m_stencilCompare[0]);
+		enable = enable || !stencilTestDisabled(m_stencilFail[1], m_stencilPassDepthFail[1], m_stencilPassDepthPass[1], m_stencilCompare[1]);
 
 		if(enable != m_stencilTestEnabled)
 		{
@@ -215,8 +211,8 @@ public:
 	Array<StencilOperation, 2> m_stencilPassDepthFail = {{StencilOperation::COUNT, StencilOperation::COUNT}};
 	Array<StencilOperation, 2> m_stencilPassDepthPass = {{StencilOperation::COUNT, StencilOperation::COUNT}};
 
-	Bool setStencilOperations(FaceSelectionBit face, StencilOperation stencilFail,
-							  StencilOperation stencilPassDepthFail, StencilOperation stencilPassDepthPass)
+	Bool setStencilOperations(FaceSelectionBit face, StencilOperation stencilFail, StencilOperation stencilPassDepthFail,
+							  StencilOperation stencilPassDepthPass)
 	{
 		Bool changed = false;
 		if(!!(face & FaceSelectionBit::FRONT)
@@ -395,8 +391,8 @@ public:
 	Bool maybeEnableBlend(U attidx)
 	{
 		ColorAttachment& att = m_colorAtt[attidx];
-		Bool wantBlend = !blendingDisabled(att.m_blendSrcFactorRgb, att.m_blendDstFactorRgb, att.m_blendSrcFactorA,
-										   att.m_blendDstFactorA, att.m_blendOpRgb, att.m_blendOpA);
+		Bool wantBlend = !blendingDisabled(att.m_blendSrcFactorRgb, att.m_blendDstFactorRgb, att.m_blendSrcFactorA, att.m_blendDstFactorA,
+										   att.m_blendOpRgb, att.m_blendOpA);
 
 		if(wantBlend != att.m_enableBlend)
 		{
@@ -409,8 +405,7 @@ public:
 	Bool setBlendFactors(U32 attachment, BlendFactor srcRgb, BlendFactor dstRgb, BlendFactor srcA, BlendFactor dstA)
 	{
 		auto& att = m_colorAtt[attachment];
-		if(att.m_blendSrcFactorRgb != srcRgb || att.m_blendDstFactorRgb != dstRgb || att.m_blendSrcFactorA != srcA
-		   || att.m_blendDstFactorA != dstA)
+		if(att.m_blendSrcFactorRgb != srcRgb || att.m_blendDstFactorRgb != dstRgb || att.m_blendSrcFactorA != srcA || att.m_blendDstFactorA != dstA)
 		{
 			att.m_blendSrcFactorRgb = srcRgb;
 			att.m_blendDstFactorRgb = dstRgb;

+ 11 - 19
AnKi/Gr/Gl/TextureImpl.cpp

@@ -93,8 +93,7 @@ TextureImpl::~TextureImpl()
 		CommandBufferPtr commands;
 
 		commands = manager.newCommandBuffer(CommandBufferInitInfo());
-		static_cast<CommandBufferImpl&>(*commands).pushBackNewCommand<DeleteTextureCommand>(m_glName, m_viewsMap,
-																							getAllocator());
+		static_cast<CommandBufferImpl&>(*commands).pushBackNewCommand<DeleteTextureCommand>(m_glName, m_viewsMap, getAllocator());
 		static_cast<CommandBufferImpl&>(*commands).flush();
 	}
 	else
@@ -208,8 +207,7 @@ void TextureImpl::init(const TextureInitInfo& init)
 	ANKI_CHECK_GL_ERROR();
 }
 
-void TextureImpl::copyFromBuffer(const TextureSubresourceInfo& subresource, GLuint pbo, PtrSize offset,
-								 PtrSize dataSize) const
+void TextureImpl::copyFromBuffer(const TextureSubresourceInfo& subresource, GLuint pbo, PtrSize offset, PtrSize dataSize) const
 {
 	ANKI_ASSERT(isSubresourceGoodForCopyFromBuffer(subresource));
 	ANKI_ASSERT(dataSize > 0);
@@ -242,13 +240,11 @@ void TextureImpl::copyFromBuffer(const TextureSubresourceInfo& subresource, GLui
 		const U surfIdx = computeSurfaceIdx(TextureSurfaceInfo(mipmap, 0, subresource.m_firstFace, 0));
 		if(!m_compressed)
 		{
-			glTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + surfIdx, mipmap, 0, 0, w, h, m_glFormat, m_glType,
-							ptrOffset);
+			glTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + surfIdx, mipmap, 0, 0, w, h, m_glFormat, m_glType, ptrOffset);
 		}
 		else
 		{
-			glCompressedTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + surfIdx, mipmap, 0, 0, w, h, m_glFormat,
-									  dataSize, ptrOffset);
+			glCompressedTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + surfIdx, mipmap, 0, 0, w, h, m_glFormat, dataSize, ptrOffset);
 		}
 		break;
 	}
@@ -333,15 +329,13 @@ void TextureImpl::clear(const TextureSubresourceInfo& subresource, const ClearVa
 	{
 		for(U face = subresource.m_firstFace; face < subresource.m_firstFace + subresource.m_faceCount; ++face)
 		{
-			for(U layer = subresource.m_firstLayer; layer < subresource.m_firstLayer + subresource.m_layerCount;
-				++layer)
+			for(U layer = subresource.m_firstLayer; layer < subresource.m_firstLayer + subresource.m_layerCount; ++layer)
 			{
 				const U surfaceIdx = computeSurfaceIdx(TextureSurfaceInfo(mip, 0, face, layer));
 				const U width = m_width >> mip;
 				const U height = m_height >> mip;
 
-				glClearTexSubImage(m_glName, mip, 0, 0, surfaceIdx, width, height, 1, format, GL_FLOAT,
-								   &clearValue.m_colorf[0]);
+				glClearTexSubImage(m_glName, mip, 0, 0, surfaceIdx, width, height, 1, format, GL_FLOAT, &clearValue.m_colorf[0]);
 			}
 		}
 	}
@@ -402,19 +396,17 @@ MicroTextureView TextureImpl::getOrCreateView(const TextureSubresourceInfo& subr
 			glTarget = GL_TEXTURE_2D;
 		}
 
-		const U firstSurf = computeSurfaceIdx(
-			TextureSurfaceInfo(subresource.m_firstMipmap, 0, subresource.m_firstFace, subresource.m_firstLayer));
-		const U lastSurf = computeSurfaceIdx(
-			TextureSurfaceInfo(subresource.m_firstMipmap, 0, subresource.m_firstFace + subresource.m_faceCount - 1,
-							   subresource.m_firstLayer + subresource.m_layerCount - 1));
+		const U firstSurf = computeSurfaceIdx(TextureSurfaceInfo(subresource.m_firstMipmap, 0, subresource.m_firstFace, subresource.m_firstLayer));
+		const U lastSurf = computeSurfaceIdx(TextureSurfaceInfo(subresource.m_firstMipmap, 0, subresource.m_firstFace + subresource.m_faceCount - 1,
+																subresource.m_firstLayer + subresource.m_layerCount - 1));
 		ANKI_ASSERT(firstSurf <= lastSurf);
 
 		MicroTextureView view;
 		view.m_aspect = subresource.m_depthStencilAspect;
 
 		glGenTextures(1, &view.m_glName);
-		glTextureView(view.m_glName, glTarget, m_glName, m_internalFormat, subresource.m_firstMipmap,
-					  subresource.m_mipmapCount, firstSurf, lastSurf - firstSurf + 1);
+		glTextureView(view.m_glName, glTarget, m_glName, m_internalFormat, subresource.m_firstMipmap, subresource.m_mipmapCount, firstSurf,
+					  lastSurf - firstSurf + 1);
 
 		m_viewsMap.emplace(getAllocator(), subresource, view);
 

+ 63 - 86
AnKi/Gr/RenderGraph.cpp

@@ -67,8 +67,7 @@ public:
 	TextureSurfaceInfo m_surface;
 	DepthStencilAspectBit m_dsAspect;
 
-	TextureBarrier(U32 rtIdx, TextureUsageBit usageBefore, TextureUsageBit usageAfter, const TextureSurfaceInfo& surf,
-				   DepthStencilAspectBit dsAspect)
+	TextureBarrier(U32 rtIdx, TextureUsageBit usageBefore, TextureUsageBit usageAfter, const TextureSurfaceInfo& surf, DepthStencilAspectBit dsAspect)
 		: m_idx(rtIdx)
 		, m_usageBefore(usageBefore)
 		, m_usageAfter(usageAfter)
@@ -278,8 +277,7 @@ void FramebufferDescription::bake()
 		sriToHash.m_sriTexelHeight = m_shadingRateAttachmentTexelHeight;
 		sriToHash.m_surface = m_shadingRateAttachmentSurface;
 
-		m_hash = (m_hash != 0) ? appendHash(&sriToHash, sizeof(sriToHash), m_hash)
-							   : computeHash(&sriToHash, sizeof(sriToHash));
+		m_hash = (m_hash != 0) ? appendHash(&sriToHash, sizeof(sriToHash), m_hash) : computeHash(&sriToHash, sizeof(sriToHash));
 	}
 
 	ANKI_ASSERT(m_hash != 0 && m_hash != 1);
@@ -422,8 +420,7 @@ TexturePtr RenderGraph::getOrCreateRenderTarget(const TextureInitInfo& initInf,
 	return tex;
 }
 
-FramebufferPtr RenderGraph::getOrCreateFramebuffer(const FramebufferDescription& fbDescr,
-												   const RenderTargetHandle* rtHandles, CString name,
+FramebufferPtr RenderGraph::getOrCreateFramebuffer(const FramebufferDescription& fbDescr, const RenderTargetHandle* rtHandles, CString name,
 												   Bool& drawsToPresentable)
 {
 	ANKI_ASSERT(rtHandles);
@@ -482,8 +479,7 @@ FramebufferPtr RenderGraph::getOrCreateFramebuffer(const FramebufferDescription&
 			outAtt.m_storeOperation = inAtt.m_storeOperation;
 
 			// Create texture view
-			TextureViewInitInfo viewInit(m_ctx->m_rts[rtHandles[i].m_idx].m_texture,
-										 TextureSubresourceInfo(inAtt.m_surface), "RenderGraph");
+			TextureViewInitInfo viewInit(m_ctx->m_rts[rtHandles[i].m_idx].m_texture, TextureSubresourceInfo(inAtt.m_surface), "RenderGraph");
 			TextureViewPtr view = GrManager::getSingleton().newTextureView(viewInit);
 
 			outAtt.m_textureView = std::move(view);
@@ -510,8 +506,8 @@ FramebufferPtr RenderGraph::getOrCreateFramebuffer(const FramebufferDescription&
 
 		if(fbDescr.m_shadingRateAttachmentTexelWidth > 0)
 		{
-			TextureViewInitInfo viewInit(m_ctx->m_rts[rtHandles[kMaxColorRenderTargets + 1].m_idx].m_texture,
-										 fbDescr.m_shadingRateAttachmentSurface, "RenderGraph SRI");
+			TextureViewInitInfo viewInit(m_ctx->m_rts[rtHandles[kMaxColorRenderTargets + 1].m_idx].m_texture, fbDescr.m_shadingRateAttachmentSurface,
+										 "RenderGraph SRI");
 			TextureViewPtr view = GrManager::getSingleton().newTextureView(viewInit);
 
 			fbInit.m_shadingRateImage.m_texelWidth = fbDescr.m_shadingRateAttachmentTexelWidth;
@@ -532,8 +528,7 @@ FramebufferPtr RenderGraph::getOrCreateFramebuffer(const FramebufferDescription&
 
 Bool RenderGraph::overlappingTextureSubresource(const TextureSubresourceInfo& suba, const TextureSubresourceInfo& subb)
 {
-#define ANKI_OVERLAPPING(first, count) \
-	((suba.first < subb.first + subb.count) && (subb.first < suba.first + suba.count))
+#define ANKI_OVERLAPPING(first, count) ((suba.first < subb.first + subb.count) && (subb.first < suba.first + suba.count))
 
 	const Bool overlappingFaces = ANKI_OVERLAPPING(m_firstFace, m_faceCount);
 	const Bool overlappingMips = ANKI_OVERLAPPING(m_firstMipmap, m_mipmapCount);
@@ -817,14 +812,12 @@ void RenderGraph::initRenderPassesAndSetDeps(const RenderGraphDescription& descr
 		// Create command buffers and framebuffer
 		if(inPass.m_type == RenderPassDescriptionBase::Type::kGraphics)
 		{
-			const GraphicsRenderPassDescription& graphicsPass =
-				static_cast<const GraphicsRenderPassDescription&>(inPass);
+			const GraphicsRenderPassDescription& graphicsPass = static_cast<const GraphicsRenderPassDescription&>(inPass);
 
 			if(graphicsPass.hasFramebuffer())
 			{
 				Bool drawsToPresentable;
-				outPass.fb() = getOrCreateFramebuffer(graphicsPass.m_fbDescr, &graphicsPass.m_rtHandles[0],
-													  inPass.m_name.cstr(), drawsToPresentable);
+				outPass.fb() = getOrCreateFramebuffer(graphicsPass.m_fbDescr, &graphicsPass.m_rtHandles[0], inPass.m_name.cstr(), drawsToPresentable);
 
 				outPass.m_fbRenderArea = graphicsPass.m_fbRenderArea;
 				outPass.m_drawsToPresentable = drawsToPresentable;
@@ -934,8 +927,7 @@ void RenderGraph::initGraphicsPasses(const RenderGraphDescription& descr)
 		// Create command buffers and framebuffer
 		if(inPass.m_type == RenderPassDescriptionBase::Type::kGraphics)
 		{
-			const GraphicsRenderPassDescription& graphicsPass =
-				static_cast<const GraphicsRenderPassDescription&>(inPass);
+			const GraphicsRenderPassDescription& graphicsPass = static_cast<const GraphicsRenderPassDescription&>(inPass);
 
 			if(graphicsPass.hasFramebuffer())
 			{
@@ -951,12 +943,10 @@ void RenderGraph::initGraphicsPasses(const RenderGraphDescription& descr)
 
 				if(!!graphicsPass.m_fbDescr.m_depthStencilAttachment.m_aspect)
 				{
-					TextureSubresourceInfo subresource =
-						TextureSubresourceInfo(graphicsPass.m_fbDescr.m_depthStencilAttachment.m_surface,
-											   graphicsPass.m_fbDescr.m_depthStencilAttachment.m_aspect);
+					TextureSubresourceInfo subresource = TextureSubresourceInfo(graphicsPass.m_fbDescr.m_depthStencilAttachment.m_surface,
+																				graphicsPass.m_fbDescr.m_depthStencilAttachment.m_aspect);
 
-					getCrntUsage(graphicsPass.m_rtHandles[kMaxColorRenderTargets], outPass.m_batchIdx, subresource,
-								 usage);
+					getCrntUsage(graphicsPass.m_rtHandles[kMaxColorRenderTargets], outPass.m_batchIdx, subresource, usage);
 
 					outPass.m_dsUsage = usage;
 				}
@@ -991,8 +981,7 @@ void RenderGraph::iterateSurfsOrVolumes(const TexturePtr& tex, const TextureSubr
 	{
 		for(U32 layer = subresource.m_firstLayer; layer < subresource.m_firstLayer + subresource.m_layerCount; ++layer)
 		{
-			for(U32 face = subresource.m_firstFace; face < U32(subresource.m_firstFace + subresource.m_faceCount);
-				++face)
+			for(U32 face = subresource.m_firstFace; face < U32(subresource.m_firstFace + subresource.m_faceCount); ++face)
 			{
 				// Compute surf or vol idx
 				const U32 faceCount = textureTypeIsCube(tex->getTextureType()) ? 6 : 1;
@@ -1018,45 +1007,43 @@ void RenderGraph::setTextureBarrier(Batch& batch, const RenderPassDependency& de
 	const TextureUsageBit depUsage = dep.m_texture.m_usage;
 	RT& rt = ctx.m_rts[rtIdx];
 
-	iterateSurfsOrVolumes(
-		rt.m_texture, dep.m_texture.m_subresource, [&](U32 surfOrVolIdx, const TextureSurfaceInfo& surf) {
-			TextureUsageBit& crntUsage = rt.m_surfOrVolUsages[surfOrVolIdx];
-			if(crntUsage != depUsage)
+	iterateSurfsOrVolumes(rt.m_texture, dep.m_texture.m_subresource, [&](U32 surfOrVolIdx, const TextureSurfaceInfo& surf) {
+		TextureUsageBit& crntUsage = rt.m_surfOrVolUsages[surfOrVolIdx];
+		if(crntUsage != depUsage)
+		{
+			// Check if we can merge barriers
+			if(rt.m_lastBatchThatTransitionedIt[surfOrVolIdx] == batchIdx)
 			{
-				// Check if we can merge barriers
-				if(rt.m_lastBatchThatTransitionedIt[surfOrVolIdx] == batchIdx)
-				{
-					// Will merge the barriers
+				// Will merge the barriers
 
-					crntUsage |= depUsage;
+				crntUsage |= depUsage;
 
-					[[maybe_unused]] Bool found = false;
-					for(TextureBarrier& b : batch.m_textureBarriersBefore)
+				[[maybe_unused]] Bool found = false;
+				for(TextureBarrier& b : batch.m_textureBarriersBefore)
+				{
+					if(b.m_idx == rtIdx && b.m_surface == surf)
 					{
-						if(b.m_idx == rtIdx && b.m_surface == surf)
-						{
-							b.m_usageAfter |= depUsage;
-							found = true;
-							break;
-						}
+						b.m_usageAfter |= depUsage;
+						found = true;
+						break;
 					}
-
-					ANKI_ASSERT(found);
 				}
-				else
-				{
-					// Create a new barrier for this surface
 
-					batch.m_textureBarriersBefore.emplaceBack(rtIdx, crntUsage, depUsage, surf,
-															  dep.m_texture.m_subresource.m_depthStencilAspect);
+				ANKI_ASSERT(found);
+			}
+			else
+			{
+				// Create a new barrier for this surface
+
+				batch.m_textureBarriersBefore.emplaceBack(rtIdx, crntUsage, depUsage, surf, dep.m_texture.m_subresource.m_depthStencilAspect);
 
-					crntUsage = depUsage;
-					rt.m_lastBatchThatTransitionedIt[surfOrVolIdx] = U16(batchIdx);
-				}
+				crntUsage = depUsage;
+				rt.m_lastBatchThatTransitionedIt[surfOrVolIdx] = U16(batchIdx);
 			}
+		}
 
-			return true;
-		});
+		return true;
+	});
 }
 
 void RenderGraph::setBatchBarriers(const RenderGraphDescription& descr)
@@ -1204,10 +1191,9 @@ void RenderGraph::setBatchBarriers(const RenderGraphDescription& descr)
 					  return a.m_idx < b.m_idx;
 				  });
 
-		std::sort(batch.m_asBarriersBefore.getBegin(), batch.m_asBarriersBefore.getEnd(),
-				  [&](const ASBarrier& a, const ASBarrier& b) {
-					  return a.m_idx < b.m_idx;
-				  });
+		std::sort(batch.m_asBarriersBefore.getBegin(), batch.m_asBarriersBefore.getEnd(), [&](const ASBarrier& a, const ASBarrier& b) {
+			return a.m_idx < b.m_idx;
+		});
 #endif
 	} // For all batches
 }
@@ -1351,8 +1337,8 @@ void RenderGraph::run() const
 
 			if(pass.fb().isCreated())
 			{
-				cmdb->beginRenderPass(pass.fb(), pass.m_colorUsages, pass.m_dsUsage, pass.m_fbRenderArea[0],
-									  pass.m_fbRenderArea[1], pass.m_fbRenderArea[2], pass.m_fbRenderArea[3]);
+				cmdb->beginRenderPass(pass.fb(), pass.m_colorUsages, pass.m_dsUsage, pass.m_fbRenderArea[0], pass.m_fbRenderArea[1],
+									  pass.m_fbRenderArea[2], pass.m_fbRenderArea[3]);
 			}
 
 			const U32 size = pass.m_secondLevelCmdbs.getSize();
@@ -1407,8 +1393,7 @@ void RenderGraph::flush()
 	}
 }
 
-void RenderGraph::getCrntUsage(RenderTargetHandle handle, U32 batchIdx, const TextureSubresourceInfo& subresource,
-							   TextureUsageBit& usage) const
+void RenderGraph::getCrntUsage(RenderTargetHandle handle, U32 batchIdx, const TextureSubresourceInfo& subresource, TextureUsageBit& usage) const
 {
 	usage = TextureUsageBit::kNone;
 	const Batch& batch = m_ctx->m_batches[batchIdx];
@@ -1618,8 +1603,7 @@ StringRaii RenderGraph::asUsageToStr(StackMemoryPool& pool, AccelerationStructur
 	return str;
 }
 
-Error RenderGraph::dumpDependencyDotFile(const RenderGraphDescription& descr, const BakeContext& ctx,
-										 CString path) const
+Error RenderGraph::dumpDependencyDotFile(const RenderGraphDescription& descr, const BakeContext& ctx, CString path) const
 {
 	ANKI_GR_LOGW("Running with debug code");
 
@@ -1645,9 +1629,8 @@ Error RenderGraph::dumpDependencyDotFile(const RenderGraphDescription& descr, co
 		{
 			CString passName = descr.m_passes[passIdx]->m_name.toCString();
 
-			slist.pushBackSprintf(
-				"\t\"%s\"[color=%s,style=%s,shape=box];\n", passName.cstr(), COLORS[batchIdx % COLORS.getSize()],
-				(descr.m_passes[passIdx]->m_type == RenderPassDescriptionBase::Type::kGraphics) ? "bold" : "dashed");
+			slist.pushBackSprintf("\t\"%s\"[color=%s,style=%s,shape=box];\n", passName.cstr(), COLORS[batchIdx % COLORS.getSize()],
+								  (descr.m_passes[passIdx]->m_type == RenderPassDescriptionBase::Type::kGraphics) ? "bold" : "dashed");
 
 			for(U32 depIdx : ctx.m_passes[passIdx].m_dependsOn)
 			{
@@ -1688,17 +1671,15 @@ Error RenderGraph::dumpDependencyDotFile(const RenderGraphDescription& descr, co
 			const TextureBarrier& barrier = batch.m_textureBarriersBefore[barrierIdx];
 
 			StringRaii barrierLabel(&pool);
-			barrierLabel.sprintf("<b>%s</b> (mip,dp,f,l)=(%u,%u,%u,%u)<br/>%s <b>to</b> %s",
-								 &descr.m_renderTargets[barrier.m_idx].m_name[0], barrier.m_surface.m_level,
-								 barrier.m_surface.m_depth, barrier.m_surface.m_face, barrier.m_surface.m_layer,
-								 textureUsageToStr(pool, barrier.m_usageBefore).cstr(),
-								 textureUsageToStr(pool, barrier.m_usageAfter).cstr());
+			barrierLabel.sprintf("<b>%s</b> (mip,dp,f,l)=(%u,%u,%u,%u)<br/>%s <b>to</b> %s", &descr.m_renderTargets[barrier.m_idx].m_name[0],
+								 barrier.m_surface.m_level, barrier.m_surface.m_depth, barrier.m_surface.m_face, barrier.m_surface.m_layer,
+								 textureUsageToStr(pool, barrier.m_usageBefore).cstr(), textureUsageToStr(pool, barrier.m_usageAfter).cstr());
 
 			StringRaii barrierName(&pool);
 			barrierName.sprintf("%s tex barrier%u", batchName.cstr(), barrierIdx);
 
-			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(),
-								  COLORS[batchIdx % COLORS.getSize()], barrierLabel.cstr());
+			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(), COLORS[batchIdx % COLORS.getSize()],
+								  barrierLabel.cstr());
 			slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());
 
 			prevBubble = barrierName;
@@ -1710,14 +1691,13 @@ Error RenderGraph::dumpDependencyDotFile(const RenderGraphDescription& descr, co
 
 			StringRaii barrierLabel(&pool);
 			barrierLabel.sprintf("<b>%s</b><br/>%s <b>to</b> %s", &descr.m_buffers[barrier.m_idx].m_name[0],
-								 bufferUsageToStr(pool, barrier.m_usageBefore).cstr(),
-								 bufferUsageToStr(pool, barrier.m_usageAfter).cstr());
+								 bufferUsageToStr(pool, barrier.m_usageBefore).cstr(), bufferUsageToStr(pool, barrier.m_usageAfter).cstr());
 
 			StringRaii barrierName(&pool);
 			barrierName.sprintf("%s buff barrier%u", batchName.cstr(), barrierIdx);
 
-			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(),
-								  COLORS[batchIdx % COLORS.getSize()], barrierLabel.cstr());
+			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(), COLORS[batchIdx % COLORS.getSize()],
+								  barrierLabel.cstr());
 			slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());
 
 			prevBubble = barrierName;
@@ -1729,14 +1709,13 @@ Error RenderGraph::dumpDependencyDotFile(const RenderGraphDescription& descr, co
 
 			StringRaii barrierLabel(&pool);
 			barrierLabel.sprintf("<b>%s</b><br/>%s <b>to</b> %s", descr.m_as[barrier.m_idx].m_name.getBegin(),
-								 asUsageToStr(pool, barrier.m_usageBefore).cstr(),
-								 asUsageToStr(pool, barrier.m_usageAfter).cstr());
+								 asUsageToStr(pool, barrier.m_usageBefore).cstr(), asUsageToStr(pool, barrier.m_usageAfter).cstr());
 
 			StringRaii barrierName(&pool);
 			barrierName.sprintf("%s AS barrier%u", batchName.cstr(), barrierIdx);
 
-			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(),
-								  COLORS[batchIdx % COLORS.getSize()], barrierLabel.cstr());
+			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold,shape=box,label=< %s >];\n", barrierName.cstr(), COLORS[batchIdx % COLORS.getSize()],
+								  barrierLabel.cstr());
 			slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), barrierName.cstr());
 
 			prevBubble = barrierName;
@@ -1747,8 +1726,7 @@ Error RenderGraph::dumpDependencyDotFile(const RenderGraphDescription& descr, co
 			const RenderPassDescriptionBase& pass = *descr.m_passes[passIdx];
 			StringRaii passName(&pool);
 			passName.sprintf("%s pass", pass.m_name.cstr());
-			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold];\n", passName.cstr(),
-								  COLORS[batchIdx % COLORS.getSize()]);
+			slist.pushBackSprintf("\t\"%s\"[color=%s,style=bold];\n", passName.cstr(), COLORS[batchIdx % COLORS.getSize()]);
 			slist.pushBackSprintf("\t\"%s\"->\"%s\";\n", prevBubble.cstr(), passName.cstr());
 
 			prevBubble = passName;
@@ -1759,8 +1737,7 @@ Error RenderGraph::dumpDependencyDotFile(const RenderGraphDescription& descr, co
 	slist.pushBackSprintf("}");
 
 	File file;
-	ANKI_CHECK(file.open(StringRaii(&pool).sprintf("%s/rgraph_%05u.dot", &path[0], m_version).toCString(),
-						 FileOpenFlag::kWrite));
+	ANKI_CHECK(file.open(StringRaii(&pool).sprintf("%s/rgraph_%05u.dot", &path[0], m_version).toCString(), FileOpenFlag::kWrite));
 	for(const String& s : slist)
 	{
 		ANKI_CHECK(file.writeTextf("%s", &s[0]));

+ 17 - 30
AnKi/Gr/RenderGraph.h

@@ -123,8 +123,7 @@ public:
 
 	void getBufferState(BufferHandle handle, Buffer*& buff, PtrSize& offset, PtrSize& range) const;
 
-	void getRenderTargetState(RenderTargetHandle handle, const TextureSubresourceInfo& subresource,
-							  TexturePtr& tex) const;
+	void getRenderTargetState(RenderTargetHandle handle, const TextureSubresourceInfo& subresource, TexturePtr& tex) const;
 
 	/// Create a whole texture view from a handle
 	TextureViewPtr createTextureView(RenderTargetHandle handle)
@@ -136,8 +135,7 @@ public:
 	}
 
 	/// Convenience method.
-	void bindTextureAndSampler(U32 set, U32 binding, RenderTargetHandle handle,
-							   const TextureSubresourceInfo& subresource, const SamplerPtr& sampler)
+	void bindTextureAndSampler(U32 set, U32 binding, RenderTargetHandle handle, const TextureSubresourceInfo& subresource, const SamplerPtr& sampler)
 	{
 		TexturePtr tex;
 		getRenderTargetState(handle, subresource, tex);
@@ -177,8 +175,7 @@ public:
 	}
 
 	/// Convenience method.
-	void bindImage(U32 set, U32 binding, RenderTargetHandle handle, const TextureSubresourceInfo& subresource,
-				   U32 arrayIdx = 0)
+	void bindImage(U32 set, U32 binding, RenderTargetHandle handle, const TextureSubresourceInfo& subresource, U32 arrayIdx = 0)
 	{
 		TexturePtr tex;
 		getRenderTargetState(handle, subresource, tex);
@@ -193,8 +190,7 @@ public:
 		TexturePtr tex;
 #if ANKI_ENABLE_ASSERTIONS
 		tex = getTexture(handle);
-		ANKI_ASSERT(tex->getLayerCount() == 1 && tex->getMipmapCount() == 1
-					&& tex->getDepthStencilAspect() == DepthStencilAspectBit::kNone);
+		ANKI_ASSERT(tex->getLayerCount() == 1 && tex->getMipmapCount() == 1 && tex->getDepthStencilAspect() == DepthStencilAspectBit::kNone);
 #endif
 		const TextureSubresourceInfo subresource;
 		getRenderTargetState(handle, subresource, tex);
@@ -249,8 +245,7 @@ public:
 	}
 
 	/// Dependency to the whole texture.
-	RenderPassDependency(RenderTargetHandle handle, TextureUsageBit usage,
-						 DepthStencilAspectBit aspect = DepthStencilAspectBit::kNone)
+	RenderPassDependency(RenderTargetHandle handle, TextureUsageBit usage, DepthStencilAspectBit aspect = DepthStencilAspectBit::kNone)
 		: m_texture({handle, usage, TextureSubresourceInfo()})
 		, m_type(Type::kTexture)
 	{
@@ -335,14 +330,12 @@ public:
 		setWork(0, func);
 	}
 
-	void newTextureDependency(RenderTargetHandle handle, TextureUsageBit usage,
-							  const TextureSubresourceInfo& subresource)
+	void newTextureDependency(RenderTargetHandle handle, TextureUsageBit usage, const TextureSubresourceInfo& subresource)
 	{
 		newDependency<RenderPassDependency::Type::kTexture>(RenderPassDependency(handle, usage, subresource));
 	}
 
-	void newTextureDependency(RenderTargetHandle handle, TextureUsageBit usage,
-							  DepthStencilAspectBit aspect = DepthStencilAspectBit::kNone)
+	void newTextureDependency(RenderTargetHandle handle, TextureUsageBit usage, DepthStencilAspectBit aspect = DepthStencilAspectBit::kNone)
 	{
 		newDependency<RenderPassDependency::Type::kTexture>(RenderPassDependency(handle, usage, aspect));
 	}
@@ -465,17 +458,13 @@ public:
 		memset(&m_rtHandles[0], 0xFF, sizeof(m_rtHandles));
 	}
 
-	void setFramebufferInfo(const FramebufferDescription& fbInfo,
-							ConstWeakArray<RenderTargetHandle> colorRenderTargetHandles,
-							RenderTargetHandle depthStencilRenderTargetHandle = {},
-							RenderTargetHandle shadingRateRenderTargetHandle = {}, U32 minx = 0, U32 miny = 0,
-							U32 maxx = kMaxU32, U32 maxy = kMaxU32);
+	void setFramebufferInfo(const FramebufferDescription& fbInfo, ConstWeakArray<RenderTargetHandle> colorRenderTargetHandles,
+							RenderTargetHandle depthStencilRenderTargetHandle = {}, RenderTargetHandle shadingRateRenderTargetHandle = {},
+							U32 minx = 0, U32 miny = 0, U32 maxx = kMaxU32, U32 maxy = kMaxU32);
 
-	void setFramebufferInfo(const FramebufferDescription& fbInfo,
-							std::initializer_list<RenderTargetHandle> colorRenderTargetHandles,
-							RenderTargetHandle depthStencilRenderTargetHandle = {},
-							RenderTargetHandle shadingRateRenderTargetHandle = {}, U32 minx = 0, U32 miny = 0,
-							U32 maxx = kMaxU32, U32 maxy = kMaxU32);
+	void setFramebufferInfo(const FramebufferDescription& fbInfo, std::initializer_list<RenderTargetHandle> colorRenderTargetHandles,
+							RenderTargetHandle depthStencilRenderTargetHandle = {}, RenderTargetHandle shadingRateRenderTargetHandle = {},
+							U32 minx = 0, U32 miny = 0, U32 maxx = kMaxU32, U32 maxy = kMaxU32);
 
 private:
 	Array<RenderTargetHandle, kMaxColorRenderTargets + 2> m_rtHandles;
@@ -536,8 +525,7 @@ public:
 	BufferHandle importBuffer(BufferPtr buff, BufferUsageBit usage, PtrSize offset = 0, PtrSize range = kMaxPtrSize);
 
 	/// Import an AS.
-	AccelerationStructureHandle importAccelerationStructure(AccelerationStructurePtr as,
-															AccelerationStructureUsageBit usage);
+	AccelerationStructureHandle importAccelerationStructure(AccelerationStructurePtr as, AccelerationStructureUsageBit usage);
 
 	/// Gather statistics.
 	void setStatisticsEnabled(Bool gather)
@@ -737,8 +725,8 @@ private:
 	void setBatchBarriers(const RenderGraphDescription& descr);
 
 	TexturePtr getOrCreateRenderTarget(const TextureInitInfo& initInf, U64 hash);
-	FramebufferPtr getOrCreateFramebuffer(const FramebufferDescription& fbDescr, const RenderTargetHandle* rtHandles,
-										  CString name, Bool& drawsToPresentableTex);
+	FramebufferPtr getOrCreateFramebuffer(const FramebufferDescription& fbDescr, const RenderTargetHandle* rtHandles, CString name,
+										  Bool& drawsToPresentableTex);
 
 	/// Every N number of frames clean unused cached items.
 	void periodicCleanup();
@@ -754,8 +742,7 @@ private:
 	template<typename TFunc>
 	static void iterateSurfsOrVolumes(const TexturePtr& tex, const TextureSubresourceInfo& subresource, TFunc func);
 
-	void getCrntUsage(RenderTargetHandle handle, U32 batchIdx, const TextureSubresourceInfo& subresource,
-					  TextureUsageBit& usage) const;
+	void getCrntUsage(RenderTargetHandle handle, U32 batchIdx, const TextureSubresourceInfo& subresource, TextureUsageBit& usage) const;
 
 	/// @name Dump the dependency graph into a file.
 	/// @{

+ 19 - 23
AnKi/Gr/RenderGraph.inl.h

@@ -12,15 +12,12 @@ inline void RenderPassWorkContext::bindAccelerationStructure(U32 set, U32 bindin
 	m_commandBuffer->bindAccelerationStructure(set, binding, m_rgraph->getAs(handle));
 }
 
-inline void RenderPassWorkContext::getBufferState(BufferHandle handle, Buffer*& buff, PtrSize& offset,
-												  PtrSize& range) const
+inline void RenderPassWorkContext::getBufferState(BufferHandle handle, Buffer*& buff, PtrSize& offset, PtrSize& range) const
 {
 	m_rgraph->getCachedBuffer(handle, buff, offset, range);
 }
 
-inline void RenderPassWorkContext::getRenderTargetState(RenderTargetHandle handle,
-														const TextureSubresourceInfo& subresource,
-														TexturePtr& tex) const
+inline void RenderPassWorkContext::getRenderTargetState(RenderTargetHandle handle, const TextureSubresourceInfo& subresource, TexturePtr& tex) const
 {
 	TextureUsageBit usage;
 	m_rgraph->getCrntUsage(handle, m_batchIdx, subresource, usage);
@@ -187,10 +184,11 @@ inline void RenderPassDescriptionBase::newDependency(const RenderPassDependency&
 	}
 }
 
-inline void GraphicsRenderPassDescription::setFramebufferInfo(
-	const FramebufferDescription& fbInfo, std::initializer_list<RenderTargetHandle> colorRenderTargetHandles,
-	RenderTargetHandle depthStencilRenderTargetHandle, RenderTargetHandle shadingRateRenderTargetHandle, U32 minx,
-	U32 miny, U32 maxx, U32 maxy)
+inline void GraphicsRenderPassDescription::setFramebufferInfo(const FramebufferDescription& fbInfo,
+															  std::initializer_list<RenderTargetHandle> colorRenderTargetHandles,
+															  RenderTargetHandle depthStencilRenderTargetHandle,
+															  RenderTargetHandle shadingRateRenderTargetHandle, U32 minx, U32 miny, U32 maxx,
+															  U32 maxy)
 {
 	Array<RenderTargetHandle, kMaxColorRenderTargets> rts;
 	U32 count = 0;
@@ -198,14 +196,15 @@ inline void GraphicsRenderPassDescription::setFramebufferInfo(
 	{
 		rts[count++] = h;
 	}
-	setFramebufferInfo(fbInfo, ConstWeakArray<RenderTargetHandle>(&rts[0], count), depthStencilRenderTargetHandle,
-					   shadingRateRenderTargetHandle, minx, miny, maxx, maxy);
+	setFramebufferInfo(fbInfo, ConstWeakArray<RenderTargetHandle>(&rts[0], count), depthStencilRenderTargetHandle, shadingRateRenderTargetHandle,
+					   minx, miny, maxx, maxy);
 }
 
-inline void GraphicsRenderPassDescription::setFramebufferInfo(
-	const FramebufferDescription& fbInfo, ConstWeakArray<RenderTargetHandle> colorRenderTargetHandles,
-	RenderTargetHandle depthStencilRenderTargetHandle, RenderTargetHandle shadingRateRenderTargetHandle, U32 minx,
-	U32 miny, U32 maxx, U32 maxy)
+inline void GraphicsRenderPassDescription::setFramebufferInfo(const FramebufferDescription& fbInfo,
+															  ConstWeakArray<RenderTargetHandle> colorRenderTargetHandles,
+															  RenderTargetHandle depthStencilRenderTargetHandle,
+															  RenderTargetHandle shadingRateRenderTargetHandle, U32 minx, U32 miny, U32 maxx,
+															  U32 maxy)
 {
 #if ANKI_ENABLE_ASSERTIONS
 	ANKI_ASSERT(fbInfo.isBacked() && "Forgot call GraphicsRenderPassFramebufferInfo::bake");
@@ -299,8 +298,7 @@ inline RenderTargetHandle RenderGraphDescription::importRenderTarget(TexturePtr
 inline RenderTargetHandle RenderGraphDescription::newRenderTarget(const RenderTargetDescription& initInf)
 {
 	ANKI_ASSERT(initInf.m_hash && "Forgot to call RenderTargetDescription::bake");
-	ANKI_ASSERT(initInf.m_usage == TextureUsageBit::kNone
-				&& "Don't need to supply the usage. Render grap will find it");
+	ANKI_ASSERT(initInf.m_usage == TextureUsageBit::kNone && "Don't need to supply the usage. Render grap will find it");
 	RT& rt = *m_renderTargets.emplaceBack();
 	rt.m_initInfo = initInf;
 	rt.m_hash = initInf.m_hash;
@@ -313,8 +311,7 @@ inline RenderTargetHandle RenderGraphDescription::newRenderTarget(const RenderTa
 	return out;
 }
 
-inline BufferHandle RenderGraphDescription::importBuffer(BufferPtr buff, BufferUsageBit usage, PtrSize offset,
-														 PtrSize range)
+inline BufferHandle RenderGraphDescription::importBuffer(BufferPtr buff, BufferUsageBit usage, PtrSize offset, PtrSize range)
 {
 	// Checks
 	if(range == kMaxPtrSize)
@@ -330,8 +327,7 @@ inline BufferHandle RenderGraphDescription::importBuffer(BufferPtr buff, BufferU
 
 	for([[maybe_unused]] const Buffer& bb : m_buffers)
 	{
-		ANKI_ASSERT((bb.m_importedBuff != buff || !bufferRangeOverlaps(bb.m_offset, bb.m_range, offset, range))
-					&& "Range already imported");
+		ANKI_ASSERT((bb.m_importedBuff != buff || !bufferRangeOverlaps(bb.m_offset, bb.m_range, offset, range)) && "Range already imported");
 	}
 
 	Buffer& b = *m_buffers.emplaceBack();
@@ -346,8 +342,8 @@ inline BufferHandle RenderGraphDescription::importBuffer(BufferPtr buff, BufferU
 	return out;
 }
 
-inline AccelerationStructureHandle
-RenderGraphDescription::importAccelerationStructure(AccelerationStructurePtr as, AccelerationStructureUsageBit usage)
+inline AccelerationStructureHandle RenderGraphDescription::importAccelerationStructure(AccelerationStructurePtr as,
+																					   AccelerationStructureUsageBit usage)
 {
 	for([[maybe_unused]] const AS& a : m_as)
 	{

+ 1 - 3
AnKi/Gr/Sampler.h

@@ -38,9 +38,7 @@ public:
 		const U8* first = reinterpret_cast<const U8*>(&m_minLod);
 		const U8* last = reinterpret_cast<const U8*>(&m_addressing) + sizeof(m_addressing);
 		const U32 size = U32(last - first);
-		ANKI_ASSERT(size
-					== sizeof(F32) * 3 + sizeof(SamplingFilter) * 2 + sizeof(CompareOperation) + sizeof(I8)
-						   + sizeof(SamplingAddressing));
+		ANKI_ASSERT(size == sizeof(F32) * 3 + sizeof(SamplingFilter) * 2 + sizeof(CompareOperation) + sizeof(I8) + sizeof(SamplingAddressing));
 		return anki::computeHash(first, size);
 	}
 };

+ 1 - 3
AnKi/Gr/ShaderProgram.cpp

@@ -22,9 +22,7 @@ Bool ShaderProgramInitInfo::isValid() const
 		}
 	}
 
-	if(!!graphicsMask
-	   && (graphicsMask & (ShaderTypeBit::kVertex | ShaderTypeBit::kFragment))
-			  != (ShaderTypeBit::kVertex | ShaderTypeBit::kFragment))
+	if(!!graphicsMask && (graphicsMask & (ShaderTypeBit::kVertex | ShaderTypeBit::kFragment)) != (ShaderTypeBit::kVertex | ShaderTypeBit::kFragment))
 	{
 		return false;
 	}

+ 4 - 5
AnKi/Gr/Texture.h

@@ -45,8 +45,8 @@ public:
 		const U8* last = reinterpret_cast<const U8*>(&m_samples) + sizeof(m_samples);
 		const U size = U(last - first);
 		ANKI_ASSERT(size
-					== sizeof(m_width) + sizeof(m_height) + sizeof(m_depth) + sizeof(m_layerCount) + sizeof(m_format)
-						   + sizeof(m_usage) + sizeof(m_type) + sizeof(m_mipmapCount) + sizeof(m_samples));
+					== sizeof(m_width) + sizeof(m_height) + sizeof(m_depth) + sizeof(m_layerCount) + sizeof(m_format) + sizeof(m_usage)
+						   + sizeof(m_type) + sizeof(m_mipmapCount) + sizeof(m_samples));
 		return anki::computeHash(first, size);
 	}
 
@@ -198,9 +198,8 @@ public:
 		ANKI_ASSERT(isSubresourceValid(subresource));
 		if(m_texType != TextureType::k3D)
 		{
-			return subresource.m_firstMipmap == 0 && subresource.m_mipmapCount == m_mipCount
-				   && subresource.m_faceCount == 1 && subresource.m_layerCount == 1
-				   && subresource.m_depthStencilAspect == m_aspect;
+			return subresource.m_firstMipmap == 0 && subresource.m_mipmapCount == m_mipCount && subresource.m_faceCount == 1
+				   && subresource.m_layerCount == 1 && subresource.m_depthStencilAspect == m_aspect;
 		}
 		else
 		{

+ 5 - 7
AnKi/Gr/TextureView.h

@@ -28,8 +28,7 @@ public:
 		m_firstLayer = 0;
 		m_layerCount = tex->getLayerCount();
 		m_firstFace = 0;
-		m_faceCount =
-			(tex->getTextureType() == TextureType::kCubeArray || tex->getTextureType() == TextureType::kCube) ? 6 : 1;
+		m_faceCount = (tex->getTextureType() == TextureType::kCubeArray || tex->getTextureType() == TextureType::kCube) ? 6 : 1;
 
 		m_depthStencilAspect = getFormatInfo(tex->getFormat()).m_depthStencil;
 	}
@@ -39,8 +38,8 @@ public:
 	{
 	}
 
-	TextureViewInitInfo(TexturePtr tex, const TextureSurfaceInfo& surf,
-						DepthStencilAspectBit aspect = DepthStencilAspectBit::kNone, CString name = {})
+	TextureViewInitInfo(TexturePtr tex, const TextureSurfaceInfo& surf, DepthStencilAspectBit aspect = DepthStencilAspectBit::kNone,
+						CString name = {})
 		: GrBaseInitInfo(name)
 		, m_texture(tex)
 	{
@@ -119,9 +118,8 @@ protected:
 
 	Bool initialized() const
 	{
-		return m_texType != TextureType::kCount && m_subresource.m_firstMipmap < kMaxU32
-			   && m_subresource.m_mipmapCount < kMaxU32 && m_subresource.m_firstLayer < kMaxU32
-			   && m_subresource.m_layerCount < kMaxU32 && m_subresource.m_firstFace < kMaxU8
+		return m_texType != TextureType::kCount && m_subresource.m_firstMipmap < kMaxU32 && m_subresource.m_mipmapCount < kMaxU32
+			   && m_subresource.m_firstLayer < kMaxU32 && m_subresource.m_layerCount < kMaxU32 && m_subresource.m_firstFace < kMaxU8
 			   && m_subresource.m_faceCount < kMaxU8;
 	}
 

+ 9 - 12
AnKi/Gr/Utils/Functions.cpp

@@ -8,8 +8,7 @@
 namespace anki {
 
 template<typename T>
-static void writeShaderBlockMemorySanityChecks(const ShaderVariableBlockInfo& varBlkInfo,
-											   [[maybe_unused]] const void* elements,
+static void writeShaderBlockMemorySanityChecks(const ShaderVariableBlockInfo& varBlkInfo, [[maybe_unused]] const void* elements,
 											   [[maybe_unused]] U32 elementsCount, [[maybe_unused]] void* buffBegin,
 											   [[maybe_unused]] const void* buffEnd)
 {
@@ -34,8 +33,8 @@ static void writeShaderBlockMemorySanityChecks(const ShaderVariableBlockInfo& va
 }
 
 template<typename T>
-static void writeShaderBlockMemorySimple(const ShaderVariableBlockInfo& varBlkInfo, const void* elements,
-										 U32 elementsCount, void* buffBegin, const void* buffEnd)
+static void writeShaderBlockMemorySimple(const ShaderVariableBlockInfo& varBlkInfo, const void* elements, U32 elementsCount, void* buffBegin,
+										 const void* buffEnd)
 {
 	writeShaderBlockMemorySanityChecks<T>(varBlkInfo, elements, elementsCount, buffBegin, buffEnd);
 
@@ -53,8 +52,8 @@ static void writeShaderBlockMemorySimple(const ShaderVariableBlockInfo& varBlkIn
 }
 
 template<typename T, typename Vec>
-static void writeShaderBlockMemoryMatrix(const ShaderVariableBlockInfo& varBlkInfo, const void* elements,
-										 U32 elementsCount, void* buffBegin, const void* buffEnd)
+static void writeShaderBlockMemoryMatrix(const ShaderVariableBlockInfo& varBlkInfo, const void* elements, U32 elementsCount, void* buffBegin,
+										 const void* buffEnd)
 {
 	writeShaderBlockMemorySanityChecks<T>(varBlkInfo, elements, elementsCount, buffBegin, buffEnd);
 	ANKI_ASSERT(varBlkInfo.m_matrixStride > 0);
@@ -102,8 +101,7 @@ template<typename T, Bool isMatrix = IsShaderVarDataTypeAMatrix<T>::kValue>
 class WriteShaderBlockMemory
 {
 public:
-	void operator()(const ShaderVariableBlockInfo& varBlkInfo, const void* elements, U32 elementsCount, void* buffBegin,
-					const void* buffEnd)
+	void operator()(const ShaderVariableBlockInfo& varBlkInfo, const void* elements, U32 elementsCount, void* buffBegin, const void* buffEnd)
 	{
 		using RowVec = typename T::RowVec;
 		writeShaderBlockMemoryMatrix<T, RowVec>(varBlkInfo, elements, elementsCount, buffBegin, buffEnd);
@@ -114,8 +112,7 @@ template<typename T>
 class WriteShaderBlockMemory<T, false>
 {
 public:
-	void operator()(const ShaderVariableBlockInfo& varBlkInfo, const void* elements, U32 elementsCount, void* buffBegin,
-					const void* buffEnd)
+	void operator()(const ShaderVariableBlockInfo& varBlkInfo, const void* elements, U32 elementsCount, void* buffBegin, const void* buffEnd)
 	{
 		writeShaderBlockMemorySimple<T>(varBlkInfo, elements, elementsCount, buffBegin, buffEnd);
 	}
@@ -123,8 +120,8 @@ public:
 
 } // namespace
 
-void writeShaderBlockMemory(ShaderVariableDataType type, const ShaderVariableBlockInfo& varBlkInfo,
-							const void* elements, U32 elementsCount, void* buffBegin, const void* buffEnd)
+void writeShaderBlockMemory(ShaderVariableDataType type, const ShaderVariableBlockInfo& varBlkInfo, const void* elements, U32 elementsCount,
+							void* buffBegin, const void* buffEnd)
 {
 	switch(type)
 	{

+ 8 - 9
AnKi/Gr/Utils/Functions.h

@@ -10,19 +10,18 @@
 
 namespace anki {
 
-inline Bool stencilTestDisabled(StencilOperation stencilFail, StencilOperation stencilPassDepthFail,
-								StencilOperation stencilPassDepthPass, CompareOperation compare)
+inline Bool stencilTestDisabled(StencilOperation stencilFail, StencilOperation stencilPassDepthFail, StencilOperation stencilPassDepthPass,
+								CompareOperation compare)
 {
 	return stencilFail == StencilOperation::kKeep && stencilPassDepthFail == StencilOperation::kKeep
 		   && stencilPassDepthPass == StencilOperation::kKeep && compare == CompareOperation::kAlways;
 }
 
-inline Bool blendingDisabled(BlendFactor srcFactorRgb, BlendFactor dstFactorRgb, BlendFactor srcFactorA,
-							 BlendFactor dstFactorA, BlendOperation opRgb, BlendOperation opA)
+inline Bool blendingDisabled(BlendFactor srcFactorRgb, BlendFactor dstFactorRgb, BlendFactor srcFactorA, BlendFactor dstFactorA, BlendOperation opRgb,
+							 BlendOperation opA)
 {
-	Bool dontWantBlend = srcFactorRgb == BlendFactor::kOne && dstFactorRgb == BlendFactor::kZero
-						 && srcFactorA == BlendFactor::kOne && dstFactorA == BlendFactor::kZero
-						 && (opRgb == BlendOperation::kAdd || opRgb == BlendOperation::kSubtract)
+	Bool dontWantBlend = srcFactorRgb == BlendFactor::kOne && dstFactorRgb == BlendFactor::kZero && srcFactorA == BlendFactor::kOne
+						 && dstFactorA == BlendFactor::kZero && (opRgb == BlendOperation::kAdd || opRgb == BlendOperation::kSubtract)
 						 && (opA == BlendOperation::kAdd || opA == BlendOperation::kSubtract);
 	return dontWantBlend;
 }
@@ -42,7 +41,7 @@ ShaderVariableDataType getShaderVariableTypeFromTypename();
 #undef ANKI_SVDT_MACRO
 
 /// Populate the memory of a variable that is inside a shader block.
-void writeShaderBlockMemory(ShaderVariableDataType type, const ShaderVariableBlockInfo& varBlkInfo,
-							const void* elements, U32 elementsCount, void* buffBegin, const void* buffEnd);
+void writeShaderBlockMemory(ShaderVariableDataType type, const ShaderVariableBlockInfo& varBlkInfo, const void* elements, U32 elementsCount,
+							void* buffBegin, const void* buffEnd);
 
 } // end namespace anki

+ 4 - 6
AnKi/Gr/Utils/SegregatedListsGpuMemoryPool.cpp

@@ -9,8 +9,7 @@
 
 namespace anki {
 
-class SegregatedListsGpuMemoryPool::Chunk :
-	public SegregatedListsAllocatorBuilderChunkBase<SingletonMemoryPoolWrapper<GrMemoryPool>>
+class SegregatedListsGpuMemoryPool::Chunk : public SegregatedListsAllocatorBuilderChunkBase<SingletonMemoryPoolWrapper<GrMemoryPool>>
 {
 public:
 	PtrSize m_offsetInGpuBuffer;
@@ -50,8 +49,8 @@ public:
 	/// @}
 };
 
-void SegregatedListsGpuMemoryPool::init(BufferUsageBit gpuBufferUsage, ConstWeakArray<PtrSize> classUpperSizes,
-										PtrSize initialGpuBufferSize, CString bufferName, Bool allowCoWs)
+void SegregatedListsGpuMemoryPool::init(BufferUsageBit gpuBufferUsage, ConstWeakArray<PtrSize> classUpperSizes, PtrSize initialGpuBufferSize,
+										CString bufferName, Bool allowCoWs)
 {
 	ANKI_ASSERT(!isInitialized());
 
@@ -245,8 +244,7 @@ void SegregatedListsGpuMemoryPool::endFrame()
 	m_garbage[m_frame].destroy();
 }
 
-void SegregatedListsGpuMemoryPool::getStats(F32& externalFragmentation, PtrSize& userAllocatedSize,
-											PtrSize& totalSize) const
+void SegregatedListsGpuMemoryPool::getStats(F32& externalFragmentation, PtrSize& userAllocatedSize, PtrSize& totalSize) const
 {
 	ANKI_ASSERT(isInitialized());
 

+ 3 - 4
AnKi/Gr/Utils/SegregatedListsGpuMemoryPool.h

@@ -56,8 +56,8 @@ public:
 
 	SegregatedListsGpuMemoryPool& operator=(const SegregatedListsGpuMemoryPool&) = delete;
 
-	void init(BufferUsageBit gpuBufferUsage, ConstWeakArray<PtrSize> classUpperSizes, PtrSize initialGpuBufferSize,
-			  CString bufferName, Bool allowCoWs);
+	void init(BufferUsageBit gpuBufferUsage, ConstWeakArray<PtrSize> classUpperSizes, PtrSize initialGpuBufferSize, CString bufferName,
+			  Bool allowCoWs);
 
 	void destroy();
 
@@ -86,8 +86,7 @@ public:
 private:
 	class BuilderInterface;
 	class Chunk;
-	using Builder =
-		SegregatedListsAllocatorBuilder<Chunk, BuilderInterface, DummyMutex, SingletonMemoryPoolWrapper<GrMemoryPool>>;
+	using Builder = SegregatedListsAllocatorBuilder<Chunk, BuilderInterface, DummyMutex, SingletonMemoryPoolWrapper<GrMemoryPool>>;
 
 	BufferUsageBit m_bufferUsage = BufferUsageBit::kNone;
 	GrDynamicArray<PtrSize> m_classes;

+ 2 - 3
AnKi/Gr/Utils/StackGpuMemoryPool.cpp

@@ -117,9 +117,8 @@ StackGpuMemoryPool::~StackGpuMemoryPool()
 	}
 }
 
-void StackGpuMemoryPool::init(PtrSize initialSize, F64 nextChunkGrowScale, PtrSize nextChunkGrowBias, U32 alignment,
-							  BufferUsageBit bufferUsage, BufferMapAccessBit bufferMapping, Bool allowToGrow,
-							  CString bufferName)
+void StackGpuMemoryPool::init(PtrSize initialSize, F64 nextChunkGrowScale, PtrSize nextChunkGrowBias, U32 alignment, BufferUsageBit bufferUsage,
+							  BufferMapAccessBit bufferMapping, Bool allowToGrow, CString bufferName)
 {
 	ANKI_ASSERT(m_builder == nullptr);
 	ANKI_ASSERT(initialSize > 0 && alignment > 0);

+ 2 - 2
AnKi/Gr/Utils/StackGpuMemoryPool.h

@@ -25,8 +25,8 @@ public:
 
 	StackGpuMemoryPool& operator=(const StackGpuMemoryPool&) = delete; // Non-copyable
 
-	void init(PtrSize initialSize, F64 nextChunkGrowScale, PtrSize nextChunkGrowBias, U32 alignment,
-			  BufferUsageBit bufferUsage, BufferMapAccessBit bufferMapping, Bool allowToGrow, CString bufferName);
+	void init(PtrSize initialSize, F64 nextChunkGrowScale, PtrSize nextChunkGrowBias, U32 alignment, BufferUsageBit bufferUsage,
+			  BufferMapAccessBit bufferMapping, Bool allowToGrow, CString bufferName);
 
 	/// @note It's thread-safe against other allocate()
 	void allocate(PtrSize size, PtrSize& outOffset, Buffer*& buffer)

+ 1 - 2
AnKi/Gr/Vulkan/AccelerationStructure.cpp

@@ -11,8 +11,7 @@ namespace anki {
 
 AccelerationStructure* AccelerationStructure::newInstance(const AccelerationStructureInitInfo& init)
 {
-	AccelerationStructureImpl* impl =
-		anki::newInstance<AccelerationStructureImpl>(GrMemoryPool::getSingleton(), init.getName());
+	AccelerationStructureImpl* impl = anki::newInstance<AccelerationStructureImpl>(GrMemoryPool::getSingleton(), init.getName());
 	const Error err = impl->init(init);
 	if(err)
 	{

+ 9 - 15
AnKi/Gr/Vulkan/AccelerationStructureImpl.cpp

@@ -39,8 +39,7 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 		geom.geometry.triangles.vertexStride = inf.m_bottomLevel.m_positionStride;
 		geom.geometry.triangles.maxVertex = inf.m_bottomLevel.m_positionCount - 1;
 		geom.geometry.triangles.indexType = convertIndexType(inf.m_bottomLevel.m_indexType);
-		geom.geometry.triangles.indexData.deviceAddress =
-			inf.m_bottomLevel.m_indexBuffer->getGpuAddress() + inf.m_bottomLevel.m_indexBufferOffset;
+		geom.geometry.triangles.indexData.deviceAddress = inf.m_bottomLevel.m_indexBuffer->getGpuAddress() + inf.m_bottomLevel.m_indexBufferOffset;
 		geom.flags = 0; // VK_GEOMETRY_OPAQUE_BIT_KHR; // TODO
 
 		// Geom build info
@@ -56,8 +55,7 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 		VkAccelerationStructureBuildSizesInfoKHR buildSizes = {};
 		const U32 primitiveCount = inf.m_bottomLevel.m_indexCount / 3;
 		buildSizes.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR;
-		vkGetAccelerationStructureBuildSizesKHR(vkdev, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR, &buildInfo,
-												&primitiveCount, &buildSizes);
+		vkGetAccelerationStructureBuildSizesKHR(vkdev, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR, &buildInfo, &primitiveCount, &buildSizes);
 		m_scratchBufferSize = U32(buildSizes.buildScratchSize);
 
 		// Create the buffer that holds the AS memory
@@ -99,8 +97,8 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 		buffInit.m_mapAccess = BufferMapAccessBit::kWrite;
 		m_topLevelInfo.m_instancesBuffer = getGrManagerImpl().newBuffer(buffInit);
 
-		VkAccelerationStructureInstanceKHR* instances = static_cast<VkAccelerationStructureInstanceKHR*>(
-			m_topLevelInfo.m_instancesBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));
+		VkAccelerationStructureInstanceKHR* instances =
+			static_cast<VkAccelerationStructureInstanceKHR*>(m_topLevelInfo.m_instancesBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));
 		for(U32 i = 0; i < inf.m_topLevel.m_instances.getSize(); ++i)
 		{
 			VkAccelerationStructureInstanceKHR& outInst = instances[i];
@@ -110,10 +108,8 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 			outInst.instanceCustomIndex = i & 0xFFFFFF;
 			outInst.mask = inInst.m_mask;
 			outInst.instanceShaderBindingTableRecordOffset = inInst.m_hitgroupSbtRecordIndex & 0xFFFFFF;
-			outInst.flags = VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR
-							| VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR;
-			outInst.accelerationStructureReference =
-				static_cast<const AccelerationStructureImpl&>(*inInst.m_bottomLevel).m_deviceAddress;
+			outInst.flags = VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR | VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR;
+			outInst.accelerationStructureReference = static_cast<const AccelerationStructureImpl&>(*inInst.m_bottomLevel).m_deviceAddress;
 			ANKI_ASSERT(outInst.accelerationStructureReference != 0);
 
 			// Hold the reference
@@ -145,8 +141,7 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 		VkAccelerationStructureBuildSizesInfoKHR buildSizes = {};
 		const U32 instanceCount = inf.m_topLevel.m_instances.getSize();
 		buildSizes.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR;
-		vkGetAccelerationStructureBuildSizesKHR(vkdev, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR, &buildInfo,
-												&instanceCount, &buildSizes);
+		vkGetAccelerationStructureBuildSizesKHR(vkdev, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR, &buildInfo, &instanceCount, &buildSizes);
 		m_scratchBufferSize = U32(buildSizes.buildScratchSize);
 
 		// Create the buffer that holds the AS memory
@@ -175,9 +170,8 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 	return Error::kNone;
 }
 
-void AccelerationStructureImpl::computeBarrierInfo(AccelerationStructureUsageBit before,
-												   AccelerationStructureUsageBit after, VkPipelineStageFlags& srcStages,
-												   VkAccessFlags& srcAccesses, VkPipelineStageFlags& dstStages,
+void AccelerationStructureImpl::computeBarrierInfo(AccelerationStructureUsageBit before, AccelerationStructureUsageBit after,
+												   VkPipelineStageFlags& srcStages, VkAccessFlags& srcAccesses, VkPipelineStageFlags& dstStages,
 												   VkAccessFlags& dstAccesses)
 {
 	// Before

+ 2 - 3
AnKi/Gr/Vulkan/AccelerationStructureImpl.h

@@ -46,9 +46,8 @@ public:
 		rangeInfo = m_rangeInfo;
 	}
 
-	static void computeBarrierInfo(AccelerationStructureUsageBit before, AccelerationStructureUsageBit after,
-								   VkPipelineStageFlags& srcStages, VkAccessFlags& srcAccesses,
-								   VkPipelineStageFlags& dstStages, VkAccessFlags& dstAccesses);
+	static void computeBarrierInfo(AccelerationStructureUsageBit before, AccelerationStructureUsageBit after, VkPipelineStageFlags& srcStages,
+								   VkAccessFlags& srcAccesses, VkPipelineStageFlags& dstStages, VkAccessFlags& dstAccesses);
 
 private:
 	class ASBottomLevelInfo

+ 16 - 25
AnKi/Gr/Vulkan/BufferImpl.cpp

@@ -46,8 +46,8 @@ BufferImpl::~BufferImpl()
 Error BufferImpl::init(const BufferInitInfo& inf)
 {
 	ANKI_ASSERT(!isCreated());
-	const Bool exposeGpuAddress = !!(getGrManagerImpl().getExtensions() & VulkanExtensions::kKHR_buffer_device_address)
-								  && !!(inf.m_usage & ~BufferUsageBit::kAllTransfer);
+	const Bool exposeGpuAddress =
+		!!(getGrManagerImpl().getExtensions() & VulkanExtensions::kKHR_buffer_device_address) && !!(inf.m_usage & ~BufferUsageBit::kAllTransfer);
 
 	PtrSize size = inf.m_size;
 	BufferMapAccessBit access = inf.m_mapAccess;
@@ -134,11 +134,8 @@ Error BufferImpl::init(const BufferInitInfo& inf)
 		// Read or read/write
 
 		// Cached & coherent
-		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
-																		 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
-																			 | VK_MEMORY_PROPERTY_HOST_CACHED_BIT
-																			 | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
-																		 0);
+		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
+			req.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, 0);
 
 		// Fallback: Just cached
 		if(memIdx == kMaxU32)
@@ -159,14 +156,13 @@ Error BufferImpl::init(const BufferInitInfo& inf)
 		ANKI_ASSERT(access == BufferMapAccessBit::kNone);
 
 		// Device only
-		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
-			req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
+		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+																		 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
 
 		// Fallback: Device with anything else
 		if(memIdx == kMaxU32)
 		{
-			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
-																			 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0);
+			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0);
 		}
 	}
 
@@ -305,14 +301,12 @@ VkAccessFlags BufferImpl::computeAccessMask(BufferUsageBit usage)
 	constexpr BufferUsageBit kShaderRead = BufferUsageBit::kStorageGeometryRead | BufferUsageBit::kStorageFragmentRead
 										   | BufferUsageBit::kStorageComputeRead | BufferUsageBit::kStorageTraceRaysRead
 										   | BufferUsageBit::kTextureGeometryRead | BufferUsageBit::kTextureFragmentRead
-										   | BufferUsageBit::kTextureComputeRead
-										   | BufferUsageBit::kTextureTraceRaysRead;
+										   | BufferUsageBit::kTextureComputeRead | BufferUsageBit::kTextureTraceRaysRead;
 
-	constexpr BufferUsageBit kShaderWrite =
-		BufferUsageBit::kStorageGeometryWrite | BufferUsageBit::kStorageFragmentWrite
-		| BufferUsageBit::kStorageComputeWrite | BufferUsageBit::kStorageTraceRaysWrite
-		| BufferUsageBit::kTextureGeometryWrite | BufferUsageBit::kTextureFragmentWrite
-		| BufferUsageBit::kTextureComputeWrite | BufferUsageBit::kTextureTraceRaysWrite;
+	constexpr BufferUsageBit kShaderWrite = BufferUsageBit::kStorageGeometryWrite | BufferUsageBit::kStorageFragmentWrite
+											| BufferUsageBit::kStorageComputeWrite | BufferUsageBit::kStorageTraceRaysWrite
+											| BufferUsageBit::kTextureGeometryWrite | BufferUsageBit::kTextureFragmentWrite
+											| BufferUsageBit::kTextureComputeWrite | BufferUsageBit::kTextureTraceRaysWrite;
 
 	if(!!(usage & BufferUsageBit::kAllUniform))
 	{
@@ -362,9 +356,8 @@ VkAccessFlags BufferImpl::computeAccessMask(BufferUsageBit usage)
 	return mask;
 }
 
-void BufferImpl::computeBarrierInfo(BufferUsageBit before, BufferUsageBit after, VkPipelineStageFlags& srcStages,
-									VkAccessFlags& srcAccesses, VkPipelineStageFlags& dstStages,
-									VkAccessFlags& dstAccesses) const
+void BufferImpl::computeBarrierInfo(BufferUsageBit before, BufferUsageBit after, VkPipelineStageFlags& srcStages, VkAccessFlags& srcAccesses,
+									VkPipelineStageFlags& dstStages, VkAccessFlags& dstAccesses) const
 {
 	ANKI_ASSERT(usageValid(before) && usageValid(after));
 	ANKI_ASSERT(!!after);
@@ -388,11 +381,9 @@ VkBufferView BufferImpl::getOrCreateBufferView(Format fmt, PtrSize offset, PtrSi
 	ANKI_ASSERT(!!(m_usage & BufferUsageBit::kAllTexture));
 	ANKI_ASSERT(offset + range <= m_size);
 
-	ANKI_ASSERT(isAligned(getGrManagerImpl().getDeviceCapabilities().m_textureBufferBindOffsetAlignment, offset)
-				&& "Offset not aligned");
+	ANKI_ASSERT(isAligned(getGrManagerImpl().getDeviceCapabilities().m_textureBufferBindOffsetAlignment, offset) && "Offset not aligned");
 
-	ANKI_ASSERT((range % getFormatInfo(fmt).m_texelSize) == 0
-				&& "Range doesn't align with the number of texel elements");
+	ANKI_ASSERT((range % getFormatInfo(fmt).m_texelSize) == 0 && "Range doesn't align with the number of texel elements");
 
 	[[maybe_unused]] const PtrSize elementCount = range / getFormatInfo(fmt).m_texelSize;
 	ANKI_ASSERT(elementCount <= getGrManagerImpl().getPhysicalDeviceProperties().limits.maxTexelBufferElements);

+ 2 - 3
AnKi/Gr/Vulkan/BufferImpl.h

@@ -56,9 +56,8 @@ public:
 		return m_actualSize;
 	}
 
-	void computeBarrierInfo(BufferUsageBit before, BufferUsageBit after, VkPipelineStageFlags& srcStages,
-							VkAccessFlags& srcAccesses, VkPipelineStageFlags& dstStages,
-							VkAccessFlags& dstAccesses) const;
+	void computeBarrierInfo(BufferUsageBit before, BufferUsageBit after, VkPipelineStageFlags& srcStages, VkAccessFlags& srcAccesses,
+							VkPipelineStageFlags& dstStages, VkAccessFlags& dstAccesses) const;
 
 	ANKI_FORCE_INLINE void flush(PtrSize offset, PtrSize range) const
 	{

+ 27 - 46
AnKi/Gr/Vulkan/CommandBuffer.cpp

@@ -38,10 +38,9 @@ void CommandBuffer::flush(ConstWeakArray<FencePtr> waitFences, FencePtr* signalF
 		}
 
 		MicroSemaphorePtr signalSemaphore;
-		getGrManagerImpl().flushCommandBuffer(
-			self.getMicroCommandBuffer(), self.renderedToDefaultFramebuffer(),
-			WeakArray<MicroSemaphorePtr>(waitSemaphores.getBegin(), waitFences.getSize()),
-			(signalFence) ? &signalSemaphore : nullptr);
+		getGrManagerImpl().flushCommandBuffer(self.getMicroCommandBuffer(), self.renderedToDefaultFramebuffer(),
+											  WeakArray<MicroSemaphorePtr>(waitSemaphores.getBegin(), waitFences.getSize()),
+											  (signalFence) ? &signalSemaphore : nullptr);
 
 		if(signalFence)
 		{
@@ -57,8 +56,7 @@ void CommandBuffer::flush(ConstWeakArray<FencePtr> waitFences, FencePtr* signalF
 	}
 }
 
-void CommandBuffer::bindVertexBuffer(U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize stride,
-									 VertexStepRate stepRate)
+void CommandBuffer::bindVertexBuffer(U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize stride, VertexStepRate stepRate)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.bindVertexBufferInternal(binding, buff, offset, stride, stepRate);
@@ -112,8 +110,8 @@ void CommandBuffer::setPolygonOffset(F32 factor, F32 units)
 	self.setPolygonOffsetInternal(factor, units);
 }
 
-void CommandBuffer::setStencilOperations(FaceSelectionBit face, StencilOperation stencilFail,
-										 StencilOperation stencilPassDepthFail, StencilOperation stencilPassDepthPass)
+void CommandBuffer::setStencilOperations(FaceSelectionBit face, StencilOperation stencilFail, StencilOperation stencilPassDepthFail,
+										 StencilOperation stencilPassDepthPass)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.setStencilOperationsInternal(face, stencilFail, stencilPassDepthFail, stencilPassDepthPass);
@@ -167,8 +165,7 @@ void CommandBuffer::setColorChannelWriteMask(U32 attachment, ColorBit mask)
 	self.setColorChannelWriteMaskInternal(attachment, mask);
 }
 
-void CommandBuffer::setBlendFactors(U32 attachment, BlendFactor srcRgb, BlendFactor dstRgb, BlendFactor srcA,
-									BlendFactor dstA)
+void CommandBuffer::setBlendFactors(U32 attachment, BlendFactor srcRgb, BlendFactor dstRgb, BlendFactor srcA, BlendFactor dstA)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.setBlendFactorsInternal(attachment, srcRgb, dstRgb, srcA, dstA);
@@ -180,8 +177,7 @@ void CommandBuffer::setBlendOperation(U32 attachment, BlendOperation funcRgb, Bl
 	self.setBlendOperationInternal(attachment, funcRgb, funcA);
 }
 
-void CommandBuffer::bindTextureAndSampler(U32 set, U32 binding, const TextureViewPtr& texView,
-										  const SamplerPtr& sampler, U32 arrayIdx)
+void CommandBuffer::bindTextureAndSampler(U32 set, U32 binding, const TextureViewPtr& texView, const SamplerPtr& sampler, U32 arrayIdx)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.bindTextureAndSamplerInternal(set, binding, texView, sampler, arrayIdx);
@@ -199,15 +195,13 @@ void CommandBuffer::bindSampler(U32 set, U32 binding, const SamplerPtr& sampler,
 	self.bindSamplerInternal(set, binding, sampler, arrayIdx);
 }
 
-void CommandBuffer::bindUniformBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range,
-									  U32 arrayIdx)
+void CommandBuffer::bindUniformBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range, U32 arrayIdx)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.bindUniformBufferInternal(set, binding, buff, offset, range, arrayIdx);
 }
 
-void CommandBuffer::bindStorageBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range,
-									  U32 arrayIdx)
+void CommandBuffer::bindStorageBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range, U32 arrayIdx)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.bindStorageBufferInternal(set, binding, buff, offset, range, arrayIdx);
@@ -225,8 +219,7 @@ void CommandBuffer::bindAccelerationStructure(U32 set, U32 binding, const Accele
 	self.bindAccelerationStructureInternal(set, binding, as, arrayIdx);
 }
 
-void CommandBuffer::bindReadOnlyTextureBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset,
-											  PtrSize range, Format fmt, U32 arrayIdx)
+void CommandBuffer::bindReadOnlyTextureBuffer(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range, Format fmt, U32 arrayIdx)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.bindReadOnlyTextureBufferInternal(set, binding, buff, offset, range, fmt, arrayIdx);
@@ -244,10 +237,8 @@ void CommandBuffer::bindShaderProgram(const ShaderProgramPtr& prog)
 	self.bindShaderProgramInternal(prog);
 }
 
-void CommandBuffer::beginRenderPass(const FramebufferPtr& fb,
-									const Array<TextureUsageBit, kMaxColorRenderTargets>& colorAttachmentUsages,
-									TextureUsageBit depthStencilAttachmentUsage, U32 minx, U32 miny, U32 width,
-									U32 height)
+void CommandBuffer::beginRenderPass(const FramebufferPtr& fb, const Array<TextureUsageBit, kMaxColorRenderTargets>& colorAttachmentUsages,
+									TextureUsageBit depthStencilAttachmentUsage, U32 minx, U32 miny, U32 width, U32 height)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.beginRenderPassInternal(fb, colorAttachmentUsages, depthStencilAttachmentUsage, minx, miny, width, height);
@@ -265,8 +256,7 @@ void CommandBuffer::setVrsRate(VrsRate rate)
 	self.setVrsRateInternal(rate);
 }
 
-void CommandBuffer::drawElements(PrimitiveTopology topology, U32 count, U32 instanceCount, U32 firstIndex,
-								 U32 baseVertex, U32 baseInstance)
+void CommandBuffer::drawElements(PrimitiveTopology topology, U32 count, U32 instanceCount, U32 firstIndex, U32 baseVertex, U32 baseInstance)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.drawElementsInternal(topology, count, instanceCount, firstIndex, baseVertex, baseInstance);
@@ -284,8 +274,7 @@ void CommandBuffer::drawArraysIndirect(PrimitiveTopology topology, U32 drawCount
 	self.drawArraysIndirectInternal(topology, drawCount, offset, buff);
 }
 
-void CommandBuffer::drawElementsIndirect(PrimitiveTopology topology, U32 drawCount, PtrSize offset,
-										 const BufferPtr& buff)
+void CommandBuffer::drawElementsIndirect(PrimitiveTopology topology, U32 drawCount, PtrSize offset, const BufferPtr& buff)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.drawElementsIndirectInternal(topology, drawCount, offset, buff);
@@ -297,12 +286,11 @@ void CommandBuffer::dispatchCompute(U32 groupCountX, U32 groupCountY, U32 groupC
 	self.dispatchComputeInternal(groupCountX, groupCountY, groupCountZ);
 }
 
-void CommandBuffer::traceRays(const BufferPtr& sbtBuffer, PtrSize sbtBufferOffset, U32 sbtRecordSize,
-							  U32 hitGroupSbtRecordCount, U32 rayTypeCount, U32 width, U32 height, U32 depth)
+void CommandBuffer::traceRays(const BufferPtr& sbtBuffer, PtrSize sbtBufferOffset, U32 sbtRecordSize, U32 hitGroupSbtRecordCount, U32 rayTypeCount,
+							  U32 width, U32 height, U32 depth)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
-	self.traceRaysInternal(sbtBuffer, sbtBufferOffset, sbtRecordSize, hitGroupSbtRecordCount, rayTypeCount, width,
-						   height, depth);
+	self.traceRaysInternal(sbtBuffer, sbtBufferOffset, sbtRecordSize, hitGroupSbtRecordCount, rayTypeCount, width, height, depth);
 }
 
 void CommandBuffer::generateMipmaps2d(const TextureViewPtr& texView)
@@ -316,8 +304,7 @@ void CommandBuffer::generateMipmaps3d([[maybe_unused]] const TextureViewPtr& tex
 	ANKI_ASSERT(!"TODO");
 }
 
-void CommandBuffer::blitTextureViews([[maybe_unused]] const TextureViewPtr& srcView,
-									 [[maybe_unused]] const TextureViewPtr& destView)
+void CommandBuffer::blitTextureViews([[maybe_unused]] const TextureViewPtr& srcView, [[maybe_unused]] const TextureViewPtr& destView)
 {
 	ANKI_ASSERT(!"TODO");
 }
@@ -328,8 +315,7 @@ void CommandBuffer::clearTextureView(const TextureViewPtr& texView, const ClearV
 	self.clearTextureViewInternal(texView, clearValue);
 }
 
-void CommandBuffer::copyBufferToTextureView(const BufferPtr& buff, PtrSize offset, PtrSize range,
-											const TextureViewPtr& texView)
+void CommandBuffer::copyBufferToTextureView(const BufferPtr& buff, PtrSize offset, PtrSize range, const TextureViewPtr& texView)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.copyBufferToTextureViewInternal(buff, offset, range, texView);
@@ -341,15 +327,13 @@ void CommandBuffer::fillBuffer(const BufferPtr& buff, PtrSize offset, PtrSize si
 	self.fillBufferInternal(buff, offset, size, value);
 }
 
-void CommandBuffer::writeOcclusionQueriesResultToBuffer(ConstWeakArray<OcclusionQuery*> queries, PtrSize offset,
-														const BufferPtr& buff)
+void CommandBuffer::writeOcclusionQueriesResultToBuffer(ConstWeakArray<OcclusionQuery*> queries, PtrSize offset, const BufferPtr& buff)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.writeOcclusionQueriesResultToBufferInternal(queries, offset, buff);
 }
 
-void CommandBuffer::copyBufferToBuffer(const BufferPtr& src, const BufferPtr& dst,
-									   ConstWeakArray<CopyBufferToBufferInfo> copies)
+void CommandBuffer::copyBufferToBuffer(const BufferPtr& src, const BufferPtr& dst, ConstWeakArray<CopyBufferToBufferInfo> copies)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
 	self.copyBufferToBufferInternal(src, dst, copies);
@@ -361,18 +345,15 @@ void CommandBuffer::buildAccelerationStructure(const AccelerationStructurePtr& a
 	self.buildAccelerationStructureInternal(as);
 }
 
-void CommandBuffer::upscale(const GrUpscalerPtr& upscaler, const TextureViewPtr& inColor,
-							const TextureViewPtr& outUpscaledColor, const TextureViewPtr& motionVectors,
-							const TextureViewPtr& depth, const TextureViewPtr& exposure, const Bool resetAccumulation,
-							const Vec2& jitterOffset, const Vec2& motionVectorsScale)
+void CommandBuffer::upscale(const GrUpscalerPtr& upscaler, const TextureViewPtr& inColor, const TextureViewPtr& outUpscaledColor,
+							const TextureViewPtr& motionVectors, const TextureViewPtr& depth, const TextureViewPtr& exposure,
+							const Bool resetAccumulation, const Vec2& jitterOffset, const Vec2& motionVectorsScale)
 {
 	ANKI_VK_SELF(CommandBufferImpl);
-	self.upscaleInternal(upscaler, inColor, outUpscaledColor, motionVectors, depth, exposure, resetAccumulation,
-						 jitterOffset, motionVectorsScale);
+	self.upscaleInternal(upscaler, inColor, outUpscaledColor, motionVectors, depth, exposure, resetAccumulation, jitterOffset, motionVectorsScale);
 }
 
-void CommandBuffer::setPipelineBarrier(ConstWeakArray<TextureBarrierInfo> textures,
-									   ConstWeakArray<BufferBarrierInfo> buffers,
+void CommandBuffer::setPipelineBarrier(ConstWeakArray<TextureBarrierInfo> textures, ConstWeakArray<BufferBarrierInfo> buffers,
 									   ConstWeakArray<AccelerationStructureBarrierInfo> accelerationStructures)
 {
 	ANKI_VK_SELF(CommandBufferImpl);

+ 3 - 4
AnKi/Gr/Vulkan/CommandBufferFactory.cpp

@@ -8,8 +8,7 @@
 
 namespace anki {
 
-static VulkanQueueType getQueueTypeFromCommandBufferFlags(CommandBufferFlag flags,
-														  const VulkanQueueFamilies& queueFamilies)
+static VulkanQueueType getQueueTypeFromCommandBufferFlags(CommandBufferFlag flags, const VulkanQueueFamilies& queueFamilies)
 {
 	ANKI_ASSERT(!!(flags & CommandBufferFlag::kGeneralWork) ^ !!(flags & CommandBufferFlag::kComputeWork));
 	if(!(flags & CommandBufferFlag::kGeneralWork) && queueFamilies[VulkanQueueType::kCompute] != kMaxU32)
@@ -129,8 +128,8 @@ Error CommandBufferThreadAllocator::newCommandBuffer(CommandBufferFlag cmdbFlags
 
 		MicroCommandBuffer* newCmdb = newInstance<MicroCommandBuffer>(GrMemoryPool::getSingleton(), this);
 
-		newCmdb->m_fastPool.init(GrMemoryPool::getSingleton().getAllocationCallback(),
-								 GrMemoryPool::getSingleton().getAllocationCallbackUserData(), 256_KB, 2.0f);
+		newCmdb->m_fastPool.init(GrMemoryPool::getSingleton().getAllocationCallback(), GrMemoryPool::getSingleton().getAllocationCallbackUserData(),
+								 256_KB, 2.0f);
 
 		for(DynamicArray<GrObjectPtr, MemoryPoolPtrWrapper<StackMemoryPool>>& arr : newCmdb->m_objectRefs)
 		{

+ 28 - 46
AnKi/Gr/Vulkan/CommandBufferImpl.cpp

@@ -118,9 +118,8 @@ void CommandBufferImpl::beginRecording()
 	}
 }
 
-void CommandBufferImpl::beginRenderPassInternal(
-	const FramebufferPtr& fb, const Array<TextureUsageBit, kMaxColorRenderTargets>& colorAttachmentUsages,
-	TextureUsageBit depthStencilAttachmentUsage, U32 minx, U32 miny, U32 width, U32 height)
+void CommandBufferImpl::beginRenderPassInternal(const FramebufferPtr& fb, const Array<TextureUsageBit, kMaxColorRenderTargets>& colorAttachmentUsages,
+												TextureUsageBit depthStencilAttachmentUsage, U32 minx, U32 miny, U32 width, U32 height)
 {
 	commandCommon();
 	ANKI_ASSERT(!insideRenderPass());
@@ -332,24 +331,20 @@ void CommandBufferImpl::generateMipmaps2dInternal(const TextureViewPtr& texView)
 		if(i > 0)
 		{
 			VkImageSubresourceRange range;
-			tex.computeVkImageSubresourceRange(TextureSubresourceInfo(TextureSurfaceInfo(i, 0, face, layer), aspect),
-											   range);
+			tex.computeVkImageSubresourceRange(TextureSubresourceInfo(TextureSurfaceInfo(i, 0, face, layer), aspect), range);
 
-			setImageBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
-							VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
-							VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, tex.m_imageHandle,
+			setImageBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+							VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, tex.m_imageHandle,
 							range);
 		}
 
 		// Transition destination
 		{
 			VkImageSubresourceRange range;
-			tex.computeVkImageSubresourceRange(
-				TextureSubresourceInfo(TextureSurfaceInfo(i + 1, 0, face, layer), aspect), range);
+			tex.computeVkImageSubresourceRange(TextureSubresourceInfo(TextureSurfaceInfo(i + 1, 0, face, layer), aspect), range);
 
-			setImageBarrier(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, VK_IMAGE_LAYOUT_UNDEFINED,
-							VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
-							VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, tex.m_imageHandle, range);
+			setImageBarrier(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, VK_IMAGE_LAYOUT_UNDEFINED, VK_PIPELINE_STAGE_TRANSFER_BIT,
+							VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, tex.m_imageHandle, range);
 		}
 
 		// Setup the blit struct
@@ -393,17 +388,16 @@ void CommandBufferImpl::generateMipmaps2dInternal(const TextureViewPtr& texView)
 		blit.dstOffsets[0] = {0, 0, 0};
 		blit.dstOffsets[1] = {dstWidth, dstHeight, 1};
 
-		vkCmdBlitImage(m_handle, tex.m_imageHandle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, tex.m_imageHandle,
-					   VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit,
-					   (!!aspect) ? VK_FILTER_NEAREST : VK_FILTER_LINEAR);
+		vkCmdBlitImage(m_handle, tex.m_imageHandle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, tex.m_imageHandle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
+					   &blit, (!!aspect) ? VK_FILTER_NEAREST : VK_FILTER_LINEAR);
 	}
 
 	// Hold the reference
 	m_microCmdb->pushObjectRef(texView);
 }
 
-void CommandBufferImpl::copyBufferToTextureViewInternal(const BufferPtr& buff, PtrSize offset,
-														[[maybe_unused]] PtrSize range, const TextureViewPtr& texView)
+void CommandBufferImpl::copyBufferToTextureViewInternal(const BufferPtr& buff, PtrSize offset, [[maybe_unused]] PtrSize range,
+														const TextureViewPtr& texView)
 {
 	commandCommon();
 
@@ -415,8 +409,7 @@ void CommandBufferImpl::copyBufferToTextureViewInternal(const BufferPtr& buff, P
 	const Bool is3D = tex.getTextureType() == TextureType::k3D;
 	const VkImageAspectFlags aspect = convertImageAspect(view.getSubresource().m_depthStencilAspect);
 
-	const TextureSurfaceInfo surf(view.getSubresource().m_firstMipmap, view.getSubresource().m_firstFace, 0,
-								  view.getSubresource().m_firstLayer);
+	const TextureSurfaceInfo surf(view.getSubresource().m_firstMipmap, view.getSubresource().m_firstFace, 0, view.getSubresource().m_firstLayer);
 	const TextureVolumeInfo vol(view.getSubresource().m_firstMipmap);
 
 	// Compute the sizes of the mip
@@ -448,8 +441,7 @@ void CommandBufferImpl::copyBufferToTextureViewInternal(const BufferPtr& buff, P
 	region.bufferImageHeight = 0;
 	region.bufferRowLength = 0;
 
-	vkCmdCopyBufferToImage(m_handle, static_cast<const BufferImpl&>(*buff).getHandle(), tex.m_imageHandle, layout, 1,
-						   &region);
+	vkCmdCopyBufferToImage(m_handle, static_cast<const BufferImpl&>(*buff).getHandle(), tex.m_imageHandle, layout, 1, &region);
 
 	m_microCmdb->pushObjectRef(texView);
 	m_microCmdb->pushObjectRef(buff);
@@ -467,8 +459,7 @@ void CommandBufferImpl::rebindDynamicState()
 	// Rebind the stencil compare mask
 	if(m_stencilCompareMasks[0] == m_stencilCompareMasks[1])
 	{
-		vkCmdSetStencilCompareMask(m_handle, VK_STENCIL_FACE_FRONT_BIT | VK_STENCIL_FACE_BACK_BIT,
-								   m_stencilCompareMasks[0]);
+		vkCmdSetStencilCompareMask(m_handle, VK_STENCIL_FACE_FRONT_BIT | VK_STENCIL_FACE_BACK_BIT, m_stencilCompareMasks[0]);
 	}
 	else
 	{
@@ -479,8 +470,7 @@ void CommandBufferImpl::rebindDynamicState()
 	// Rebind the stencil write mask
 	if(m_stencilWriteMasks[0] == m_stencilWriteMasks[1])
 	{
-		vkCmdSetStencilWriteMask(m_handle, VK_STENCIL_FACE_FRONT_BIT | VK_STENCIL_FACE_BACK_BIT,
-								 m_stencilWriteMasks[0]);
+		vkCmdSetStencilWriteMask(m_handle, VK_STENCIL_FACE_FRONT_BIT | VK_STENCIL_FACE_BACK_BIT, m_stencilWriteMasks[0]);
 	}
 	else
 	{
@@ -491,8 +481,7 @@ void CommandBufferImpl::rebindDynamicState()
 	// Rebind the stencil reference
 	if(m_stencilReferenceMasks[0] == m_stencilReferenceMasks[1])
 	{
-		vkCmdSetStencilReference(m_handle, VK_STENCIL_FACE_FRONT_BIT | VK_STENCIL_FACE_BACK_BIT,
-								 m_stencilReferenceMasks[0]);
+		vkCmdSetStencilReference(m_handle, VK_STENCIL_FACE_FRONT_BIT | VK_STENCIL_FACE_BACK_BIT, m_stencilReferenceMasks[0]);
 	}
 	else
 	{
@@ -542,16 +531,13 @@ static NVSDK_NGX_Resource_VK getNGXResourceFromAnkiTexture(const TextureViewImpl
 	const Bool isUAV = !!(tex.m_vkUsageFlags & VK_IMAGE_USAGE_STORAGE_BIT);
 
 	// TODO Not sure if I should pass the width,height of the image or the view
-	return NVSDK_NGX_Create_ImageView_Resource_VK(imageView, image, subresourceRange, format, tex.getWidth(),
-												  tex.getHeight(), isUAV);
+	return NVSDK_NGX_Create_ImageView_Resource_VK(imageView, image, subresourceRange, format, tex.getWidth(), tex.getHeight(), isUAV);
 }
 #endif
 
-void CommandBufferImpl::upscaleInternal(const GrUpscalerPtr& upscaler, const TextureViewPtr& inColor,
-										const TextureViewPtr& outUpscaledColor, const TextureViewPtr& motionVectors,
-										const TextureViewPtr& depth, const TextureViewPtr& exposure,
-										const Bool resetAccumulation, const Vec2& jitterOffset,
-										const Vec2& motionVectorsScale)
+void CommandBufferImpl::upscaleInternal(const GrUpscalerPtr& upscaler, const TextureViewPtr& inColor, const TextureViewPtr& outUpscaledColor,
+										const TextureViewPtr& motionVectors, const TextureViewPtr& depth, const TextureViewPtr& exposure,
+										const Bool resetAccumulation, const Vec2& jitterOffset, const Vec2& motionVectorsScale)
 {
 #if ANKI_DLSS
 	ANKI_ASSERT(getGrManagerImpl().getDeviceCapabilities().m_dlss);
@@ -599,14 +585,12 @@ void CommandBufferImpl::upscaleInternal(const GrUpscalerPtr& upscaler, const Tex
 	getGrManagerImpl().beginMarker(m_handle, "DLSS");
 	NVSDK_NGX_Parameter* dlssParameters = &upscalerImpl.getParameters();
 	NVSDK_NGX_Handle* dlssFeature = &upscalerImpl.getFeature();
-	const NVSDK_NGX_Result result =
-		NGX_VULKAN_EVALUATE_DLSS_EXT(m_handle, dlssFeature, dlssParameters, &vkDlssEvalParams);
+	const NVSDK_NGX_Result result = NGX_VULKAN_EVALUATE_DLSS_EXT(m_handle, dlssFeature, dlssParameters, &vkDlssEvalParams);
 	getGrManagerImpl().endMarker(m_handle);
 
 	if(NVSDK_NGX_FAILED(result))
 	{
-		ANKI_VK_LOGF("Failed to NVSDK_NGX_VULKAN_EvaluateFeature for DLSS, code = 0x%08x, info: %ls", result,
-					 GetNGXResultAsString(result));
+		ANKI_VK_LOGF("Failed to NVSDK_NGX_VULKAN_EvaluateFeature for DLSS, code = 0x%08x, info: %ls", result, GetNGXResultAsString(result));
 	}
 #else
 	ANKI_ASSERT(0 && "Not supported");
@@ -622,9 +606,8 @@ void CommandBufferImpl::upscaleInternal(const GrUpscalerPtr& upscaler, const Tex
 #endif
 }
 
-void CommandBufferImpl::setPipelineBarrierInternal(
-	ConstWeakArray<TextureBarrierInfo> textures, ConstWeakArray<BufferBarrierInfo> buffers,
-	ConstWeakArray<AccelerationStructureBarrierInfo> accelerationStructures)
+void CommandBufferImpl::setPipelineBarrierInternal(ConstWeakArray<TextureBarrierInfo> textures, ConstWeakArray<BufferBarrierInfo> buffers,
+												   ConstWeakArray<AccelerationStructureBarrierInfo> accelerationStructures)
 {
 	commandCommon();
 
@@ -674,8 +657,7 @@ void CommandBufferImpl::setPipelineBarrierInternal(
 
 		VkPipelineStageFlags srcStage;
 		VkPipelineStageFlags dstStage;
-		impl.computeBarrierInfo(prevUsage, nextUsage, inf.subresourceRange.baseMipLevel, srcStage, inf.srcAccessMask,
-								dstStage, inf.dstAccessMask);
+		impl.computeBarrierInfo(prevUsage, nextUsage, inf.subresourceRange.baseMipLevel, srcStage, inf.srcAccessMask, dstStage, inf.dstAccessMask);
 		inf.oldLayout = impl.computeLayout(prevUsage, inf.subresourceRange.baseMipLevel);
 		inf.newLayout = impl.computeLayout(nextUsage, inf.subresourceRange.baseMipLevel);
 
@@ -734,8 +716,8 @@ void CommandBufferImpl::setPipelineBarrierInternal(
 
 		VkPipelineStageFlags srcStage;
 		VkPipelineStageFlags dstStage;
-		AccelerationStructureImpl::computeBarrierInfo(barrier.m_previousUsage, barrier.m_nextUsage, srcStage,
-													  inf.srcAccessMask, dstStage, inf.dstAccessMask);
+		AccelerationStructureImpl::computeBarrierInfo(barrier.m_previousUsage, barrier.m_nextUsage, srcStage, inf.srcAccessMask, dstStage,
+													  inf.dstAccessMask);
 
 		srcStageMask |= srcStage;
 		dstStageMask |= dstStage;

+ 25 - 43
AnKi/Gr/Vulkan/CommandBufferImpl.h

@@ -76,8 +76,7 @@ public:
 		return !!(m_flags & CommandBufferFlag::kSecondLevel);
 	}
 
-	void bindVertexBufferInternal(U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize stride,
-								  VertexStepRate stepRate)
+	void bindVertexBufferInternal(U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize stride, VertexStepRate stepRate)
 	{
 		commandCommon();
 		m_state.bindVertexBuffer(binding, stride, stepRate);
@@ -95,8 +94,7 @@ public:
 	void bindIndexBufferInternal(const BufferPtr& buff, PtrSize offset, IndexType type)
 	{
 		commandCommon();
-		vkCmdBindIndexBuffer(m_handle, static_cast<const BufferImpl&>(*buff).getHandle(), offset,
-							 convertIndexType(type));
+		vkCmdBindIndexBuffer(m_handle, static_cast<const BufferImpl&>(*buff).getHandle(), offset, convertIndexType(type));
 		m_microCmdb->pushObjectRef(buff);
 	}
 
@@ -157,8 +155,8 @@ public:
 		vkCmdSetDepthBias(m_handle, factor, 0.0f, units);
 	}
 
-	void setStencilOperationsInternal(FaceSelectionBit face, StencilOperation stencilFail,
-									  StencilOperation stencilPassDepthFail, StencilOperation stencilPassDepthPass)
+	void setStencilOperationsInternal(FaceSelectionBit face, StencilOperation stencilFail, StencilOperation stencilPassDepthFail,
+									  StencilOperation stencilPassDepthPass)
 	{
 		commandCommon();
 		m_state.setStencilOperations(face, stencilFail, stencilPassDepthFail, stencilPassDepthPass);
@@ -200,8 +198,7 @@ public:
 		m_state.setColorChannelWriteMask(attachment, mask);
 	}
 
-	void setBlendFactorsInternal(U32 attachment, BlendFactor srcRgb, BlendFactor dstRgb, BlendFactor srcA,
-								 BlendFactor dstA)
+	void setBlendFactorsInternal(U32 attachment, BlendFactor srcRgb, BlendFactor dstRgb, BlendFactor srcA, BlendFactor dstA)
 	{
 		commandCommon();
 		m_state.setBlendFactors(attachment, srcRgb, dstRgb, srcA, dstA);
@@ -213,8 +210,7 @@ public:
 		m_state.setBlendOperation(attachment, funcRgb, funcA);
 	}
 
-	void bindTextureAndSamplerInternal(U32 set, U32 binding, const TextureViewPtr& texView, const SamplerPtr& sampler,
-									   U32 arrayIdx)
+	void bindTextureAndSamplerInternal(U32 set, U32 binding, const TextureViewPtr& texView, const SamplerPtr& sampler, U32 arrayIdx)
 	{
 		commandCommon();
 		const TextureViewImpl& view = static_cast<const TextureViewImpl&>(*texView);
@@ -253,8 +249,7 @@ public:
 		commandCommon();
 		m_dsetState[set].bindImage(binding, arrayIdx, img.get());
 
-		const Bool isPresentable = !!(static_cast<const TextureViewImpl&>(*img).getTextureImpl().getTextureUsage()
-									  & TextureUsageBit::kPresent);
+		const Bool isPresentable = !!(static_cast<const TextureViewImpl&>(*img).getTextureImpl().getTextureUsage() & TextureUsageBit::kPresent);
 		if(isPresentable)
 		{
 			m_renderedToDefaultFb = true;
@@ -276,10 +271,8 @@ public:
 		m_dsetState[set].bindBindlessDescriptorSet();
 	}
 
-	void beginRenderPassInternal(const FramebufferPtr& fb,
-								 const Array<TextureUsageBit, kMaxColorRenderTargets>& colorAttachmentUsages,
-								 TextureUsageBit depthStencilAttachmentUsage, U32 minx, U32 miny, U32 width,
-								 U32 height);
+	void beginRenderPassInternal(const FramebufferPtr& fb, const Array<TextureUsageBit, kMaxColorRenderTargets>& colorAttachmentUsages,
+								 TextureUsageBit depthStencilAttachmentUsage, U32 minx, U32 miny, U32 width, U32 height);
 
 	void endRenderPassInternal();
 
@@ -287,8 +280,7 @@ public:
 
 	void drawArraysInternal(PrimitiveTopology topology, U32 count, U32 instanceCount, U32 first, U32 baseInstance);
 
-	void drawElementsInternal(PrimitiveTopology topology, U32 count, U32 instanceCount, U32 firstIndex, U32 baseVertex,
-							  U32 baseInstance);
+	void drawElementsInternal(PrimitiveTopology topology, U32 count, U32 instanceCount, U32 firstIndex, U32 baseVertex, U32 baseInstance);
 
 	void drawArraysIndirectInternal(PrimitiveTopology topology, U32 drawCount, PtrSize offset, const BufferPtr& buff);
 
@@ -296,8 +288,8 @@ public:
 
 	void dispatchComputeInternal(U32 groupCountX, U32 groupCountY, U32 groupCountZ);
 
-	void traceRaysInternal(const BufferPtr& sbtBuffer, PtrSize sbtBufferOffset, U32 sbtRecordSize,
-						   U32 hitGroupSbtRecordCount, U32 rayTypeCount, U32 width, U32 height, U32 depth);
+	void traceRaysInternal(const BufferPtr& sbtBuffer, PtrSize sbtBufferOffset, U32 sbtRecordSize, U32 hitGroupSbtRecordCount, U32 rayTypeCount,
+						   U32 width, U32 height, U32 depth);
 
 	void resetOcclusionQueriesInternal(ConstWeakArray<OcclusionQuery*> queries);
 
@@ -323,53 +315,45 @@ public:
 
 	void endRecording();
 
-	void setPipelineBarrierInternal(ConstWeakArray<TextureBarrierInfo> textures,
-									ConstWeakArray<BufferBarrierInfo> buffers,
+	void setPipelineBarrierInternal(ConstWeakArray<TextureBarrierInfo> textures, ConstWeakArray<BufferBarrierInfo> buffers,
 									ConstWeakArray<AccelerationStructureBarrierInfo> accelerationStructures);
 
 	void fillBufferInternal(const BufferPtr& buff, PtrSize offset, PtrSize size, U32 value);
 
-	void writeOcclusionQueriesResultToBufferInternal(ConstWeakArray<OcclusionQuery*> queries, PtrSize offset,
-													 const BufferPtr& buff);
+	void writeOcclusionQueriesResultToBufferInternal(ConstWeakArray<OcclusionQuery*> queries, PtrSize offset, const BufferPtr& buff);
 
 	void bindShaderProgramInternal(const ShaderProgramPtr& prog);
 
-	void bindUniformBufferInternal(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range,
-								   U32 arrayIdx)
+	void bindUniformBufferInternal(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range, U32 arrayIdx)
 	{
 		commandCommon();
 		m_dsetState[set].bindUniformBuffer(binding, arrayIdx, buff.get(), offset, range);
 		m_microCmdb->pushObjectRef(buff);
 	}
 
-	void bindStorageBufferInternal(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range,
-								   U32 arrayIdx)
+	void bindStorageBufferInternal(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range, U32 arrayIdx)
 	{
 		commandCommon();
 		m_dsetState[set].bindStorageBuffer(binding, arrayIdx, buff.get(), offset, range);
 		m_microCmdb->pushObjectRef(buff);
 	}
 
-	void bindReadOnlyTextureBufferInternal(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range,
-										   Format fmt, U32 arrayIdx)
+	void bindReadOnlyTextureBufferInternal(U32 set, U32 binding, const BufferPtr& buff, PtrSize offset, PtrSize range, Format fmt, U32 arrayIdx)
 	{
 		commandCommon();
 		m_dsetState[set].bindReadOnlyTextureBuffer(binding, arrayIdx, buff.get(), offset, range, fmt);
 		m_microCmdb->pushObjectRef(buff);
 	}
 
-	void copyBufferToTextureViewInternal(const BufferPtr& buff, PtrSize offset, PtrSize range,
-										 const TextureViewPtr& texView);
+	void copyBufferToTextureViewInternal(const BufferPtr& buff, PtrSize offset, PtrSize range, const TextureViewPtr& texView);
 
-	void copyBufferToBufferInternal(const BufferPtr& src, const BufferPtr& dst,
-									ConstWeakArray<CopyBufferToBufferInfo> copies);
+	void copyBufferToBufferInternal(const BufferPtr& src, const BufferPtr& dst, ConstWeakArray<CopyBufferToBufferInfo> copies);
 
 	void buildAccelerationStructureInternal(const AccelerationStructurePtr& as);
 
-	void upscaleInternal(const GrUpscalerPtr& upscaler, const TextureViewPtr& inColor,
-						 const TextureViewPtr& outUpscaledColor, const TextureViewPtr& motionVectors,
-						 const TextureViewPtr& depth, const TextureViewPtr& exposure, const Bool resetAccumulation,
-						 const Vec2& jitterOffset, const Vec2& motionVectorsScale);
+	void upscaleInternal(const GrUpscalerPtr& upscaler, const TextureViewPtr& inColor, const TextureViewPtr& outUpscaledColor,
+						 const TextureViewPtr& motionVectors, const TextureViewPtr& depth, const TextureViewPtr& exposure,
+						 const Bool resetAccumulation, const Vec2& jitterOffset, const Vec2& motionVectorsScale);
 
 	void setPushConstantsInternal(const void* data, U32 dataSize);
 
@@ -446,8 +430,7 @@ private:
 			m_beganRecording = true;
 		}
 
-		ANKI_ASSERT(Thread::getCurrentThreadId() == m_tid
-					&& "Commands must be recorder and flushed by the thread this command buffer was created");
+		ANKI_ASSERT(Thread::getCurrentThreadId() == m_tid && "Commands must be recorder and flushed by the thread this command buffer was created");
 		ANKI_ASSERT(m_handle);
 	}
 
@@ -465,9 +448,8 @@ private:
 		return !!(m_flags & CommandBufferFlag::kSecondLevel);
 	}
 
-	void setImageBarrier(VkPipelineStageFlags srcStage, VkAccessFlags srcAccess, VkImageLayout prevLayout,
-						 VkPipelineStageFlags dstStage, VkAccessFlags dstAccess, VkImageLayout newLayout, VkImage img,
-						 const VkImageSubresourceRange& range);
+	void setImageBarrier(VkPipelineStageFlags srcStage, VkAccessFlags srcAccess, VkImageLayout prevLayout, VkPipelineStageFlags dstStage,
+						 VkAccessFlags dstAccess, VkImageLayout newLayout, VkImage img, const VkImageSubresourceRange& range);
 
 	void beginRecording();
 

+ 35 - 49
AnKi/Gr/Vulkan/CommandBufferImpl.inl.h

@@ -85,9 +85,8 @@ inline void CommandBufferImpl::setStencilReferenceInternal(FaceSelectionBit face
 	}
 }
 
-inline void CommandBufferImpl::setImageBarrier(VkPipelineStageFlags srcStage, VkAccessFlags srcAccess,
-											   VkImageLayout prevLayout, VkPipelineStageFlags dstStage,
-											   VkAccessFlags dstAccess, VkImageLayout newLayout, VkImage img,
+inline void CommandBufferImpl::setImageBarrier(VkPipelineStageFlags srcStage, VkAccessFlags srcAccess, VkImageLayout prevLayout,
+											   VkPipelineStageFlags dstStage, VkAccessFlags dstAccess, VkImageLayout newLayout, VkImage img,
 											   const VkImageSubresourceRange& range)
 {
 	ANKI_ASSERT(img);
@@ -108,53 +107,49 @@ inline void CommandBufferImpl::setImageBarrier(VkPipelineStageFlags srcStage, Vk
 	ANKI_TRACE_INC_COUNTER(VkBarrier, 1);
 }
 
-inline void CommandBufferImpl::drawArraysInternal(PrimitiveTopology topology, U32 count, U32 instanceCount, U32 first,
-												  U32 baseInstance)
+inline void CommandBufferImpl::drawArraysInternal(PrimitiveTopology topology, U32 count, U32 instanceCount, U32 first, U32 baseInstance)
 {
 	m_state.setPrimitiveTopology(topology);
 	drawcallCommon();
 	vkCmdDraw(m_handle, count, instanceCount, first, baseInstance);
 }
 
-inline void CommandBufferImpl::drawElementsInternal(PrimitiveTopology topology, U32 count, U32 instanceCount,
-													U32 firstIndex, U32 baseVertex, U32 baseInstance)
+inline void CommandBufferImpl::drawElementsInternal(PrimitiveTopology topology, U32 count, U32 instanceCount, U32 firstIndex, U32 baseVertex,
+													U32 baseInstance)
 {
 	m_state.setPrimitiveTopology(topology);
 	drawcallCommon();
 	vkCmdDrawIndexed(m_handle, count, instanceCount, firstIndex, baseVertex, baseInstance);
 }
 
-inline void CommandBufferImpl::drawArraysIndirectInternal(PrimitiveTopology topology, U32 drawCount, PtrSize offset,
-														  const BufferPtr& buff)
+inline void CommandBufferImpl::drawArraysIndirectInternal(PrimitiveTopology topology, U32 drawCount, PtrSize offset, const BufferPtr& buff)
 {
 	m_state.setPrimitiveTopology(topology);
 	drawcallCommon();
 	const BufferImpl& impl = static_cast<const BufferImpl&>(*buff);
 	ANKI_ASSERT(impl.usageValid(BufferUsageBit::kIndirectDraw));
 	ANKI_ASSERT((offset % 4) == 0);
-	ANKI_ASSERT((offset + sizeof(DrawIndirectInfo) * drawCount) <= impl.getSize());
+	ANKI_ASSERT((offset + sizeof(DrawIndirectArgs) * drawCount) <= impl.getSize());
 
-	vkCmdDrawIndirect(m_handle, impl.getHandle(), offset, drawCount, sizeof(DrawIndirectInfo));
+	vkCmdDrawIndirect(m_handle, impl.getHandle(), offset, drawCount, sizeof(DrawIndirectArgs));
 }
 
-inline void CommandBufferImpl::drawElementsIndirectInternal(PrimitiveTopology topology, U32 drawCount, PtrSize offset,
-															const BufferPtr& buff)
+inline void CommandBufferImpl::drawElementsIndirectInternal(PrimitiveTopology topology, U32 drawCount, PtrSize offset, const BufferPtr& buff)
 {
 	m_state.setPrimitiveTopology(topology);
 	drawcallCommon();
 	const BufferImpl& impl = static_cast<const BufferImpl&>(*buff);
 	ANKI_ASSERT(impl.usageValid(BufferUsageBit::kIndirectDraw));
 	ANKI_ASSERT((offset % 4) == 0);
-	ANKI_ASSERT((offset + sizeof(DrawIndexedIndirectInfo) * drawCount) <= impl.getSize());
+	ANKI_ASSERT((offset + sizeof(DrawIndexedIndirectArgs) * drawCount) <= impl.getSize());
 
-	vkCmdDrawIndexedIndirect(m_handle, impl.getHandle(), offset, drawCount, sizeof(DrawIndexedIndirectInfo));
+	vkCmdDrawIndexedIndirect(m_handle, impl.getHandle(), offset, drawCount, sizeof(DrawIndexedIndirectArgs));
 }
 
 inline void CommandBufferImpl::dispatchComputeInternal(U32 groupCountX, U32 groupCountY, U32 groupCountZ)
 {
 	ANKI_ASSERT(m_computeProg);
-	ANKI_ASSERT(m_computeProg->getReflectionInfo().m_pushConstantsSize == m_setPushConstantsSize
-				&& "Forgot to set pushConstants");
+	ANKI_ASSERT(m_computeProg->getReflectionInfo().m_pushConstantsSize == m_setPushConstantsSize && "Forgot to set pushConstants");
 
 	commandCommon();
 
@@ -169,8 +164,8 @@ inline void CommandBufferImpl::dispatchComputeInternal(U32 groupCountX, U32 grou
 			Bool dirty;
 			Array<PtrSize, kMaxBindingsPerDescriptorSet> dynamicOffsetsPtrSize;
 			U32 dynamicOffsetCount;
-			if(getGrManagerImpl().getDescriptorSetFactory().newDescriptorSet(*m_pool, m_dsetState[i], dset, dirty,
-																			 dynamicOffsetsPtrSize, dynamicOffsetCount))
+			if(getGrManagerImpl().getDescriptorSetFactory().newDescriptorSet(*m_pool, m_dsetState[i], dset, dirty, dynamicOffsetsPtrSize,
+																			 dynamicOffsetCount))
 			{
 				ANKI_VK_LOGF("Cannot recover");
 			}
@@ -186,8 +181,7 @@ inline void CommandBufferImpl::dispatchComputeInternal(U32 groupCountX, U32 grou
 
 				VkDescriptorSet dsHandle = dset.getHandle();
 
-				vkCmdBindDescriptorSets(m_handle, VK_PIPELINE_BIND_POINT_COMPUTE,
-										m_computeProg->getPipelineLayout().getHandle(), i, 1, &dsHandle,
+				vkCmdBindDescriptorSets(m_handle, VK_PIPELINE_BIND_POINT_COMPUTE, m_computeProg->getPipelineLayout().getHandle(), i, 1, &dsHandle,
 										dynamicOffsetCount, &dynamicOffsets[0]);
 			}
 		}
@@ -198,17 +192,15 @@ inline void CommandBufferImpl::dispatchComputeInternal(U32 groupCountX, U32 grou
 	getGrManagerImpl().endMarker(m_handle);
 }
 
-inline void CommandBufferImpl::traceRaysInternal(const BufferPtr& sbtBuffer, PtrSize sbtBufferOffset,
-												 U32 sbtRecordSize32, U32 hitGroupSbtRecordCount, U32 rayTypeCount,
-												 U32 width, U32 height, U32 depth)
+inline void CommandBufferImpl::traceRaysInternal(const BufferPtr& sbtBuffer, PtrSize sbtBufferOffset, U32 sbtRecordSize32, U32 hitGroupSbtRecordCount,
+												 U32 rayTypeCount, U32 width, U32 height, U32 depth)
 {
 	const PtrSize sbtRecordSize = sbtRecordSize32;
 	ANKI_ASSERT(hitGroupSbtRecordCount > 0);
 	ANKI_ASSERT(width > 0 && height > 0 && depth > 0);
 	ANKI_ASSERT(m_rtProg);
 	const ShaderProgramImpl& sprog = static_cast<const ShaderProgramImpl&>(*m_rtProg);
-	ANKI_ASSERT(sprog.getReflectionInfo().m_pushConstantsSize == m_setPushConstantsSize
-				&& "Forgot to set pushConstants");
+	ANKI_ASSERT(sprog.getReflectionInfo().m_pushConstantsSize == m_setPushConstantsSize && "Forgot to set pushConstants");
 
 	ANKI_ASSERT(rayTypeCount == sprog.getMissShaderCount() && "All the miss shaders should be in use");
 	ANKI_ASSERT((hitGroupSbtRecordCount % rayTypeCount) == 0);
@@ -230,8 +222,8 @@ inline void CommandBufferImpl::traceRaysInternal(const BufferPtr& sbtBuffer, Ptr
 			Bool dirty;
 			Array<PtrSize, kMaxBindingsPerDescriptorSet> dynamicOffsetsPtrSize;
 			U32 dynamicOffsetCount;
-			if(getGrManagerImpl().getDescriptorSetFactory().newDescriptorSet(*m_pool, m_dsetState[i], dset, dirty,
-																			 dynamicOffsetsPtrSize, dynamicOffsetCount))
+			if(getGrManagerImpl().getDescriptorSetFactory().newDescriptorSet(*m_pool, m_dsetState[i], dset, dirty, dynamicOffsetsPtrSize,
+																			 dynamicOffsetCount))
 			{
 				ANKI_VK_LOGF("Cannot recover");
 			}
@@ -247,9 +239,8 @@ inline void CommandBufferImpl::traceRaysInternal(const BufferPtr& sbtBuffer, Ptr
 
 				VkDescriptorSet dsHandle = dset.getHandle();
 
-				vkCmdBindDescriptorSets(m_handle, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR,
-										sprog.getPipelineLayout().getHandle(), i, 1, &dsHandle, dynamicOffsetCount,
-										&dynamicOffsets[0]);
+				vkCmdBindDescriptorSets(m_handle, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, sprog.getPipelineLayout().getHandle(), i, 1, &dsHandle,
+										dynamicOffsetCount, &dynamicOffsets[0]);
 			}
 		}
 	}
@@ -384,8 +375,7 @@ inline void CommandBufferImpl::pushSecondLevelCommandBuffersInternal(ConstWeakAr
 	ANKI_ASSERT(cmdbs.getSize() > 0);
 	commandCommon();
 	ANKI_ASSERT(insideRenderPass());
-	ANKI_ASSERT(m_subpassContents == VK_SUBPASS_CONTENTS_MAX_ENUM
-				|| m_subpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
+	ANKI_ASSERT(m_subpassContents == VK_SUBPASS_CONTENTS_MAX_ENUM || m_subpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
 
 	m_subpassContents = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS;
 
@@ -415,8 +405,7 @@ inline void CommandBufferImpl::drawcallCommon()
 	ANKI_ASSERT(m_graphicsProg);
 	ANKI_ASSERT(insideRenderPass() || secondLevel());
 	ANKI_ASSERT(m_subpassContents == VK_SUBPASS_CONTENTS_MAX_ENUM || m_subpassContents == VK_SUBPASS_CONTENTS_INLINE);
-	ANKI_ASSERT(m_graphicsProg->getReflectionInfo().m_pushConstantsSize == m_setPushConstantsSize
-				&& "Forgot to set pushConstants");
+	ANKI_ASSERT(m_graphicsProg->getReflectionInfo().m_pushConstantsSize == m_setPushConstantsSize && "Forgot to set pushConstants");
 
 	m_subpassContents = VK_SUBPASS_CONTENTS_INLINE;
 
@@ -446,8 +435,8 @@ inline void CommandBufferImpl::drawcallCommon()
 			Bool dirty;
 			Array<PtrSize, kMaxBindingsPerDescriptorSet> dynamicOffsetsPtrSize;
 			U32 dynamicOffsetCount;
-			if(getGrManagerImpl().getDescriptorSetFactory().newDescriptorSet(*m_pool, m_dsetState[i], dset, dirty,
-																			 dynamicOffsetsPtrSize, dynamicOffsetCount))
+			if(getGrManagerImpl().getDescriptorSetFactory().newDescriptorSet(*m_pool, m_dsetState[i], dset, dirty, dynamicOffsetsPtrSize,
+																			 dynamicOffsetCount))
 			{
 				ANKI_VK_LOGF("Cannot recover");
 			}
@@ -463,8 +452,7 @@ inline void CommandBufferImpl::drawcallCommon()
 
 				VkDescriptorSet dsHandle = dset.getHandle();
 
-				vkCmdBindDescriptorSets(m_handle, VK_PIPELINE_BIND_POINT_GRAPHICS,
-										m_graphicsProg->getPipelineLayout().getHandle(), i, 1, &dsHandle,
+				vkCmdBindDescriptorSets(m_handle, VK_PIPELINE_BIND_POINT_GRAPHICS, m_graphicsProg->getPipelineLayout().getHandle(), i, 1, &dsHandle,
 										dynamicOffsetCount, &dynamicOffsets[0]);
 			}
 		}
@@ -524,8 +512,7 @@ inline void CommandBufferImpl::drawcallCommon()
 
 	// Some checks
 #if ANKI_ENABLE_ASSERTIONS
-	if(m_state.getPrimitiveTopology() == PrimitiveTopology::kLines
-	   || m_state.getPrimitiveTopology() == PrimitiveTopology::kLineStip)
+	if(m_state.getPrimitiveTopology() == PrimitiveTopology::kLines || m_state.getPrimitiveTopology() == PrimitiveTopology::kLineStip)
 	{
 		ANKI_ASSERT(m_lineWidthSet == true);
 	}
@@ -554,8 +541,8 @@ inline void CommandBufferImpl::fillBufferInternal(const BufferPtr& buff, PtrSize
 	m_microCmdb->pushObjectRef(buff);
 }
 
-inline void CommandBufferImpl::writeOcclusionQueriesResultToBufferInternal(ConstWeakArray<OcclusionQuery*> queries,
-																		   PtrSize offset, const BufferPtr& buff)
+inline void CommandBufferImpl::writeOcclusionQueriesResultToBufferInternal(ConstWeakArray<OcclusionQuery*> queries, PtrSize offset,
+																		   const BufferPtr& buff)
 {
 	ANKI_ASSERT(queries.getSize() > 0);
 	commandCommon();
@@ -572,8 +559,8 @@ inline void CommandBufferImpl::writeOcclusionQueriesResultToBufferInternal(Const
 
 		OcclusionQueryImpl* q = static_cast<OcclusionQueryImpl*>(queries[i]);
 
-		vkCmdCopyQueryPoolResults(m_handle, q->m_handle.getQueryPool(), q->m_handle.getQueryIndex(), 1,
-								  impl.getHandle(), offset, sizeof(U32), VK_QUERY_RESULT_PARTIAL_BIT);
+		vkCmdCopyQueryPoolResults(m_handle, q->m_handle.getQueryPool(), q->m_handle.getQueryIndex(), 1, impl.getHandle(), offset, sizeof(U32),
+								  VK_QUERY_RESULT_PARTIAL_BIT);
 
 		offset += sizeof(U32);
 		m_microCmdb->pushObjectRef(q);
@@ -636,8 +623,7 @@ inline void CommandBufferImpl::bindShaderProgramInternal(const ShaderProgramPtr&
 #endif
 }
 
-inline void CommandBufferImpl::copyBufferToBufferInternal(const BufferPtr& src, const BufferPtr& dst,
-														  ConstWeakArray<CopyBufferToBufferInfo> copies)
+inline void CommandBufferImpl::copyBufferToBufferInternal(const BufferPtr& src, const BufferPtr& dst, ConstWeakArray<CopyBufferToBufferInfo> copies)
 {
 	ANKI_ASSERT(static_cast<const BufferImpl&>(*src).usageValid(BufferUsageBit::kTransferSource));
 	ANKI_ASSERT(static_cast<const BufferImpl&>(*dst).usageValid(BufferUsageBit::kTransferDestination));
@@ -648,8 +634,8 @@ inline void CommandBufferImpl::copyBufferToBufferInternal(const BufferPtr& src,
 	static_assert(sizeof(CopyBufferToBufferInfo) == sizeof(VkBufferCopy));
 	const VkBufferCopy* vkCopies = reinterpret_cast<const VkBufferCopy*>(&copies[0]);
 
-	vkCmdCopyBuffer(m_handle, static_cast<const BufferImpl&>(*src).getHandle(),
-					static_cast<const BufferImpl&>(*dst).getHandle(), copies.getSize(), &vkCopies[0]);
+	vkCmdCopyBuffer(m_handle, static_cast<const BufferImpl&>(*src).getHandle(), static_cast<const BufferImpl&>(*dst).getHandle(), copies.getSize(),
+					&vkCopies[0]);
 
 	m_microCmdb->pushObjectRef(src);
 	m_microCmdb->pushObjectRef(dst);

+ 1 - 2
AnKi/Gr/Vulkan/Common.cpp

@@ -331,8 +331,7 @@ VkBufferUsageFlags convertBufferUsageBit(BufferUsageBit usageMask)
 
 	if(!!(usageMask & PrivateBufferUsageBit::kAccelerationStructure))
 	{
-		out |= VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR
-			   | VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR;
+		out |= VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR | VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR;
 	}
 
 	if(!!(usageMask & BufferUsageBit::kShaderBindingTable))

+ 27 - 49
AnKi/Gr/Vulkan/DescriptorSet.cpp

@@ -100,8 +100,7 @@ public:
 	Error init();
 	Error createNewPool();
 
-	Error getOrCreateSet(U64 hash, const Array<AnyBindingExtended, kMaxBindingsPerDescriptorSet>& bindings,
-						 StackMemoryPool& tmpPool, const DS*& out)
+	Error getOrCreateSet(U64 hash, const Array<AnyBindingExtended, kMaxBindingsPerDescriptorSet>& bindings, StackMemoryPool& tmpPool, const DS*& out)
 	{
 		out = tryFindSet(hash);
 		if(out == nullptr)
@@ -123,10 +122,8 @@ private:
 	GrHashMap<U64, DS*> m_hashmap;
 
 	[[nodiscard]] const DS* tryFindSet(U64 hash);
-	Error newSet(U64 hash, const Array<AnyBindingExtended, kMaxBindingsPerDescriptorSet>& bindings,
-				 StackMemoryPool& tmpPool, const DS*& out);
-	void writeSet(const Array<AnyBindingExtended, kMaxBindingsPerDescriptorSet>& bindings, const DS& set,
-				  StackMemoryPool& tmpPool);
+	Error newSet(U64 hash, const Array<AnyBindingExtended, kMaxBindingsPerDescriptorSet>& bindings, StackMemoryPool& tmpPool, const DS*& out);
+	void writeSet(const Array<AnyBindingExtended, kMaxBindingsPerDescriptorSet>& bindings, const DS& set, StackMemoryPool& tmpPool);
 };
 
 class alignas(ANKI_CACHE_LINE_SIZE) DescriptorSetFactory::ThreadLocal
@@ -171,8 +168,7 @@ public:
 DescriptorSetFactory::BindlessDescriptorSet::~BindlessDescriptorSet()
 {
 	ANKI_ASSERT(m_freeTexIndexCount == m_freeTexIndices.getSize() && "Forgot to unbind some textures");
-	ANKI_ASSERT(m_freeTexelBufferIndexCount == m_freeTexelBufferIndices.getSize()
-				&& "Forgot to unbind some texel buffers");
+	ANKI_ASSERT(m_freeTexelBufferIndexCount == m_freeTexelBufferIndices.getSize() && "Forgot to unbind some texel buffers");
 
 	if(m_dsPool)
 	{
@@ -203,8 +199,7 @@ Error DescriptorSetFactory::BindlessDescriptorSet::init(U32 bindlessTextureCount
 		bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
 
 		Array<VkDescriptorBindingFlagsEXT, 2> bindingFlags = {};
-		bindingFlags[0] = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT
-						  | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT
+		bindingFlags[0] = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT
 						  | VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT;
 		bindingFlags[1] = bindingFlags[0];
 
@@ -334,8 +329,7 @@ U32 DescriptorSetFactory::BindlessDescriptorSet::bindUniformTexelBuffer(VkBuffer
 	return idx;
 }
 
-void DescriptorSetFactory::BindlessDescriptorSet::unbindCommon(U32 idx, GrDynamicArray<U16>& freeIndices,
-															   U16& freeIndexCount)
+void DescriptorSetFactory::BindlessDescriptorSet::unbindCommon(U32 idx, GrDynamicArray<U16>& freeIndices, U16& freeIndexCount)
 {
 	LockGuard<Mutex> lock(m_mtx);
 
@@ -379,14 +373,12 @@ Error DescriptorSetFactory::DSAllocator::init()
 
 Error DescriptorSetFactory::DSAllocator::createNewPool()
 {
-	m_lastPoolDSCount =
-		(m_lastPoolDSCount != 0) ? U32(F32(m_lastPoolDSCount) * kDescriptorPoolSizeScale) : kDescriptorPoolInitialSize;
+	m_lastPoolDSCount = (m_lastPoolDSCount != 0) ? U32(F32(m_lastPoolDSCount) * kDescriptorPoolSizeScale) : kDescriptorPoolInitialSize;
 	m_lastPoolFreeDSCount = m_lastPoolDSCount;
 
 	// Set the create info
 	Array<VkDescriptorPoolSize, U(DescriptorType::kCount)> poolSizes;
-	memcpy(&poolSizes[0], &m_layoutEntry->m_poolSizesCreateInf[0],
-		   sizeof(poolSizes[0]) * m_layoutEntry->m_poolCreateInf.poolSizeCount);
+	memcpy(&poolSizes[0], &m_layoutEntry->m_poolSizesCreateInf[0], sizeof(poolSizes[0]) * m_layoutEntry->m_poolCreateInf.poolSizeCount);
 
 	for(U i = 0; i < m_layoutEntry->m_poolCreateInf.poolSizeCount; ++i)
 	{
@@ -432,8 +424,7 @@ const DS* DescriptorSetFactory::DSAllocator::tryFindSet(U64 hash)
 	}
 }
 
-Error DescriptorSetFactory::DSAllocator::newSet(U64 hash,
-												const Array<AnyBindingExtended, kMaxBindingsPerDescriptorSet>& bindings,
+Error DescriptorSetFactory::DSAllocator::newSet(U64 hash, const Array<AnyBindingExtended, kMaxBindingsPerDescriptorSet>& bindings,
 												StackMemoryPool& tmpPool, const DS*& out_)
 {
 	DS* out = nullptr;
@@ -504,8 +495,8 @@ Error DescriptorSetFactory::DSAllocator::newSet(U64 hash,
 	return Error::kNone;
 }
 
-void DescriptorSetFactory::DSAllocator::writeSet(
-	const Array<AnyBindingExtended, kMaxBindingsPerDescriptorSet>& bindings, const DS& set, StackMemoryPool& tmpPool)
+void DescriptorSetFactory::DSAllocator::writeSet(const Array<AnyBindingExtended, kMaxBindingsPerDescriptorSet>& bindings, const DS& set,
+												 StackMemoryPool& tmpPool)
 {
 	DynamicArray<VkWriteDescriptorSet, MemoryPoolPtrWrapper<StackMemoryPool>> writeInfos(&tmpPool);
 	DynamicArray<VkDescriptorImageInfo, MemoryPoolPtrWrapper<StackMemoryPool>> texInfos(&tmpPool);
@@ -521,8 +512,7 @@ void DescriptorSetFactory::DSAllocator::writeSet(
 			for(U arrIdx = 0; arrIdx < m_layoutEntry->m_bindingArraySize[bindingIdx]; ++arrIdx)
 			{
 				ANKI_ASSERT(bindings[bindingIdx].m_arraySize >= m_layoutEntry->m_bindingArraySize[bindingIdx]);
-				const AnyBinding& b = (bindings[bindingIdx].m_arraySize == 1) ? bindings[bindingIdx].m_single
-																			  : bindings[bindingIdx].m_array[arrIdx];
+				const AnyBinding& b = (bindings[bindingIdx].m_arraySize == 1) ? bindings[bindingIdx].m_single : bindings[bindingIdx].m_array[arrIdx];
 
 				switch(b.m_type)
 				{
@@ -608,8 +598,7 @@ void DescriptorSetFactory::DSAllocator::writeSet(
 		{
 			for(U32 arrIdx = 0; arrIdx < m_layoutEntry->m_bindingArraySize[bindingIdx]; ++arrIdx)
 			{
-				const AnyBinding& b = (bindings[bindingIdx].m_arraySize == 1) ? bindings[bindingIdx].m_single
-																			  : bindings[bindingIdx].m_array[arrIdx];
+				const AnyBinding& b = (bindings[bindingIdx].m_arraySize == 1) ? bindings[bindingIdx].m_single : bindings[bindingIdx].m_array[arrIdx];
 
 				VkWriteDescriptorSet& writeInfo = *writeInfos.emplaceBack(writeTemplate);
 				writeInfo.descriptorType = convertDescriptorType(b.m_type);
@@ -643,8 +632,7 @@ void DescriptorSetFactory::DSAllocator::writeSet(
 	}
 
 	// Write
-	vkUpdateDescriptorSets(getVkDevice(), writeInfos.getSize(), (writeInfos.getSize() > 0) ? &writeInfos[0] : nullptr,
-						   0, nullptr);
+	vkUpdateDescriptorSets(getVkDevice(), writeInfos.getSize(), (writeInfos.getSize() > 0) ? &writeInfos[0] : nullptr, 0, nullptr);
 }
 
 DSLayoutCacheEntry::~DSLayoutCacheEntry()
@@ -818,8 +806,7 @@ AnyBinding& DescriptorSetState::getBindingToPopulate(U32 bindingIdx, U32 arrayId
 	return *out;
 }
 
-void DescriptorSetState::flush(U64& hash, Array<PtrSize, kMaxBindingsPerDescriptorSet>& dynamicOffsets,
-							   U32& dynamicOffsetCount, Bool& bindlessDSet)
+void DescriptorSetState::flush(U64& hash, Array<PtrSize, kMaxBindingsPerDescriptorSet>& dynamicOffsets, U32& dynamicOffsetCount, Bool& bindlessDSet)
 {
 	// Set some values
 	hash = 0;
@@ -829,8 +816,7 @@ void DescriptorSetState::flush(U64& hash, Array<PtrSize, kMaxBindingsPerDescript
 	// There is a chance where the bindless set is bound but the actual shaders have an empty DS layout (maybe because
 	// the dead code elimination eliminated the bindless set). In that case we can't bind the bindless DS. We have to
 	// treat it as regular set
-	ANKI_ASSERT(!(m_layout.m_entry == nullptr && !m_bindlessDSetBound)
-				&& "DS layout points to bindless but no bindless is bound");
+	ANKI_ASSERT(!(m_layout.m_entry == nullptr && !m_bindlessDSetBound) && "DS layout points to bindless but no bindless is bound");
 	const Bool reallyBindless = m_bindlessDSetBound && m_layout.m_entry == nullptr;
 
 	if(!reallyBindless)
@@ -872,8 +858,7 @@ void DescriptorSetState::flush(U64& hash, Array<PtrSize, kMaxBindingsPerDescript
 						ANKI_ASSERT(m_bindings[i].m_array[arrIdx].m_type == m_bindings[i].m_array[arrIdx - 1].m_type);
 					}
 
-					const AnyBinding& anyBinding =
-						(m_bindings[i].m_arraySize == 1) ? m_bindings[i].m_single : m_bindings[i].m_array[arrIdx];
+					const AnyBinding& anyBinding = (m_bindings[i].m_arraySize == 1) ? m_bindings[i].m_single : m_bindings[i].m_array[arrIdx];
 
 					ANKI_ASSERT(anyBinding.m_uuids[0] != 0 && "Forgot to bind");
 
@@ -882,8 +867,7 @@ void DescriptorSetState::flush(U64& hash, Array<PtrSize, kMaxBindingsPerDescript
 					switch(entry.m_bindingType[i])
 					{
 					case DescriptorType::kCombinedTextureSampler:
-						ANKI_ASSERT(anyBinding.m_type == DescriptorType::kCombinedTextureSampler
-									&& "Have bound the wrong type");
+						ANKI_ASSERT(anyBinding.m_type == DescriptorType::kCombinedTextureSampler && "Have bound the wrong type");
 						toHash[toHashCount++] = anyBinding.m_uuids[1];
 						toHash[toHashCount++] = U64(anyBinding.m_texAndSampler.m_layout);
 						break;
@@ -907,21 +891,18 @@ void DescriptorSetState::flush(U64& hash, Array<PtrSize, kMaxBindingsPerDescript
 						dynamicOffsetsDirty = dynamicOffsetsDirty || crntBindingDirty;
 						break;
 					case DescriptorType::kReadTextureBuffer:
-						ANKI_ASSERT(anyBinding.m_type == DescriptorType::kReadTextureBuffer
-									&& "Have bound the wrong type");
+						ANKI_ASSERT(anyBinding.m_type == DescriptorType::kReadTextureBuffer && "Have bound the wrong type");
 						toHash[toHashCount++] = anyBinding.m_uuids[1];
 						break;
 					case DescriptorType::kReadWriteTextureBuffer:
-						ANKI_ASSERT(anyBinding.m_type == DescriptorType::kReadWriteTextureBuffer
-									&& "Have bound the wrong type");
+						ANKI_ASSERT(anyBinding.m_type == DescriptorType::kReadWriteTextureBuffer && "Have bound the wrong type");
 						toHash[toHashCount++] = anyBinding.m_uuids[1];
 						break;
 					case DescriptorType::kImage:
 						ANKI_ASSERT(anyBinding.m_type == DescriptorType::kImage && "Have bound the wrong type");
 						break;
 					case DescriptorType::kAccelerationStructure:
-						ANKI_ASSERT(anyBinding.m_type == DescriptorType::kAccelerationStructure
-									&& "Have bound the wrong type");
+						ANKI_ASSERT(anyBinding.m_type == DescriptorType::kAccelerationStructure && "Have bound the wrong type");
 						break;
 					default:
 						ANKI_ASSERT(0);
@@ -1010,10 +991,9 @@ Error DescriptorSetFactory::newDescriptorSetLayout(const DescriptorSetLayoutInit
 	if(init.m_bindings.getSize() > 0)
 	{
 		memcpy(bindings.getBegin(), init.m_bindings.getBegin(), init.m_bindings.getSizeInBytes());
-		std::sort(bindings.getBegin(), bindings.getBegin() + bindingCount,
-				  [](const DescriptorBinding& a, const DescriptorBinding& b) {
-					  return a.m_binding < b.m_binding;
-				  });
+		std::sort(bindings.getBegin(), bindings.getBegin() + bindingCount, [](const DescriptorBinding& a, const DescriptorBinding& b) {
+			return a.m_binding < b.m_binding;
+		});
 
 		hash = computeHash(&bindings[0], init.m_bindings.getSizeInBytes());
 		ANKI_ASSERT(hash != 1);
@@ -1031,8 +1011,7 @@ Error DescriptorSetFactory::newDescriptorSetLayout(const DescriptorSetLayoutInit
 		for(U32 i = 0; i < bindingCount; ++i)
 		{
 			const DescriptorBinding& binding = bindings[i];
-			if(binding.m_binding == 0 && binding.m_type == DescriptorType::kTexture
-			   && binding.m_arraySize == m_bindlessTextureCount)
+			if(binding.m_binding == 0 && binding.m_type == DescriptorType::kTexture && binding.m_arraySize == m_bindlessTextureCount)
 			{
 				// All good
 			}
@@ -1086,9 +1065,8 @@ Error DescriptorSetFactory::newDescriptorSetLayout(const DescriptorSetLayoutInit
 	return Error::kNone;
 }
 
-Error DescriptorSetFactory::newDescriptorSet(StackMemoryPool& tmpPool, DescriptorSetState& state, DescriptorSet& set,
-											 Bool& dirty, Array<PtrSize, kMaxBindingsPerDescriptorSet>& dynamicOffsets,
-											 U32& dynamicOffsetCount)
+Error DescriptorSetFactory::newDescriptorSet(StackMemoryPool& tmpPool, DescriptorSetState& state, DescriptorSet& set, Bool& dirty,
+											 Array<PtrSize, kMaxBindingsPerDescriptorSet>& dynamicOffsets, U32& dynamicOffsetCount)
 {
 	ANKI_TRACE_SCOPED_EVENT(VkDescriptorSetGetOrCreate);
 

+ 4 - 8
AnKi/Gr/Vulkan/DescriptorSet.h

@@ -193,8 +193,7 @@ public:
 		m_layout = layout;
 	}
 
-	void bindTextureAndSampler(U32 binding, U32 arrayIdx, const TextureView* texView, const Sampler* sampler,
-							   VkImageLayout layout)
+	void bindTextureAndSampler(U32 binding, U32 arrayIdx, const TextureView* texView, const Sampler* sampler, VkImageLayout layout)
 	{
 		const TextureViewImpl& viewImpl = static_cast<const TextureViewImpl&>(*texView);
 		ANKI_ASSERT(viewImpl.getTextureImpl().isSubresourceGoodForSampling(viewImpl.getSubresource()));
@@ -272,8 +271,7 @@ public:
 		unbindBindlessDSet();
 	}
 
-	void bindReadOnlyTextureBuffer(U32 binding, U32 arrayIdx, const Buffer* buff, PtrSize offset, PtrSize range,
-								   Format fmt)
+	void bindReadOnlyTextureBuffer(U32 binding, U32 arrayIdx, const Buffer* buff, PtrSize offset, PtrSize range, Format fmt)
 	{
 		const VkBufferView view = static_cast<const BufferImpl*>(buff)->getOrCreateBufferView(fmt, offset, range);
 		AnyBinding& b = getBindingToPopulate(binding, arrayIdx);
@@ -311,8 +309,7 @@ public:
 		b = {};
 		b.m_type = DescriptorType::kAccelerationStructure;
 		b.m_uuids[0] = b.m_uuids[1] = as->getUuid();
-		b.m_accelerationStructure.m_accelerationStructureHandle =
-			static_cast<const AccelerationStructureImpl*>(as)->getHandle();
+		b.m_accelerationStructure.m_accelerationStructureHandle = static_cast<const AccelerationStructureImpl*>(as)->getHandle();
 
 		m_dirtyBindings.set(binding);
 		unbindBindlessDSet();
@@ -341,8 +338,7 @@ private:
 
 	/// Only DescriptorSetFactory should call this.
 	/// @param hash If hash is zero then the DS doesn't need rebind.
-	void flush(U64& hash, Array<PtrSize, kMaxBindingsPerDescriptorSet>& dynamicOffsets, U32& dynamicOffsetCount,
-			   Bool& bindlessDSet);
+	void flush(U64& hash, Array<PtrSize, kMaxBindingsPerDescriptorSet>& dynamicOffsets, U32& dynamicOffsetCount, Bool& bindlessDSet);
 
 	void unbindBindlessDSet()
 	{

+ 7 - 12
AnKi/Gr/Vulkan/FramebufferImpl.cpp

@@ -84,11 +84,9 @@ void FramebufferImpl::initClearValues(const FramebufferInitInfo& init)
 		if(init.m_depthStencilAttachment.m_loadOperation == AttachmentLoadOperation::kClear
 		   || init.m_depthStencilAttachment.m_stencilLoadOperation == AttachmentLoadOperation::kClear)
 		{
-			m_clearVals[m_colorAttCount].depthStencil.depth =
-				init.m_depthStencilAttachment.m_clearValue.m_depthStencil.m_depth;
+			m_clearVals[m_colorAttCount].depthStencil.depth = init.m_depthStencilAttachment.m_clearValue.m_depthStencil.m_depth;
 
-			m_clearVals[m_colorAttCount].depthStencil.stencil =
-				init.m_depthStencilAttachment.m_clearValue.m_depthStencil.m_stencil;
+			m_clearVals[m_colorAttCount].depthStencil.stencil = init.m_depthStencilAttachment.m_clearValue.m_depthStencil.m_stencil;
 		}
 		else
 		{
@@ -179,8 +177,7 @@ Error FramebufferImpl::initFbs(const FramebufferInitInfo& init)
 	return Error::kNone;
 }
 
-void FramebufferImpl::setupAttachmentDescriptor(const FramebufferAttachmentInfo& att, VkAttachmentDescription2& desc,
-												VkImageLayout layout) const
+void FramebufferImpl::setupAttachmentDescriptor(const FramebufferAttachmentInfo& att, VkAttachmentDescription2& desc, VkImageLayout layout) const
 {
 	desc = {};
 	desc.sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2;
@@ -199,8 +196,7 @@ void FramebufferImpl::initRpassCreateInfo(const FramebufferInitInfo& init)
 	// Setup attachments and references
 	for(U32 i = 0; i < init.m_colorAttachmentCount; ++i)
 	{
-		setupAttachmentDescriptor(init.m_colorAttachments[i], m_attachmentDescriptions[i],
-								  VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+		setupAttachmentDescriptor(init.m_colorAttachments[i], m_attachmentDescriptions[i], VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
 
 		VkAttachmentReference2& ref = m_references[i];
 		ref.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2;
@@ -229,8 +225,7 @@ void FramebufferImpl::initRpassCreateInfo(const FramebufferInitInfo& init)
 		VkAttachmentDescription2& desc = m_attachmentDescriptions[sriAttachmentIdx];
 		desc = {};
 		desc.sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2;
-		desc.format = convertFormat(
-			static_cast<const TextureViewImpl&>(*init.m_shadingRateImage.m_textureView).getTextureImpl().getFormat());
+		desc.format = convertFormat(static_cast<const TextureViewImpl&>(*init.m_shadingRateImage.m_textureView).getTextureImpl().getFormat());
 		desc.samples = VK_SAMPLE_COUNT_1_BIT;
 		desc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
 		desc.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
@@ -270,8 +265,8 @@ void FramebufferImpl::initRpassCreateInfo(const FramebufferInitInfo& init)
 	m_rpassCi.pSubpasses = &m_subpassDescr;
 }
 
-VkRenderPass FramebufferImpl::getRenderPassHandle(const Array<VkImageLayout, kMaxColorRenderTargets>& colorLayouts,
-												  VkImageLayout dsLayout, VkImageLayout shadingRateImageLayout)
+VkRenderPass FramebufferImpl::getRenderPassHandle(const Array<VkImageLayout, kMaxColorRenderTargets>& colorLayouts, VkImageLayout dsLayout,
+												  VkImageLayout shadingRateImageLayout)
 {
 	VkRenderPass out = VK_NULL_HANDLE;
 

+ 3 - 4
AnKi/Gr/Vulkan/FramebufferImpl.h

@@ -39,8 +39,8 @@ public:
 	}
 
 	/// Use it for binding. It's thread-safe
-	VkRenderPass getRenderPassHandle(const Array<VkImageLayout, kMaxColorRenderTargets>& colorLayouts,
-									 VkImageLayout dsLayout, VkImageLayout shadingRateImageLayout);
+	VkRenderPass getRenderPassHandle(const Array<VkImageLayout, kMaxColorRenderTargets>& colorLayouts, VkImageLayout dsLayout,
+									 VkImageLayout shadingRateImageLayout);
 
 	VkFramebuffer getFramebufferHandle() const
 	{
@@ -146,8 +146,7 @@ private:
 	Error initFbs(const FramebufferInitInfo& init);
 	void initRpassCreateInfo(const FramebufferInitInfo& init);
 	void initClearValues(const FramebufferInitInfo& init);
-	void setupAttachmentDescriptor(const FramebufferAttachmentInfo& att, VkAttachmentDescription2& desc,
-								   VkImageLayout layout) const;
+	void setupAttachmentDescriptor(const FramebufferAttachmentInfo& att, VkAttachmentDescription2& desc, VkImageLayout layout) const;
 
 	U32 getTotalAttachmentCount() const
 	{

+ 15 - 29
AnKi/Gr/Vulkan/GpuMemoryManager.cpp

@@ -8,13 +8,8 @@
 
 namespace anki {
 
-static constexpr Array<GpuMemoryManagerClassInfo, 7> kClasses{{{4_KB, 256_KB},
-															   {128_KB, 8_MB},
-															   {1_MB, 64_MB},
-															   {16_MB, 128_MB},
-															   {64_MB, 128_MB},
-															   {128_MB, 128_MB},
-															   {256_MB, 256_MB}}};
+static constexpr Array<GpuMemoryManagerClassInfo, 7> kClasses{
+	{{4_KB, 256_KB}, {128_KB, 8_MB}, {1_MB, 64_MB}, {16_MB, 128_MB}, {64_MB, 128_MB}, {128_MB, 128_MB}, {256_MB, 256_MB}}};
 
 /// Special classes for the ReBAR memory. Have that as a special case because it's so limited and needs special care.
 static constexpr Array<GpuMemoryManagerClassInfo, 3> kRebarClasses{{{1_MB, 1_MB}, {12_MB, 12_MB}, {24_MB, 24_MB}}};
@@ -37,8 +32,7 @@ Error GpuMemoryManagerInterface::allocateChunk(U32 classIdx, GpuMemoryManagerChu
 	VkDeviceMemory memHandle;
 	if(vkAllocateMemory(getVkDevice(), &ci, nullptr, &memHandle) != VK_SUCCESS) [[unlikely]]
 	{
-		ANKI_VK_LOGF("Out of GPU memory. Mem type index %u, size %zu", m_memTypeIdx,
-					 m_classInfos[classIdx].m_suballocationSize);
+		ANKI_VK_LOGF("Out of GPU memory. Mem type index %u, size %zu", m_memTypeIdx, m_classInfos[classIdx].m_suballocationSize);
 	}
 
 	chunk = newInstance<GpuMemoryManagerChunk>(GrMemoryPool::getSingleton());
@@ -88,8 +82,8 @@ void GpuMemoryManager::init(Bool exposeBufferGpuAddress)
 	ANKI_VK_LOGV("Initializing memory manager");
 	for(const GpuMemoryManagerClassInfo& c : kClasses)
 	{
-		ANKI_VK_LOGV("\tGPU mem class. Chunk size: %lu, suballocationSize: %lu, allocsPerChunk %lu", c.m_chunkSize,
-					 c.m_suballocationSize, c.m_chunkSize / c.m_suballocationSize);
+		ANKI_VK_LOGV("\tGPU mem class. Chunk size: %lu, suballocationSize: %lu, allocsPerChunk %lu", c.m_chunkSize, c.m_suballocationSize,
+					 c.m_chunkSize / c.m_suballocationSize);
 	}
 
 	// Image buffer granularity
@@ -101,9 +95,8 @@ void GpuMemoryManager::init(Bool exposeBufferGpuAddress)
 
 		if(m_bufferImageGranularity > 4_KB)
 		{
-			ANKI_VK_LOGW(
-				"Buffer/image mem granularity is too high (%u). It will force high alignments and it will waste memory",
-				m_bufferImageGranularity);
+			ANKI_VK_LOGW("Buffer/image mem granularity is too high (%u). It will force high alignments and it will waste memory",
+						 m_bufferImageGranularity);
 		}
 
 		for(const GpuMemoryManagerClassInfo& c : kClasses)
@@ -112,8 +105,7 @@ void GpuMemoryManager::init(Bool exposeBufferGpuAddress)
 			{
 				ANKI_VK_LOGW("Memory class is not aligned to buffer/image granularity (%u). It won't be used in "
 							 "allocations: Chunk size: %lu, suballocationSize: %lu, allocsPerChunk %lu",
-							 m_bufferImageGranularity, c.m_chunkSize, c.m_suballocationSize,
-							 c.m_chunkSize / c.m_suballocationSize);
+							 m_bufferImageGranularity, c.m_chunkSize, c.m_suballocationSize, c.m_chunkSize / c.m_suballocationSize);
 			}
 		}
 	}
@@ -133,11 +125,9 @@ void GpuMemoryManager::init(Bool exposeBufferGpuAddress)
 
 		// Find if it's ReBAR
 		const VkMemoryPropertyFlags props = m_memoryProperties.memoryTypes[memTypeIdx].propertyFlags;
-		const VkMemoryPropertyFlags reBarProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
-												 | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
-												 | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-		const PtrSize heapSize =
-			m_memoryProperties.memoryHeaps[m_memoryProperties.memoryTypes[memTypeIdx].heapIndex].size;
+		const VkMemoryPropertyFlags reBarProps =
+			VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+		const PtrSize heapSize = m_memoryProperties.memoryHeaps[m_memoryProperties.memoryTypes[memTypeIdx].heapIndex].size;
 		const Bool isReBar = props == reBarProps && heapSize <= 256_MB;
 
 		if(isReBar)
@@ -233,15 +223,13 @@ void* GpuMemoryManager::getMappedAddress(GpuMemoryHandle& handle)
 
 	if(handle.m_chunk->m_mappedAddress == nullptr)
 	{
-		ANKI_VK_CHECKF(vkMapMemory(getVkDevice(), handle.m_chunk->m_handle, 0, handle.m_chunk->m_size, 0,
-								   &handle.m_chunk->m_mappedAddress));
+		ANKI_VK_CHECKF(vkMapMemory(getVkDevice(), handle.m_chunk->m_handle, 0, handle.m_chunk->m_size, 0, &handle.m_chunk->m_mappedAddress));
 	}
 
 	return static_cast<void*>(static_cast<U8*>(handle.m_chunk->m_mappedAddress) + handle.m_offset);
 }
 
-U32 GpuMemoryManager::findMemoryType(U32 resourceMemTypeBits, VkMemoryPropertyFlags preferFlags,
-									 VkMemoryPropertyFlags avoidFlags) const
+U32 GpuMemoryManager::findMemoryType(U32 resourceMemTypeBits, VkMemoryPropertyFlags preferFlags, VkMemoryPropertyFlags avoidFlags) const
 {
 	U32 prefered = kMaxU32;
 
@@ -265,10 +253,8 @@ U32 GpuMemoryManager::findMemoryType(U32 resourceMemTypeBits, VkMemoryPropertyFl
 					// On some Intel drivers there are identical memory types pointing to different heaps. Choose the
 					// biggest heap
 
-					const PtrSize crntHeapSize =
-						m_memoryProperties.memoryHeaps[m_memoryProperties.memoryTypes[i].heapIndex].size;
-					const PtrSize prevHeapSize =
-						m_memoryProperties.memoryHeaps[m_memoryProperties.memoryTypes[prefered].heapIndex].size;
+					const PtrSize crntHeapSize = m_memoryProperties.memoryHeaps[m_memoryProperties.memoryTypes[i].heapIndex].size;
+					const PtrSize prevHeapSize = m_memoryProperties.memoryHeaps[m_memoryProperties.memoryTypes[prefered].heapIndex].size;
 
 					if(crntHeapSize > prevHeapSize)
 					{

+ 2 - 4
AnKi/Gr/Vulkan/GpuMemoryManager.h

@@ -146,15 +146,13 @@ public:
 	[[nodiscard]] void* getMappedAddress(GpuMemoryHandle& handle);
 
 	/// Find a suitable memory type.
-	U32 findMemoryType(U32 resourceMemTypeBits, VkMemoryPropertyFlags preferFlags,
-					   VkMemoryPropertyFlags avoidFlags) const;
+	U32 findMemoryType(U32 resourceMemTypeBits, VkMemoryPropertyFlags preferFlags, VkMemoryPropertyFlags avoidFlags) const;
 
 	/// Get some statistics.
 	void getStats(GpuMemoryManagerStats& stats) const;
 
 private:
-	using ClassAllocator = ClassAllocatorBuilder<GpuMemoryManagerChunk, GpuMemoryManagerInterface, Mutex,
-												 SingletonMemoryPoolWrapper<GrMemoryPool>>;
+	using ClassAllocator = ClassAllocatorBuilder<GpuMemoryManagerChunk, GpuMemoryManagerInterface, Mutex, SingletonMemoryPoolWrapper<GrMemoryPool>>;
 
 	GrDynamicArray<ClassAllocator> m_callocs;
 

+ 49 - 89
AnKi/Gr/Vulkan/GrManagerImpl.cpp

@@ -166,25 +166,22 @@ Error GrManagerImpl::initInternal(const GrManagerInitInfo& init)
 		m_capabilities.m_unalignedBbpTextureFormats = true;
 
 		VkImageFormatProperties props = {};
-		VkResult res = vkGetPhysicalDeviceImageFormatProperties(
-			m_physicalDevice, VK_FORMAT_R8G8B8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
-			VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 0, &props);
+		VkResult res = vkGetPhysicalDeviceImageFormatProperties(m_physicalDevice, VK_FORMAT_R8G8B8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
+																VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 0, &props);
 		if(res == VK_ERROR_FORMAT_NOT_SUPPORTED)
 		{
 			m_capabilities.m_unalignedBbpTextureFormats = false;
 		}
 
-		res = vkGetPhysicalDeviceImageFormatProperties(
-			m_physicalDevice, VK_FORMAT_R16G16B16_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
-			VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 0, &props);
+		res = vkGetPhysicalDeviceImageFormatProperties(m_physicalDevice, VK_FORMAT_R16G16B16_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
+													   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 0, &props);
 		if(res == VK_ERROR_FORMAT_NOT_SUPPORTED)
 		{
 			m_capabilities.m_unalignedBbpTextureFormats = false;
 		}
 
-		res = vkGetPhysicalDeviceImageFormatProperties(
-			m_physicalDevice, VK_FORMAT_R32G32B32_SFLOAT, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
-			VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 0, &props);
+		res = vkGetPhysicalDeviceImageFormatProperties(m_physicalDevice, VK_FORMAT_R32G32B32_SFLOAT, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
+													   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 0, &props);
 		if(res == VK_ERROR_FORMAT_NOT_SUPPORTED)
 		{
 			m_capabilities.m_unalignedBbpTextureFormats = false;
@@ -245,8 +242,7 @@ Error GrManagerImpl::initInstance()
 				CString layerName = layer.layerName;
 
 				static constexpr const Char* kValidationName = "VK_LAYER_KHRONOS_validation";
-				if((ConfigSet::getSingleton().getGrValidation() || ConfigSet::getSingleton().getGrDebugPrintf())
-				   && layerName == kValidationName)
+				if((ConfigSet::getSingleton().getGrValidation() || ConfigSet::getSingleton().getGrDebugPrintf()) && layerName == kValidationName)
 				{
 					layersToEnable.emplaceBack(kValidationName);
 				}
@@ -363,9 +359,8 @@ Error GrManagerImpl::initInstance()
 		}
 
 		if(!(m_extensions
-			 & (VulkanExtensions::kEXT_headless_surface | VulkanExtensions::kKHR_xcb_surface
-				| VulkanExtensions::kKHR_xlib_surface | VulkanExtensions::kKHR_win32_surface
-				| VulkanExtensions::kKHR_android_surface)))
+			 & (VulkanExtensions::kEXT_headless_surface | VulkanExtensions::kKHR_xcb_surface | VulkanExtensions::kKHR_xlib_surface
+				| VulkanExtensions::kKHR_win32_surface | VulkanExtensions::kKHR_android_surface)))
 		{
 			ANKI_VK_LOGE("Couldn't find suitable surface extension");
 			return Error::kFunctionFailed;
@@ -407,11 +402,9 @@ Error GrManagerImpl::initInstance()
 	{
 		VkDebugUtilsMessengerCreateInfoEXT info = {};
 		info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
-		info.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT
-							   | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT
+		info.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT
 							   | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
-		info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT
-						   | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT
+		info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT
 						   | VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT;
 		info.pfnUserCallback = debugReportCallbackEXT;
 		info.pUserData = this;
@@ -467,8 +460,7 @@ Error GrManagerImpl::initInstance()
 				};
 
 				// Put descrete GPUs first
-				return findDeviceTypeWeight(a.m_vkProps.properties.deviceType)
-					   > findDeviceTypeWeight(b.m_vkProps.properties.deviceType);
+				return findDeviceTypeWeight(a.m_vkProps.properties.deviceType) > findDeviceTypeWeight(b.m_vkProps.properties.deviceType);
 			}
 			else
 			{
@@ -481,12 +473,10 @@ Error GrManagerImpl::initInstance()
 		ANKI_VK_LOGI("Physical devices:");
 		for(U32 devIdx = 0; devIdx < count; ++devIdx)
 		{
-			ANKI_VK_LOGI((devIdx == chosenPhysDevIdx) ? "\t(Selected) %s" : "\t%s",
-						 devs[devIdx].m_vkProps.properties.deviceName);
+			ANKI_VK_LOGI((devIdx == chosenPhysDevIdx) ? "\t(Selected) %s" : "\t%s", devs[devIdx].m_vkProps.properties.deviceName);
 		}
 
-		m_capabilities.m_discreteGpu =
-			devs[chosenPhysDevIdx].m_vkProps.properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
+		m_capabilities.m_discreteGpu = devs[chosenPhysDevIdx].m_vkProps.properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
 		m_physicalDevice = devs[chosenPhysDevIdx].m_pdev;
 	}
 
@@ -534,8 +524,7 @@ Error GrManagerImpl::initInstance()
 		m_capabilities.m_minSubgroupSize = 8;
 		m_capabilities.m_maxSubgroupSize = 8;
 	}
-	ANKI_VK_LOGI("GPU is %s. Vendor identified as %s", m_devProps.properties.deviceName,
-				 &kGPUVendorStrings[m_capabilities.m_gpuVendor][0]);
+	ANKI_VK_LOGI("GPU is %s. Vendor identified as %s", m_devProps.properties.deviceName, &kGPUVendorStrings[m_capabilities.m_gpuVendor][0]);
 
 	// Set limits
 	m_capabilities.m_uniformBufferBindOffsetAlignment =
@@ -592,8 +581,7 @@ Error GrManagerImpl::initDevice()
 			{
 				m_queueFamilyIndices[VulkanQueueType::kGeneral] = i;
 			}
-			else if((queueInfos[i].queueFlags & VK_QUEUE_COMPUTE_BIT)
-					&& !(queueInfos[i].queueFlags & VK_QUEUE_GRAPHICS_BIT))
+			else if((queueInfos[i].queueFlags & VK_QUEUE_COMPUTE_BIT) && !(queueInfos[i].queueFlags & VK_QUEUE_GRAPHICS_BIT))
 			{
 				// This must be the async compute
 				m_queueFamilyIndices[VulkanQueueType::kCompute] = i;
@@ -670,8 +658,7 @@ Error GrManagerImpl::initDevice()
 				m_extensions |= VulkanExtensions::kKHR_swapchain;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
-			else if(extensionName == VK_AMD_SHADER_INFO_EXTENSION_NAME
-					&& ConfigSet::getSingleton().getCoreDisplayStats())
+			else if(extensionName == VK_AMD_SHADER_INFO_EXTENSION_NAME && ConfigSet::getSingleton().getCoreDisplayStats())
 			{
 				m_extensions |= VulkanExtensions::kAMD_shader_info;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
@@ -681,8 +668,7 @@ Error GrManagerImpl::initDevice()
 				m_extensions |= VulkanExtensions::kAMD_rasterization_order;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
-			else if(extensionName == VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME
-					&& ConfigSet::getSingleton().getGrRayTracing())
+			else if(extensionName == VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME && ConfigSet::getSingleton().getGrRayTracing())
 			{
 				m_extensions |= VulkanExtensions::kKHR_ray_tracing;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
@@ -692,29 +678,24 @@ Error GrManagerImpl::initDevice()
 			{
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
-			else if(extensionName == VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME
-					&& ConfigSet::getSingleton().getGrRayTracing())
+			else if(extensionName == VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME && ConfigSet::getSingleton().getGrRayTracing())
 			{
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
-			else if(extensionName == VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME
-					&& ConfigSet::getSingleton().getGrRayTracing())
+			else if(extensionName == VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME && ConfigSet::getSingleton().getGrRayTracing())
 			{
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
-			else if(extensionName == VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME
-					&& ConfigSet::getSingleton().getGrRayTracing())
+			else if(extensionName == VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME && ConfigSet::getSingleton().getGrRayTracing())
 			{
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
-			else if(extensionName == VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME
-					&& ConfigSet::getSingleton().getCoreDisplayStats() > 1)
+			else if(extensionName == VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME && ConfigSet::getSingleton().getCoreDisplayStats() > 1)
 			{
 				m_extensions |= VulkanExtensions::kKHR_pipeline_executable_properties;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
-			else if(extensionName == VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME
-					&& ConfigSet::getSingleton().getGrDebugPrintf())
+			else if(extensionName == VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME && ConfigSet::getSingleton().getGrDebugPrintf())
 			{
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
@@ -743,8 +724,7 @@ Error GrManagerImpl::initDevice()
 				m_extensions |= VulkanExtensions::kKHR_shader_float16_int8;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
-			else if(extensionName == VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME
-					&& ConfigSet::getSingleton().getGr64bitAtomics())
+			else if(extensionName == VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME && ConfigSet::getSingleton().getGr64bitAtomics())
 			{
 				m_extensions |= VulkanExtensions::kKHR_shader_atomic_int64;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
@@ -759,8 +739,7 @@ Error GrManagerImpl::initDevice()
 				m_extensions |= VulkanExtensions::kKHR_shader_float_controls;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
-			else if(extensionName == VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME
-					&& ConfigSet::getSingleton().getGrSamplerFilterMinMax())
+			else if(extensionName == VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME && ConfigSet::getSingleton().getGrSamplerFilterMinMax())
 			{
 				m_extensions |= VulkanExtensions::kKHR_sampler_filter_min_max;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
@@ -770,8 +749,7 @@ Error GrManagerImpl::initDevice()
 				m_extensions |= VulkanExtensions::kKHR_create_renderpass_2;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
 			}
-			else if(extensionName == VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME
-					&& ConfigSet::getSingleton().getGrVrs())
+			else if(extensionName == VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME && ConfigSet::getSingleton().getGrVrs())
 			{
 				m_extensions |= VulkanExtensions::kKHR_fragment_shading_rate;
 				extensionsToEnable[extensionsToEnableCount++] = extensionName.cstr();
@@ -824,8 +802,7 @@ Error GrManagerImpl::initDevice()
 		devFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
 		vkGetPhysicalDeviceFeatures2(m_physicalDevice, &devFeatures);
 		m_devFeatures = devFeatures.features;
-		m_devFeatures.robustBufferAccess =
-			(ConfigSet::getSingleton().getGrValidation() && m_devFeatures.robustBufferAccess) ? true : false;
+		m_devFeatures.robustBufferAccess = (ConfigSet::getSingleton().getGrValidation() && m_devFeatures.robustBufferAccess) ? true : false;
 		ANKI_VK_LOGI("Robust buffer access is %s", (m_devFeatures.robustBufferAccess) ? "enabled" : "disabled");
 
 		ci.pEnabledFeatures = &m_devFeatures;
@@ -980,8 +957,7 @@ Error GrManagerImpl::initDevice()
 		m_rayQueryFeatures.pNext = &m_accelerationStructureFeatures;
 		vkGetPhysicalDeviceFeatures2(m_physicalDevice, &features);
 
-		if(!m_rtPipelineFeatures.rayTracingPipeline || !m_rayQueryFeatures.rayQuery
-		   || !m_accelerationStructureFeatures.accelerationStructure)
+		if(!m_rtPipelineFeatures.rayTracingPipeline || !m_rayQueryFeatures.rayQuery || !m_accelerationStructureFeatures.accelerationStructure)
 		{
 			ANKI_VK_LOGE("Ray tracing and ray query are both required");
 			return Error::kFunctionFailed;
@@ -1003,8 +979,7 @@ Error GrManagerImpl::initDevice()
 	// Pipeline features
 	if(!!(m_extensions & VulkanExtensions::kKHR_pipeline_executable_properties))
 	{
-		m_pplineExecutablePropertiesFeatures.sType =
-			VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR;
+		m_pplineExecutablePropertiesFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR;
 		m_pplineExecutablePropertiesFeatures.pipelineExecutableInfo = true;
 
 		m_pplineExecutablePropertiesFeatures.pNext = const_cast<void*>(ci.pNext);
@@ -1069,11 +1044,9 @@ Error GrManagerImpl::initDevice()
 		vkGetPhysicalDeviceFeatures2(m_physicalDevice, &features);
 
 		// Some checks
-		if(!m_fragmentShadingRateFeatures.attachmentFragmentShadingRate
-		   || !m_fragmentShadingRateFeatures.pipelineFragmentShadingRate)
+		if(!m_fragmentShadingRateFeatures.attachmentFragmentShadingRate || !m_fragmentShadingRateFeatures.pipelineFragmentShadingRate)
 		{
-			ANKI_VK_LOGW(VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME
-						 " doesn't support attachment and/or pipeline rates. Will disable VRS");
+			ANKI_VK_LOGW(VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME " doesn't support attachment and/or pipeline rates. Will disable VRS");
 			m_capabilities.m_vrs = false;
 		}
 		else
@@ -1085,8 +1058,7 @@ Error GrManagerImpl::initDevice()
 		if(m_capabilities.m_vrs)
 		{
 			VkPhysicalDeviceFragmentShadingRatePropertiesKHR fragmentShadingRateProperties = {};
-			fragmentShadingRateProperties.sType =
-				VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR;
+			fragmentShadingRateProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR;
 
 			VkPhysicalDeviceProperties2 properties = {};
 			properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
@@ -1104,9 +1076,8 @@ Error GrManagerImpl::initDevice()
 			}
 			else
 			{
-				m_capabilities.m_minShadingRateImageTexelSize =
-					max(fragmentShadingRateProperties.minFragmentShadingRateAttachmentTexelSize.width,
-						fragmentShadingRateProperties.minFragmentShadingRateAttachmentTexelSize.height);
+				m_capabilities.m_minShadingRateImageTexelSize = max(fragmentShadingRateProperties.minFragmentShadingRateAttachmentTexelSize.width,
+																	fragmentShadingRateProperties.minFragmentShadingRateAttachmentTexelSize.height);
 			}
 		}
 
@@ -1131,8 +1102,7 @@ Error GrManagerImpl::initDevice()
 	// Get VK_AMD_shader_info entry points
 	if(!!(m_extensions & VulkanExtensions::kAMD_shader_info))
 	{
-		m_pfnGetShaderInfoAMD =
-			reinterpret_cast<PFN_vkGetShaderInfoAMD>(vkGetDeviceProcAddr(m_device, "vkGetShaderInfoAMD"));
+		m_pfnGetShaderInfoAMD = reinterpret_cast<PFN_vkGetShaderInfoAMD>(vkGetDeviceProcAddr(m_device, "vkGetShaderInfoAMD"));
 		if(!m_pfnGetShaderInfoAMD)
 		{
 			ANKI_VK_LOGW("VK_AMD_shader_info is present but vkGetShaderInfoAMD is not there");
@@ -1166,8 +1136,7 @@ Error GrManagerImpl::initMemory()
 	}
 	for(U32 i = 0; i < m_memoryProperties.memoryTypeCount; ++i)
 	{
-		ANKI_VK_LOGV("\tMem type %u points to heap %u, flags %" ANKI_PRIb32, i,
-					 m_memoryProperties.memoryTypes[i].heapIndex,
+		ANKI_VK_LOGV("\tMem type %u points to heap %u, flags %" ANKI_PRIb32, i, m_memoryProperties.memoryTypes[i].heapIndex,
 					 ANKI_FORMAT_U32(m_memoryProperties.memoryTypes[i].propertyFlags));
 	}
 
@@ -1177,8 +1146,7 @@ Error GrManagerImpl::initMemory()
 }
 
 #if ANKI_GR_MANAGER_DEBUG_MEMMORY
-void* GrManagerImpl::allocateCallback(void* userData, size_t size, size_t alignment,
-									  VkSystemAllocationScope allocationScope)
+void* GrManagerImpl::allocateCallback(void* userData, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
 {
 	if(size == 0) [[unlikely]]
 	{
@@ -1201,8 +1169,7 @@ void* GrManagerImpl::allocateCallback(void* userData, size_t size, size_t alignm
 	return static_cast<AllocHeader*>(header);
 }
 
-void* GrManagerImpl::reallocateCallback(void* userData, void* original, size_t size, size_t alignment,
-										VkSystemAllocationScope allocationScope)
+void* GrManagerImpl::reallocateCallback(void* userData, void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
 {
 	if(original && size == 0)
 	{
@@ -1254,8 +1221,8 @@ TexturePtr GrManagerImpl::acquireNextPresentableTexture()
 	// Get new image
 	uint32_t imageIdx;
 
-	VkResult res = vkAcquireNextImageKHR(m_device, m_crntSwapchain->m_swapchain, UINT64_MAX,
-										 frame.m_acquireSemaphore->getHandle(), fence->getHandle(), &imageIdx);
+	VkResult res = vkAcquireNextImageKHR(m_device, m_crntSwapchain->m_swapchain, UINT64_MAX, frame.m_acquireSemaphore->getHandle(),
+										 fence->getHandle(), &imageIdx);
 
 	if(res == VK_ERROR_OUT_OF_DATE_KHR)
 	{
@@ -1271,8 +1238,8 @@ TexturePtr GrManagerImpl::acquireNextPresentableTexture()
 		m_crntSwapchain = m_swapchainFactory.newInstance();
 
 		// Can't fail a second time
-		ANKI_VK_CHECKF(vkAcquireNextImageKHR(m_device, m_crntSwapchain->m_swapchain, UINT64_MAX,
-											 frame.m_acquireSemaphore->getHandle(), fence->getHandle(), &imageIdx));
+		ANKI_VK_CHECKF(vkAcquireNextImageKHR(m_device, m_crntSwapchain->m_swapchain, UINT64_MAX, frame.m_acquireSemaphore->getHandle(),
+											 fence->getHandle(), &imageIdx));
 	}
 	else
 	{
@@ -1352,8 +1319,7 @@ void GrManagerImpl::resetFrame(PerFrame& frame)
 	frame.m_renderSemaphore.reset(nullptr);
 }
 
-void GrManagerImpl::flushCommandBuffer(MicroCommandBufferPtr cmdb, Bool cmdbRenderedToSwapchain,
-									   WeakArray<MicroSemaphorePtr> userWaitSemaphores,
+void GrManagerImpl::flushCommandBuffer(MicroCommandBufferPtr cmdb, Bool cmdbRenderedToSwapchain, WeakArray<MicroSemaphorePtr> userWaitSemaphores,
 									   MicroSemaphorePtr* userSignalSemaphore, Bool wait)
 {
 	constexpr U32 maxSemaphores = 8;
@@ -1429,8 +1395,7 @@ void GrManagerImpl::flushCommandBuffer(MicroCommandBufferPtr cmdb, Bool cmdbRend
 			waitSemaphores[submit.waitSemaphoreCount] = frame.m_acquireSemaphore->getHandle();
 
 			// That depends on how we use the swapchain img. Be a bit conservative
-			waitStages[submit.waitSemaphoreCount] =
-				VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+			waitStages[submit.waitSemaphoreCount] = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 			++submit.waitSemaphoreCount;
 
 			// Refresh the fence because the semaphore can't be recycled until the current submission is done
@@ -1496,8 +1461,7 @@ void GrManagerImpl::trySetVulkanHandleName(CString name, VkObjectType type, U64
 
 VkBool32 GrManagerImpl::debugReportCallbackEXT(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
 											   [[maybe_unused]] VkDebugUtilsMessageTypeFlagsEXT messageTypes,
-											   const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
-											   [[maybe_unused]] void* pUserData)
+											   const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, [[maybe_unused]] void* pUserData)
 {
 #if ANKI_PLATFORM_MOBILE
 	if(pCallbackData->messageIdNumber == 101294395)
@@ -1552,8 +1516,7 @@ void GrManagerImpl::printPipelineShaderInfo(VkPipeline ppline, CString name, Sha
 	}
 }
 
-Error GrManagerImpl::printPipelineShaderInfoInternal(VkPipeline ppline, CString name, ShaderTypeBit stages,
-													 U64 hash) const
+Error GrManagerImpl::printPipelineShaderInfoInternal(VkPipeline ppline, CString name, ShaderTypeBit stages, U64 hash) const
 {
 	if(m_pfnGetShaderInfoAMD)
 	{
@@ -1564,8 +1527,7 @@ Error GrManagerImpl::printPipelineShaderInfoInternal(VkPipeline ppline, CString
 		// Open the file
 		if(!m_shaderStatsFile.isOpen())
 		{
-			ANKI_CHECK(m_shaderStatsFile.open(
-				GrString().sprintf("%s/../ppline_stats.csv", m_cacheDir.cstr()).toCString(), FileOpenFlag::kWrite));
+			ANKI_CHECK(m_shaderStatsFile.open(GrString().sprintf("%s/../ppline_stats.csv", m_cacheDir.cstr()).toCString(), FileOpenFlag::kWrite));
 
 			ANKI_CHECK(m_shaderStatsFile.writeText("ppline name,hash,"
 												   "stage 0 VGPR,stage 0 SGPR,"
@@ -1596,8 +1558,7 @@ Error GrManagerImpl::printPipelineShaderInfoInternal(VkPipeline ppline, CString
 			str += GrString().sprintf("Stage %u: VGRPS %02u, SGRPS %02u ", U32(type), stats.resourceUsage.numUsedVgprs,
 									  stats.resourceUsage.numUsedSgprs);
 
-			ANKI_CHECK(m_shaderStatsFile.writeTextf((type != ShaderType::kLast) ? "%u,%u," : "%u,%u\n",
-													stats.resourceUsage.numUsedVgprs,
+			ANKI_CHECK(m_shaderStatsFile.writeTextf((type != ShaderType::kLast) ? "%u,%u," : "%u,%u\n", stats.resourceUsage.numUsedVgprs,
 													stats.resourceUsage.numUsedSgprs));
 		}
 
@@ -1623,8 +1584,7 @@ Error GrManagerImpl::printPipelineShaderInfoInternal(VkPipeline ppline, CString
 			prop = {};
 			prop.sType = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR;
 		}
-		ANKI_VK_CHECK(
-			vkGetPipelineExecutablePropertiesKHR(m_device, &pplineInf, &executableCount, &executableProps[0]));
+		ANKI_VK_CHECK(vkGetPipelineExecutablePropertiesKHR(m_device, &pplineInf, &executableCount, &executableProps[0]));
 
 		log.pushBackSprintf("Pipeline info \"%s\" (0x%016" PRIx64 "): ", name.cstr(), hash);
 		for(U32 i = 0; i < executableCount; ++i)

+ 5 - 9
AnKi/Gr/Vulkan/GrManagerImpl.h

@@ -105,9 +105,8 @@ public:
 	}
 	/// @}
 
-	void flushCommandBuffer(MicroCommandBufferPtr cmdb, Bool cmdbRenderedToSwapchain,
-							WeakArray<MicroSemaphorePtr> waitSemaphores, MicroSemaphorePtr* signalSemaphore,
-							Bool wait = false);
+	void flushCommandBuffer(MicroCommandBufferPtr cmdb, Bool cmdbRenderedToSwapchain, WeakArray<MicroSemaphorePtr> waitSemaphores,
+							MicroSemaphorePtr* signalSemaphore, Bool wait = false);
 
 	/// @name Memory
 	/// @{
@@ -338,19 +337,16 @@ private:
 	Error initMemory();
 
 #if ANKI_GR_MANAGER_DEBUG_MEMMORY
-	static void* allocateCallback(void* userData, size_t size, size_t alignment,
-								  VkSystemAllocationScope allocationScope);
+	static void* allocateCallback(void* userData, size_t size, size_t alignment, VkSystemAllocationScope allocationScope);
 
-	static void* reallocateCallback(void* userData, void* original, size_t size, size_t alignment,
-									VkSystemAllocationScope allocationScope);
+	static void* reallocateCallback(void* userData, void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope);
 
 	static void freeCallback(void* userData, void* ptr);
 #endif
 
 	void resetFrame(PerFrame& frame);
 
-	static VkBool32 debugReportCallbackEXT(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
-										   VkDebugUtilsMessageTypeFlagsEXT messageTypes,
+	static VkBool32 debugReportCallbackEXT(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes,
 										   const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, void* pUserData);
 
 	Error printPipelineShaderInfoInternal(VkPipeline ppline, CString name, ShaderTypeBit stages, U64 hash) const;

+ 1 - 2
AnKi/Gr/Vulkan/GrManagerImplSdl.cpp

@@ -19,8 +19,7 @@ namespace anki {
 
 Error GrManagerImpl::initSurface()
 {
-	if(!SDL_Vulkan_CreateSurface(static_cast<NativeWindowSdl&>(NativeWindow::getSingleton()).m_sdlWindow, m_instance,
-								 &m_surface))
+	if(!SDL_Vulkan_CreateSurface(static_cast<NativeWindowSdl&>(NativeWindow::getSingleton()).m_sdlWindow, m_instance, &m_surface))
 	{
 		ANKI_VK_LOGE("SDL_Vulkan_CreateSurface() failed: %s", SDL_GetError());
 		return Error::kFunctionFailed;

+ 11 - 15
AnKi/Gr/Vulkan/GrUpscalerImpl.cpp

@@ -60,8 +60,7 @@ Error GrUpscalerImpl::initInternal(const GrUpscalerInitInfo& initInfo)
 static NVSDK_NGX_PerfQuality_Value getDlssQualityModeToNVQualityMode(GrUpscalerQualityMode mode)
 {
 	static Array<NVSDK_NGX_PerfQuality_Value, U32(GrUpscalerQualityMode::kCount)> nvQualityModes = {
-		NVSDK_NGX_PerfQuality_Value_MaxPerf, NVSDK_NGX_PerfQuality_Value_Balanced,
-		NVSDK_NGX_PerfQuality_Value_MaxQuality};
+		NVSDK_NGX_PerfQuality_Value_MaxPerf, NVSDK_NGX_PerfQuality_Value_Balanced, NVSDK_NGX_PerfQuality_Value_MaxQuality};
 
 	return nvQualityModes[mode];
 }
@@ -82,8 +81,7 @@ Error GrUpscalerImpl::initDlss(const GrUpscalerInitInfo& initInfo)
 	// Currently, the SDK and this sample are not in sync.  The sample is a bit forward looking, in this case. This will
 	// likely be resolved very shortly, and therefore, the code below should be thought of as needed for a smooth user
 	// experience.
-#	if defined(NVSDK_NGX_Parameter_SuperSampling_NeedsUpdatedDriver) \
-		&& defined(NVSDK_NGX_Parameter_SuperSampling_MinDriverVersionMajor) \
+#	if defined(NVSDK_NGX_Parameter_SuperSampling_NeedsUpdatedDriver) && defined(NVSDK_NGX_Parameter_SuperSampling_MinDriverVersionMajor) \
 		&& defined(NVSDK_NGX_Parameter_SuperSampling_MinDriverVersionMinor)
 
 	// If NGX Successfully initialized then it should set those flags in return
@@ -106,8 +104,7 @@ Error GrUpscalerImpl::initDlss(const GrUpscalerInitInfo& initInfo)
 	}
 
 	// Create the feature
-	ANKI_CHECK(createDlssFeature(initInfo.m_sourceTextureResolution, initInfo.m_targetTextureResolution,
-								 initInfo.m_qualityMode));
+	ANKI_CHECK(createDlssFeature(initInfo.m_sourceTextureResolution, initInfo.m_targetTextureResolution, initInfo.m_qualityMode));
 
 	return Error::kNone;
 }
@@ -116,15 +113,14 @@ Error GrUpscalerImpl::createDlssFeature(const UVec2& srcRes, const UVec2& dstRes
 {
 	NVSDK_NGX_PerfQuality_Value nvQuality = getDlssQualityModeToNVQualityMode(quality);
 	F32 sharpness; // Deprecared in newer DLSS
-	ANKI_NGX_CHECK(NGX_DLSS_GET_OPTIMAL_SETTINGS(
-		m_ngxParameters, dstRes.x(), dstRes.y(), nvQuality, &m_recommendedSettings.m_optimalRenderSize.x(),
-		&m_recommendedSettings.m_optimalRenderSize.y(), &m_recommendedSettings.m_dynamicMaximumRenderSize.x(),
-		&m_recommendedSettings.m_dynamicMaximumRenderSize.y(), &m_recommendedSettings.m_dynamicMinimumRenderSize.x(),
-		&m_recommendedSettings.m_dynamicMinimumRenderSize.y(), &sharpness));
+	ANKI_NGX_CHECK(
+		NGX_DLSS_GET_OPTIMAL_SETTINGS(m_ngxParameters, dstRes.x(), dstRes.y(), nvQuality, &m_recommendedSettings.m_optimalRenderSize.x(),
+									  &m_recommendedSettings.m_optimalRenderSize.y(), &m_recommendedSettings.m_dynamicMaximumRenderSize.x(),
+									  &m_recommendedSettings.m_dynamicMaximumRenderSize.y(), &m_recommendedSettings.m_dynamicMinimumRenderSize.x(),
+									  &m_recommendedSettings.m_dynamicMinimumRenderSize.y(), &sharpness));
 
 	// Next create features	(See NVSDK_NGX_DLSS_Feature_Flags in nvsdk_ngx_defs.h)
-	const I32 dlssCreateFeatureFlags =
-		NVSDK_NGX_DLSS_Feature_Flags_MVLowRes | NVSDK_NGX_DLSS_Feature_Flags_IsHDR; // TODO
+	const I32 dlssCreateFeatureFlags = NVSDK_NGX_DLSS_Feature_Flags_MVLowRes | NVSDK_NGX_DLSS_Feature_Flags_IsHDR; // TODO
 
 	NVSDK_NGX_DLSS_Create_Params dlssCreateParams;
 	memset(&dlssCreateParams, 0, sizeof(dlssCreateParams));
@@ -144,8 +140,8 @@ Error GrUpscalerImpl::createDlssFeature(const UVec2& srcRes, const UVec2& dstRes
 	cmdbImpl.beginRecordingExt();
 	const U32 creationNodeMask = 1;
 	const U32 visibilityNodeMask = 1;
-	ANKI_NGX_CHECK(NGX_VULKAN_CREATE_DLSS_EXT(cmdbImpl.getHandle(), creationNodeMask, visibilityNodeMask,
-											  &m_dlssFeature, m_ngxParameters, &dlssCreateParams));
+	ANKI_NGX_CHECK(
+		NGX_VULKAN_CREATE_DLSS_EXT(cmdbImpl.getHandle(), creationNodeMask, visibilityNodeMask, &m_dlssFeature, m_ngxParameters, &dlssCreateParams));
 	FencePtr fence;
 	cmdb->flush({}, &fence);
 	fence->clientWait(60.0_sec);

+ 2 - 4
AnKi/Gr/Vulkan/OcclusionQueryImpl.cpp

@@ -28,10 +28,8 @@ OcclusionQueryResult OcclusionQueryImpl::getResultInternal() const
 	U64 out = 0;
 
 	VkResult res;
-	ANKI_VK_CHECKF(res = vkGetQueryPoolResults(getVkDevice(), m_handle.getQueryPool(), m_handle.getQueryIndex(), 1,
-											   sizeof(out), &out, sizeof(out),
-											   VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT
-												   | VK_QUERY_RESULT_PARTIAL_BIT));
+	ANKI_VK_CHECKF(res = vkGetQueryPoolResults(getVkDevice(), m_handle.getQueryPool(), m_handle.getQueryIndex(), 1, sizeof(out), &out, sizeof(out),
+											   VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT));
 
 	OcclusionQueryResult qout = OcclusionQueryResult::kNotAvailable;
 	if(res == VK_SUCCESS)

+ 25 - 34
AnKi/Gr/Vulkan/Pipeline.cpp

@@ -71,11 +71,9 @@ Bool PipelineStateTracker::updateHashes()
 
 				if(dirty)
 				{
+					m_hashes.m_vertexAttribs[i] = computeHash(&m_state.m_vertex.m_attributes[i], sizeof(m_state.m_vertex.m_attributes[i]));
 					m_hashes.m_vertexAttribs[i] =
-						computeHash(&m_state.m_vertex.m_attributes[i], sizeof(m_state.m_vertex.m_attributes[i]));
-					m_hashes.m_vertexAttribs[i] =
-						appendHash(&m_state.m_vertex.m_bindings[i], sizeof(m_state.m_vertex.m_bindings[i]),
-								   m_hashes.m_vertexAttribs[i]);
+						appendHash(&m_state.m_vertex.m_bindings[i], sizeof(m_state.m_vertex.m_bindings[i]), m_hashes.m_vertexAttribs[i]);
 
 					stateDirty = true;
 				}
@@ -135,8 +133,7 @@ Bool PipelineStateTracker::updateHashes()
 				if(m_fbColorAttachmentMask.get(i) && m_dirty.m_colAttachments.get(i))
 				{
 					m_dirty.m_colAttachments.unset(i);
-					m_hashes.m_colAttachments[i] =
-						computeHash(&m_state.m_color.m_attachments[i], sizeof(m_state.m_color.m_attachments[i]));
+					m_hashes.m_colAttachments[i] = computeHash(&m_state.m_color.m_attachments[i], sizeof(m_state.m_color.m_attachments[i]));
 					stateDirty = true;
 				}
 			}
@@ -310,32 +307,28 @@ const VkGraphicsPipelineCreateInfo& PipelineStateTracker::updatePipelineCreateIn
 		if(m_fbDepth)
 		{
 			dsCi.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
-			dsCi.depthTestEnable = m_state.m_depth.m_depthCompareFunction != CompareOperation::kAlways
-								   || m_state.m_depth.m_depthWriteEnabled;
+			dsCi.depthTestEnable = m_state.m_depth.m_depthCompareFunction != CompareOperation::kAlways || m_state.m_depth.m_depthWriteEnabled;
 			dsCi.depthWriteEnable = m_state.m_depth.m_depthWriteEnabled;
 			dsCi.depthCompareOp = convertCompareOp(m_state.m_depth.m_depthCompareFunction);
 		}
 
 		if(m_fbStencil)
 		{
-			dsCi.stencilTestEnable =
-				!stencilTestDisabled(m_state.m_stencil.m_face[0].m_stencilFailOperation,
-									 m_state.m_stencil.m_face[0].m_stencilPassDepthFailOperation,
-									 m_state.m_stencil.m_face[0].m_stencilPassDepthPassOperation,
-									 m_state.m_stencil.m_face[0].m_compareFunction)
-				|| !stencilTestDisabled(m_state.m_stencil.m_face[1].m_stencilFailOperation,
-										m_state.m_stencil.m_face[1].m_stencilPassDepthFailOperation,
-										m_state.m_stencil.m_face[1].m_stencilPassDepthPassOperation,
-										m_state.m_stencil.m_face[1].m_compareFunction);
-
-			dsCi.front.failOp = convertStencilOp(m_state.m_stencil.m_face[0].m_stencilFailOperation);
-			dsCi.front.passOp = convertStencilOp(m_state.m_stencil.m_face[0].m_stencilPassDepthPassOperation);
-			dsCi.front.depthFailOp = convertStencilOp(m_state.m_stencil.m_face[0].m_stencilPassDepthFailOperation);
-			dsCi.front.compareOp = convertCompareOp(m_state.m_stencil.m_face[0].m_compareFunction);
-			dsCi.back.failOp = convertStencilOp(m_state.m_stencil.m_face[1].m_stencilFailOperation);
-			dsCi.back.passOp = convertStencilOp(m_state.m_stencil.m_face[1].m_stencilPassDepthPassOperation);
-			dsCi.back.depthFailOp = convertStencilOp(m_state.m_stencil.m_face[1].m_stencilPassDepthFailOperation);
-			dsCi.back.compareOp = convertCompareOp(m_state.m_stencil.m_face[1].m_compareFunction);
+			const StencilPipelineState& ss = m_state.m_stencil;
+
+			dsCi.stencilTestEnable = !stencilTestDisabled(ss.m_face[0].m_stencilFailOperation, ss.m_face[0].m_stencilPassDepthFailOperation,
+														  ss.m_face[0].m_stencilPassDepthPassOperation, ss.m_face[0].m_compareFunction)
+									 || !stencilTestDisabled(ss.m_face[1].m_stencilFailOperation, ss.m_face[1].m_stencilPassDepthFailOperation,
+															 ss.m_face[1].m_stencilPassDepthPassOperation, ss.m_face[1].m_compareFunction);
+
+			dsCi.front.failOp = convertStencilOp(ss.m_face[0].m_stencilFailOperation);
+			dsCi.front.passOp = convertStencilOp(ss.m_face[0].m_stencilPassDepthPassOperation);
+			dsCi.front.depthFailOp = convertStencilOp(ss.m_face[0].m_stencilPassDepthFailOperation);
+			dsCi.front.compareOp = convertCompareOp(ss.m_face[0].m_compareFunction);
+			dsCi.back.failOp = convertStencilOp(ss.m_face[1].m_stencilFailOperation);
+			dsCi.back.passOp = convertStencilOp(ss.m_face[1].m_stencilPassDepthPassOperation);
+			dsCi.back.depthFailOp = convertStencilOp(ss.m_face[1].m_stencilPassDepthFailOperation);
+			dsCi.back.compareOp = convertCompareOp(ss.m_face[1].m_compareFunction);
 		}
 
 		ci.pDepthStencilState = &dsCi;
@@ -356,8 +349,8 @@ const VkGraphicsPipelineCreateInfo& PipelineStateTracker::updatePipelineCreateIn
 			VkPipelineColorBlendAttachmentState& out = m_ci.m_colAttachments[i];
 			const ColorAttachmentState& in = m_state.m_color.m_attachments[i];
 
-			out.blendEnable = !blendingDisabled(in.m_srcBlendFactorRgb, in.m_dstBlendFactorRgb, in.m_srcBlendFactorA,
-												in.m_dstBlendFactorA, in.m_blendFunctionRgb, in.m_blendFunctionA);
+			out.blendEnable = !blendingDisabled(in.m_srcBlendFactorRgb, in.m_dstBlendFactorRgb, in.m_srcBlendFactorA, in.m_dstBlendFactorA,
+												in.m_blendFunctionRgb, in.m_blendFunctionA);
 			out.srcColorBlendFactor = convertBlendFactor(in.m_srcBlendFactorRgb);
 			out.dstColorBlendFactor = convertBlendFactor(in.m_dstBlendFactorRgb);
 			out.srcAlphaBlendFactor = convertBlendFactor(in.m_srcBlendFactorA);
@@ -378,10 +371,9 @@ const VkGraphicsPipelineCreateInfo& PipelineStateTracker::updatePipelineCreateIn
 
 	// Almost all state is dynamic. Depth bias is static
 	static constexpr Array<VkDynamicState, 10> kDyn = {
-		{VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR, VK_DYNAMIC_STATE_BLEND_CONSTANTS,
-		 VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
-		 VK_DYNAMIC_STATE_STENCIL_REFERENCE, VK_DYNAMIC_STATE_LINE_WIDTH, VK_DYNAMIC_STATE_DEPTH_BIAS,
-		 VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR}};
+		{VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR, VK_DYNAMIC_STATE_BLEND_CONSTANTS, VK_DYNAMIC_STATE_DEPTH_BOUNDS,
+		 VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, VK_DYNAMIC_STATE_STENCIL_REFERENCE, VK_DYNAMIC_STATE_LINE_WIDTH,
+		 VK_DYNAMIC_STATE_DEPTH_BIAS, VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR}};
 
 	dynCi.dynamicStateCount = (m_vrsCapable) ? kDyn.getSize() : (kDyn.getSize() - 1);
 	dynCi.pDynamicStates = &kDyn[0];
@@ -498,8 +490,7 @@ void PipelineFactory::getOrCreatePipeline(PipelineStateTracker& state, Pipeline&
 	ppline.m_handle = pp.m_handle;
 
 	// Print shader info
-	getGrManagerImpl().printPipelineShaderInfo(pp.m_handle, state.m_state.m_prog->getName(),
-											   state.m_state.m_prog->getStages(), hash);
+	getGrManagerImpl().printPipelineShaderInfo(pp.m_handle, state.m_state.m_prog->getName(), state.m_state.m_prog->getStages(), hash);
 }
 
 } // end namespace anki

+ 4 - 6
AnKi/Gr/Vulkan/Pipeline.h

@@ -128,8 +128,7 @@ public:
 	Bool m_alphaToCoverageEnabled = false;
 	Array<ColorAttachmentState, kMaxColorRenderTargets> m_attachments;
 };
-static_assert(sizeof(ColorPipelineState) == sizeof(ColorAttachmentState) * kMaxColorRenderTargets + sizeof(U8),
-			  "Packed because it will be hashed");
+static_assert(sizeof(ColorPipelineState) == sizeof(ColorAttachmentState) * kMaxColorRenderTargets + sizeof(U8), "Packed because it will be hashed");
 
 class AllPipelineState
 {
@@ -237,8 +236,8 @@ public:
 		}
 	}
 
-	void setStencilOperations(FaceSelectionBit face, StencilOperation stencilFail,
-							  StencilOperation stencilPassDepthFail, StencilOperation stencilPassDepthPass)
+	void setStencilOperations(FaceSelectionBit face, StencilOperation stencilFail, StencilOperation stencilPassDepthFail,
+							  StencilOperation stencilPassDepthPass)
 	{
 		if(!!(face & FaceSelectionBit::kFront)
 		   && (m_state.m_stencil.m_face[0].m_stencilFailOperation != stencilFail
@@ -317,8 +316,7 @@ public:
 	void setBlendFactors(U32 attachment, BlendFactor srcRgb, BlendFactor dstRgb, BlendFactor srcA, BlendFactor dstA)
 	{
 		ColorAttachmentState& c = m_state.m_color.m_attachments[attachment];
-		if(c.m_srcBlendFactorRgb != srcRgb || c.m_dstBlendFactorRgb != dstRgb || c.m_srcBlendFactorA != srcA
-		   || c.m_dstBlendFactorA != dstA)
+		if(c.m_srcBlendFactorRgb != srcRgb || c.m_dstBlendFactorRgb != dstRgb || c.m_srcBlendFactorA != srcA || c.m_dstBlendFactorA != dstA)
 		{
 			c.m_srcBlendFactorRgb = srcRgb;
 			c.m_dstBlendFactorRgb = dstRgb;

+ 1 - 2
AnKi/Gr/Vulkan/PipelineLayout.cpp

@@ -19,8 +19,7 @@ void PipelineLayoutFactory::destroy()
 	}
 }
 
-Error PipelineLayoutFactory::newPipelineLayout(const WeakArray<DescriptorSetLayout>& dsetLayouts, U32 pushConstantsSize,
-											   PipelineLayout& layout)
+Error PipelineLayoutFactory::newPipelineLayout(const WeakArray<DescriptorSetLayout>& dsetLayouts, U32 pushConstantsSize, PipelineLayout& layout)
 {
 	U64 hash = computeHash(&pushConstantsSize, sizeof(pushConstantsSize));
 	Array<VkDescriptorSetLayout, kMaxDescriptorSets> vkDsetLayouts;

+ 1 - 2
AnKi/Gr/Vulkan/PipelineLayout.h

@@ -38,8 +38,7 @@ public:
 	void destroy();
 
 	/// @note It's thread-safe.
-	Error newPipelineLayout(const WeakArray<DescriptorSetLayout>& dsetLayouts, U32 pushConstantsSize,
-							PipelineLayout& layout);
+	Error newPipelineLayout(const WeakArray<DescriptorSetLayout>& dsetLayouts, U32 pushConstantsSize, PipelineLayout& layout);
 
 private:
 	GrHashMap<U64, VkPipelineLayout> m_layouts;

+ 4 - 8
AnKi/Gr/Vulkan/ShaderImpl.cpp

@@ -32,8 +32,7 @@ ShaderImpl::~ShaderImpl()
 
 	if(m_specConstInfo.pMapEntries)
 	{
-		deleteArray(GrMemoryPool::getSingleton(), const_cast<VkSpecializationMapEntry*>(m_specConstInfo.pMapEntries),
-					m_specConstInfo.mapEntryCount);
+		deleteArray(GrMemoryPool::getSingleton(), const_cast<VkSpecializationMapEntry*>(m_specConstInfo.pMapEntries), m_specConstInfo.mapEntryCount);
 	}
 
 	if(m_specConstInfo.pData)
@@ -52,12 +51,10 @@ Error ShaderImpl::init(const ShaderInitInfo& inf)
 #if ANKI_DUMP_SHADERS
 	{
 		StringRaii fnameSpirv(getAllocator());
-		fnameSpirv.sprintf("%s/%s_t%u_%05u.spv", getManager().getCacheDirectory().cstr(), getName().cstr(),
-						   U(m_shaderType), getUuid());
+		fnameSpirv.sprintf("%s/%s_t%u_%05u.spv", getManager().getCacheDirectory().cstr(), getName().cstr(), U(m_shaderType), getUuid());
 
 		File fileSpirv;
-		ANKI_CHECK(fileSpirv.open(fnameSpirv.toCString(),
-								  FileOpenFlag::kBinary | FileOpenFlag::kWrite | FileOpenFlag::kSpecial));
+		ANKI_CHECK(fileSpirv.open(fnameSpirv.toCString(), FileOpenFlag::kBinary | FileOpenFlag::kWrite | FileOpenFlag::kSpecial));
 		ANKI_CHECK(fileSpirv.write(&inf.m_binary[0], inf.m_binary.getSize()));
 	}
 #endif
@@ -233,8 +230,7 @@ void ShaderImpl::doReflection(ConstWeakArray<U8> spirv, SpecConstsVector& specCo
 	// Push consts
 	if(rsrc.push_constant_buffers.size() == 1)
 	{
-		const U32 blockSize =
-			U32(spvc.get_declared_struct_size(spvc.get_type(rsrc.push_constant_buffers[0].base_type_id)));
+		const U32 blockSize = U32(spvc.get_declared_struct_size(spvc.get_type(rsrc.push_constant_buffers[0].base_type_id)));
 		ANKI_ASSERT(blockSize > 0);
 		ANKI_ASSERT(blockSize % 16 == 0 && "Should be aligned");
 		ANKI_ASSERT(blockSize <= getGrManagerImpl().getDeviceCapabilities().m_pushConstantsSize);

+ 1 - 2
AnKi/Gr/Vulkan/ShaderImpl.h

@@ -24,8 +24,7 @@ public:
 	BitSet<kMaxColorRenderTargets, U8> m_colorAttachmentWritemask = {false};
 	BitSet<kMaxVertexAttributes, U8> m_attributeMask = {false};
 	BitSet<kMaxDescriptorSets, U8> m_descriptorSetMask = {false};
-	Array<BitSet<kMaxBindingsPerDescriptorSet, U8>, kMaxDescriptorSets> m_activeBindingMask = {
-		{{false}, {false}, {false}}};
+	Array<BitSet<kMaxBindingsPerDescriptorSet, U8>, kMaxDescriptorSets> m_activeBindingMask = {{{false}, {false}, {false}}};
 	U32 m_pushConstantsSize = 0;
 
 	ShaderImpl(CString name)

+ 15 - 26
AnKi/Gr/Vulkan/ShaderProgramImpl.cpp

@@ -55,8 +55,8 @@ Error ShaderProgramImpl::init(const ShaderProgramInitInfo& inf)
 	{
 		// Ray tracing
 
-		m_shaders.resizeStorage(inf.m_rayTracingShaders.m_rayGenShaders.getSize()
-								+ inf.m_rayTracingShaders.m_missShaders.getSize() + 1); // Plus at least one hit shader
+		m_shaders.resizeStorage(inf.m_rayTracingShaders.m_rayGenShaders.getSize() + inf.m_rayTracingShaders.m_missShaders.getSize()
+								+ 1); // Plus at least one hit shader
 
 		for(const ShaderPtr& s : inf.m_rayTracingShaders.m_rayGenShaders)
 		{
@@ -162,8 +162,7 @@ Error ShaderProgramImpl::init(const ShaderProgramInitInfo& inf)
 		DescriptorSetLayoutInitInfo dsinf;
 		dsinf.m_bindings = WeakArray<DescriptorBinding>((counts[set]) ? &bindings[set][0] : nullptr, counts[set]);
 
-		ANKI_CHECK(
-			getGrManagerImpl().getDescriptorSetFactory().newDescriptorSetLayout(dsinf, m_descriptorSetLayouts[set]));
+		ANKI_CHECK(getGrManagerImpl().getDescriptorSetFactory().newDescriptorSetLayout(dsinf, m_descriptorSetLayouts[set]));
 
 		// Even if the dslayout is empty we will have to list it because we'll have to bind a DS for it.
 		m_refl.m_descriptorSetMask.set(set);
@@ -171,21 +170,17 @@ Error ShaderProgramImpl::init(const ShaderProgramInitInfo& inf)
 
 	// Create the ppline layout
 	//
-	WeakArray<DescriptorSetLayout> dsetLayouts((descriptorSetCount) ? &m_descriptorSetLayouts[0] : nullptr,
-											   descriptorSetCount);
-	ANKI_CHECK(getGrManagerImpl().getPipelineLayoutFactory().newPipelineLayout(dsetLayouts, m_refl.m_pushConstantsSize,
-																			   m_pplineLayout));
+	WeakArray<DescriptorSetLayout> dsetLayouts((descriptorSetCount) ? &m_descriptorSetLayouts[0] : nullptr, descriptorSetCount);
+	ANKI_CHECK(getGrManagerImpl().getPipelineLayoutFactory().newPipelineLayout(dsetLayouts, m_refl.m_pushConstantsSize, m_pplineLayout));
 
 	// Get some masks
 	//
 	const Bool graphicsProg = !!(m_stages & ShaderTypeBit::kAllGraphics);
 	if(graphicsProg)
 	{
-		m_refl.m_attributeMask =
-			static_cast<const ShaderImpl&>(*inf.m_graphicsShaders[ShaderType::kVertex]).m_attributeMask;
+		m_refl.m_attributeMask = static_cast<const ShaderImpl&>(*inf.m_graphicsShaders[ShaderType::kVertex]).m_attributeMask;
 
-		m_refl.m_colorAttachmentWritemask =
-			static_cast<const ShaderImpl&>(*inf.m_graphicsShaders[ShaderType::kFragment]).m_colorAttachmentWritemask;
+		m_refl.m_colorAttachmentWritemask = static_cast<const ShaderImpl&>(*inf.m_graphicsShaders[ShaderType::kFragment]).m_colorAttachmentWritemask;
 
 		const U32 attachmentCount = m_refl.m_colorAttachmentWritemask.getEnabledBitCount();
 		for(U32 i = 0; i < attachmentCount; ++i)
@@ -202,8 +197,7 @@ Error ShaderProgramImpl::init(const ShaderProgramInitInfo& inf)
 		{
 			const ShaderImpl& shaderImpl = static_cast<const ShaderImpl&>(*shader);
 
-			VkPipelineShaderStageCreateInfo& createInf =
-				m_graphics.m_shaderCreateInfos[m_graphics.m_shaderCreateInfoCount++];
+			VkPipelineShaderStageCreateInfo& createInf = m_graphics.m_shaderCreateInfos[m_graphics.m_shaderCreateInfoCount++];
 			createInf = {};
 			createInf.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
 			createInf.stage = VkShaderStageFlagBits(convertShaderTypeBit(ShaderTypeBit(1 << shader->getShaderType())));
@@ -249,8 +243,7 @@ Error ShaderProgramImpl::init(const ShaderProgramInitInfo& inf)
 		ci.stage.pSpecializationInfo = shaderImpl.getSpecConstInfo();
 
 		ANKI_TRACE_SCOPED_EVENT(VkPipelineCreate);
-		ANKI_VK_CHECK(vkCreateComputePipelines(getVkDevice(), getGrManagerImpl().getPipelineCache(), 1, &ci, nullptr,
-											   &m_compute.m_ppline));
+		ANKI_VK_CHECK(vkCreateComputePipelines(getVkDevice(), getGrManagerImpl().getPipelineCache(), 1, &ci, nullptr, &m_compute.m_ppline));
 		getGrManagerImpl().printPipelineShaderInfo(m_compute.m_ppline, getName(), ShaderTypeBit::kCompute);
 	}
 
@@ -282,8 +275,7 @@ Error ShaderProgramImpl::init(const ShaderProgramInitInfo& inf)
 		defaultGroup.anyHitShader = VK_SHADER_UNUSED_KHR;
 		defaultGroup.intersectionShader = VK_SHADER_UNUSED_KHR;
 
-		U32 groupCount = inf.m_rayTracingShaders.m_rayGenShaders.getSize()
-						 + inf.m_rayTracingShaders.m_missShaders.getSize()
+		U32 groupCount = inf.m_rayTracingShaders.m_rayGenShaders.getSize() + inf.m_rayTracingShaders.m_missShaders.getSize()
 						 + inf.m_rayTracingShaders.m_hitGroups.getSize();
 		GrDynamicArray<VkRayTracingShaderGroupCreateInfoKHR> groups;
 		groups.resize(groupCount, defaultGroup);
@@ -311,8 +303,7 @@ Error ShaderProgramImpl::init(const ShaderProgramInitInfo& inf)
 			groups[groupCount].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR;
 			if(inf.m_rayTracingShaders.m_hitGroups[i].m_anyHitShader)
 			{
-				groups[groupCount].anyHitShader =
-					*shaderUuidToMShadersIdx.find(inf.m_rayTracingShaders.m_hitGroups[i].m_anyHitShader->getUuid());
+				groups[groupCount].anyHitShader = *shaderUuidToMShadersIdx.find(inf.m_rayTracingShaders.m_hitGroups[i].m_anyHitShader->getUuid());
 			}
 
 			if(inf.m_rayTracingShaders.m_hitGroups[i].m_closestHitShader)
@@ -337,16 +328,14 @@ Error ShaderProgramImpl::init(const ShaderProgramInitInfo& inf)
 
 		{
 			ANKI_TRACE_SCOPED_EVENT(VkPipelineCreate);
-			ANKI_VK_CHECK(vkCreateRayTracingPipelinesKHR(
-				getVkDevice(), VK_NULL_HANDLE, getGrManagerImpl().getPipelineCache(), 1, &ci, nullptr, &m_rt.m_ppline));
+			ANKI_VK_CHECK(vkCreateRayTracingPipelinesKHR(getVkDevice(), VK_NULL_HANDLE, getGrManagerImpl().getPipelineCache(), 1, &ci, nullptr,
+														 &m_rt.m_ppline));
 		}
 
 		// Get RT handles
-		const U32 handleArraySize =
-			getGrManagerImpl().getPhysicalDeviceRayTracingProperties().shaderGroupHandleSize * groupCount;
+		const U32 handleArraySize = getGrManagerImpl().getPhysicalDeviceRayTracingProperties().shaderGroupHandleSize * groupCount;
 		m_rt.m_allHandles.resize(handleArraySize, 0_U8);
-		ANKI_VK_CHECK(vkGetRayTracingShaderGroupHandlesKHR(getVkDevice(), m_rt.m_ppline, 0, groupCount, handleArraySize,
-														   &m_rt.m_allHandles[0]));
+		ANKI_VK_CHECK(vkGetRayTracingShaderGroupHandlesKHR(getVkDevice(), m_rt.m_ppline, 0, groupCount, handleArraySize, &m_rt.m_allHandles[0]));
 	}
 
 	return Error::kNone;

+ 2 - 4
AnKi/Gr/Vulkan/ShaderProgramImpl.h

@@ -22,8 +22,7 @@ public:
 	BitSet<kMaxColorRenderTargets, U8> m_colorAttachmentWritemask = {false};
 	BitSet<kMaxVertexAttributes, U8> m_attributeMask = {false};
 	BitSet<kMaxDescriptorSets, U8> m_descriptorSetMask = {false};
-	Array<BitSet<kMaxBindingsPerDescriptorSet, U8>, kMaxDescriptorSets> m_activeBindingMask = {
-		{{false}, {false}, {false}}};
+	Array<BitSet<kMaxBindingsPerDescriptorSet, U8>, kMaxDescriptorSets> m_activeBindingMask = {{{false}, {false}, {false}}};
 	U32 m_pushConstantsSize = 0;
 };
 
@@ -117,8 +116,7 @@ private:
 	class
 	{
 	public:
-		Array<VkPipelineShaderStageCreateInfo, U32(ShaderType::kFragment - ShaderType::kVertex) + 1>
-			m_shaderCreateInfos;
+		Array<VkPipelineShaderStageCreateInfo, U32(ShaderType::kFragment - ShaderType::kVertex) + 1> m_shaderCreateInfos;
 		U32 m_shaderCreateInfoCount = 0;
 		PipelineFactory* m_pplineFactory = nullptr;
 	} m_graphics;

+ 11 - 17
AnKi/Gr/Vulkan/SwapchainFactory.cpp

@@ -39,8 +39,8 @@ Error MicroSwapchain::initInternal()
 	VkSurfaceCapabilitiesKHR surfaceProperties;
 	U32 surfaceWidth = 0, surfaceHeight = 0;
 	{
-		ANKI_VK_CHECK(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(getGrManagerImpl().getPhysicalDevice(),
-																getGrManagerImpl().getSurface(), &surfaceProperties));
+		ANKI_VK_CHECK(
+			vkGetPhysicalDeviceSurfaceCapabilitiesKHR(getGrManagerImpl().getPhysicalDevice(), getGrManagerImpl().getSurface(), &surfaceProperties));
 
 #if ANKI_WINDOWING_SYSTEM_HEADLESS
 		if(surfaceProperties.currentExtent.width != kMaxU32 || surfaceProperties.currentExtent.height != kMaxU32)
@@ -66,13 +66,11 @@ Error MicroSwapchain::initInternal()
 	VkColorSpaceKHR colorspace = VK_COLOR_SPACE_MAX_ENUM_KHR;
 	{
 		uint32_t formatCount;
-		ANKI_VK_CHECK(
-			vkGetPhysicalDeviceSurfaceFormatsKHR(pdev, getGrManagerImpl().getSurface(), &formatCount, nullptr));
+		ANKI_VK_CHECK(vkGetPhysicalDeviceSurfaceFormatsKHR(pdev, getGrManagerImpl().getSurface(), &formatCount, nullptr));
 
 		GrDynamicArray<VkSurfaceFormatKHR> formats;
 		formats.resize(formatCount);
-		ANKI_VK_CHECK(
-			vkGetPhysicalDeviceSurfaceFormatsKHR(pdev, getGrManagerImpl().getSurface(), &formatCount, &formats[0]));
+		ANKI_VK_CHECK(vkGetPhysicalDeviceSurfaceFormatsKHR(pdev, getGrManagerImpl().getSurface(), &formatCount, &formats[0]));
 
 		ANKI_VK_LOGV("Supported surface formats:");
 		Format akSurfaceFormat = Format::kNone;
@@ -82,8 +80,7 @@ Error MicroSwapchain::initInternal()
 			Format akFormat;
 			switch(formats[i].format)
 			{
-#define ANKI_FORMAT_DEF(type, id, componentCount, texelSize, blockWidth, blockHeight, blockSize, shaderType, \
-						depthStencil) \
+#define ANKI_FORMAT_DEF(type, id, componentCount, texelSize, blockWidth, blockHeight, blockSize, shaderType, depthStencil) \
 	case id: \
 		akFormat = Format::k##type; \
 		break;
@@ -96,8 +93,7 @@ Error MicroSwapchain::initInternal()
 			ANKI_VK_LOGV("\t%s", (akFormat != Format::kNone) ? getFormatInfo(akFormat).m_name : "Unknown format");
 
 			if(surfaceFormat == VK_FORMAT_UNDEFINED
-			   && (vkFormat == VK_FORMAT_R8G8B8A8_UNORM || vkFormat == VK_FORMAT_B8G8R8A8_UNORM
-				   || vkFormat == VK_FORMAT_A8B8G8R8_UNORM_PACK32))
+			   && (vkFormat == VK_FORMAT_R8G8B8A8_UNORM || vkFormat == VK_FORMAT_B8G8R8A8_UNORM || vkFormat == VK_FORMAT_A8B8G8R8_UNORM_PACK32))
 			{
 				surfaceFormat = vkFormat;
 				colorspace = formats[i].colorSpace;
@@ -124,8 +120,7 @@ Error MicroSwapchain::initInternal()
 		vkGetPhysicalDeviceSurfacePresentModesKHR(pdev, getGrManagerImpl().getSurface(), &presentModeCount, nullptr);
 		presentModeCount = min(presentModeCount, 4u);
 		Array<VkPresentModeKHR, 4> presentModes;
-		vkGetPhysicalDeviceSurfacePresentModesKHR(pdev, getGrManagerImpl().getSurface(), &presentModeCount,
-												  &presentModes[0]);
+		vkGetPhysicalDeviceSurfacePresentModesKHR(pdev, getGrManagerImpl().getSurface(), &presentModeCount, &presentModes[0]);
 
 		if(m_factory->m_vsync)
 		{
@@ -222,8 +217,8 @@ Error MicroSwapchain::initInternal()
 
 		m_textures.resize(count);
 
-		ANKI_VK_LOGI("Created a swapchain. Image count: %u, present mode: %u, size: %ux%u, vsync: %u", count,
-					 presentMode, surfaceWidth, surfaceHeight, U32(m_factory->m_vsync));
+		ANKI_VK_LOGI("Created a swapchain. Image count: %u, present mode: %u, size: %ux%u, vsync: %u", count, presentMode, surfaceWidth,
+					 surfaceHeight, U32(m_factory->m_vsync));
 
 		Array<VkImage, 64> images;
 		ANKI_ASSERT(count <= 64);
@@ -234,9 +229,8 @@ Error MicroSwapchain::initInternal()
 			init.m_width = surfaceWidth;
 			init.m_height = surfaceHeight;
 			init.m_format = Format(surfaceFormat); // anki::Format is compatible with VkFormat
-			init.m_usage = TextureUsageBit::kImageComputeWrite | TextureUsageBit::kImageTraceRaysWrite
-						   | TextureUsageBit::kFramebufferRead | TextureUsageBit::kFramebufferWrite
-						   | TextureUsageBit::kPresent;
+			init.m_usage = TextureUsageBit::kImageComputeWrite | TextureUsageBit::kImageTraceRaysWrite | TextureUsageBit::kFramebufferRead
+						   | TextureUsageBit::kFramebufferWrite | TextureUsageBit::kPresent;
 			init.m_type = TextureType::k2D;
 
 			TextureImpl* tex = newInstance<TextureImpl>(GrMemoryPool::getSingleton(), init.getName());

+ 12 - 18
AnKi/Gr/Vulkan/TextureImpl.cpp

@@ -118,8 +118,7 @@ Error TextureImpl::initInternal(VkImage externalImage, const TextureInitInfo& in
 		ANKI_ASSERT(!!(init.m_usage & TextureUsageBit::kPresent));
 	}
 
-	ANKI_ASSERT(getGrManagerImpl().getDeviceCapabilities().m_vrs
-				|| !(init.m_usage & TextureUsageBit::kFramebufferShadingRate));
+	ANKI_ASSERT(getGrManagerImpl().getDeviceCapabilities().m_vrs || !(init.m_usage & TextureUsageBit::kFramebufferShadingRate));
 
 	// Set some stuff
 	m_width = init.m_width;
@@ -188,8 +187,7 @@ Error TextureImpl::initInternal(VkImage externalImage, const TextureInitInfo& in
 		ANKI_ASSERT(m_singleSurfaceImageView.m_derivedTextureType == m_texType);
 
 		ANKI_VK_CHECKF(vkCreateImageView(getVkDevice(), &viewCi, nullptr, &m_singleSurfaceImageView.m_handle));
-		getGrManagerImpl().trySetVulkanHandleName(getName(), VK_OBJECT_TYPE_IMAGE_VIEW,
-												  ptrToNumber(m_singleSurfaceImageView.m_handle));
+		getGrManagerImpl().trySetVulkanHandleName(getName(), VK_OBJECT_TYPE_IMAGE_VIEW, ptrToNumber(m_singleSurfaceImageView.m_handle));
 	}
 
 	return Error::kNone;
@@ -210,9 +208,9 @@ Bool TextureImpl::imageSupported(const TextureInitInfo& init)
 {
 	VkImageFormatProperties props = {};
 
-	const VkResult res = vkGetPhysicalDeviceImageFormatProperties(
-		getGrManagerImpl().getPhysicalDevice(), m_vkFormat, convertTextureType(init.m_type), VK_IMAGE_TILING_OPTIMAL,
-		convertTextureUsage(init.m_usage, init.m_format), calcCreateFlags(init), &props);
+	const VkResult res = vkGetPhysicalDeviceImageFormatProperties(getGrManagerImpl().getPhysicalDevice(), m_vkFormat, convertTextureType(init.m_type),
+																  VK_IMAGE_TILING_OPTIMAL, convertTextureUsage(init.m_usage, init.m_format),
+																  calcCreateFlags(init), &props);
 
 	if(res == VK_ERROR_FORMAT_NOT_SUPPORTED)
 	{
@@ -306,8 +304,7 @@ Error TextureImpl::initImage(const TextureInitInfo& init)
 	vkGetImageMemoryRequirements2(getVkDevice(), &imageRequirementsInfo, &requirements);
 
 	U32 memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(requirements.memoryRequirements.memoryTypeBits,
-																		 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
-																		 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
+																		 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
 
 	// Fallback
 	if(memIdx == kMaxU32)
@@ -321,13 +318,12 @@ Error TextureImpl::initImage(const TextureInitInfo& init)
 	// Allocate
 	if(!dedicatedRequirements.prefersDedicatedAllocation)
 	{
-		getGrManagerImpl().getGpuMemoryManager().allocateMemory(
-			memIdx, requirements.memoryRequirements.size, U32(requirements.memoryRequirements.alignment), m_memHandle);
+		getGrManagerImpl().getGpuMemoryManager().allocateMemory(memIdx, requirements.memoryRequirements.size,
+																U32(requirements.memoryRequirements.alignment), m_memHandle);
 	}
 	else
 	{
-		getGrManagerImpl().getGpuMemoryManager().allocateMemoryDedicated(memIdx, requirements.memoryRequirements.size,
-																		 m_imageHandle, m_memHandle);
+		getGrManagerImpl().getGpuMemoryManager().allocateMemoryDedicated(memIdx, requirements.memoryRequirements.size, m_imageHandle, m_memHandle);
 	}
 
 	// Bind
@@ -336,8 +332,7 @@ Error TextureImpl::initImage(const TextureInitInfo& init)
 	return Error::kNone;
 }
 
-void TextureImpl::computeBarrierInfo(TextureUsageBit usage, Bool src, U32 level, VkPipelineStageFlags& stages,
-									 VkAccessFlags& accesses) const
+void TextureImpl::computeBarrierInfo(TextureUsageBit usage, Bool src, U32 level, VkPipelineStageFlags& stages, VkAccessFlags& accesses) const
 {
 	ANKI_ASSERT(level < m_mipCount);
 	ANKI_ASSERT(usageValid(usage));
@@ -466,9 +461,8 @@ void TextureImpl::computeBarrierInfo(TextureUsageBit usage, Bool src, U32 level,
 	}
 }
 
-void TextureImpl::computeBarrierInfo(TextureUsageBit before, TextureUsageBit after, U32 level,
-									 VkPipelineStageFlags& srcStages, VkAccessFlags& srcAccesses,
-									 VkPipelineStageFlags& dstStages, VkAccessFlags& dstAccesses) const
+void TextureImpl::computeBarrierInfo(TextureUsageBit before, TextureUsageBit after, U32 level, VkPipelineStageFlags& srcStages,
+									 VkAccessFlags& srcAccesses, VkPipelineStageFlags& dstStages, VkAccessFlags& dstAccesses) const
 {
 	computeBarrierInfo(before, true, level, srcStages, srcAccesses);
 	computeBarrierInfo(after, false, level, dstStages, dstAccesses);

+ 4 - 7
AnKi/Gr/Vulkan/TextureImpl.h

@@ -150,9 +150,8 @@ public:
 	}
 
 	/// By knowing the previous and new texture usage calculate the relavant info for a ppline barrier.
-	void computeBarrierInfo(TextureUsageBit before, TextureUsageBit after, U32 level, VkPipelineStageFlags& srcStages,
-							VkAccessFlags& srcAccesses, VkPipelineStageFlags& dstStages,
-							VkAccessFlags& dstAccesses) const;
+	void computeBarrierInfo(TextureUsageBit before, TextureUsageBit after, U32 level, VkPipelineStageFlags& srcStages, VkAccessFlags& srcAccesses,
+							VkPipelineStageFlags& dstStages, VkAccessFlags& dstAccesses) const;
 
 	/// Predict the image layout.
 	VkImageLayout computeLayout(TextureUsageBit usage, U level) const;
@@ -170,8 +169,7 @@ public:
 		range.layerCount = in.m_layerCount * in.m_faceCount;
 	}
 
-	void computeVkImageViewCreateInfo(const TextureSubresourceInfo& subresource, VkImageViewCreateInfo& viewCi,
-									  TextureType& newTextureType) const
+	void computeVkImageViewCreateInfo(const TextureSubresourceInfo& subresource, VkImageViewCreateInfo& viewCi, TextureType& newTextureType) const
 	{
 		ANKI_ASSERT(isSubresourceValid(subresource));
 
@@ -209,8 +207,7 @@ private:
 
 	Error initInternal(VkImage externalImage, const TextureInitInfo& init);
 
-	void computeBarrierInfo(TextureUsageBit usage, Bool src, U32 level, VkPipelineStageFlags& stages,
-							VkAccessFlags& accesses) const;
+	void computeBarrierInfo(TextureUsageBit usage, Bool src, U32 level, VkPipelineStageFlags& stages, VkAccessFlags& accesses) const;
 };
 /// @}
 

+ 1 - 2
AnKi/Gr/Vulkan/TextureView.cpp

@@ -24,8 +24,7 @@ TextureView* TextureView::newInstance(const TextureViewInitInfo& init)
 U32 TextureView::getOrCreateBindlessTextureIndex()
 {
 	ANKI_VK_SELF(TextureViewImpl);
-	ANKI_ASSERT(self.getTextureImpl().computeLayout(TextureUsageBit::kAllSampled, 0)
-				== VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+	ANKI_ASSERT(self.getTextureImpl().computeLayout(TextureUsageBit::kAllSampled, 0) == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
 	return self.getOrCreateBindlessIndex();
 }
 

+ 2 - 2
AnKi/Gr/Vulkan/TimestampQueryImpl.cpp

@@ -32,8 +32,8 @@ TimestampQueryResult TimestampQueryImpl::getResultInternal(Second& timestamp) co
 
 	VkResult res;
 	U64 value;
-	ANKI_VK_CHECKF(res = vkGetQueryPoolResults(getVkDevice(), m_handle.getQueryPool(), m_handle.getQueryIndex(), 1,
-											   sizeof(value), &value, sizeof(value), VK_QUERY_RESULT_64_BIT));
+	ANKI_VK_CHECKF(res = vkGetQueryPoolResults(getVkDevice(), m_handle.getQueryPool(), m_handle.getQueryIndex(), 1, sizeof(value), &value,
+											   sizeof(value), VK_QUERY_RESULT_64_BIT));
 
 	TimestampQueryResult qout = TimestampQueryResult::kNotAvailable;
 	if(res == VK_SUCCESS)

+ 25 - 42
AnKi/Importer/GltfImporter.cpp

@@ -136,8 +136,7 @@ static Error getNodeTransform(const cgltf_node& node, Transform& trf)
 	return Error::kNone;
 }
 
-static Bool stringsExist(const ImporterHashMap<CString, ImporterString>& map,
-						 const std::initializer_list<CString>& list)
+static Bool stringsExist(const ImporterHashMap<CString, ImporterString>& map, const std::initializer_list<CString>& list)
 {
 	for(CString item : list)
 	{
@@ -275,8 +274,7 @@ Error GltfImporter::writeAll()
 			m_hive->submitTask(
 				[](void* userData, [[maybe_unused]] U32 threadId, [[maybe_unused]] ThreadHive& hive,
 				   [[maybe_unused]] ThreadHiveSemaphore* signalSemaphore) {
-					ImportRequest<MaterialImportRequest>* req =
-						static_cast<ImportRequest<MaterialImportRequest>*>(userData);
+					ImportRequest<MaterialImportRequest>* req = static_cast<ImportRequest<MaterialImportRequest>*>(userData);
 					Error err = req->m_importer->m_errorInThread.load();
 
 					if(!err)
@@ -510,15 +508,13 @@ Error GltfImporter::parseArrayOfNumbers(CString str, ImporterDynamicArray<F64>&
 
 	if(expectedArraySize && *expectedArraySize != out.getSize())
 	{
-		ANKI_IMPORTER_LOGE("Failed to parse floats. Expecting %u floats got %u: %s", *expectedArraySize, out.getSize(),
-						   str.cstr());
+		ANKI_IMPORTER_LOGE("Failed to parse floats. Expecting %u floats got %u: %s", *expectedArraySize, out.getSize(), str.cstr());
 	}
 
 	return Error::kNone;
 }
 
-Error GltfImporter::visitNode(const cgltf_node& node, const Transform& parentTrf,
-							  const ImporterHashMap<CString, ImporterString>& parentExtras)
+Error GltfImporter::visitNode(const cgltf_node& node, const Transform& parentTrf, const ImporterHashMap<CString, ImporterString>& parentExtras)
 {
 	// Check error from a thread
 	const Error threadErr = m_errorInThread.load();
@@ -578,8 +574,8 @@ Error GltfImporter::visitNode(const cgltf_node& node, const Transform& parentTrf
 				ANKI_CHECK(writeTransform(parentTrf.combineTransformations(localTrf)));
 			}
 		}
-		else if(stringsExist(extras, {"skybox_solid_color", "skybox_image", "fog_min_density", "fog_max_density",
-									  "fog_height_of_min_density", "fog_height_of_max_density"}))
+		else if(stringsExist(extras, {"skybox_solid_color", "skybox_image", "fog_min_density", "fog_max_density", "fog_height_of_min_density",
+									  "fog_height_of_max_density"}))
 		{
 			// Atmosphere
 
@@ -611,8 +607,7 @@ Error GltfImporter::visitNode(const cgltf_node& node, const Transform& parentTrf
 					solidColor[count++] = f;
 				}
 
-				ANKI_CHECK(m_sceneFile.writeTextf("comp:setSolidColor(Vec3.new(%f, %f, %f))\n", solidColor.x(),
-												  solidColor.y(), solidColor.z()));
+				ANKI_CHECK(m_sceneFile.writeTextf("comp:setSolidColor(Vec3.new(%f, %f, %f))\n", solidColor.x(), solidColor.y(), solidColor.z()));
 			}
 			else if((it = extras.find("skybox_image")) != extras.getEnd())
 			{
@@ -675,8 +670,7 @@ Error GltfImporter::visitNode(const cgltf_node& node, const Transform& parentTrf
 			ANKI_CHECK(m_sceneFile.writeTextf("\nnode = scene:newSceneNode(\"%s\")\n", getNodeName(node).cstr()));
 
 			ANKI_CHECK(m_sceneFile.writeText("comp = node:newReflectionProbeComponent()\n"));
-			ANKI_CHECK(m_sceneFile.writeTextf("comp:setBoxVolumeSize(Vec3.new(%f, %f, %f))\n", boxSize.x(), boxSize.y(),
-											  boxSize.z()));
+			ANKI_CHECK(m_sceneFile.writeTextf("comp:setBoxVolumeSize(Vec3.new(%f, %f, %f))\n", boxSize.x(), boxSize.y(), boxSize.z()));
 
 			const Transform localTrf = Transform(tsl.xyz0(), Mat3x4(Vec3(0.0f), rot), 1.0f);
 			ANKI_CHECK(writeTransform(parentTrf.combineTransformations(localTrf)));
@@ -704,8 +698,7 @@ Error GltfImporter::visitNode(const cgltf_node& node, const Transform& parentTrf
 
 			ANKI_CHECK(m_sceneFile.writeTextf("\nnode = scene:newSceneNode(\"%s\")\n", getNodeName(node).cstr()));
 			ANKI_CHECK(m_sceneFile.writeText("comp = node:newGlobalIlluminationProbeComponent()\n"));
-			ANKI_CHECK(m_sceneFile.writeTextf("comp:setBoxVolumeSize(Vec3.new(%f, %f, %f))\n", boxSize.x(), boxSize.y(),
-											  boxSize.z()));
+			ANKI_CHECK(m_sceneFile.writeTextf("comp:setBoxVolumeSize(Vec3.new(%f, %f, %f))\n", boxSize.x(), boxSize.y(), boxSize.z()));
 
 			if(fadeDistance > 0.0f)
 			{
@@ -762,14 +755,12 @@ Error GltfImporter::visitNode(const cgltf_node& node, const Transform& parentTrf
 			ANKI_CHECK(m_sceneFile.writeText("comp = node:newDecalComponent()\n"));
 			if(diffuseAtlas)
 			{
-				ANKI_CHECK(m_sceneFile.writeTextf("comp:loadDiffuseImageResource(\"%s\", %f)\n", diffuseAtlas.cstr(),
-												  diffuseFactor));
+				ANKI_CHECK(m_sceneFile.writeTextf("comp:loadDiffuseImageResource(\"%s\", %f)\n", diffuseAtlas.cstr(), diffuseFactor));
 			}
 
 			if(specularRougnessMetallicAtlas)
 			{
-				ANKI_CHECK(m_sceneFile.writeTextf("comp:loadRoughnessMetallnessTexture(\"%s\", %f)\n",
-												  specularRougnessMetallicAtlas.cstr(),
+				ANKI_CHECK(m_sceneFile.writeTextf("comp:loadRoughnessMetallnessTexture(\"%s\", %f)\n", specularRougnessMetallicAtlas.cstr(),
 												  specularRougnessMetallicFactor));
 			}
 
@@ -846,8 +837,7 @@ Error GltfImporter::visitNode(const cgltf_node& node, const Transform& parentTrf
 Error GltfImporter::writeTransform(const Transform& trf)
 {
 	ANKI_CHECK(m_sceneFile.writeText("trf = Transform.new()\n"));
-	ANKI_CHECK(m_sceneFile.writeTextf("trf:setOrigin(Vec4.new(%f, %f, %f, 0))\n", trf.getOrigin().x(),
-									  trf.getOrigin().y(), trf.getOrigin().z()));
+	ANKI_CHECK(m_sceneFile.writeTextf("trf:setOrigin(Vec4.new(%f, %f, %f, 0))\n", trf.getOrigin().x(), trf.getOrigin().y(), trf.getOrigin().z()));
 
 	ANKI_CHECK(m_sceneFile.writeText("rot = Mat3x4.new()\n"));
 	ANKI_CHECK(m_sceneFile.writeText("rot:setAll("));
@@ -891,8 +881,7 @@ Error GltfImporter::writeModel(const cgltf_mesh& mesh) const
 		}
 		else
 		{
-			ANKI_CHECK(file.writeTextf("\t\t\t<mesh subMeshIndex=\"%u\">%s%s</mesh>\n", primIdx, m_rpath.cstr(),
-									   meshFname.cstr()));
+			ANKI_CHECK(file.writeTextf("\t\t\t<mesh subMeshIndex=\"%u\">%s%s</mesh>\n", primIdx, m_rpath.cstr(), meshFname.cstr()));
 		}
 
 		ImporterHashMap<CString, ImporterString> materialExtras(m_pool);
@@ -937,8 +926,7 @@ Error GltfImporter::writeSkeleton(const cgltf_skin& skin) const
 	File file;
 	ANKI_CHECK(file.open(fname.toCString(), FileOpenFlag::kWrite));
 
-	ANKI_CHECK(
-		file.writeTextf("%s\n<skeleton>\n", XmlDocument<MemoryPoolPtrWrapper<BaseMemoryPool>>::kXmlHeader.cstr()));
+	ANKI_CHECK(file.writeTextf("%s\n<skeleton>\n", XmlDocument<MemoryPoolPtrWrapper<BaseMemoryPool>>::kXmlHeader.cstr()));
 	ANKI_CHECK(file.writeTextf("\t<bones>\n"));
 
 	for(U32 i = 0; i < skin.joints_count; ++i)
@@ -1019,8 +1007,7 @@ Error GltfImporter::writeLight(const cgltf_node& node, const ImporterHashMap<CSt
 	Vec3 color(light.color[0], light.color[1], light.color[2]);
 	color *= light.intensity;
 	color *= m_lightIntensityScale;
-	ANKI_CHECK(
-		m_sceneFile.writeTextf("lcomp:setDiffuseColor(Vec4.new(%f, %f, %f, 1))\n", color.x(), color.y(), color.z()));
+	ANKI_CHECK(m_sceneFile.writeTextf("lcomp:setDiffuseColor(Vec4.new(%f, %f, %f, 1))\n", color.x(), color.y(), color.z()));
 
 	auto shadow = extras.find("shadow");
 	if(shadow != extras.getEnd())
@@ -1037,13 +1024,11 @@ Error GltfImporter::writeLight(const cgltf_node& node, const ImporterHashMap<CSt
 
 	if(light.type == cgltf_light_type_point)
 	{
-		ANKI_CHECK(m_sceneFile.writeTextf("lcomp:setRadius(%f)\n",
-										  (light.range > 0.0f) ? light.range : computeLightRadius(color)));
+		ANKI_CHECK(m_sceneFile.writeTextf("lcomp:setRadius(%f)\n", (light.range > 0.0f) ? light.range : computeLightRadius(color)));
 	}
 	else if(light.type == cgltf_light_type_spot)
 	{
-		ANKI_CHECK(m_sceneFile.writeTextf("lcomp:setDistance(%f)\n",
-										  (light.range > 0.0f) ? light.range : computeLightRadius(color)));
+		ANKI_CHECK(m_sceneFile.writeTextf("lcomp:setDistance(%f)\n", (light.range > 0.0f) ? light.range : computeLightRadius(color)));
 
 		const F32 outer = light.spot_outer_cone_angle * 2.0f;
 		ANKI_CHECK(m_sceneFile.writeTextf("lcomp:setOuterAngle(%f)\n", outer));
@@ -1093,8 +1078,8 @@ Error GltfImporter::writeLight(const cgltf_node& node, const ImporterHashMap<CSt
 			const U32 count = 4;
 			ANKI_CHECK(parseArrayOfNumbers(lsColor->toCString(), numbers, &count));
 
-			ANKI_CHECK(m_sceneFile.writeTextf("lfcomp:setColorMultiplier(Vec4.new(%f, %f, %f, %f))\n", numbers[0],
-											  numbers[1], numbers[2], numbers[3]));
+			ANKI_CHECK(
+				m_sceneFile.writeTextf("lfcomp:setColorMultiplier(Vec4.new(%f, %f, %f, %f))\n", numbers[0], numbers[1], numbers[2], numbers[3]));
 		}
 	}
 
@@ -1109,8 +1094,8 @@ Error GltfImporter::writeLight(const cgltf_node& node, const ImporterHashMap<CSt
 			ImporterDynamicArray<F64> numbers(m_pool);
 			const U32 count = 4;
 			ANKI_CHECK(parseArrayOfNumbers(lightEventIntensity->toCString(), numbers, &count));
-			ANKI_CHECK(m_sceneFile.writeTextf("event:setIntensityMultiplier(Vec4.new(%f, %f, %f, %f))\n", numbers[0],
-											  numbers[1], numbers[2], numbers[3]));
+			ANKI_CHECK(
+				m_sceneFile.writeTextf("event:setIntensityMultiplier(Vec4.new(%f, %f, %f, %f))\n", numbers[0], numbers[1], numbers[2], numbers[3]));
 		}
 
 		if(lightEventFrequency != extras.getEnd())
@@ -1125,8 +1110,7 @@ Error GltfImporter::writeLight(const cgltf_node& node, const ImporterHashMap<CSt
 	return Error::kNone;
 }
 
-Error GltfImporter::writeCamera(const cgltf_node& node,
-								[[maybe_unused]] const ImporterHashMap<CString, ImporterString>& parentExtras)
+Error GltfImporter::writeCamera(const cgltf_node& node, [[maybe_unused]] const ImporterHashMap<CString, ImporterString>& parentExtras)
 {
 	if(node.camera->type != cgltf_camera_type_perspective)
 	{
@@ -1141,8 +1125,8 @@ Error GltfImporter::writeCamera(const cgltf_node& node,
 	ANKI_CHECK(m_sceneFile.writeText("scene:setActiveCameraNode(node)\n"));
 	ANKI_CHECK(m_sceneFile.writeText("comp = node:newCameraComponent()\n"));
 
-	ANKI_CHECK(m_sceneFile.writeTextf("comp:setPerspective(%f, %f, getMainRenderer():getAspectRatio() * %f, %f)\n",
-									  cam.znear, cam.zfar, cam.yfov, cam.yfov));
+	ANKI_CHECK(m_sceneFile.writeTextf("comp:setPerspective(%f, %f, getMainRenderer():getAspectRatio() * %f, %f)\n", cam.znear, cam.zfar, cam.yfov,
+									  cam.yfov));
 
 	return Error::kNone;
 }
@@ -1157,8 +1141,7 @@ Error GltfImporter::writeModelNode(const cgltf_node& node, const ImporterHashMap
 	const ImporterString modelFname = computeModelResourceFilename(*node.mesh);
 
 	ANKI_CHECK(m_sceneFile.writeTextf("\nnode = scene:newSceneNode(\"%s\")\n", getNodeName(node).cstr()));
-	ANKI_CHECK(m_sceneFile.writeTextf("node:newModelComponent():loadModelResource(\"%s%s\")\n", m_rpath.cstr(),
-									  modelFname.cstr()));
+	ANKI_CHECK(m_sceneFile.writeTextf("node:newModelComponent():loadModelResource(\"%s%s\")\n", m_rpath.cstr(), modelFname.cstr()));
 
 	if(node.skin)
 	{

+ 2 - 4
AnKi/Importer/GltfImporter.h

@@ -183,8 +183,7 @@ private:
 
 	// Scene
 	Error writeTransform(const Transform& trf);
-	Error visitNode(const cgltf_node& node, const Transform& parentTrf,
-					const ImporterHashMap<CString, ImporterString>& parentExtras);
+	Error visitNode(const cgltf_node& node, const Transform& parentTrf, const ImporterHashMap<CString, ImporterString>& parentExtras);
 	Error writeLight(const cgltf_node& node, const ImporterHashMap<CString, ImporterString>& parentExtras);
 	Error writeCamera(const cgltf_node& node, const ImporterHashMap<CString, ImporterString>& parentExtras);
 	Error writeModelNode(const cgltf_node& node, const ImporterHashMap<CString, ImporterString>& parentExtras);
@@ -194,8 +193,7 @@ private:
 template<typename T, typename TFunc>
 void GltfImporter::visitAccessor(const cgltf_accessor& accessor, TFunc func)
 {
-	const U8* base =
-		static_cast<const U8*>(accessor.buffer_view->buffer->data) + accessor.offset + accessor.buffer_view->offset;
+	const U8* base = static_cast<const U8*>(accessor.buffer_view->buffer->data) + accessor.offset + accessor.buffer_view->offset;
 
 	PtrSize stride = accessor.buffer_view->stride;
 	if(stride == 0)

+ 11 - 16
AnKi/Importer/GltfImporterAnimation.cpp

@@ -36,8 +36,7 @@ public:
 
 /// Optimize out same animation keys.
 template<typename T, typename TZeroFunc, typename TLerpFunc>
-static void optimizeChannel(ImporterDynamicArray<GltfAnimKey<T>>& arr, const T& identity, TZeroFunc isZeroFunc,
-							TLerpFunc lerpFunc)
+static void optimizeChannel(ImporterDynamicArray<GltfAnimKey<T>>& arr, const T& identity, TZeroFunc isZeroFunc, TLerpFunc lerpFunc)
 {
 	constexpr F32 kMinSkippedToTotalRatio = 0.1f;
 
@@ -93,8 +92,7 @@ static void optimizeChannel(ImporterDynamicArray<GltfAnimKey<T>>& arr, const T&
 		ANKI_ASSERT(newArr.getSize() <= arr.getSize());
 
 		// Check if identity
-		if(newArr.getSize() == 2 && isZeroFunc(newArr[0].m_value - newArr[1].m_value)
-		   && isZeroFunc(newArr[0].m_value - identity))
+		if(newArr.getSize() == 2 && isZeroFunc(newArr[0].m_value - newArr[1].m_value) && isZeroFunc(newArr[0].m_value - identity))
 		{
 			newArr.destroy();
 		}
@@ -246,11 +244,9 @@ Error GltfImporter::writeAnimation(const cgltf_animation& anim)
 				Vec3 scale = scales[i];
 				scale.normalize();
 
-				if(!scaleErrorReported
-				   && (absolute(scale[0] - scale[1]) > scaleEpsilon || absolute(scale[0] - scale[2]) > scaleEpsilon))
+				if(!scaleErrorReported && (absolute(scale[0] - scale[1]) > scaleEpsilon || absolute(scale[0] - scale[2]) > scaleEpsilon))
 				{
-					ANKI_IMPORTER_LOGW("Expecting uniform scale (%f %f %f)", scales[i].x(), scales[i].y(),
-									   scales[i].z());
+					ANKI_IMPORTER_LOGW("Expecting uniform scale (%f %f %f)", scales[i].x(), scales[i].y(), scales[i].z());
 					scaleErrorReported = true;
 				}
 
@@ -307,8 +303,7 @@ Error GltfImporter::writeAnimation(const cgltf_animation& anim)
 	File file;
 	ANKI_CHECK(file.open(fname.toCString(), FileOpenFlag::kWrite));
 
-	ANKI_CHECK(
-		file.writeTextf("%s\n<animation>\n", XmlDocument<MemoryPoolPtrWrapper<BaseMemoryPool>>::kXmlHeader.cstr()));
+	ANKI_CHECK(file.writeTextf("%s\n<animation>\n", XmlDocument<MemoryPoolPtrWrapper<BaseMemoryPool>>::kXmlHeader.cstr()));
 	ANKI_CHECK(file.writeText("\t<channels>\n"));
 
 	for(const GltfAnimChannel& channel : tempChannels)
@@ -321,8 +316,8 @@ Error GltfImporter::writeAnimation(const cgltf_animation& anim)
 			ANKI_CHECK(file.writeText("\t\t\t<positionKeys>\n"));
 			for(const GltfAnimKey<Vec3>& key : channel.m_positions)
 			{
-				ANKI_CHECK(file.writeTextf("\t\t\t\t<key time=\"%f\">%f %f %f</key>\n", key.m_time, key.m_value.x(),
-										   key.m_value.y(), key.m_value.z()));
+				ANKI_CHECK(
+					file.writeTextf("\t\t\t\t<key time=\"%f\">%f %f %f</key>\n", key.m_time, key.m_value.x(), key.m_value.y(), key.m_value.z()));
 			}
 			ANKI_CHECK(file.writeText("\t\t\t</positionKeys>\n"));
 		}
@@ -333,8 +328,8 @@ Error GltfImporter::writeAnimation(const cgltf_animation& anim)
 			ANKI_CHECK(file.writeText("\t\t\t<rotationKeys>\n"));
 			for(const GltfAnimKey<Quat>& key : channel.m_rotations)
 			{
-				ANKI_CHECK(file.writeTextf("\t\t\t\t<key time=\"%f\">%f %f %f %f</key>\n", key.m_time, key.m_value.x(),
-										   key.m_value.y(), key.m_value.z(), key.m_value.w()));
+				ANKI_CHECK(file.writeTextf("\t\t\t\t<key time=\"%f\">%f %f %f %f</key>\n", key.m_time, key.m_value.x(), key.m_value.y(),
+										   key.m_value.z(), key.m_value.w()));
 			}
 			ANKI_CHECK(file.writeText("\t\t\t</rotationKeys>\n"));
 		}
@@ -372,8 +367,8 @@ Error GltfImporter::writeAnimation(const cgltf_animation& anim)
 		}
 
 		ANKI_CHECK(m_sceneFile.writeTextf("\nnode = scene:tryFindSceneNode(\"%s\")\n", node.name));
-		ANKI_CHECK(m_sceneFile.writeTextf("getEventManager():newAnimationEvent(\"%s%s\", \"%s\", node)\n",
-										  m_rpath.cstr(), animFname.cstr(), node.name));
+		ANKI_CHECK(
+			m_sceneFile.writeTextf("getEventManager():newAnimationEvent(\"%s%s\", \"%s\", node)\n", m_rpath.cstr(), animFname.cstr(), node.name));
 	}
 
 	return Error::kNone;

+ 24 - 38
AnKi/Importer/GltfImporterMaterial.cpp

@@ -123,8 +123,7 @@ static Error importImage(BaseMemoryPool& pool, CString in, CString out, Bool alp
 	config.m_tempDirectory = tmp;
 
 #if ANKI_OS_WINDOWS
-	config.m_compressonatorFilename =
-		ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Windows64/Compressonator/compressonatorcli.exe";
+	config.m_compressonatorFilename = ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Windows64/Compressonator/compressonatorcli.exe";
 	config.m_astcencFilename = ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Windows64/astcenc-avx2.exe";
 #elif ANKI_OS_LINUX
 	config.m_compressonatorFilename = ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Linux64/Compressonator/compressonatorcli";
@@ -183,8 +182,7 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 		ImporterString uri(m_pool);
 		uri.sprintf("%s%s", m_texrpath.cstr(), fname.cstr());
 
-		xml.replaceAll("%diff%",
-					   ImporterString(m_pool).sprintf("<input name=\"m_diffTex\" value=\"%s\"/>", uri.cstr()));
+		xml.replaceAll("%diff%", ImporterString(m_pool).sprintf("<input name=\"m_diffTex\" value=\"%s\"/>", uri.cstr()));
 		xml.replaceAll("%diffTexMutator%", "1");
 
 		Vec4 constantColor;
@@ -206,8 +204,8 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 	{
 		const F32* diffCol = &mtl.pbr_metallic_roughness.base_color_factor[0];
 
-		xml.replaceAll("%diff%", ImporterString(m_pool).sprintf("<input name=\"m_diffColor\" value=\"%f %f %f\"/>",
-																diffCol[0], diffCol[1], diffCol[2]));
+		xml.replaceAll("%diff%",
+					   ImporterString(m_pool).sprintf("<input name=\"m_diffColor\" value=\"%f %f %f\"/>", diffCol[0], diffCol[1], diffCol[2]));
 
 		xml.replaceAll("%diffTexMutator%", "0");
 		xml.replaceAll("%alphaTestMutator%", "0");
@@ -241,8 +239,8 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 			specular = Vec3(0.04f);
 		}
 
-		xml.replaceAll("%spec%", ImporterString(m_pool).sprintf("<input name=\"m_specColor\" value=\"%f %f %f\"/>",
-																specular.x(), specular.y(), specular.z()));
+		xml.replaceAll("%spec%",
+					   ImporterString(m_pool).sprintf("<input name=\"m_specColor\" value=\"%f %f %f\"/>", specular.x(), specular.y(), specular.z()));
 
 		xml.replaceAll("%specTexMutator%", "0");
 	}
@@ -264,11 +262,9 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 	if(mtl.pbr_metallic_roughness.metallic_roughness_texture.texture && constantRoughness < 0.0f)
 	{
 		ImporterString uri(m_pool);
-		uri.sprintf("%s%s", m_texrpath.cstr(),
-					getTextureUri(mtl.pbr_metallic_roughness.metallic_roughness_texture).cstr());
+		uri.sprintf("%s%s", m_texrpath.cstr(), getTextureUri(mtl.pbr_metallic_roughness.metallic_roughness_texture).cstr());
 
-		xml.replaceAll("%roughness%",
-					   ImporterString(m_pool).sprintf("<input name=\"m_roughnessTex\" value=\"%s\"/>", uri.cstr()));
+		xml.replaceAll("%roughness%", ImporterString(m_pool).sprintf("<input name=\"m_roughnessTex\" value=\"%s\"/>", uri.cstr()));
 
 		xml.replaceAll("%roughnessTexMutator%", "1");
 
@@ -276,12 +272,10 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 	}
 	else
 	{
-		const F32 roughness = (constantRoughness >= 0.0f)
-								  ? constantRoughness * mtl.pbr_metallic_roughness.roughness_factor
-								  : mtl.pbr_metallic_roughness.roughness_factor;
+		const F32 roughness = (constantRoughness >= 0.0f) ? constantRoughness * mtl.pbr_metallic_roughness.roughness_factor
+														  : mtl.pbr_metallic_roughness.roughness_factor;
 
-		xml.replaceAll("%roughness%",
-					   ImporterString(m_pool).sprintf("<input name=\"m_roughness\" value=\"%f\"/>", roughness));
+		xml.replaceAll("%roughness%", ImporterString(m_pool).sprintf("<input name=\"m_roughness\" value=\"%f\"/>", roughness));
 
 		xml.replaceAll("%roughnessTexMutator%", "0");
 	}
@@ -290,11 +284,9 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 	if(mtl.pbr_metallic_roughness.metallic_roughness_texture.texture && constantMetaliness < 0.0f)
 	{
 		ImporterString uri(m_pool);
-		uri.sprintf("%s%s", m_texrpath.cstr(),
-					getTextureUri(mtl.pbr_metallic_roughness.metallic_roughness_texture).cstr());
+		uri.sprintf("%s%s", m_texrpath.cstr(), getTextureUri(mtl.pbr_metallic_roughness.metallic_roughness_texture).cstr());
 
-		xml.replaceAll("%metallic%",
-					   ImporterString(m_pool).sprintf("<input name=\"m_metallicTex\" value=\"%s\"/>", uri.cstr()));
+		xml.replaceAll("%metallic%", ImporterString(m_pool).sprintf("<input name=\"m_metallicTex\" value=\"%s\"/>", uri.cstr()));
 
 		xml.replaceAll("%metalTexMutator%", "1");
 
@@ -302,12 +294,10 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 	}
 	else
 	{
-		const F32 metalines = (constantMetaliness >= 0.0f)
-								  ? constantMetaliness * mtl.pbr_metallic_roughness.metallic_factor
-								  : mtl.pbr_metallic_roughness.metallic_factor;
+		const F32 metalines = (constantMetaliness >= 0.0f) ? constantMetaliness * mtl.pbr_metallic_roughness.metallic_factor
+														   : mtl.pbr_metallic_roughness.metallic_factor;
 
-		xml.replaceAll("%metallic%",
-					   ImporterString(m_pool).sprintf("<input name=\"m_metallic\" value=\"%f\"/>", metalines));
+		xml.replaceAll("%metallic%", ImporterString(m_pool).sprintf("<input name=\"m_metallic\" value=\"%f\"/>", metalines));
 
 		xml.replaceAll("%metalTexMutator%", "0");
 	}
@@ -331,8 +321,7 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 			ImporterString uri(m_pool);
 			uri.sprintf("%s%s", m_texrpath.cstr(), getTextureUri(mtl.normal_texture).cstr());
 
-			xml.replaceAll("%normal%",
-						   ImporterString(m_pool).sprintf("<input name=\"m_normalTex\" value=\"%s\"/>", uri.cstr()));
+			xml.replaceAll("%normal%", ImporterString(m_pool).sprintf("<input name=\"m_normalTex\" value=\"%s\"/>", uri.cstr()));
 
 			xml.replaceAll("%normalTexMutator%", "1");
 
@@ -363,8 +352,7 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 		ImporterString uri(m_pool);
 		uri.sprintf("%s%s", m_texrpath.cstr(), getTextureUri(mtl.emissive_texture).cstr());
 
-		xml.replaceAll("%emission%",
-					   ImporterString(m_pool).sprintf("<input name=\"m_emissiveTex\" value=\"%s\"/>", uri.cstr()));
+		xml.replaceAll("%emission%", ImporterString(m_pool).sprintf("<input name=\"m_emissiveTex\" value=\"%s\"/>", uri.cstr()));
 
 		xml.replaceAll("%emissiveTexMutator%", "1");
 
@@ -381,8 +369,8 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 	{
 		const F32* emissionCol = &mtl.emissive_factor[0];
 
-		xml.replaceAll("%emission%", ImporterString(m_pool).sprintf("<input name=\"m_emission\" value=\"%f %f %f\"/>",
-																	emissionCol[0], emissionCol[1], emissionCol[2]));
+		xml.replaceAll("%emission%", ImporterString(m_pool).sprintf("<input name=\"m_emission\" value=\"%f %f %f\"/>", emissionCol[0], emissionCol[1],
+																	emissionCol[2]));
 
 		xml.replaceAll("%emissiveTexMutator%", "0");
 	}
@@ -400,8 +388,7 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 			subsurface = 0.0f;
 		}
 
-		xml.replaceAll("%subsurface%",
-					   ImporterString(m_pool).sprintf("<input name=\"m_subsurface\" value=\"%f\"/>", subsurface));
+		xml.replaceAll("%subsurface%", ImporterString(m_pool).sprintf("<input name=\"m_subsurface\" value=\"%f\"/>", subsurface));
 	}
 
 	// Height texture
@@ -411,10 +398,9 @@ Error GltfImporter::writeMaterial(const cgltf_material& mtl, Bool writeRayTracin
 		ImporterString uri(m_pool);
 		uri.sprintf("%s%s", m_texrpath.cstr(), it->cstr());
 
-		xml.replaceAll("%height%",
-					   ImporterString(m_pool).sprintf("<input name=\"m_heightTex\" value=\"%s\" \"/>\n"
-													  "\t\t<input name=\"m_heightmapScale\" value=\"0.05\"/>",
-													  uri.cstr()));
+		xml.replaceAll("%height%", ImporterString(m_pool).sprintf("<input name=\"m_heightTex\" value=\"%s\" \"/>\n"
+																  "\t\t<input name=\"m_heightmapScale\" value=\"0.05\"/>",
+																  uri.cstr()));
 
 		xml.replaceAll("%parallaxMutator%", "1");
 	}

+ 20 - 27
AnKi/Importer/GltfImporterMesh.cpp

@@ -138,8 +138,8 @@ static void reindexSubmesh(SubMesh& submesh, BaseMemoryPool* pool)
 	ImporterDynamicArray<U32> remap(pool);
 	remap.resize(submesh.m_verts.getSize(), 0);
 
-	const U32 vertCount = U32(meshopt_generateVertexRemap(&remap[0], &submesh.m_indices[0], submesh.m_indices.getSize(),
-														  &submesh.m_verts[0], submesh.m_verts.getSize(), vertSize));
+	const U32 vertCount = U32(meshopt_generateVertexRemap(&remap[0], &submesh.m_indices[0], submesh.m_indices.getSize(), &submesh.m_verts[0],
+														  submesh.m_verts.getSize(), vertSize));
 
 	ImporterDynamicArray<U32> newIdxArray(pool);
 	newIdxArray.resize(submesh.m_indices.getSize(), 0);
@@ -163,8 +163,7 @@ static void optimizeSubmesh(SubMesh& submesh, BaseMemoryPool* pool)
 		ImporterDynamicArray<U32> newIdxArray(pool);
 		newIdxArray.resize(submesh.m_indices.getSize(), 0);
 
-		meshopt_optimizeVertexCache(&newIdxArray[0], &submesh.m_indices[0], submesh.m_indices.getSize(),
-									submesh.m_verts.getSize());
+		meshopt_optimizeVertexCache(&newIdxArray[0], &submesh.m_indices[0], submesh.m_indices.getSize(), submesh.m_verts.getSize());
 
 		submesh.m_indices = std::move(newIdxArray);
 	}
@@ -174,8 +173,8 @@ static void optimizeSubmesh(SubMesh& submesh, BaseMemoryPool* pool)
 		ImporterDynamicArray<U32> newIdxArray(pool);
 		newIdxArray.resize(submesh.m_indices.getSize(), 0);
 
-		meshopt_optimizeOverdraw(&newIdxArray[0], &submesh.m_indices[0], submesh.m_indices.getSize(),
-								 &submesh.m_verts[0].m_position.x(), submesh.m_verts.getSize(), vertSize, 1.05f);
+		meshopt_optimizeOverdraw(&newIdxArray[0], &submesh.m_indices[0], submesh.m_indices.getSize(), &submesh.m_verts[0].m_position.x(),
+								 submesh.m_verts.getSize(), vertSize, 1.05f);
 
 		submesh.m_indices = std::move(newIdxArray);
 	}
@@ -185,10 +184,10 @@ static void optimizeSubmesh(SubMesh& submesh, BaseMemoryPool* pool)
 		ImporterDynamicArray<TempVertex> newVertArray(pool);
 		newVertArray.resize(submesh.m_verts.getSize());
 
-		const U32 newVertCount = U32(meshopt_optimizeVertexFetch(&newVertArray[0],
-																 &submesh.m_indices[0], // Inplace
-																 submesh.m_indices.getSize(), &submesh.m_verts[0],
-																 submesh.m_verts.getSize(), vertSize));
+		const U32 newVertCount =
+			U32(meshopt_optimizeVertexFetch(&newVertArray[0],
+											&submesh.m_indices[0], // Inplace
+											submesh.m_indices.getSize(), &submesh.m_verts[0], submesh.m_verts.getSize(), vertSize));
 
 		if(newVertCount != submesh.m_verts.getSize())
 		{
@@ -212,9 +211,8 @@ static void decimateSubmesh(F32 factor, SubMesh& submesh, BaseMemoryPool* pool)
 
 	// Decimate
 	ImporterDynamicArray<U32> newIndices(pool);
-	newIndices.resize(U32(meshopt_simplify(&newIndices[0], &submesh.m_indices[0], submesh.m_indices.getSize(),
-										   &submesh.m_verts[0].m_position.x(), submesh.m_verts.getSize(),
-										   sizeof(TempVertex), targetIndexCount, 1e-2f)));
+	newIndices.resize(U32(meshopt_simplify(&newIndices[0], &submesh.m_indices[0], submesh.m_indices.getSize(), &submesh.m_verts[0].m_position.x(),
+										   submesh.m_verts.getSize(), sizeof(TempVertex), targetIndexCount, 1e-2f)));
 
 	// Re-pack
 	ImporterDynamicArray<U32> reindexedIndices(pool);
@@ -415,8 +413,8 @@ static Bool isConvex(const ImporterList<SubMesh>& submeshes)
 	return convex;
 }
 
-static void writeVertexAttribAndBufferInfoToHeader(VertexStreamId stream, MeshBinaryHeader& header,
-												   const Vec4& scale = Vec4(1.0f), const Vec4& translation = Vec4(0.0f))
+static void writeVertexAttribAndBufferInfoToHeader(VertexStreamId stream, MeshBinaryHeader& header, const Vec4& scale = Vec4(1.0f),
+												   const Vec4& translation = Vec4(0.0f))
 {
 	MeshBinaryVertexAttribute& attrib = header.m_vertexAttributes[stream];
 	attrib.m_bufferIndex = U32(stream);
@@ -433,8 +431,7 @@ U32 GltfImporter::getMeshTotalVertexCount(const cgltf_mesh& mesh)
 {
 	U32 totalVertexCount = 0;
 
-	for(const cgltf_primitive* primitive = mesh.primitives; primitive < mesh.primitives + mesh.primitives_count;
-		++primitive)
+	for(const cgltf_primitive* primitive = mesh.primitives; primitive < mesh.primitives + mesh.primitives_count; ++primitive)
 	{
 		totalVertexCount += U32(primitive->attributes[0].data->count);
 	}
@@ -455,8 +452,7 @@ Error GltfImporter::writeMesh(const cgltf_mesh& mesh) const
 	Bool hasBoneWeights = false;
 
 	// Iterate primitives. Every primitive is a submesh
-	for(const cgltf_primitive* primitive = mesh.primitives; primitive < mesh.primitives + mesh.primitives_count;
-		++primitive)
+	for(const cgltf_primitive* primitive = mesh.primitives; primitive < mesh.primitives + mesh.primitives_count; ++primitive)
 	{
 		if(primitive->type != cgltf_primitive_type_triangles)
 		{
@@ -468,8 +464,7 @@ Error GltfImporter::writeMesh(const cgltf_mesh& mesh) const
 
 		U minVertCount = kMaxU;
 		U maxVertCount = kMinU;
-		for(const cgltf_attribute* attrib = primitive->attributes;
-			attrib < primitive->attributes + primitive->attributes_count; ++attrib)
+		for(const cgltf_attribute* attrib = primitive->attributes; attrib < primitive->attributes + primitive->attributes_count; ++attrib)
 		{
 			minVertCount = min(minVertCount, U(attrib->data->count));
 			maxVertCount = max(maxVertCount, U(attrib->data->count));
@@ -487,8 +482,7 @@ Error GltfImporter::writeMesh(const cgltf_mesh& mesh) const
 		//
 		// Gather positions + normals + UVs
 		//
-		for(const cgltf_attribute* attrib = primitive->attributes;
-			attrib < primitive->attributes + primitive->attributes_count; ++attrib)
+		for(const cgltf_attribute* attrib = primitive->attributes; attrib < primitive->attributes + primitive->attributes_count; ++attrib)
 		{
 			if(attrib->type == cgltf_attribute_type_position)
 			{
@@ -568,8 +562,8 @@ Error GltfImporter::writeMesh(const cgltf_mesh& mesh) const
 				return Error::kUserData;
 			}
 			submesh.m_indices.resize(U32(primitive->indices->count));
-			const U8* base = static_cast<const U8*>(primitive->indices->buffer_view->buffer->data)
-							 + primitive->indices->offset + primitive->indices->buffer_view->offset;
+			const U8* base = static_cast<const U8*>(primitive->indices->buffer_view->buffer->data) + primitive->indices->offset
+							 + primitive->indices->buffer_view->offset;
 			for(U32 i = 0; i < primitive->indices->count; ++i)
 			{
 				U32 idx;
@@ -667,8 +661,7 @@ Error GltfImporter::writeMesh(const cgltf_mesh& mesh) const
 	posScale = (posScale < 1.0f) ? 1.0f : (1.0f / posScale);
 	const Vec3 posTranslation = -aabbMin;
 
-	writeVertexAttribAndBufferInfoToHeader(VertexStreamId::kPosition, header, Vec4(1.0f / posScale),
-										   (-posTranslation).xyz1());
+	writeVertexAttribAndBufferInfoToHeader(VertexStreamId::kPosition, header, Vec4(1.0f / posScale), (-posTranslation).xyz1());
 	writeVertexAttribAndBufferInfoToHeader(VertexStreamId::kNormal, header);
 	writeVertexAttribAndBufferInfoToHeader(VertexStreamId::kTangent, header);
 	writeVertexAttribAndBufferInfoToHeader(VertexStreamId::kUv, header);

+ 61 - 105
AnKi/Importer/ImageImporter.cpp

@@ -158,33 +158,27 @@ static Error checkConfig(const ImageImporterConfig& config)
 
 	// Type
 	ANKI_CFG_ASSERT(config.m_type != ImageBinaryType::kNone, "Wrong image type");
-	ANKI_CFG_ASSERT(config.m_inputFilenames.getSize() == 1 || config.m_type != ImageBinaryType::k2D,
-					"2D images require only one input image");
+	ANKI_CFG_ASSERT(config.m_inputFilenames.getSize() == 1 || config.m_type != ImageBinaryType::k2D, "2D images require only one input image");
 	ANKI_CFG_ASSERT(config.m_inputFilenames.getSize() != 1 || config.m_type != ImageBinaryType::k2DArray,
 					"2D array images require more than one input image");
-	ANKI_CFG_ASSERT(config.m_inputFilenames.getSize() != 1 || config.m_type != ImageBinaryType::k3D,
-					"3D images require more than one input image");
-	ANKI_CFG_ASSERT(config.m_inputFilenames.getSize() != 6 || config.m_type != ImageBinaryType::kCube,
-					"Cube images require 6 input images");
+	ANKI_CFG_ASSERT(config.m_inputFilenames.getSize() != 1 || config.m_type != ImageBinaryType::k3D, "3D images require more than one input image");
+	ANKI_CFG_ASSERT(config.m_inputFilenames.getSize() != 6 || config.m_type != ImageBinaryType::kCube, "Cube images require 6 input images");
 
 	// Compressions
 	ANKI_CFG_ASSERT(config.m_compressions != ImageBinaryDataCompression::kNone, "Missing output compressions");
-	ANKI_CFG_ASSERT(config.m_compressions == ImageBinaryDataCompression::kRaw || config.m_type != ImageBinaryType::k3D,
-					"Can't compress 3D textures");
+	ANKI_CFG_ASSERT(config.m_compressions == ImageBinaryDataCompression::kRaw || config.m_type != ImageBinaryType::k3D, "Can't compress 3D textures");
 
 	// ASTC
 	if(!!(config.m_compressions & ImageBinaryDataCompression::kAstc))
 	{
-		ANKI_CFG_ASSERT(config.m_astcBlockSize == UVec2(4u) || config.m_astcBlockSize == UVec2(8u),
-						"Incorrect ASTC block sizes");
+		ANKI_CFG_ASSERT(config.m_astcBlockSize == UVec2(4u) || config.m_astcBlockSize == UVec2(8u), "Incorrect ASTC block sizes");
 	}
 
 	// Mip size
 	ANKI_CFG_ASSERT(config.m_minMipmapDimension >= 4, "Mimpap min dimension can be less than 4");
 
 	// Color conversions
-	ANKI_CFG_ASSERT(!(config.m_linearToSRgb && config.m_sRgbToLinear),
-					"Can't have a conversion to sRGB and to linear at the same time");
+	ANKI_CFG_ASSERT(!(config.m_linearToSRgb && config.m_sRgbToLinear), "Can't have a conversion to sRGB and to linear at the same time");
 
 #undef ANKI_CFG_ASSERT
 	return Error::kNone;
@@ -210,8 +204,7 @@ static Error identifyImage(CString filename, U32& width, U32& height, U32& chann
 	return Error::kNone;
 }
 
-static Error checkInputImages(const ImageImporterConfig& config, U32& width, U32& height, U32& channelCount,
-							  Bool& isHdr)
+static Error checkInputImages(const ImageImporterConfig& config, U32& width, U32& height, U32& channelCount, Bool& isHdr)
 {
 	width = 0;
 	height = 0;
@@ -233,8 +226,7 @@ static Error checkInputImages(const ImageImporterConfig& config, U32& width, U32
 		}
 		else if(width != nwidth || height != nheight || channelCount != nchannelCount || isHdr != nhdr)
 		{
-			ANKI_IMPORTER_LOGE("Input image doesn't match previous input images: %s",
-							   config.m_inputFilenames[i].cstr());
+			ANKI_IMPORTER_LOGE("Input image doesn't match previous input images: %s", config.m_inputFilenames[i].cstr());
 			return Error::kUserData;
 		}
 	}
@@ -249,8 +241,8 @@ static Error checkInputImages(const ImageImporterConfig& config, U32& width, U32
 	return Error::kNone;
 }
 
-static Error resizeImage(CString inImageFilename, U32 outWidth, U32 outHeight, CString tempDirectory,
-						 BaseMemoryPool& pool, ImporterString& tmpFilename)
+static Error resizeImage(CString inImageFilename, U32 outWidth, U32 outHeight, CString tempDirectory, BaseMemoryPool& pool,
+						 ImporterString& tmpFilename)
 {
 	U32 inWidth, inHeight, channelCount;
 	Bool hdr;
@@ -281,14 +273,13 @@ static Error resizeImage(CString inImageFilename, U32 outWidth, U32 outHeight, C
 	if(!hdr)
 	{
 		outPixels.resize(outWidth * outHeight * channelCount);
-		ok = stbir_resize_uint8(static_cast<const U8*>(inPixels), inWidth, inHeight, 0, outPixels.getBegin(), outWidth,
-								outHeight, 0, channelCount);
+		ok = stbir_resize_uint8(static_cast<const U8*>(inPixels), inWidth, inHeight, 0, outPixels.getBegin(), outWidth, outHeight, 0, channelCount);
 	}
 	else
 	{
 		outPixels.resize(outWidth * outHeight * channelCount * sizeof(F32));
-		ok = stbir_resize_float(static_cast<const F32*>(inPixels), inWidth, inHeight, 0,
-								reinterpret_cast<F32*>(outPixels.getBegin()), outWidth, outHeight, 0, channelCount);
+		ok = stbir_resize_float(static_cast<const F32*>(inPixels), inWidth, inHeight, 0, reinterpret_cast<F32*>(outPixels.getBegin()), outWidth,
+								outHeight, 0, channelCount);
 	}
 
 	stbi_image_free(inPixels);
@@ -300,8 +291,7 @@ static Error resizeImage(CString inImageFilename, U32 outWidth, U32 outHeight, C
 	}
 
 	// Store
-	tmpFilename.sprintf("%s/AnKiImageImporter_%u.%s", tempDirectory.cstr(), g_tempFileIndex.fetchAdd(1),
-						(hdr) ? "exr" : "png");
+	tmpFilename.sprintf("%s/AnKiImageImporter_%u.%s", tempDirectory.cstr(), g_tempFileIndex.fetchAdd(1), (hdr) ? "exr" : "png");
 	ANKI_IMPORTER_LOGV("Will store: %s", tmpFilename.cstr());
 
 	if(!hdr)
@@ -310,8 +300,7 @@ static Error resizeImage(CString inImageFilename, U32 outWidth, U32 outHeight, C
 	}
 	else
 	{
-		const I ret = SaveEXR(reinterpret_cast<const F32*>(outPixels.getBegin()), outWidth, outHeight, channelCount, 0,
-							  tmpFilename.cstr(), nullptr);
+		const I ret = SaveEXR(reinterpret_cast<const F32*>(outPixels.getBegin()), outWidth, outHeight, channelCount, 0, tmpFilename.cstr(), nullptr);
 		ok = ret >= 0;
 	}
 
@@ -419,8 +408,7 @@ static Error loadFirstMipmap(const ImageImporterConfig& config, ImageImporterCon
 		{
 			for(U32 l = 0; l < ctx.m_layerCount; ++l)
 			{
-				mip0.m_surfacesOrVolume[l * ctx.m_faceCount + f].m_pixels.resize(ctx.m_pixelSize * ctx.m_width
-																				 * ctx.m_height);
+				mip0.m_surfacesOrVolume[l * ctx.m_faceCount + f].m_pixels.resize(ctx.m_pixelSize * ctx.m_width * ctx.m_height);
 			}
 		}
 	}
@@ -456,13 +444,11 @@ static Error loadFirstMipmap(const ImageImporterConfig& config, ImageImporterCon
 			{
 				if(!ctx.m_hdr)
 				{
-					linearToSRgbBatch(WeakArray<U8Vec3>(static_cast<U8Vec3*>(data), ctx.m_width * ctx.m_height),
-									  linearToSRgb);
+					linearToSRgbBatch(WeakArray<U8Vec3>(static_cast<U8Vec3*>(data), ctx.m_width * ctx.m_height), linearToSRgb);
 				}
 				else
 				{
-					linearToSRgbBatch(WeakArray<Vec3>(static_cast<Vec3*>(data), ctx.m_width * ctx.m_height),
-									  linearToSRgb);
+					linearToSRgbBatch(WeakArray<Vec3>(static_cast<Vec3*>(data), ctx.m_width * ctx.m_height), linearToSRgb);
 				}
 			}
 			else
@@ -470,13 +456,11 @@ static Error loadFirstMipmap(const ImageImporterConfig& config, ImageImporterCon
 				ANKI_ASSERT(ctx.m_channelCount == 4);
 				if(!ctx.m_hdr)
 				{
-					linearToSRgbBatch(WeakArray<U8Vec4>(static_cast<U8Vec4*>(data), ctx.m_width * ctx.m_height),
-									  linearToSRgb);
+					linearToSRgbBatch(WeakArray<U8Vec4>(static_cast<U8Vec4*>(data), ctx.m_width * ctx.m_height), linearToSRgb);
 				}
 				else
 				{
-					linearToSRgbBatch(WeakArray<Vec4>(static_cast<Vec4*>(data), ctx.m_width * ctx.m_height),
-									  linearToSRgb);
+					linearToSRgbBatch(WeakArray<Vec4>(static_cast<Vec4*>(data), ctx.m_width * ctx.m_height), linearToSRgb);
 				}
 			}
 		}
@@ -488,13 +472,11 @@ static Error loadFirstMipmap(const ImageImporterConfig& config, ImageImporterCon
 			{
 				if(!ctx.m_hdr)
 				{
-					linearToSRgbBatch(WeakArray<U8Vec3>(static_cast<U8Vec3*>(data), ctx.m_width * ctx.m_height),
-									  sRgbToLinear);
+					linearToSRgbBatch(WeakArray<U8Vec3>(static_cast<U8Vec3*>(data), ctx.m_width * ctx.m_height), sRgbToLinear);
 				}
 				else
 				{
-					linearToSRgbBatch(WeakArray<Vec3>(static_cast<Vec3*>(data), ctx.m_width * ctx.m_height),
-									  sRgbToLinear);
+					linearToSRgbBatch(WeakArray<Vec3>(static_cast<Vec3*>(data), ctx.m_width * ctx.m_height), sRgbToLinear);
 				}
 			}
 			else
@@ -502,13 +484,11 @@ static Error loadFirstMipmap(const ImageImporterConfig& config, ImageImporterCon
 				ANKI_ASSERT(ctx.m_channelCount == 4);
 				if(!ctx.m_hdr)
 				{
-					linearToSRgbBatch(WeakArray<U8Vec4>(static_cast<U8Vec4*>(data), ctx.m_width * ctx.m_height),
-									  sRgbToLinear);
+					linearToSRgbBatch(WeakArray<U8Vec4>(static_cast<U8Vec4*>(data), ctx.m_width * ctx.m_height), sRgbToLinear);
 				}
 				else
 				{
-					linearToSRgbBatch(WeakArray<Vec4>(static_cast<Vec4*>(data), ctx.m_width * ctx.m_height),
-									  sRgbToLinear);
+					linearToSRgbBatch(WeakArray<Vec4>(static_cast<Vec4*>(data), ctx.m_width * ctx.m_height), sRgbToLinear);
 				}
 			}
 		}
@@ -516,8 +496,7 @@ static Error loadFirstMipmap(const ImageImporterConfig& config, ImageImporterCon
 		if(ctx.m_hdr && (config.m_hdrScale != Vec3(1.0f) || config.m_hdrBias != Vec3(0.0f)))
 		{
 			ANKI_IMPORTER_LOGV("Will apply scale and/or bias to the image");
-			applyScaleAndBias(WeakArray(static_cast<Vec3*>(data), ctx.m_width * ctx.m_height), config.m_hdrScale,
-							  config.m_hdrBias);
+			applyScaleAndBias(WeakArray(static_cast<Vec3*>(data), ctx.m_width * ctx.m_height), config.m_hdrScale, config.m_hdrBias);
 		}
 
 		if(ctx.m_depth > 1)
@@ -536,16 +515,14 @@ static Error loadFirstMipmap(const ImageImporterConfig& config, ImageImporterCon
 }
 
 template<typename TStorageVec>
-static void generateSurfaceMipmap(ConstWeakArray<U8, PtrSize> inBuffer, U32 inWidth, U32 inHeight,
-								  WeakArray<U8, PtrSize> outBuffer)
+static void generateSurfaceMipmap(ConstWeakArray<U8, PtrSize> inBuffer, U32 inWidth, U32 inHeight, WeakArray<U8, PtrSize> outBuffer)
 {
 	constexpr U32 channelCount = TStorageVec::getSize();
 	using FVecType = typename std::conditional_t<channelCount == 3, Vec3, Vec4>;
 
 	const ConstWeakArray<TStorageVec, PtrSize> inPixels(reinterpret_cast<const TStorageVec*>(&inBuffer[0]),
 														inBuffer.getSizeInBytes() / sizeof(TStorageVec));
-	WeakArray<TStorageVec, PtrSize> outPixels(reinterpret_cast<TStorageVec*>(&outBuffer[0]),
-											  outBuffer.getSizeInBytes() / sizeof(TStorageVec));
+	WeakArray<TStorageVec, PtrSize> outPixels(reinterpret_cast<TStorageVec*>(&outBuffer[0]), outBuffer.getSizeInBytes() / sizeof(TStorageVec));
 
 	const U32 outWidth = inWidth >> 1;
 	const U32 outHeight = inHeight >> 1;
@@ -582,20 +559,16 @@ static void generateSurfaceMipmap(ConstWeakArray<U8, PtrSize> inBuffer, U32 inWi
 	}
 }
 
-static Error compressS3tc(BaseMemoryPool& pool, CString tempDirectory, CString compressonatorFilename,
-						  ConstWeakArray<U8, PtrSize> inPixels, U32 inWidth, U32 inHeight, U32 channelCount, Bool hdr,
-						  WeakArray<U8, PtrSize> outPixels)
+static Error compressS3tc(BaseMemoryPool& pool, CString tempDirectory, CString compressonatorFilename, ConstWeakArray<U8, PtrSize> inPixels,
+						  U32 inWidth, U32 inHeight, U32 channelCount, Bool hdr, WeakArray<U8, PtrSize> outPixels)
 {
-	ANKI_ASSERT(inPixels.getSizeInBytes()
-				== PtrSize(inWidth) * inHeight * channelCount * ((hdr) ? sizeof(F32) : sizeof(U8)));
+	ANKI_ASSERT(inPixels.getSizeInBytes() == PtrSize(inWidth) * inHeight * channelCount * ((hdr) ? sizeof(F32) : sizeof(U8)));
 	ANKI_ASSERT(inWidth > 0 && isPowerOfTwo(inWidth) && inHeight > 0 && isPowerOfTwo(inHeight));
-	ANKI_ASSERT(outPixels.getSizeInBytes()
-				== PtrSize((hdr || channelCount == 4) ? 16 : 8) * (inWidth / 4) * (inHeight / 4));
+	ANKI_ASSERT(outPixels.getSizeInBytes() == PtrSize((hdr || channelCount == 4) ? 16 : 8) * (inWidth / 4) * (inHeight / 4));
 
 	// Create a PNG image to feed to the compressor
 	ImporterString tmpFilename(&pool);
-	tmpFilename.sprintf("%s/AnKiImageImporter_%u.%s", tempDirectory.cstr(), g_tempFileIndex.fetchAdd(1),
-						(hdr) ? "exr" : "png");
+	tmpFilename.sprintf("%s/AnKiImageImporter_%u.%s", tempDirectory.cstr(), g_tempFileIndex.fetchAdd(1), (hdr) ? "exr" : "png");
 	ANKI_IMPORTER_LOGV("Will store: %s", tmpFilename.cstr());
 	Bool saveTmpImageOk = false;
 	if(!hdr)
@@ -605,8 +578,7 @@ static Error compressS3tc(BaseMemoryPool& pool, CString tempDirectory, CString c
 	}
 	else
 	{
-		const I ret = SaveEXR(reinterpret_cast<const F32*>(inPixels.getBegin()), inWidth, inHeight, channelCount, 0,
-							  tmpFilename.cstr(), nullptr);
+		const I ret = SaveEXR(reinterpret_cast<const F32*>(inPixels.getBegin()), inWidth, inHeight, channelCount, 0, tmpFilename.cstr(), nullptr);
 		saveTmpImageOk = ret >= 0;
 	}
 
@@ -629,8 +601,8 @@ static Error compressS3tc(BaseMemoryPool& pool, CString tempDirectory, CString c
 	args[argCount++] = tmpFilename;
 	args[argCount++] = ddsFilename;
 
-	ANKI_IMPORTER_LOGV("Will invoke process: %s %s %s %s %s %s", compressonatorFilename.cstr(), args[0].cstr(),
-					   args[1].cstr(), args[2].cstr(), args[3].cstr(), args[4].cstr());
+	ANKI_IMPORTER_LOGV("Will invoke process: %s %s %s %s %s %s", compressonatorFilename.cstr(), args[0].cstr(), args[1].cstr(), args[2].cstr(),
+					   args[3].cstr(), args[4].cstr());
 	ANKI_CHECK(proc.start(compressonatorFilename, args));
 	CleanupFile ddsCleanup(ddsFilename);
 	ProcessStatus status;
@@ -695,20 +667,17 @@ static Error compressS3tc(BaseMemoryPool& pool, CString tempDirectory, CString c
 	return Error::kNone;
 }
 
-static Error compressAstc(BaseMemoryPool& pool, CString tempDirectory, CString astcencFilename,
-						  ConstWeakArray<U8, PtrSize> inPixels, U32 inWidth, U32 inHeight, U32 inChannelCount,
-						  UVec2 blockSize, Bool hdr, WeakArray<U8, PtrSize> outPixels)
+static Error compressAstc(BaseMemoryPool& pool, CString tempDirectory, CString astcencFilename, ConstWeakArray<U8, PtrSize> inPixels, U32 inWidth,
+						  U32 inHeight, U32 inChannelCount, UVec2 blockSize, Bool hdr, WeakArray<U8, PtrSize> outPixels)
 {
 	[[maybe_unused]] const PtrSize blockBytes = 16;
-	ANKI_ASSERT(inPixels.getSizeInBytes()
-				== PtrSize(inWidth) * inHeight * inChannelCount * ((hdr) ? sizeof(F32) : sizeof(U8)));
+	ANKI_ASSERT(inPixels.getSizeInBytes() == PtrSize(inWidth) * inHeight * inChannelCount * ((hdr) ? sizeof(F32) : sizeof(U8)));
 	ANKI_ASSERT(inWidth > 0 && isPowerOfTwo(inWidth) && inHeight > 0 && isPowerOfTwo(inHeight));
 	ANKI_ASSERT(outPixels.getSizeInBytes() == blockBytes * (inWidth / blockSize.x()) * (inHeight / blockSize.y()));
 
 	// Create a BMP image to feed to the astcebc
 	ImporterString tmpFilename(&pool);
-	tmpFilename.sprintf("%s/AnKiImageImporter_%u.%s", tempDirectory.cstr(), g_tempFileIndex.fetchAdd(1),
-						(hdr) ? "exr" : "png");
+	tmpFilename.sprintf("%s/AnKiImageImporter_%u.%s", tempDirectory.cstr(), g_tempFileIndex.fetchAdd(1), (hdr) ? "exr" : "png");
 	ANKI_IMPORTER_LOGV("Will store: %s", tmpFilename.cstr());
 	Bool saveTmpImageOk = false;
 	if(!hdr)
@@ -718,8 +687,7 @@ static Error compressAstc(BaseMemoryPool& pool, CString tempDirectory, CString a
 	}
 	else
 	{
-		const I ret = SaveEXR(reinterpret_cast<const F32*>(inPixels.getBegin()), inWidth, inHeight, inChannelCount, 0,
-							  tmpFilename.cstr(), nullptr);
+		const I ret = SaveEXR(reinterpret_cast<const F32*>(inPixels.getBegin()), inWidth, inHeight, inChannelCount, 0, tmpFilename.cstr(), nullptr);
 		saveTmpImageOk = ret >= 0;
 	}
 
@@ -744,8 +712,8 @@ static Error compressAstc(BaseMemoryPool& pool, CString tempDirectory, CString a
 	args[argCount++] = blockStr;
 	args[argCount++] = "-fast";
 
-	ANKI_IMPORTER_LOGV("Will invoke process: %s %s %s %s %s %s", astcencFilename.cstr(), args[0].cstr(), args[1].cstr(),
-					   args[2].cstr(), args[3].cstr(), args[4].cstr());
+	ANKI_IMPORTER_LOGV("Will invoke process: %s %s %s %s %s %s", astcencFilename.cstr(), args[0].cstr(), args[1].cstr(), args[2].cstr(),
+					   args[3].cstr(), args[4].cstr());
 	ANKI_CHECK(proc.start(astcencFilename, args));
 
 	CleanupFile astcCleanup(astcFilename);
@@ -826,13 +794,11 @@ static Error storeAnkiImage(const ImageImporterConfig& config, const ImageImport
 	header.m_type = config.m_type;
 	if(ctx.m_hdr)
 	{
-		header.m_colorFormat =
-			(ctx.m_channelCount == 3) ? ImageBinaryColorFormat::kRgbFloat : ImageBinaryColorFormat::kRgbaFloat;
+		header.m_colorFormat = (ctx.m_channelCount == 3) ? ImageBinaryColorFormat::kRgbFloat : ImageBinaryColorFormat::kRgbaFloat;
 	}
 	else
 	{
-		header.m_colorFormat =
-			(ctx.m_channelCount == 3) ? ImageBinaryColorFormat::kRgb8 : ImageBinaryColorFormat::kRgba8;
+		header.m_colorFormat = (ctx.m_channelCount == 3) ? ImageBinaryColorFormat::kRgb8 : ImageBinaryColorFormat::kRgba8;
 	}
 	header.m_compressionMask = config.m_compressions;
 	header.m_isNormal = false;
@@ -931,16 +897,14 @@ static Error importImageInternal(const ImageImporterConfig& configOriginal)
 		const U32 newWidth = max(width, config.m_minMipmapDimension);
 		const U32 newHeight = max(height, config.m_minMipmapDimension);
 
-		ANKI_IMPORTER_LOGV("Image is smaller than the min mipmap dimension. Will resize it to %ux%u", newWidth,
-						   newHeight);
+		ANKI_IMPORTER_LOGV("Image is smaller than the min mipmap dimension. Will resize it to %ux%u", newWidth, newHeight);
 
 		newFilenames.resize(config.m_inputFilenames.getSize(), ImporterString(&pool));
 		newFilenamesCString.resize(config.m_inputFilenames.getSize());
 
 		for(U32 i = 0; i < config.m_inputFilenames.getSize(); ++i)
 		{
-			ANKI_CHECK(resizeImage(config.m_inputFilenames[i], newWidth, newHeight, config.m_tempDirectory, pool,
-								   newFilenames[i]));
+			ANKI_CHECK(resizeImage(config.m_inputFilenames[i], newWidth, newHeight, config.m_tempDirectory, pool, newFilenames[i]));
 
 			newFilenamesCString[i] = newFilenames[i];
 			resizedImagesCleanup.emplaceBack(newFilenames[i]);
@@ -1006,10 +970,9 @@ static Error importImageInternal(const ImageImporterConfig& configOriginal)
 	ANKI_CHECK(loadFirstMipmap(config, ctx));
 
 	// Generate mipmaps
-	const U32 mipCount =
-		min(config.m_mipmapCount, (config.m_type == ImageBinaryType::k3D)
-									  ? computeMaxMipmapCount3d(width, height, ctx.m_depth, config.m_minMipmapDimension)
-									  : computeMaxMipmapCount2d(width, height, config.m_minMipmapDimension));
+	const U32 mipCount = min(config.m_mipmapCount, (config.m_type == ImageBinaryType::k3D)
+													   ? computeMaxMipmapCount3d(width, height, ctx.m_depth, config.m_minMipmapDimension)
+													   : computeMaxMipmapCount2d(width, height, config.m_minMipmapDimension));
 	for(U32 mip = 1; mip < mipCount; ++mip)
 	{
 		ctx.m_mipmaps.emplaceBack(&pool);
@@ -1030,15 +993,13 @@ static Error importImageInternal(const ImageImporterConfig& configOriginal)
 					{
 						if(ctx.m_hdr)
 						{
-							generateSurfaceMipmap<Vec3>(ConstWeakArray<U8, PtrSize>(inSurface.m_pixels),
-														ctx.m_width >> (mip - 1), ctx.m_height >> (mip - 1),
-														WeakArray<U8, PtrSize>(outSurface.m_pixels));
+							generateSurfaceMipmap<Vec3>(ConstWeakArray<U8, PtrSize>(inSurface.m_pixels), ctx.m_width >> (mip - 1),
+														ctx.m_height >> (mip - 1), WeakArray<U8, PtrSize>(outSurface.m_pixels));
 						}
 						else
 						{
-							generateSurfaceMipmap<U8Vec3>(ConstWeakArray<U8, PtrSize>(inSurface.m_pixels),
-														  ctx.m_width >> (mip - 1), ctx.m_height >> (mip - 1),
-														  WeakArray<U8, PtrSize>(outSurface.m_pixels));
+							generateSurfaceMipmap<U8Vec3>(ConstWeakArray<U8, PtrSize>(inSurface.m_pixels), ctx.m_width >> (mip - 1),
+														  ctx.m_height >> (mip - 1), WeakArray<U8, PtrSize>(outSurface.m_pixels));
 						}
 					}
 					else
@@ -1047,15 +1008,13 @@ static Error importImageInternal(const ImageImporterConfig& configOriginal)
 
 						if(ctx.m_hdr)
 						{
-							generateSurfaceMipmap<Vec4>(ConstWeakArray<U8, PtrSize>(inSurface.m_pixels),
-														ctx.m_width >> (mip - 1), ctx.m_height >> (mip - 1),
-														WeakArray<U8, PtrSize>(outSurface.m_pixels));
+							generateSurfaceMipmap<Vec4>(ConstWeakArray<U8, PtrSize>(inSurface.m_pixels), ctx.m_width >> (mip - 1),
+														ctx.m_height >> (mip - 1), WeakArray<U8, PtrSize>(outSurface.m_pixels));
 						}
 						else
 						{
-							generateSurfaceMipmap<U8Vec4>(ConstWeakArray<U8, PtrSize>(inSurface.m_pixels),
-														  ctx.m_width >> (mip - 1), ctx.m_height >> (mip - 1),
-														  WeakArray<U8, PtrSize>(outSurface.m_pixels));
+							generateSurfaceMipmap<U8Vec4>(ConstWeakArray<U8, PtrSize>(inSurface.m_pixels), ctx.m_width >> (mip - 1),
+														  ctx.m_height >> (mip - 1), WeakArray<U8, PtrSize>(outSurface.m_pixels));
 						}
 					}
 				}
@@ -1089,8 +1048,7 @@ static Error importImageInternal(const ImageImporterConfig& configOriginal)
 					surface.m_s3tcPixels.resize(s3tcImageSize);
 
 					ANKI_CHECK(compressS3tc(pool, config.m_tempDirectory, config.m_compressonatorFilename,
-											ConstWeakArray<U8, PtrSize>(surface.m_pixels), width, height,
-											ctx.m_channelCount, ctx.m_hdr,
+											ConstWeakArray<U8, PtrSize>(surface.m_pixels), width, height, ctx.m_channelCount, ctx.m_hdr,
 											WeakArray<U8, PtrSize>(surface.m_s3tcPixels)));
 				}
 			}
@@ -1113,14 +1071,12 @@ static Error importImageInternal(const ImageImporterConfig& configOriginal)
 					const U32 width = ctx.m_width >> mip;
 					const U32 height = ctx.m_height >> mip;
 					const PtrSize blockSize = 16;
-					const PtrSize astcImageSize =
-						blockSize * (width / config.m_astcBlockSize.x()) * (height / config.m_astcBlockSize.y());
+					const PtrSize astcImageSize = blockSize * (width / config.m_astcBlockSize.x()) * (height / config.m_astcBlockSize.y());
 
 					surface.m_astcPixels.resize(astcImageSize);
 
-					ANKI_CHECK(compressAstc(pool, config.m_tempDirectory, config.m_astcencFilename,
-											ConstWeakArray<U8, PtrSize>(surface.m_pixels), width, height,
-											ctx.m_channelCount, config.m_astcBlockSize, ctx.m_hdr,
+					ANKI_CHECK(compressAstc(pool, config.m_tempDirectory, config.m_astcencFilename, ConstWeakArray<U8, PtrSize>(surface.m_pixels),
+											width, height, ctx.m_channelCount, config.m_astcBlockSize, ctx.m_hdr,
 											WeakArray<U8, PtrSize>(surface.m_astcPixels)));
 				}
 			}

+ 1 - 3
AnKi/Math/Axisang.h

@@ -54,10 +54,8 @@ public:
 	{
 		if(isZero<T>(m3(0, 1) - m3(1, 0)) && isZero<T>(m3(0, 2) - m3(2, 0)) && isZero<T>(m3(1, 2) - m3(2, 1)))
 		{
-
 			if((absolute<T>(m3(0, 1) + m3(1, 0)) < T(0.1)) && (absolute<T>(m3(0, 2) + m3(2, 0)) < T(0.1))
-			   && (absolute<T>(m3(1, 2) + m3(2, 1)) < T(0.1))
-			   && (absolute<T>(m3(0, 0) + m3(1, 1) + m3(2, 2)) - 3) < T(0.1))
+			   && (absolute<T>(m3(1, 2) + m3(2, 1)) < T(0.1)) && (absolute<T>(m3(0, 0) + m3(1, 1) + m3(2, 2)) - 3) < T(0.1))
 			{
 				m_axis = TVec<T, 3>(T(1), T(0), T(0));
 				m_ang = T(0);

+ 1 - 2
AnKi/Math/Functions.h

@@ -105,8 +105,7 @@ inline T modf(T x, T& intPart)
 }
 
 /// The same as abs/fabs. For ints and floats.
-template<typename T,
-		 ANKI_ENABLE(std::is_floating_point<T>::value || (std::is_integral<T>::value && std::is_signed<T>::value))>
+template<typename T, ANKI_ENABLE(std::is_floating_point<T>::value || (std::is_integral<T>::value && std::is_signed<T>::value))>
 inline constexpr T absolute(const T f)
 {
 	return (f < T(0)) ? -f : f;

部分文件因文件數量過多而無法顯示