
Some code style refactoring #8

Panagiotis Christopoulos Charitos, 3 years ago
parent
commit
d31b8d843c

+ 2 - 2
AnKi/Core/GpuMemoryPools.cpp

@@ -108,9 +108,9 @@ void StagingGpuMemoryPool::initBuffer(StagingGpuMemoryType type, U32 alignment,
 {
 	PerFrameBuffer& perframe = m_perFrameBuffers[type];
 
-	perframe.m_buff = gr.newBuffer(BufferInitInfo(perframe.m_size, usage, BufferMapAccessBit::WRITE, "Staging"));
+	perframe.m_buff = gr.newBuffer(BufferInitInfo(perframe.m_size, usage, BufferMapAccessBit::kWrite, "Staging"));
 	perframe.m_alloc.init(perframe.m_size, alignment, maxAllocSize);
-	perframe.m_mappedMem = static_cast<U8*>(perframe.m_buff->map(0, perframe.m_size, BufferMapAccessBit::WRITE));
+	perframe.m_mappedMem = static_cast<U8*>(perframe.m_buff->map(0, perframe.m_size, BufferMapAccessBit::kWrite));
 }
 
 void* StagingGpuMemoryPool::allocateFrame(PtrSize size, StagingGpuMemoryType usage, StagingGpuMemoryToken& token)
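
The hunk above maps the staging buffer with BufferMapAccessBit::kWrite and hands the mapped range to a per-frame allocator. As a conceptual illustration only (names and structure are assumptions, not AnKi's actual StagingGpuMemoryPool), a minimal sketch of a per-frame linear allocator over such a persistently mapped region:

```cpp
#include <cstddef>
#include <cstdint>

// Conceptual sketch of a per-frame linear allocator over a persistently mapped
// staging buffer. Illustrative only; not AnKi's StagingGpuMemoryPool.
class FrameLinearAllocator
{
public:
	FrameLinearAllocator(uint8_t* mappedMem, size_t size, size_t alignment)
		: m_mem(mappedMem)
		, m_size(size)
		, m_alignment(alignment)
	{
	}

	// Returns a CPU-visible pointer inside the mapped buffer and the offset to use
	// when binding the GPU buffer, or nullptr if the frame budget is exhausted.
	void* allocate(size_t size, size_t& outOffset)
	{
		const size_t offset = (m_offset + m_alignment - 1) / m_alignment * m_alignment;
		if(offset + size > m_size)
		{
			return nullptr;
		}
		m_offset = offset + size;
		outOffset = offset;
		return m_mem + offset;
	}

	// Called once at the beginning of every frame; previous allocations are simply forgotten.
	void reset()
	{
		m_offset = 0;
	}

private:
	uint8_t* m_mem;
	size_t m_size;
	size_t m_alignment;
	size_t m_offset = 0;
};
```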

+ 8 - 8
AnKi/Gr/AccelerationStructure.h

@@ -21,7 +21,7 @@ public:
 	BufferPtr m_indexBuffer;
 	PtrSize m_indexBufferOffset = 0;
 	U32 m_indexCount = 0;
-	IndexType m_indexType = IndexType::COUNT;
+	IndexType m_indexType = IndexType::kCount;
 
 	BufferPtr m_positionBuffer;
 	PtrSize m_positionBufferOffset = 0;
@@ -31,7 +31,7 @@ public:
 
 	Bool isValid() const
 	{
-		if(m_indexBuffer.get() == nullptr || m_indexCount == 0 || m_indexType == IndexType::COUNT
+		if(m_indexBuffer.get() == nullptr || m_indexCount == 0 || m_indexType == IndexType::kCount
 		   || m_positionBuffer.get() == nullptr || m_positionStride == 0 || m_positionsFormat == Format::kNone
 		   || m_positionCount == 0)
 		{
@@ -50,7 +50,7 @@ public:
 			return false;
 		}
 
-		const PtrSize idxStride = (m_indexType == IndexType::U16) ? 2 : 4;
+		const PtrSize idxStride = (m_indexType == IndexType::kU16) ? 2 : 4;
 		if(m_indexBufferOffset + idxStride * m_indexCount > m_indexBuffer->getSize())
 		{
 			return false;
@@ -87,7 +87,7 @@ public:
 class AccelerationStructureInitInfo : public GrBaseInitInfo
 {
 public:
-	AccelerationStructureType m_type = AccelerationStructureType::COUNT;
+	AccelerationStructureType m_type = AccelerationStructureType::kCount;
 	BottomLevelAccelerationStructureInitInfo m_bottomLevel;
 	TopLevelAccelerationStructureInitInfo m_topLevel;
 
@@ -98,12 +98,12 @@ public:
 
 	Bool isValid() const
 	{
-		if(m_type == AccelerationStructureType::COUNT)
+		if(m_type == AccelerationStructureType::kCount)
 		{
 			return false;
 		}
 
-		return (m_type == AccelerationStructureType::BOTTOM_LEVEL) ? m_bottomLevel.isValid() : m_topLevel.isValid();
+		return (m_type == AccelerationStructureType::kBottomLevel) ? m_bottomLevel.isValid() : m_topLevel.isValid();
 	}
 };
 
@@ -117,12 +117,12 @@ public:
 
 	AccelerationStructureType getType() const
 	{
-		ANKI_ASSERT(m_type != AccelerationStructureType::COUNT);
+		ANKI_ASSERT(m_type != AccelerationStructureType::kCount);
 		return m_type;
 	}
 
 protected:
-	AccelerationStructureType m_type = AccelerationStructureType::COUNT;
+	AccelerationStructureType m_type = AccelerationStructureType::kCount;
 
 	/// Construct.
 	AccelerationStructure(GrManager* manager, CString name)

+ 2 - 2
AnKi/Gr/Buffer.h

@@ -20,7 +20,7 @@ class BufferInitInfo : public GrBaseInitInfo
 public:
 	PtrSize m_size = 0;
 	BufferUsageBit m_usage = BufferUsageBit::kNone;
-	BufferMapAccessBit m_mapAccess = BufferMapAccessBit::NONE;
+	BufferMapAccessBit m_mapAccess = BufferMapAccessBit::kNone;
 
 	BufferInitInfo(CString name = {})
 		: GrBaseInitInfo(name)
@@ -109,7 +109,7 @@ public:
 protected:
 	PtrSize m_size = 0;
 	BufferUsageBit m_usage = BufferUsageBit::kNone;
-	BufferMapAccessBit m_access = BufferMapAccessBit::NONE;
+	BufferMapAccessBit m_access = BufferMapAccessBit::kNone;
 	U64 m_gpuAddress = 0;
 
 	/// Construct.

+ 34 - 34
AnKi/Gr/Common.h

@@ -713,66 +713,66 @@ ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS(BufferUsageBit)
 /// Buffer access when mapped.
 enum class BufferMapAccessBit : U8
 {
-	NONE = 0,
-	READ = 1 << 0,
-	WRITE = 1 << 1
+	kNone = 0,
+	kRead = 1 << 0,
+	kWrite = 1 << 1
 };
 ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS(BufferMapAccessBit)
 
 /// Index buffer's index type.
 enum class IndexType : U8
 {
-	U16,
-	U32,
-	COUNT
+	kU16,
+	kU32,
+	kCount
 };
 
 /// Rasterization order.
 enum class RasterizationOrder : U8
 {
-	ORDERED,
-	RELAXED,
-	COUNT
+	kOrdered,
+	kRelaxed,
+	kCount
 };
 
 /// Acceleration structure type.
 enum class AccelerationStructureType : U8
 {
-	TOP_LEVEL,
-	BOTTOM_LEVEL,
-	COUNT
+	kTopLevel,
+	kBottomLevel,
+	kCount
 };
 
 enum class AccelerationStructureUsageBit : U8
 {
-	NONE = 0,
-	BUILD = 1 << 0,
-	ATTACH = 1 << 1, ///< Attached to a TLAS. Only for BLAS.
-	GEOMETRY_READ = 1 << 2,
-	FRAGMENT_READ = 1 << 3,
-	COMPUTE_READ = 1 << 4,
-	TRACE_RAYS_READ = 1 << 5,
+	kNone = 0,
+	kBuild = 1 << 0,
+	kAttach = 1 << 1, ///< Attached to a TLAS. Only for BLAS.
+	kGeometryRead = 1 << 2,
+	kFragmentRead = 1 << 3,
+	kComputeRead = 1 << 4,
+	kTraceRaysRead = 1 << 5,
 
 	// Derived
-	ALL_GRAPHICS = GEOMETRY_READ | FRAGMENT_READ,
-	ALL_READ = ATTACH | GEOMETRY_READ | FRAGMENT_READ | COMPUTE_READ | TRACE_RAYS_READ,
-	ALL_WRITE = BUILD
+	kAllGraphics = kGeometryRead | kFragmentRead,
+	kAllRead = kAttach | kGeometryRead | kFragmentRead | kComputeRead | kTraceRaysRead,
+	kAllWrite = kBuild
 };
 ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS(AccelerationStructureUsageBit)
 
 /// VRS rates.
 enum class VrsRate : U8
 {
-	_1x1, ///< Disable VRS. Always supported.
-	_2x1, ///< Always supported.
-	_1x2,
-	_2x2, ///< Always supported.
-	_4x2,
-	_2x4,
-	_4x4,
-
-	COUNT,
-	FIRST = 0
+	k1x1, ///< Disable VRS. Always supported.
+	k2x1, ///< Always supported.
+	k1x2,
+	k2x2, ///< Always supported.
+	k4x2,
+	k2x4,
+	k4x4,
+
+	kCount,
+	kFirst = 0
 };
 ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS(VrsRate)
 
@@ -1050,8 +1050,8 @@ class AccelerationStructureBarrierInfo
 {
 public:
 	AccelerationStructure* m_as = nullptr;
-	AccelerationStructureUsageBit m_previousUsage = AccelerationStructureUsageBit::NONE;
-	AccelerationStructureUsageBit m_nextUsage = AccelerationStructureUsageBit::NONE;
+	AccelerationStructureUsageBit m_previousUsage = AccelerationStructureUsageBit::kNone;
+	AccelerationStructureUsageBit m_nextUsage = AccelerationStructureUsageBit::kNone;
 };
 
 /// Compute max number of mipmaps for a 2D texture.
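
The bulk of this commit is the switch from SCREAMING_CASE enumerators to k-prefixed CamelCase ones (kNone, kWrite, kCount, ...). A minimal, self-contained sketch of the idiom, with hand-written operators standing in for whatever ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS actually expands to:

```cpp
#include <cstdint>

// The renamed enum from AnKi/Gr/Common.h. The operators below are hand-written
// stand-ins for the engine's numeric-operations macro.
enum class BufferMapAccessBit : uint8_t
{
	kNone = 0,
	kRead = 1 << 0,
	kWrite = 1 << 1
};

constexpr BufferMapAccessBit operator|(BufferMapAccessBit a, BufferMapAccessBit b)
{
	return BufferMapAccessBit(uint8_t(a) | uint8_t(b));
}

constexpr BufferMapAccessBit operator&(BufferMapAccessBit a, BufferMapAccessBit b)
{
	return BufferMapAccessBit(uint8_t(a) & uint8_t(b));
}

constexpr bool operator!(BufferMapAccessBit a)
{
	return uint8_t(a) == 0;
}

int main()
{
	const BufferMapAccessBit access = BufferMapAccessBit::kRead | BufferMapAccessBit::kWrite;

	// The "!!" test used throughout the diff: true when any tested bit is set.
	const bool cpuWrites = !!(access & BufferMapAccessBit::kWrite);
	return cpuWrites ? 0 : 1;
}
```

The !! idiom seen throughout the diff simply converts the masked result to a bool, so any set bit in the tested mask reads as true.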

+ 2 - 2
AnKi/Gr/Gl/BufferImpl.cpp

@@ -42,7 +42,7 @@ void BufferImpl::init()
 		flags |= GL_DYNAMIC_STORAGE_BIT;
 	}
 
-	if((access & BufferMapAccessBit::WRITE) != BufferMapAccessBit::NONE)
+	if((access & BufferMapAccessBit::kWrite) != BufferMapAccessBit::kNone)
 	{
 		flags |= GL_MAP_WRITE_BIT;
 		flags |= GL_MAP_PERSISTENT_BIT;
@@ -51,7 +51,7 @@ void BufferImpl::init()
 		shouldMap = true;
 	}
 
-	if((access & BufferMapAccessBit::READ) != BufferMapAccessBit::NONE)
+	if((access & BufferMapAccessBit::kRead) != BufferMapAccessBit::kNone)
 	{
 		flags |= GL_MAP_READ_BIT;
 		flags |= GL_MAP_PERSISTENT_BIT;

+ 2 - 2
AnKi/Gr/Gl/Common.h

@@ -79,10 +79,10 @@ inline GLenum convertIndexType(IndexType ak)
 	GLenum out;
 	switch(ak)
 	{
-	case IndexType::U16:
+	case IndexType::kU16:
 		out = GL_UNSIGNED_SHORT;
 		break;
-	case IndexType::U32:
+	case IndexType::kU32:
 		out = GL_UNSIGNED_INT;
 		break;
 	default:

+ 1 - 1
AnKi/Gr/RenderGraph.cpp

@@ -644,7 +644,7 @@ Bool RenderGraph::passADependsOnB(const RenderPassDescriptionBase& a, const Rend
 						continue;
 					}
 
-					if(!((aDep.m_as.m_usage | bDep.m_as.m_usage) & AccelerationStructureUsageBit::ALL_WRITE))
+					if(!((aDep.m_as.m_usage | bDep.m_as.m_usage) & AccelerationStructureUsageBit::kAllWrite))
 					{
 						// Don't care about read to read deps
 						continue;

+ 4 - 4
AnKi/Gr/RenderGraph.inl.h

@@ -98,11 +98,11 @@ inline void RenderPassDescriptionBase::validateDep(const RenderPassDependency& d
 		ANKI_ASSERT(dep.m_type == RenderPassDependency::Type::ACCELERATION_STRUCTURE);
 		if(m_type == Type::GRAPHICS)
 		{
-			ANKI_ASSERT(!(dep.m_as.m_usage & ~AccelerationStructureUsageBit::ALL_GRAPHICS));
+			ANKI_ASSERT(!(dep.m_as.m_usage & ~AccelerationStructureUsageBit::kAllGraphics));
 		}
 		else
 		{
-			ANKI_ASSERT(!(dep.m_as.m_usage & AccelerationStructureUsageBit::ALL_GRAPHICS));
+			ANKI_ASSERT(!(dep.m_as.m_usage & AccelerationStructureUsageBit::kAllGraphics));
 		}
 	}
 }
@@ -148,12 +148,12 @@ inline void RenderPassDescriptionBase::newDependency(const RenderPassDependency&
 		ANKI_ASSERT(dep.m_type == RenderPassDependency::Type::ACCELERATION_STRUCTURE);
 		m_asDeps.emplaceBack(m_alloc, dep);
 
-		if(!!(dep.m_as.m_usage & AccelerationStructureUsageBit::ALL_READ))
+		if(!!(dep.m_as.m_usage & AccelerationStructureUsageBit::kAllRead))
 		{
 			m_readAsMask.set(dep.m_as.m_handle.m_idx);
 		}
 
-		if(!!(dep.m_as.m_usage & AccelerationStructureUsageBit::ALL_WRITE))
+		if(!!(dep.m_as.m_usage & AccelerationStructureUsageBit::kAllWrite))
 		{
 			m_writeAsMask.set(dep.m_as.m_handle.m_idx);
 		}
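
The render-graph code above classifies each acceleration-structure dependency as a read and/or a write by intersecting its usage with the derived kAllRead and kAllWrite masks from Common.h. A self-contained sketch of that classification (operators again written by hand in place of the engine's macro):

```cpp
#include <cstdint>

// Mirrors the renamed usage enum from AnKi/Gr/Common.h, including the derived masks.
enum class AccelerationStructureUsageBit : uint8_t
{
	kNone = 0,
	kBuild = 1 << 0,
	kAttach = 1 << 1,
	kGeometryRead = 1 << 2,
	kFragmentRead = 1 << 3,
	kComputeRead = 1 << 4,
	kTraceRaysRead = 1 << 5,

	kAllRead = kAttach | kGeometryRead | kFragmentRead | kComputeRead | kTraceRaysRead,
	kAllWrite = kBuild
};

// Hand-written stand-ins for the engine's numeric-operations macro.
constexpr AccelerationStructureUsageBit operator&(AccelerationStructureUsageBit a,
												  AccelerationStructureUsageBit b)
{
	return AccelerationStructureUsageBit(uint8_t(a) & uint8_t(b));
}

constexpr bool operator!(AccelerationStructureUsageBit a)
{
	return uint8_t(a) == 0;
}

// A dependency counts as a read and/or a write depending on which derived mask its
// usage intersects; the render graph then sets the corresponding read/write bit masks.
constexpr bool isRead(AccelerationStructureUsageBit usage)
{
	return !!(usage & AccelerationStructureUsageBit::kAllRead);
}

constexpr bool isWrite(AccelerationStructureUsageBit usage)
{
	return !!(usage & AccelerationStructureUsageBit::kAllWrite);
}

static_assert(isRead(AccelerationStructureUsageBit::kTraceRaysRead), "");
static_assert(isWrite(AccelerationStructureUsageBit::kBuild), "");
static_assert(!isWrite(AccelerationStructureUsageBit::kFragmentRead), "");
```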

+ 16 - 16
AnKi/Gr/Vulkan/AccelerationStructureImpl.cpp

@@ -25,7 +25,7 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 	ANKI_ASSERT(inf.isValid());
 	m_type = inf.m_type;
 
-	if(m_type == AccelerationStructureType::BOTTOM_LEVEL)
+	if(m_type == AccelerationStructureType::kBottomLevel)
 	{
 		// Geom
 		VkAccelerationStructureGeometryKHR& geom = m_geometry;
@@ -95,11 +95,11 @@ Error AccelerationStructureImpl::init(const AccelerationStructureInitInfo& inf)
 		BufferInitInfo buffInit("AS instances");
 		buffInit.m_size = sizeof(VkAccelerationStructureInstanceKHR) * inf.m_topLevel.m_instances.getSize();
 		buffInit.m_usage = PrivateBufferUsageBit::ACCELERATION_STRUCTURE;
-		buffInit.m_mapAccess = BufferMapAccessBit::WRITE;
+		buffInit.m_mapAccess = BufferMapAccessBit::kWrite;
 		m_topLevelInfo.m_instancesBuffer = getManager().newBuffer(buffInit);
 
 		VkAccelerationStructureInstanceKHR* instances = static_cast<VkAccelerationStructureInstanceKHR*>(
-			m_topLevelInfo.m_instancesBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE));
+			m_topLevelInfo.m_instancesBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));
 		for(U32 i = 0; i < inf.m_topLevel.m_instances.getSize(); ++i)
 		{
 			VkAccelerationStructureInstanceKHR& outInst = instances[i];
@@ -183,44 +183,44 @@ void AccelerationStructureImpl::computeBarrierInfo(AccelerationStructureUsageBit
 	srcStages = 0;
 	srcAccesses = 0;
 
-	if(before == AccelerationStructureUsageBit::NONE)
+	if(before == AccelerationStructureUsageBit::kNone)
 	{
 		srcStages |= VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
 		srcAccesses |= 0;
 	}
 
-	if(!!(before & AccelerationStructureUsageBit::BUILD))
+	if(!!(before & AccelerationStructureUsageBit::kBuild))
 	{
 		srcStages |= VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR;
 		srcAccesses |= VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR;
 	}
 
-	if(!!(before & AccelerationStructureUsageBit::ATTACH))
+	if(!!(before & AccelerationStructureUsageBit::kAttach))
 	{
 		srcStages |= VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR;
 		srcAccesses |= VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR;
 	}
 
-	if(!!(before & AccelerationStructureUsageBit::GEOMETRY_READ))
+	if(!!(before & AccelerationStructureUsageBit::kGeometryRead))
 	{
 		srcStages |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
 					 | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
 		srcAccesses |= VK_ACCESS_MEMORY_READ_BIT; // READ_BIT is the only viable solution by elimination
 	}
 
-	if(!!(before & AccelerationStructureUsageBit::FRAGMENT_READ))
+	if(!!(before & AccelerationStructureUsageBit::kFragmentRead))
 	{
 		srcStages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 		srcAccesses |= VK_ACCESS_MEMORY_READ_BIT;
 	}
 
-	if(!!(before & AccelerationStructureUsageBit::COMPUTE_READ))
+	if(!!(before & AccelerationStructureUsageBit::kComputeRead))
 	{
 		srcStages |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 		srcAccesses |= VK_ACCESS_MEMORY_READ_BIT;
 	}
 
-	if(!!(before & AccelerationStructureUsageBit::TRACE_RAYS_READ))
+	if(!!(before & AccelerationStructureUsageBit::kTraceRaysRead))
 	{
 		srcStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;
 		srcAccesses |= VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR;
@@ -230,38 +230,38 @@ void AccelerationStructureImpl::computeBarrierInfo(AccelerationStructureUsageBit
 	dstStages = 0;
 	dstAccesses = 0;
 
-	if(!!(after & AccelerationStructureUsageBit::BUILD))
+	if(!!(after & AccelerationStructureUsageBit::kBuild))
 	{
 		dstStages |= VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR;
 		dstAccesses |= VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR;
 	}
 
-	if(!!(after & AccelerationStructureUsageBit::ATTACH))
+	if(!!(after & AccelerationStructureUsageBit::kAttach))
 	{
 		dstStages |= VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR;
 		dstAccesses |= VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR;
 	}
 
-	if(!!(after & AccelerationStructureUsageBit::GEOMETRY_READ))
+	if(!!(after & AccelerationStructureUsageBit::kGeometryRead))
 	{
 		dstStages |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
 					 | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
 		dstAccesses |= VK_ACCESS_MEMORY_READ_BIT; // READ_BIT is the only viable solution by elimination
 	}
 
-	if(!!(after & AccelerationStructureUsageBit::FRAGMENT_READ))
+	if(!!(after & AccelerationStructureUsageBit::kFragmentRead))
 	{
 		dstStages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 		dstAccesses |= VK_ACCESS_MEMORY_READ_BIT;
 	}
 
-	if(!!(after & AccelerationStructureUsageBit::COMPUTE_READ))
+	if(!!(after & AccelerationStructureUsageBit::kComputeRead))
 	{
 		dstStages |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 		dstAccesses |= VK_ACCESS_MEMORY_READ_BIT;
 	}
 
-	if(!!(after & AccelerationStructureUsageBit::TRACE_RAYS_READ))
+	if(!!(after & AccelerationStructureUsageBit::kTraceRaysRead))
 	{
 		dstStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;
 		dstAccesses |= VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR;
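
computeBarrierInfo() translates the renamed usage bits into Vulkan pipeline stages and access masks, one if-block per bit. A trimmed, self-contained sketch of the destination half of that mapping, assuming a Vulkan SDK new enough to expose the KHR acceleration-structure and ray-tracing enums (only two usage bits are mirrored here for brevity):

```cpp
#include <vulkan/vulkan.h>
#include <cstdint>

// Only the two usage bits needed for the sketch; mirrors AnKi/Gr/Common.h.
enum class AccelerationStructureUsageBit : uint8_t
{
	kNone = 0,
	kBuild = 1 << 0,
	kTraceRaysRead = 1 << 5
};

constexpr AccelerationStructureUsageBit operator&(AccelerationStructureUsageBit a,
												  AccelerationStructureUsageBit b)
{
	return AccelerationStructureUsageBit(uint8_t(a) & uint8_t(b));
}

constexpr bool operator!(AccelerationStructureUsageBit a)
{
	return uint8_t(a) == 0;
}

// Destination half of the usage-to-barrier mapping: every usage bit contributes
// a pipeline stage and an access mask, as in computeBarrierInfo() above.
void usageToDstBarrier(AccelerationStructureUsageBit after, VkPipelineStageFlags& dstStages,
					   VkAccessFlags& dstAccesses)
{
	dstStages = 0;
	dstAccesses = 0;

	if(!!(after & AccelerationStructureUsageBit::kBuild))
	{
		dstStages |= VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR;
		dstAccesses |= VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR;
	}

	if(!!(after & AccelerationStructureUsageBit::kTraceRaysRead))
	{
		dstStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;
		dstAccesses |= VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR;
	}
}
```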

+ 7 - 7
AnKi/Gr/Vulkan/BufferImpl.cpp

@@ -87,7 +87,7 @@ Error BufferImpl::init(const BufferInitInfo& inf)
 	U32 memIdx = kMaxU32;
 	const Bool isDiscreteGpu = getGrManagerImpl().getDeviceCapabilities().m_discreteGpu;
 
-	if(access == BufferMapAccessBit::WRITE)
+	if(access == BufferMapAccessBit::kWrite)
 	{
 		// Only write, probably for uploads
 
@@ -131,7 +131,7 @@ Error BufferImpl::init(const BufferInitInfo& inf)
 			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits, prefer, avoid);
 		}
 	}
-	else if(!!(access & BufferMapAccessBit::READ))
+	else if(!!(access & BufferMapAccessBit::kRead))
 	{
 		// Read or read/write
 
@@ -158,7 +158,7 @@ Error BufferImpl::init(const BufferInitInfo& inf)
 	{
 		// Not mapped
 
-		ANKI_ASSERT(access == BufferMapAccessBit::NONE);
+		ANKI_ASSERT(access == BufferMapAccessBit::kNone);
 
 		// Device only
 		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
@@ -181,12 +181,12 @@ Error BufferImpl::init(const BufferInitInfo& inf)
 	const VkPhysicalDeviceMemoryProperties& props = getGrManagerImpl().getMemoryProperties();
 	m_memoryFlags = props.memoryTypes[memIdx].propertyFlags;
 
-	if(!!(access & BufferMapAccessBit::READ) && !(m_memoryFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
+	if(!!(access & BufferMapAccessBit::kRead) && !(m_memoryFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
 	{
 		m_needsInvalidate = true;
 	}
 
-	if(!!(access & BufferMapAccessBit::WRITE) && !(m_memoryFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
+	if(!!(access & BufferMapAccessBit::kWrite) && !(m_memoryFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
 	{
 		m_needsFlush = true;
 	}
@@ -226,8 +226,8 @@ Error BufferImpl::init(const BufferInitInfo& inf)
 void* BufferImpl::map(PtrSize offset, PtrSize range, [[maybe_unused]] BufferMapAccessBit access)
 {
 	ANKI_ASSERT(isCreated());
-	ANKI_ASSERT(access != BufferMapAccessBit::NONE);
-	ANKI_ASSERT((access & m_access) != BufferMapAccessBit::NONE);
+	ANKI_ASSERT(access != BufferMapAccessBit::kNone);
+	ANKI_ASSERT((access & m_access) != BufferMapAccessBit::kNone);
 	ANKI_ASSERT(!m_mapped);
 	ANKI_ASSERT(offset < m_size);
 	if(range == kMaxPtrSize)

+ 2 - 2
AnKi/Gr/Vulkan/BufferImpl.h

@@ -65,7 +65,7 @@ public:
 
 	ANKI_FORCE_INLINE void flush(PtrSize offset, PtrSize range) const
 	{
-		ANKI_ASSERT(!!(m_access & BufferMapAccessBit::WRITE) && "No need to flush when the CPU doesn't write");
+		ANKI_ASSERT(!!(m_access & BufferMapAccessBit::kWrite) && "No need to flush when the CPU doesn't write");
 		if(m_needsFlush)
 		{
 			VkMappedMemoryRange vkrange = setVkMappedMemoryRange(offset, range);
@@ -78,7 +78,7 @@ public:
 
 	ANKI_FORCE_INLINE void invalidate(PtrSize offset, PtrSize range) const
 	{
-		ANKI_ASSERT(!!(m_access & BufferMapAccessBit::READ) && "No need to invalidate when the CPU doesn't read");
+		ANKI_ASSERT(!!(m_access & BufferMapAccessBit::kRead) && "No need to invalidate when the CPU doesn't read");
 		if(m_needsInvalidate)
 		{
 			VkMappedMemoryRange vkrange = setVkMappedMemoryRange(offset, range);

+ 1 - 1
AnKi/Gr/Vulkan/CommandBufferImpl.cpp

@@ -718,7 +718,7 @@ void CommandBufferImpl::rebindDynamicState()
 	m_scissorDirty = true;
 	m_lastScissor = {};
 	m_vrsRateDirty = true;
-	m_vrsRate = VrsRate::_1x1;
+	m_vrsRate = VrsRate::k1x1;
 
 	// Rebind the stencil compare mask
 	if(m_stencilCompareMasks[0] == m_stencilCompareMasks[1])

+ 1 - 1
AnKi/Gr/Vulkan/CommandBufferImpl.h

@@ -464,7 +464,7 @@ private:
 	Bool m_lineWidthSet = false;
 #endif
 	Bool m_vrsRateDirty = true;
-	VrsRate m_vrsRate = VrsRate::_1x1;
+	VrsRate m_vrsRate = VrsRate::k1x1;
 
 	/// Rebind the above dynamic state. Needed after pushing secondary command buffers (they dirty the state).
 	void rebindDynamicState();

+ 1 - 1
AnKi/Gr/Vulkan/CommandBufferImpl.inl.h

@@ -973,7 +973,7 @@ inline void CommandBufferImpl::setLineWidthInternal(F32 width)
 inline void CommandBufferImpl::setVrsRateInternal(VrsRate rate)
 {
 	ANKI_ASSERT(getGrManagerImpl().getDeviceCapabilities().m_vrs);
-	ANKI_ASSERT(rate < VrsRate::COUNT);
+	ANKI_ASSERT(rate < VrsRate::kCount);
 
 	commandCommon();
 

+ 13 - 13
AnKi/Gr/Vulkan/Common.h

@@ -305,10 +305,10 @@ static_assert(!(BufferUsageBit::kAll & PrivateBufferUsageBit::ALL_PRIVATE), "Upd
 	VkIndexType out;
 	switch(ak)
 	{
-	case IndexType::U16:
+	case IndexType::kU16:
 		out = VK_INDEX_TYPE_UINT16;
 		break;
-	case IndexType::U32:
+	case IndexType::kU32:
 		out = VK_INDEX_TYPE_UINT32;
 		break;
 	default:
@@ -324,10 +324,10 @@ static_assert(!(BufferUsageBit::kAll & PrivateBufferUsageBit::ALL_PRIVATE), "Upd
 	VkRasterizationOrderAMD out;
 	switch(ak)
 	{
-	case RasterizationOrder::ORDERED:
+	case RasterizationOrder::kOrdered:
 		out = VK_RASTERIZATION_ORDER_STRICT_AMD;
 		break;
-	case RasterizationOrder::RELAXED:
+	case RasterizationOrder::kRelaxed:
 		out = VK_RASTERIZATION_ORDER_RELAXED_AMD;
 		break;
 	default:
@@ -343,10 +343,10 @@ static_assert(!(BufferUsageBit::kAll & PrivateBufferUsageBit::ALL_PRIVATE), "Upd
 	VkAccelerationStructureTypeKHR out;
 	switch(ak)
 	{
-	case AccelerationStructureType::BOTTOM_LEVEL:
+	case AccelerationStructureType::kBottomLevel:
 		out = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
 		break;
-	case AccelerationStructureType::TOP_LEVEL:
+	case AccelerationStructureType::kTopLevel:
 		out = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR;
 		break;
 	default:
@@ -364,25 +364,25 @@ static_assert(!(BufferUsageBit::kAll & PrivateBufferUsageBit::ALL_PRIVATE), "Upd
 	VkExtent2D out = {};
 	switch(rate)
 	{
-	case VrsRate::_1x1:
+	case VrsRate::k1x1:
 		out = {1, 1};
 		break;
-	case VrsRate::_2x1:
+	case VrsRate::k2x1:
 		out = {2, 1};
 		break;
-	case VrsRate::_1x2:
+	case VrsRate::k1x2:
 		out = {1, 2};
 		break;
-	case VrsRate::_2x2:
+	case VrsRate::k2x2:
 		out = {2, 2};
 		break;
-	case VrsRate::_4x2:
+	case VrsRate::k4x2:
 		out = {4, 2};
 		break;
-	case VrsRate::_2x4:
+	case VrsRate::k2x4:
 		out = {2, 4};
 		break;
-	case VrsRate::_4x4:
+	case VrsRate::k4x4:
 		out = {4, 4};
 		break;
 	default:

+ 1 - 1
AnKi/Gr/Vulkan/Pipeline.cpp

@@ -286,7 +286,7 @@ const VkGraphicsPipelineCreateInfo& PipelineStateTracker::updatePipelineCreateIn
 	rastCi.lineWidth = 1.0;
 	ci.pRasterizationState = &rastCi;
 
-	if(m_state.m_rasterizer.m_rasterizationOrder != RasterizationOrder::ORDERED)
+	if(m_state.m_rasterizer.m_rasterizationOrder != RasterizationOrder::kOrdered)
 	{
 		VkPipelineRasterizationStateRasterizationOrderAMD& rastOrderCi = m_ci.m_rasterOrder;
 		rastOrderCi = {};

+ 1 - 1
AnKi/Gr/Vulkan/Pipeline.h

@@ -80,7 +80,7 @@ class RasterizerPipelineState
 public:
 	FillMode m_fillMode = FillMode::kSolid;
 	FaceSelectionBit m_cullMode = FaceSelectionBit::kBack;
-	RasterizationOrder m_rasterizationOrder = RasterizationOrder::ORDERED;
+	RasterizationOrder m_rasterizationOrder = RasterizationOrder::kOrdered;
 	U8 m_padding = 0;
 	F32 m_depthBiasConstantFactor = 0.0f;
 	F32 m_depthBiasSlopeFactor = 0.0f;

+ 1 - 1
AnKi/Importer/GltfImporterMesh.cpp

@@ -709,7 +709,7 @@ Error GltfImporter::writeMesh(const cgltf_mesh& mesh, U32 lod, F32 decimateFacto
 		{
 			header.m_flags |= MeshBinaryFlag::CONVEX;
 		}
-		header.m_indexType = IndexType::U16;
+		header.m_indexType = IndexType::kU16;
 		header.m_totalIndexCount = totalIndexCount;
 		header.m_totalVertexCount = totalVertexCount;
 		header.m_subMeshCount = U32(submeshes.getSize());

+ 3 - 3
AnKi/Renderer/AccelerationStructureBuilder.cpp

@@ -42,7 +42,7 @@ void AccelerationStructureBuilder::populateRenderGraph(RenderingContext& ctx)
 
 	// Create the TLAS
 	AccelerationStructureInitInfo initInf("MainTlas");
-	initInf.m_type = AccelerationStructureType::TOP_LEVEL;
+	initInf.m_type = AccelerationStructureType::kTopLevel;
 	initInf.m_topLevel.m_instances = instances;
 	m_runCtx.m_tlas = getGrManager().newAccelerationStructure(initInf);
 
@@ -54,14 +54,14 @@ void AccelerationStructureBuilder::populateRenderGraph(RenderingContext& ctx)
 
 	// Build the job
 	RenderGraphDescription& rgraph = ctx.m_renderGraphDescr;
-	m_runCtx.m_tlasHandle = rgraph.importAccelerationStructure(m_runCtx.m_tlas, AccelerationStructureUsageBit::NONE);
+	m_runCtx.m_tlasHandle = rgraph.importAccelerationStructure(m_runCtx.m_tlas, AccelerationStructureUsageBit::kNone);
 	ComputeRenderPassDescription& rpass = rgraph.newComputeRenderPass("BuildTlas");
 	rpass.setWork([this](RenderPassWorkContext& rgraphCtx) {
 		ANKI_TRACE_SCOPED_EVENT(R_TLAS);
 		rgraphCtx.m_commandBuffer->buildAccelerationStructure(m_runCtx.m_tlas);
 	});
 
-	rpass.newDependency(RenderPassDependency(m_runCtx.m_tlasHandle, AccelerationStructureUsageBit::BUILD));
+	rpass.newDependency(RenderPassDependency(m_runCtx.m_tlasHandle, AccelerationStructureUsageBit::kBuild));
 }
 
 } // end namespace anki

+ 2 - 2
AnKi/Renderer/DepthDownscale.cpp

@@ -112,12 +112,12 @@ Error DepthDownscale::initInternal()
 	{
 		// Create buffer
 		BufferInitInfo buffInit("HiZ Client");
-		buffInit.m_mapAccess = BufferMapAccessBit::READ;
+		buffInit.m_mapAccess = BufferMapAccessBit::kRead;
 		buffInit.m_size = PtrSize(m_lastMipSize.y()) * PtrSize(m_lastMipSize.x()) * sizeof(F32);
 		buffInit.m_usage = BufferUsageBit::kStorageComputeWrite | BufferUsageBit::kStorageFragmentWrite;
 		m_clientBuffer = getGrManager().newBuffer(buffInit);
 
-		m_clientBufferAddr = m_clientBuffer->map(0, buffInit.m_size, BufferMapAccessBit::READ);
+		m_clientBufferAddr = m_clientBuffer->map(0, buffInit.m_size, BufferMapAccessBit::kRead);
 
 		// Fill the buffer with 1.0f
 		for(U32 i = 0; i < m_lastMipSize.x() * m_lastMipSize.y(); ++i)

+ 2 - 2
AnKi/Renderer/GBuffer.cpp

@@ -110,14 +110,14 @@ void GBuffer::runInThread(const RenderingContext& ctx, RenderPassWorkContext& rg
 	const I32 colorStart = max(I32(start) - I32(earlyZCount), 0);
 	const I32 colorEnd = I32(end) - I32(earlyZCount);
 
-	cmdb->setRasterizationOrder(RasterizationOrder::RELAXED);
+	cmdb->setRasterizationOrder(RasterizationOrder::kRelaxed);
 
 	const Bool enableVrs =
 		getGrManager().getDeviceCapabilities().m_vrs && getConfig().getRVrs() && getConfig().getRGBufferVrs();
 	if(enableVrs)
 	{
 		// Just set some low value, the attachment will take over
-		cmdb->setVrsRate(VrsRate::_1x1);
+		cmdb->setVrsRate(VrsRate::k1x1);
 	}
 
 	RenderableDrawerArguments args;

+ 1 - 1
AnKi/Renderer/IndirectDiffuse.cpp

@@ -303,7 +303,7 @@ void IndirectDiffuse::populateRenderGraph(RenderingContext& ctx)
 
 				if(enableVrs)
 				{
-					cmdb->setVrsRate(VrsRate::_1x1);
+					cmdb->setVrsRate(VrsRate::k1x1);
 				}
 
 				cmdb->drawArrays(PrimitiveTopology::kTriangles, 3);

+ 1 - 1
AnKi/Renderer/LensFlare.cpp

@@ -65,7 +65,7 @@ Error LensFlare::initOcclusion()
 
 	m_indirectBuff = gr.newBuffer(BufferInitInfo(m_maxFlares * sizeof(DrawArraysIndirectInfo),
 												 BufferUsageBit::kIndirectDraw | BufferUsageBit::kStorageComputeWrite,
-												 BufferMapAccessBit::NONE, "LensFlares"));
+												 BufferMapAccessBit::kNone, "LensFlares"));
 
 	ANKI_CHECK(getResourceManager().loadResource("ShaderBinaries/LensFlareUpdateIndirectInfo.ankiprogbin",
 												 m_updateIndirectBuffProg));

+ 3 - 3
AnKi/Renderer/LightShading.cpp

@@ -160,7 +160,7 @@ void LightShading::run(const RenderingContext& ctx, RenderPassWorkContext& rgrap
 	if(enableVrs)
 	{
 		// Just set some low value, the attachment will take over
-		cmdb->setVrsRate(VrsRate::_1x1);
+		cmdb->setVrsRate(VrsRate::k1x1);
 	}
 
 	// Do light shading first
@@ -311,7 +311,7 @@ void LightShading::run(const RenderingContext& ctx, RenderPassWorkContext& rgrap
 	// Forward shading last
 	if(enableVrs)
 	{
-		cmdb->setVrsRate(VrsRate::_2x2);
+		cmdb->setVrsRate(VrsRate::k2x2);
 	}
 
 	m_r->getForwardShading().run(ctx, rgraphCtx);
@@ -319,7 +319,7 @@ void LightShading::run(const RenderingContext& ctx, RenderPassWorkContext& rgrap
 	if(enableVrs)
 	{
 		// Restore
-		cmdb->setVrsRate(VrsRate::_1x1);
+		cmdb->setVrsRate(VrsRate::k1x1);
 	}
 }
 

+ 1 - 1
AnKi/Renderer/Renderer.cpp

@@ -166,7 +166,7 @@ Error Renderer::initInternal(UVec2 swapchainResolution)
 		m_dummyTexView3d = getGrManager().newTextureView(viewinit);
 
 		m_dummyBuff = getGrManager().newBuffer(BufferInitInfo(
-			1024, BufferUsageBit::kAllUniform | BufferUsageBit::kAllStorage, BufferMapAccessBit::NONE, "Dummy"));
+			1024, BufferUsageBit::kAllUniform | BufferUsageBit::kAllStorage, BufferMapAccessBit::kNone, "Dummy"));
 	}
 
 	// Init the stages. Careful with the order!!!!!!!!!!

+ 1 - 1
AnKi/Renderer/RtShadows.cpp

@@ -250,7 +250,7 @@ void RtShadows::populateRenderGraph(RenderingContext& ctx)
 			RenderPassDependency(m_runCtx.m_intermediateShadowsRts[0], TextureUsageBit::kImageTraceRaysWrite));
 		rpass.newDependency(
 			RenderPassDependency(m_r->getAccelerationStructureBuilder().getAccelerationStructureHandle(),
-								 AccelerationStructureUsageBit::TRACE_RAYS_READ));
+								 AccelerationStructureUsageBit::kTraceRaysRead));
 		rpass.newDependency(depthDependency);
 		rpass.newDependency(
 			RenderPassDependency(m_r->getMotionVectors().getMotionVectorsRt(), TextureUsageBit::kSampledTraceRays));

+ 2 - 2
AnKi/Resource/MeshBinaryLoader.cpp

@@ -162,7 +162,7 @@ Error MeshBinaryLoader::checkHeader() const
 	}
 
 	// Indices format
-	if(h.m_indexType != IndexType::U16)
+	if(h.m_indexType != IndexType::kU16)
 	{
 		ANKI_RESOURCE_LOGE("Wrong format for indices");
 		return Error::kUserData;
@@ -266,7 +266,7 @@ Error MeshBinaryLoader::storeIndicesAndPosition(DynamicArrayAuto<U32>& indices,
 		ANKI_CHECK(storeIndexBuffer(&staging[0], staging.getSizeInBytes()));
 
 		// Copy from staging
-		ANKI_ASSERT(m_header.m_indexType == IndexType::U16);
+		ANKI_ASSERT(m_header.m_indexType == IndexType::kU16);
 		for(U32 i = 0; i < m_header.m_totalIndexCount; ++i)
 		{
 			indices[i] = *reinterpret_cast<U16*>(&staging[PtrSize(i) * 2]);

+ 1 - 1
AnKi/Resource/MeshBinaryLoader.h

@@ -72,7 +72,7 @@ private:
 	PtrSize getIndexBufferSize() const
 	{
 		ANKI_ASSERT(isLoaded());
-		return PtrSize(m_header.m_totalIndexCount) * ((m_header.m_indexType == IndexType::U16) ? 2 : 4);
+		return PtrSize(m_header.m_totalIndexCount) * ((m_header.m_indexType == IndexType::kU16) ? 2 : 4);
 	}
 
 	PtrSize getAlignedIndexBufferSize() const

+ 8 - 8
AnKi/Resource/MeshResource.cpp

@@ -66,7 +66,7 @@ MeshResource::~MeshResource()
 
 	if(m_indexBufferOffset != kMaxPtrSize)
 	{
-		const PtrSize indexBufferSize = PtrSize(m_indexCount) * ((m_indexType == IndexType::U32) ? 4 : 2);
+		const PtrSize indexBufferSize = PtrSize(m_indexCount) * ((m_indexType == IndexType::kU32) ? 4 : 2);
 		getManager().getVertexGpuMemory().free(indexBufferSize, m_indexBufferOffset);
 	}
 }
@@ -122,7 +122,7 @@ Error MeshResource::load(const ResourceFilename& filename, Bool async)
 	ANKI_ASSERT((m_indexCount % 3) == 0 && "Expecting triangles");
 	m_indexType = header.m_indexType;
 
-	const PtrSize indexBufferSize = PtrSize(m_indexCount) * ((m_indexType == IndexType::U32) ? 4 : 2);
+	const PtrSize indexBufferSize = PtrSize(m_indexCount) * ((m_indexType == IndexType::kU32) ? 4 : 2);
 	ANKI_CHECK(getManager().getVertexGpuMemory().allocate(indexBufferSize, m_indexBufferOffset));
 
 	//
@@ -195,7 +195,7 @@ Error MeshResource::load(const ResourceFilename& filename, Bool async)
 	if(rayTracingEnabled)
 	{
 		AccelerationStructureInitInfo inf(StringAuto(getTempAllocator()).sprintf("%s_%s", "Blas", basename.cstr()));
-		inf.m_type = AccelerationStructureType::BOTTOM_LEVEL;
+		inf.m_type = AccelerationStructureType::kBottomLevel;
 
 		inf.m_bottomLevel.m_indexBuffer = m_vertexBuffer;
 		inf.m_bottomLevel.m_indexBufferOffset = m_indexBufferOffset;
@@ -286,7 +286,7 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 
 	// Write index buffer
 	{
-		const PtrSize indexBufferSize = PtrSize(m_indexCount) * ((m_indexType == IndexType::U32) ? 4 : 2);
+		const PtrSize indexBufferSize = PtrSize(m_indexCount) * ((m_indexType == IndexType::kU32) ? 4 : 2);
 
 		ANKI_CHECK(transferAlloc.allocate(indexBufferSize, handles[1]));
 		void* data = handles[1].getMappedMemory();
@@ -329,15 +329,15 @@ Error MeshResource::loadAsync(MeshBinaryLoader& loader) const
 											   BufferUsageBit::kAccelerationStructureBuild | BufferUsageBit::kVertex
 												   | BufferUsageBit::kIndex,
 											   0, kMaxPtrSize};
-		const AccelerationStructureBarrierInfo asBarrier = {m_blas.get(), AccelerationStructureUsageBit::NONE,
-															AccelerationStructureUsageBit::BUILD};
+		const AccelerationStructureBarrierInfo asBarrier = {m_blas.get(), AccelerationStructureUsageBit::kNone,
+															AccelerationStructureUsageBit::kBuild};
 
 		cmdb->setPipelineBarrier({}, {&buffBarrier, 1}, {&asBarrier, 1});
 
 		cmdb->buildAccelerationStructure(m_blas);
 
-		const AccelerationStructureBarrierInfo asBarrier2 = {m_blas.get(), AccelerationStructureUsageBit::BUILD,
-															 AccelerationStructureUsageBit::ALL_READ};
+		const AccelerationStructureBarrierInfo asBarrier2 = {m_blas.get(), AccelerationStructureUsageBit::kBuild,
+															 AccelerationStructureUsageBit::kAllRead};
 
 		cmdb->setPipelineBarrier({}, {}, {&asBarrier2, 1});
 	}

+ 2 - 2
AnKi/Resource/TransferGpuAllocator.cpp

@@ -15,10 +15,10 @@ Error TransferGpuAllocator::StackAllocatorBuilderInterface::allocateChunk(PtrSiz
 {
 	out = m_parent->m_alloc.newInstance<Chunk>();
 
-	BufferInitInfo bufferInit(size, BufferUsageBit::kTransferSource, BufferMapAccessBit::WRITE, "Transfer");
+	BufferInitInfo bufferInit(size, BufferUsageBit::kTransferSource, BufferMapAccessBit::kWrite, "Transfer");
 	out->m_buffer = m_parent->m_gr->newBuffer(bufferInit);
 
-	out->m_mappedBuffer = out->m_buffer->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE);
+	out->m_mappedBuffer = out->m_buffer->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite);
 
 	return Error::kNone;
 }

+ 6 - 6
AnKi/Scene/Components/GpuParticleEmitterComponent.cpp

@@ -51,12 +51,12 @@ Error GpuParticleEmitterComponent::loadParticleEmitterResource(CString filename)
 	// Create a UBO with the props
 	{
 		BufferInitInfo buffInit("GpuParticlesProps");
-		buffInit.m_mapAccess = BufferMapAccessBit::WRITE;
+		buffInit.m_mapAccess = BufferMapAccessBit::kWrite;
 		buffInit.m_usage = BufferUsageBit::kUniformCompute;
 		buffInit.m_size = sizeof(GpuParticleEmitterProperties);
 		m_propsBuff = m_node->getSceneGraph().getGrManager().newBuffer(buffInit);
 		GpuParticleEmitterProperties* props =
-			static_cast<GpuParticleEmitterProperties*>(m_propsBuff->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE));
+			static_cast<GpuParticleEmitterProperties*>(m_propsBuff->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));
 
 		props->m_minGravity = inProps.m_particle.m_minGravity;
 		props->m_minMass = inProps.m_particle.m_minMass;
@@ -77,13 +77,13 @@ Error GpuParticleEmitterComponent::loadParticleEmitterResource(CString filename)
 	// Create the particle buffer
 	{
 		BufferInitInfo buffInit("GpuParticles");
-		buffInit.m_mapAccess = BufferMapAccessBit::WRITE;
+		buffInit.m_mapAccess = BufferMapAccessBit::kWrite;
 		buffInit.m_usage = BufferUsageBit::kAllStorage;
 		buffInit.m_size = sizeof(GpuParticle) * m_maxParticleCount;
 		m_particlesBuff = m_node->getSceneGraph().getGrManager().newBuffer(buffInit);
 
 		GpuParticle* particle =
-			static_cast<GpuParticle*>(m_particlesBuff->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE));
+			static_cast<GpuParticle*>(m_particlesBuff->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));
 		const GpuParticle* end = particle + m_maxParticleCount;
 		for(; particle < end; ++particle)
 		{
@@ -97,12 +97,12 @@ Error GpuParticleEmitterComponent::loadParticleEmitterResource(CString filename)
 	// Create the rand buffer
 	{
 		BufferInitInfo buffInit("GpuParticlesRand");
-		buffInit.m_mapAccess = BufferMapAccessBit::WRITE;
+		buffInit.m_mapAccess = BufferMapAccessBit::kWrite;
 		buffInit.m_usage = BufferUsageBit::kAllStorage;
 		buffInit.m_size = sizeof(U32) + MAX_RAND_FACTORS * sizeof(F32);
 		m_randFactorsBuff = m_node->getSceneGraph().getGrManager().newBuffer(buffInit);
 
-		F32* randFactors = static_cast<F32*>(m_randFactorsBuff->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE));
+		F32* randFactors = static_cast<F32*>(m_randFactorsBuff->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));
 
 		*reinterpret_cast<U32*>(randFactors) = MAX_RAND_FACTORS;
 		++randFactors;

+ 5 - 5
AnKi/Scene/DebugDrawer.cpp

@@ -73,10 +73,10 @@ Error DebugDrawer2::init(ResourceManager* rsrcManager)
 		BufferInitInfo bufferInit("DebugCube");
 		bufferInit.m_usage = BufferUsageBit::kVertex;
 		bufferInit.m_size = sizeof(Vec3) * 8;
-		bufferInit.m_mapAccess = BufferMapAccessBit::WRITE;
+		bufferInit.m_mapAccess = BufferMapAccessBit::kWrite;
 		m_cubePositionsBuffer = rsrcManager->getGrManager().newBuffer(bufferInit);
 
-		Vec3* verts = static_cast<Vec3*>(m_cubePositionsBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE));
+		Vec3* verts = static_cast<Vec3*>(m_cubePositionsBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));
 
 		const F32 size = 1.0f;
 		verts[0] = Vec3(size, size, size); // front top right
@@ -98,10 +98,10 @@ Error DebugDrawer2::init(ResourceManager* rsrcManager)
 		BufferInitInfo bufferInit("DebugCube");
 		bufferInit.m_usage = BufferUsageBit::kVertex;
 		bufferInit.m_size = sizeof(U16) * INDEX_COUNT;
-		bufferInit.m_mapAccess = BufferMapAccessBit::WRITE;
+		bufferInit.m_mapAccess = BufferMapAccessBit::kWrite;
 		m_cubeIndicesBuffer = rsrcManager->getGrManager().newBuffer(bufferInit);
 
-		U16* indices = static_cast<U16*>(m_cubeIndicesBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE));
+		U16* indices = static_cast<U16*>(m_cubeIndicesBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));
 
 		U32 indexCount = 0;
 		indices[indexCount++] = 0;
@@ -169,7 +169,7 @@ void DebugDrawer2::drawCubes(ConstWeakArray<Mat4> mvps, const Vec4& color, F32 l
 
 	cmdb->setVertexAttribute(0, 0, Format::kR32G32B32_Sfloat, 0);
 	cmdb->bindVertexBuffer(0, m_cubePositionsBuffer, 0, sizeof(Vec3));
-	cmdb->bindIndexBuffer(m_cubeIndicesBuffer, 0, IndexType::U16);
+	cmdb->bindIndexBuffer(m_cubeIndicesBuffer, 0, IndexType::kU16);
 
 	cmdb->bindUniformBuffer(1, 0, unisToken.m_buffer, unisToken.m_offset, unisToken.m_range);
 

+ 1 - 1
AnKi/Scene/ModelNode.cpp

@@ -279,7 +279,7 @@ void ModelNode::draw(RenderQueueDrawContext& ctx, ConstWeakArray<void*> userData
 		}
 
 		// Index buffer
-		cmdb->bindIndexBuffer(modelInf.m_indexBuffer, modelInf.m_indexBufferOffset, IndexType::U16);
+		cmdb->bindIndexBuffer(modelInf.m_indexBuffer, modelInf.m_indexBufferOffset, IndexType::kU16);
 
 		// Draw
 		cmdb->drawElements(PrimitiveTopology::kTriangles, modelInf.m_indexCount, instanceCount, modelInf.m_firstIndex,

+ 1 - 1
AnKi/Ui/Canvas.cpp

@@ -244,7 +244,7 @@ void Canvas::appendToCommandBufferInternal(CommandBufferPtr& cmdb)
 	cmdb->setVertexAttribute(1, 0, Format::kR8G8B8A8_Unorm, sizeof(Vec2) * 2);
 	cmdb->setVertexAttribute(2, 0, Format::kR32G32_Sfloat, sizeof(Vec2));
 
-	cmdb->bindIndexBuffer(indicesToken.m_buffer, indicesToken.m_offset, IndexType::U16);
+	cmdb->bindIndexBuffer(indicesToken.m_buffer, indicesToken.m_offset, IndexType::kU16);
 
 	// Will project scissor/clipping rectangles into framebuffer space
 	const Vec2 clipOff = drawData.DisplayPos; // (0,0) unless using multi-viewports

+ 2 - 2
AnKi/Ui/Font.cpp

@@ -71,8 +71,8 @@ void Font::createTexture(const void* data, U32 width, U32 height)
 	// Create and populate the buffer
 	const U32 buffSize = width * height * 4;
 	BufferPtr buff = m_manager->getGrManager().newBuffer(
-		BufferInitInfo(buffSize, BufferUsageBit::kTransferSource, BufferMapAccessBit::WRITE, "UI"));
-	void* mapped = buff->map(0, buffSize, BufferMapAccessBit::WRITE);
+		BufferInitInfo(buffSize, BufferUsageBit::kTransferSource, BufferMapAccessBit::kWrite, "UI"));
+	void* mapped = buff->map(0, buffSize, BufferMapAccessBit::kWrite);
 	memcpy(mapped, data, buffSize);
 	buff->flush(0, buffSize);
 	buff->unmap();

+ 73 - 72
Tests/Gr/Gr.cpp

@@ -359,14 +359,14 @@ static void createCube(GrManager& gr, BufferPtr& verts, BufferPtr& indices)
 	static const Array<U16, 6 * 2 * 3> idx = {
 		{0, 1, 3, 3, 1, 2, 1, 5, 6, 1, 6, 2, 7, 4, 0, 7, 0, 3, 6, 5, 7, 7, 5, 4, 0, 4, 5, 0, 5, 1, 3, 2, 6, 3, 6, 7}};
 
-	verts = gr.newBuffer(BufferInitInfo(sizeof(pos), BufferUsageBit::kVertex, BufferMapAccessBit::WRITE));
+	verts = gr.newBuffer(BufferInitInfo(sizeof(pos), BufferUsageBit::kVertex, BufferMapAccessBit::kWrite));
 
-	void* mapped = verts->map(0, sizeof(pos), BufferMapAccessBit::WRITE);
+	void* mapped = verts->map(0, sizeof(pos), BufferMapAccessBit::kWrite);
 	memcpy(mapped, &pos[0], sizeof(pos));
 	verts->unmap();
 
-	indices = gr.newBuffer(BufferInitInfo(sizeof(idx), BufferUsageBit::kIndex, BufferMapAccessBit::WRITE));
-	mapped = indices->map(0, sizeof(idx), BufferMapAccessBit::WRITE);
+	indices = gr.newBuffer(BufferInitInfo(sizeof(idx), BufferUsageBit::kIndex, BufferMapAccessBit::kWrite));
+	mapped = indices->map(0, sizeof(idx), BufferMapAccessBit::kWrite);
 	memcpy(mapped, &idx[0], sizeof(idx));
 	indices->unmap();
 }
@@ -687,23 +687,23 @@ ANKI_TEST(Gr, Buffer)
 	BufferInitInfo buffInit("a");
 	buffInit.m_size = 512;
 	buffInit.m_usage = BufferUsageBit::kAllUniform;
-	buffInit.m_mapAccess = BufferMapAccessBit::NONE;
+	buffInit.m_mapAccess = BufferMapAccessBit::kNone;
 	BufferPtr a = gr->newBuffer(buffInit);
 
 	buffInit.setName("b");
 	buffInit.m_size = 64;
 	buffInit.m_usage = BufferUsageBit::kAllStorage;
-	buffInit.m_mapAccess = BufferMapAccessBit::WRITE | BufferMapAccessBit::READ;
+	buffInit.m_mapAccess = BufferMapAccessBit::kWrite | BufferMapAccessBit::kRead;
 	BufferPtr b = gr->newBuffer(buffInit);
 
-	void* ptr = b->map(0, 64, BufferMapAccessBit::WRITE);
+	void* ptr = b->map(0, 64, BufferMapAccessBit::kWrite);
 	ANKI_TEST_EXPECT_NEQ(ptr, nullptr);
 	U8 ptr2[64];
 	memset(ptr, 0xCC, 64);
 	memset(ptr2, 0xCC, 64);
 	b->unmap();
 
-	ptr = b->map(0, 64, BufferMapAccessBit::READ);
+	ptr = b->map(0, 64, BufferMapAccessBit::kRead);
 	ANKI_TEST_EXPECT_NEQ(ptr, nullptr);
 	ANKI_TEST_EXPECT_EQ(memcmp(ptr, ptr2, 64), 0);
 	b->unmap();
@@ -717,9 +717,9 @@ ANKI_TEST(Gr, DrawWithUniforms)
 
 	// A non-uploaded buffer
 	BufferPtr b =
-		gr->newBuffer(BufferInitInfo(sizeof(Vec4) * 3, BufferUsageBit::kAllUniform, BufferMapAccessBit::WRITE));
+		gr->newBuffer(BufferInitInfo(sizeof(Vec4) * 3, BufferUsageBit::kAllUniform, BufferMapAccessBit::kWrite));
 
-	Vec4* ptr = static_cast<Vec4*>(b->map(0, sizeof(Vec4) * 3, BufferMapAccessBit::WRITE));
+	Vec4* ptr = static_cast<Vec4*>(b->map(0, sizeof(Vec4) * 3, BufferMapAccessBit::kWrite));
 	ANKI_TEST_EXPECT_NEQ(ptr, nullptr);
 	ptr[0] = Vec4(1.0, 0.0, 0.0, 0.0);
 	ptr[1] = Vec4(0.0, 1.0, 0.0, 0.0);
@@ -787,9 +787,9 @@ ANKI_TEST(Gr, DrawWithVertex)
 	};
 	static_assert(sizeof(Vert) == sizeof(Vec4), "See file");
 
-	BufferPtr b = gr->newBuffer(BufferInitInfo(sizeof(Vert) * 3, BufferUsageBit::kVertex, BufferMapAccessBit::WRITE));
+	BufferPtr b = gr->newBuffer(BufferInitInfo(sizeof(Vert) * 3, BufferUsageBit::kVertex, BufferMapAccessBit::kWrite));
 
-	Vert* ptr = static_cast<Vert*>(b->map(0, sizeof(Vert) * 3, BufferMapAccessBit::WRITE));
+	Vert* ptr = static_cast<Vert*>(b->map(0, sizeof(Vert) * 3, BufferMapAccessBit::kWrite));
 	ANKI_TEST_EXPECT_NEQ(ptr, nullptr);
 
 	ptr[0].m_pos = Vec3(-1.0, 1.0, 0.0);
@@ -801,9 +801,9 @@ ANKI_TEST(Gr, DrawWithVertex)
 	ptr[2].m_color = {{0, 0, 255}};
 	b->unmap();
 
-	BufferPtr c = gr->newBuffer(BufferInitInfo(sizeof(Vec3) * 3, BufferUsageBit::kVertex, BufferMapAccessBit::WRITE));
+	BufferPtr c = gr->newBuffer(BufferInitInfo(sizeof(Vec3) * 3, BufferUsageBit::kVertex, BufferMapAccessBit::kWrite));
 
-	Vec3* otherColor = static_cast<Vec3*>(c->map(0, sizeof(Vec3) * 3, BufferMapAccessBit::WRITE));
+	Vec3* otherColor = static_cast<Vec3*>(c->map(0, sizeof(Vec3) * 3, BufferMapAccessBit::kWrite));
 
 	otherColor[0] = Vec3(0.0, 1.0, 1.0);
 	otherColor[1] = Vec3(1.0, 0.0, 1.0);
@@ -1106,7 +1106,7 @@ static void drawOffscreenDrawcalls([[maybe_unused]] GrManager& gr, ShaderProgram
 	cmdb->bindVertexBuffer(0, vertBuff, 0, sizeof(Vec3));
 	cmdb->setVertexAttribute(0, 0, Format::kR32G32B32_Sfloat, 0);
 	cmdb->bindShaderProgram(prog);
-	cmdb->bindIndexBuffer(indexBuff, 0, IndexType::U16);
+	cmdb->bindIndexBuffer(indexBuff, 0, IndexType::kU16);
 	cmdb->setViewport(0, 0, viewPortSize, viewPortSize);
 	cmdb->drawElements(PrimitiveTopology::kTriangles, 6 * 2 * 3);
 
@@ -1774,8 +1774,8 @@ void main()
 
 	// Create the buffer to copy to the texture
 	BufferPtr uploadBuff = gr->newBuffer(BufferInitInfo(PtrSize(texInit.m_width) * texInit.m_height * 3,
-														BufferUsageBit::kAllTransfer, BufferMapAccessBit::WRITE));
-	U8* data = static_cast<U8*>(uploadBuff->map(0, uploadBuff->getSize(), BufferMapAccessBit::WRITE));
+														BufferUsageBit::kAllTransfer, BufferMapAccessBit::kWrite));
+	U8* data = static_cast<U8*>(uploadBuff->map(0, uploadBuff->getSize(), BufferMapAccessBit::kWrite));
 	for(U32 i = 0; i < texInit.m_width * texInit.m_height; ++i)
 	{
 		data[0] = U8(i);
@@ -1786,8 +1786,8 @@ void main()
 	uploadBuff->unmap();
 
 	BufferPtr uploadBuff2 = gr->newBuffer(BufferInitInfo(PtrSize(texInit.m_width >> 1) * (texInit.m_height >> 1) * 3,
-														 BufferUsageBit::kAllTransfer, BufferMapAccessBit::WRITE));
-	data = static_cast<U8*>(uploadBuff2->map(0, uploadBuff2->getSize(), BufferMapAccessBit::WRITE));
+														 BufferUsageBit::kAllTransfer, BufferMapAccessBit::kWrite));
+	data = static_cast<U8*>(uploadBuff2->map(0, uploadBuff2->getSize(), BufferMapAccessBit::kWrite));
 	for(U i = 0; i < (texInit.m_width >> 1) * (texInit.m_height >> 1); ++i)
 	{
 		data[0] = U8(i);
@@ -1799,7 +1799,7 @@ void main()
 
 	// Create the result buffer
 	BufferPtr resultBuff =
-		gr->newBuffer(BufferInitInfo(sizeof(UVec4), BufferUsageBit::kStorageComputeWrite, BufferMapAccessBit::READ));
+		gr->newBuffer(BufferInitInfo(sizeof(UVec4), BufferUsageBit::kStorageComputeWrite, BufferMapAccessBit::kRead));
 
 	// Upload data and test them
 	CommandBufferInitInfo cmdbInit;
@@ -1827,7 +1827,7 @@ void main()
 	gr->finish();
 
 	// Get the result
-	UVec4* result = static_cast<UVec4*>(resultBuff->map(0, resultBuff->getSize(), BufferMapAccessBit::READ));
+	UVec4* result = static_cast<UVec4*>(resultBuff->map(0, resultBuff->getSize(), BufferMapAccessBit::kRead));
 	ANKI_TEST_EXPECT_EQ(result->x(), 2);
 	ANKI_TEST_EXPECT_EQ(result->y(), 2);
 	ANKI_TEST_EXPECT_EQ(result->z(), 2);
@@ -1912,7 +1912,7 @@ void main()
 
 	// Create the result buffer
 	BufferPtr resultBuff =
-		gr->newBuffer(BufferInitInfo(sizeof(UVec4), BufferUsageBit::kStorageComputeWrite, BufferMapAccessBit::READ));
+		gr->newBuffer(BufferInitInfo(sizeof(UVec4), BufferUsageBit::kStorageComputeWrite, BufferMapAccessBit::kRead));
 
 	// Draw
 
@@ -1936,7 +1936,7 @@ void main()
 	gr->finish();
 
 	// Get the result
-	UVec4* result = static_cast<UVec4*>(resultBuff->map(0, resultBuff->getSize(), BufferMapAccessBit::READ));
+	UVec4* result = static_cast<UVec4*>(resultBuff->map(0, resultBuff->getSize(), BufferMapAccessBit::kRead));
 	ANKI_TEST_EXPECT_EQ(result->x(), 2);
 	ANKI_TEST_EXPECT_EQ(result->y(), 2);
 	ANKI_TEST_EXPECT_EQ(result->z(), 2);
@@ -2025,7 +2025,7 @@ void main()
 
 	// Create the result buffer
 	BufferPtr resultBuff = gr->newBuffer(BufferInitInfo(
-		sizeof(UVec4), BufferUsageBit::kAllStorage | BufferUsageBit::kTransferDestination, BufferMapAccessBit::READ));
+		sizeof(UVec4), BufferUsageBit::kAllStorage | BufferUsageBit::kTransferDestination, BufferMapAccessBit::kRead));
 
 	// Draw
 	CommandBufferInitInfo cinit;
@@ -2063,7 +2063,7 @@ void main()
 	gr->finish();
 
 	// Get the result
-	UVec4* result = static_cast<UVec4*>(resultBuff->map(0, resultBuff->getSize(), BufferMapAccessBit::READ));
+	UVec4* result = static_cast<UVec4*>(resultBuff->map(0, resultBuff->getSize(), BufferMapAccessBit::kRead));
 	ANKI_TEST_EXPECT_EQ(result->x(), 2);
 	ANKI_TEST_EXPECT_EQ(result->y(), 2);
 	ANKI_TEST_EXPECT_EQ(result->z(), 2);
@@ -2079,15 +2079,15 @@ ANKI_TEST(Gr, BindingWithArray)
 
 	// Create result buffer
 	BufferPtr resBuff =
-		gr->newBuffer(BufferInitInfo(sizeof(Vec4), BufferUsageBit::kAllCompute, BufferMapAccessBit::READ));
+		gr->newBuffer(BufferInitInfo(sizeof(Vec4), BufferUsageBit::kAllCompute, BufferMapAccessBit::kRead));
 
 	Array<BufferPtr, 4> uniformBuffers;
 	F32 count = 1.0f;
 	for(BufferPtr& ptr : uniformBuffers)
 	{
-		ptr = gr->newBuffer(BufferInitInfo(sizeof(Vec4), BufferUsageBit::kAllCompute, BufferMapAccessBit::WRITE));
+		ptr = gr->newBuffer(BufferInitInfo(sizeof(Vec4), BufferUsageBit::kAllCompute, BufferMapAccessBit::kWrite));
 
-		Vec4* mapped = static_cast<Vec4*>(ptr->map(0, sizeof(Vec4), BufferMapAccessBit::WRITE));
+		Vec4* mapped = static_cast<Vec4*>(ptr->map(0, sizeof(Vec4), BufferMapAccessBit::kWrite));
 		*mapped = Vec4(count, count + 1.0f, count + 2.0f, count + 3.0f);
 		count += 4.0f;
 		ptr->unmap();
@@ -2136,7 +2136,7 @@ void main()
 	gr->finish();
 
 	// Check result
-	Vec4* res = static_cast<Vec4*>(resBuff->map(0, sizeof(Vec4), BufferMapAccessBit::READ));
+	Vec4* res = static_cast<Vec4*>(resBuff->map(0, sizeof(Vec4), BufferMapAccessBit::kRead));
 
 	ANKI_TEST_EXPECT_EQ(res->x(), 28.0f);
 	ANKI_TEST_EXPECT_EQ(res->y(), 32.0f);
@@ -2181,7 +2181,7 @@ ANKI_TEST(Gr, Bindless)
 
 	// Create result buffer
 	BufferPtr resBuff =
-		gr->newBuffer(BufferInitInfo(sizeof(UVec4), BufferUsageBit::kAllCompute, BufferMapAccessBit::READ));
+		gr->newBuffer(BufferInitInfo(sizeof(UVec4), BufferUsageBit::kAllCompute, BufferMapAccessBit::kRead));
 
 	// Create program A
 	static const char* PROG_SRC = R"(
@@ -2265,7 +2265,7 @@ void main()
 	gr->finish();
 
 	// Check result
-	UVec4* res = static_cast<UVec4*>(resBuff->map(0, sizeof(UVec4), BufferMapAccessBit::READ));
+	UVec4* res = static_cast<UVec4*>(resBuff->map(0, sizeof(UVec4), BufferMapAccessBit::kRead));
 
 	ANKI_TEST_EXPECT_EQ(res->x(), 13);
 	ANKI_TEST_EXPECT_EQ(res->y(), 25);
@@ -2313,10 +2313,10 @@ void main()
 	BufferInitInfo info;
 	info.m_size = sizeof(Vec4) * 2;
 	info.m_usage = BufferUsageBit::kAllCompute;
-	info.m_mapAccess = BufferMapAccessBit::WRITE;
+	info.m_mapAccess = BufferMapAccessBit::kWrite;
 	BufferPtr ptrBuff = gr->newBuffer(info);
 
-	Vec4* mapped = static_cast<Vec4*>(ptrBuff->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE));
+	Vec4* mapped = static_cast<Vec4*>(ptrBuff->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));
 	const Vec4 VEC(123.456f, -1.1f, 100.0f, -666.0f);
 	*mapped = VEC;
 	++mapped;
@@ -2324,7 +2324,7 @@ void main()
 	ptrBuff->unmap();
 
 	BufferPtr resBuff =
-		gr->newBuffer(BufferInitInfo(sizeof(Vec4), BufferUsageBit::kAllCompute, BufferMapAccessBit::READ));
+		gr->newBuffer(BufferInitInfo(sizeof(Vec4), BufferUsageBit::kAllCompute, BufferMapAccessBit::kRead));
 
 	// Run
 	CommandBufferInitInfo cinit;
@@ -2348,7 +2348,7 @@ void main()
 	gr->finish();
 
 	// Check
-	mapped = static_cast<Vec4*>(resBuff->map(0, kMaxPtrSize, BufferMapAccessBit::READ));
+	mapped = static_cast<Vec4*>(resBuff->map(0, kMaxPtrSize, BufferMapAccessBit::kRead));
 	ANKI_TEST_EXPECT_EQ(*mapped, VEC + VEC * 10.0f);
 	resBuff->unmap();
 
@@ -2371,12 +2371,12 @@ ANKI_TEST(Gr, RayQuery)
 	{
 		Array<U16, 3> indices = {0, 1, 2};
 		BufferInitInfo init;
-		init.m_mapAccess = BufferMapAccessBit::WRITE;
+		init.m_mapAccess = BufferMapAccessBit::kWrite;
 		init.m_usage = BufferUsageBit::kIndex;
 		init.m_size = sizeof(indices);
 		idxBuffer = gr->newBuffer(init);
 
-		void* addr = idxBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE);
+		void* addr = idxBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite);
 		memcpy(addr, &indices[0], sizeof(indices));
 		idxBuffer->unmap();
 	}
@@ -2388,12 +2388,12 @@ ANKI_TEST(Gr, RayQuery)
 		Array<Vec4, 3> verts = {{{-1.0f, 0.0f, 0.0f, 100.0f}, {1.0f, 0.0f, 0.0f, 100.0f}, {0.0f, 2.0f, 0.0f, 100.0f}}};
 
 		BufferInitInfo init;
-		init.m_mapAccess = BufferMapAccessBit::WRITE;
+		init.m_mapAccess = BufferMapAccessBit::kWrite;
 		init.m_usage = BufferUsageBit::kVertex;
 		init.m_size = sizeof(verts);
 		vertBuffer = gr->newBuffer(init);
 
-		void* addr = vertBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE);
+		void* addr = vertBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite);
 		memcpy(addr, &verts[0], sizeof(verts));
 		vertBuffer->unmap();
 	}
@@ -2403,10 +2403,10 @@ ANKI_TEST(Gr, RayQuery)
 	if(useRayTracing)
 	{
 		AccelerationStructureInitInfo init;
-		init.m_type = AccelerationStructureType::BOTTOM_LEVEL;
+		init.m_type = AccelerationStructureType::kBottomLevel;
 		init.m_bottomLevel.m_indexBuffer = idxBuffer;
 		init.m_bottomLevel.m_indexCount = 3;
-		init.m_bottomLevel.m_indexType = IndexType::U16;
+		init.m_bottomLevel.m_indexType = IndexType::kU16;
 		init.m_bottomLevel.m_positionBuffer = vertBuffer;
 		init.m_bottomLevel.m_positionCount = 3;
 		init.m_bottomLevel.m_positionsFormat = Format::kR32G32B32_Sfloat;
@@ -2420,7 +2420,7 @@ ANKI_TEST(Gr, RayQuery)
 	if(useRayTracing)
 	{
 		AccelerationStructureInitInfo init;
-		init.m_type = AccelerationStructureType::TOP_LEVEL;
+		init.m_type = AccelerationStructureType::kTopLevel;
 		Array<AccelerationStructureInstance, 1> instances = {{{blas, Mat3x4::getIdentity()}}};
 		init.m_topLevel.m_instances = instances;
 
@@ -2551,17 +2551,17 @@ void main()
 		cinit.m_flags = CommandBufferFlag::GENERAL_WORK | CommandBufferFlag::SMALL_BATCH;
 		CommandBufferPtr cmdb = gr->newCommandBuffer(cinit);
 
-		cmdb->setAccelerationStructureBarrier(blas, AccelerationStructureUsageBit::NONE,
-											  AccelerationStructureUsageBit::BUILD);
+		cmdb->setAccelerationStructureBarrier(blas, AccelerationStructureUsageBit::kNone,
+											  AccelerationStructureUsageBit::kBuild);
 		cmdb->buildAccelerationStructure(blas);
-		cmdb->setAccelerationStructureBarrier(blas, AccelerationStructureUsageBit::BUILD,
-											  AccelerationStructureUsageBit::ATTACH);
+		cmdb->setAccelerationStructureBarrier(blas, AccelerationStructureUsageBit::kBuild,
+											  AccelerationStructureUsageBit::kAttach);
 
-		cmdb->setAccelerationStructureBarrier(tlas, AccelerationStructureUsageBit::NONE,
-											  AccelerationStructureUsageBit::BUILD);
+		cmdb->setAccelerationStructureBarrier(tlas, AccelerationStructureUsageBit::kNone,
+											  AccelerationStructureUsageBit::kBuild);
 		cmdb->buildAccelerationStructure(tlas);
-		cmdb->setAccelerationStructureBarrier(tlas, AccelerationStructureUsageBit::BUILD,
-											  AccelerationStructureUsageBit::FRAGMENT_READ);
+		cmdb->setAccelerationStructureBarrier(tlas, AccelerationStructureUsageBit::kBuild,
+											  AccelerationStructureUsageBit::kFragmentRead);
 
 		cmdb->flush();
 	}
@@ -2630,11 +2630,11 @@ static void createCubeBuffers(GrManager& gr, Vec3 min, Vec3 max, BufferPtr& inde
 							  Bool turnInsideOut = false)
 {
 	BufferInitInfo inf;
-	inf.m_mapAccess = BufferMapAccessBit::WRITE;
+	inf.m_mapAccess = BufferMapAccessBit::kWrite;
 	inf.m_usage = BufferUsageBit::kIndex | BufferUsageBit::kVertex | BufferUsageBit::kStorageTraceRaysRead;
 	inf.m_size = sizeof(Vec3) * 8;
 	vertBuffer = gr.newBuffer(inf);
-	WeakArray<Vec3, PtrSize> positions = vertBuffer->map<Vec3>(0, 8, BufferMapAccessBit::WRITE);
+	WeakArray<Vec3, PtrSize> positions = vertBuffer->map<Vec3>(0, 8, BufferMapAccessBit::kWrite);
 
 	//   7------6
 	//  /|     /|
@@ -2656,7 +2656,7 @@ static void createCubeBuffers(GrManager& gr, Vec3 min, Vec3 max, BufferPtr& inde
 
 	inf.m_size = sizeof(U16) * 36;
 	indexBuffer = gr.newBuffer(inf);
-	WeakArray<U16, PtrSize> indices = indexBuffer->map<U16>(0, 36, BufferMapAccessBit::WRITE);
+	WeakArray<U16, PtrSize> indices = indexBuffer->map<U16>(0, 36, BufferMapAccessBit::kWrite);
 	U32 t = 0;
 
 	// Top
@@ -2856,9 +2856,9 @@ void main()
 		for(Geom& g : geometries)
 		{
 			AccelerationStructureInitInfo inf;
-			inf.m_type = AccelerationStructureType::BOTTOM_LEVEL;
+			inf.m_type = AccelerationStructureType::kBottomLevel;
 			inf.m_bottomLevel.m_indexBuffer = g.m_indexBuffer;
-			inf.m_bottomLevel.m_indexType = IndexType::U16;
+			inf.m_bottomLevel.m_indexType = IndexType::kU16;
 			inf.m_bottomLevel.m_indexCount = g.m_indexCount;
 			inf.m_bottomLevel.m_positionBuffer = g.m_vertexBuffer;
 			inf.m_bottomLevel.m_positionCount = 8;
@@ -2882,7 +2882,7 @@ void main()
 		}
 
 		AccelerationStructureInitInfo inf;
-		inf.m_type = AccelerationStructureType::TOP_LEVEL;
+		inf.m_type = AccelerationStructureType::kTopLevel;
 		inf.m_topLevel.m_instances = instances;
 
 		tlas = gr->newAccelerationStructure(inf);
@@ -2892,12 +2892,13 @@ void main()
 	BufferPtr modelBuffer;
 	{
 		BufferInitInfo inf;
-		inf.m_mapAccess = BufferMapAccessBit::WRITE;
+		inf.m_mapAccess = BufferMapAccessBit::kWrite;
 		inf.m_usage = BufferUsageBit::kAllStorage;
 		inf.m_size = sizeof(Model) * U32(GeomWhat::kCount);
 
 		modelBuffer = gr->newBuffer(inf);
-		WeakArray<Model, PtrSize> models = modelBuffer->map<Model>(0, U32(GeomWhat::kCount), BufferMapAccessBit::WRITE);
+		WeakArray<Model, PtrSize> models =
+			modelBuffer->map<Model>(0, U32(GeomWhat::kCount), BufferMapAccessBit::kWrite);
 		memset(&models[0], 0, inf.m_size);
 
 		for(GeomWhat i : EnumIterable<GeomWhat>())
@@ -3304,12 +3305,12 @@ void main()
 		const U32 sbtRecordSize = gr->getDeviceCapabilities().m_sbtRecordAlignment;
 
 		BufferInitInfo inf;
-		inf.m_mapAccess = BufferMapAccessBit::WRITE;
+		inf.m_mapAccess = BufferMapAccessBit::kWrite;
 		inf.m_usage = BufferUsageBit::kSBT;
 		inf.m_size = sbtRecordSize * recordCount;
 
 		sbt = gr->newBuffer(inf);
-		WeakArray<U8, PtrSize> mapped = sbt->map<U8>(0, inf.m_size, BufferMapAccessBit::WRITE);
+		WeakArray<U8, PtrSize> mapped = sbt->map<U8>(0, inf.m_size, BufferMapAccessBit::kWrite);
 		memset(&mapped[0], 0, inf.m_size);
 
 		ConstWeakArray<U8> handles = rtProg->getShaderGroupHandles();
@@ -3370,12 +3371,12 @@ void main()
 	constexpr U32 lightCount = 1;
 	{
 		BufferInitInfo inf;
-		inf.m_mapAccess = BufferMapAccessBit::WRITE;
+		inf.m_mapAccess = BufferMapAccessBit::kWrite;
 		inf.m_usage = BufferUsageBit::kAllStorage;
 		inf.m_size = sizeof(Light) * lightCount;
 
 		lightBuffer = gr->newBuffer(inf);
-		WeakArray<Light, PtrSize> lights = lightBuffer->map<Light>(0, lightCount, BufferMapAccessBit::WRITE);
+		WeakArray<Light, PtrSize> lights = lightBuffer->map<Light>(0, lightCount, BufferMapAccessBit::kWrite);
 
 		lights[0].m_min = geometries[GeomWhat::LIGHT].m_aabb.getMin().xyz();
 		lights[0].m_max = geometries[GeomWhat::LIGHT].m_aabb.getMax().xyz();
@@ -3406,8 +3407,8 @@ void main()
 		{
 			for(const Geom& g : geometries)
 			{
-				cmdb->setAccelerationStructureBarrier(g.m_blas, AccelerationStructureUsageBit::NONE,
-													  AccelerationStructureUsageBit::BUILD);
+				cmdb->setAccelerationStructureBarrier(g.m_blas, AccelerationStructureUsageBit::kNone,
+													  AccelerationStructureUsageBit::kBuild);
 			}
 
 			for(const Geom& g : geometries)
@@ -3417,15 +3418,15 @@ void main()
 
 			for(const Geom& g : geometries)
 			{
-				cmdb->setAccelerationStructureBarrier(g.m_blas, AccelerationStructureUsageBit::BUILD,
-													  AccelerationStructureUsageBit::ATTACH);
+				cmdb->setAccelerationStructureBarrier(g.m_blas, AccelerationStructureUsageBit::kBuild,
+													  AccelerationStructureUsageBit::kAttach);
 			}
 
-			cmdb->setAccelerationStructureBarrier(tlas, AccelerationStructureUsageBit::NONE,
-												  AccelerationStructureUsageBit::BUILD);
+			cmdb->setAccelerationStructureBarrier(tlas, AccelerationStructureUsageBit::kNone,
+												  AccelerationStructureUsageBit::kBuild);
 			cmdb->buildAccelerationStructure(tlas);
-			cmdb->setAccelerationStructureBarrier(tlas, AccelerationStructureUsageBit::BUILD,
-												  AccelerationStructureUsageBit::TRACE_RAYS_READ);
+			cmdb->setAccelerationStructureBarrier(tlas, AccelerationStructureUsageBit::kBuild,
+												  AccelerationStructureUsageBit::kTraceRaysRead);
 		}
 
 		TexturePtr presentTex = gr->acquireNextPresentableTexture();
@@ -3563,10 +3564,10 @@ void main()
 	BufferInitInfo info;
 	info.m_size = sizeof(U32) * ARRAY_SIZE;
 	info.m_usage = BufferUsageBit::kAllCompute;
-	info.m_mapAccess = BufferMapAccessBit::WRITE | BufferMapAccessBit::READ;
+	info.m_mapAccess = BufferMapAccessBit::kWrite | BufferMapAccessBit::kRead;
 	BufferPtr atomicsBuffer = gr->newBuffer(info);
 	U32* values =
-		static_cast<U32*>(atomicsBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::READ | BufferMapAccessBit::WRITE));
+		static_cast<U32*>(atomicsBuffer->map(0, kMaxPtrSize, BufferMapAccessBit::kRead | BufferMapAccessBit::kWrite));
 	memset(values, 0, info.m_size);
 
 	// Pre-create some CPU result buffers

+ 4 - 4
Tests/Gr/GrTextureBuffer.cpp

@@ -36,12 +36,12 @@ void main()
 		ShaderProgramPtr prog = gr->newShaderProgram(progInit);
 
 		BufferInitInfo buffInit;
-		buffInit.m_mapAccess = BufferMapAccessBit::WRITE;
+		buffInit.m_mapAccess = BufferMapAccessBit::kWrite;
 		buffInit.m_size = sizeof(U8) * 4;
 		buffInit.m_usage = BufferUsageBit::kAllTexture;
 		BufferPtr texBuff = gr->newBuffer(buffInit);
 
-		I8* data = static_cast<I8*>(texBuff->map(0, kMaxPtrSize, BufferMapAccessBit::WRITE));
+		I8* data = static_cast<I8*>(texBuff->map(0, kMaxPtrSize, BufferMapAccessBit::kWrite));
 		const Vec4 values(-1.0f, -0.25f, 0.1345f, 0.8952f);
 		for(U i = 0; i < 4; ++i)
 		{
@@ -50,7 +50,7 @@ void main()
 
 		texBuff->unmap();
 
-		buffInit.m_mapAccess = BufferMapAccessBit::READ;
+		buffInit.m_mapAccess = BufferMapAccessBit::kRead;
 		buffInit.m_size = sizeof(F32) * 4;
 		buffInit.m_usage = BufferUsageBit::kAllStorage;
 		BufferPtr storageBuff = gr->newBuffer(buffInit);
@@ -66,7 +66,7 @@ void main()
 		cmdb->flush();
 		gr->finish();
 
-		const Vec4* inData = static_cast<const Vec4*>(storageBuff->map(0, kMaxPtrSize, BufferMapAccessBit::READ));
+		const Vec4* inData = static_cast<const Vec4*>(storageBuff->map(0, kMaxPtrSize, BufferMapAccessBit::kRead));
 		for(U i = 0; i < 4; ++i)
 		{
 			ANKI_TEST_EXPECT_NEAR(values[i], (*inData)[i], 0.01f);