
Some code style refactoring #3

Panagiotis Christopoulos Charitos, 3 years ago
parent commit 5decb3a6b8

+ 17 - 17
AnKi/Util/Atomic.h

@@ -15,21 +15,21 @@ namespace anki {
 
 enum class AtomicMemoryOrder
 {
-	RELAXED = std::memory_order_relaxed,
-	CONSUME = std::memory_order_consume,
-	ACQUIRE = std::memory_order_acquire,
-	RELEASE = std::memory_order_release,
-	ACQ_REL = std::memory_order_acq_rel,
-	SEQ_CST = std::memory_order_seq_cst
+	kRelaxed = std::memory_order_relaxed,
+	kConsume = std::memory_order_consume,
+	kAcquire = std::memory_order_acquire,
+	kRelease = std::memory_order_release,
+	kAcqRel = std::memory_order_acq_rel,
+	kSeqCst = std::memory_order_seq_cst
 };
 
 /// Atomic template. At the moment it doesn't work well with pointers.
-template<typename T, AtomicMemoryOrder tmemOrd = AtomicMemoryOrder::RELAXED>
+template<typename T, AtomicMemoryOrder kMemOrder = AtomicMemoryOrder::kRelaxed>
 class Atomic
 {
 public:
 	using Value = T;
-	static constexpr AtomicMemoryOrder MEMORY_ORDER = tmemOrd;
+	static constexpr AtomicMemoryOrder kDefaultMemoryOrder = kMemOrder;
 
 	/// The default constructor will not initialize the value to zero.
 	Atomic()
@@ -58,41 +58,41 @@ public:
 	}
 
 	/// Get the value of the atomic.
-	Value load(AtomicMemoryOrder memOrd = MEMORY_ORDER) const
+	Value load(AtomicMemoryOrder memOrd = kDefaultMemoryOrder) const
 	{
 		return m_att.load(static_cast<std::memory_order>(memOrd));
 	}
 
 	/// Store
-	void store(Value a, AtomicMemoryOrder memOrd = MEMORY_ORDER)
+	void store(Value a, AtomicMemoryOrder memOrd = kDefaultMemoryOrder)
 	{
 		m_att.store(a, static_cast<std::memory_order>(memOrd));
 	}
 
 	/// Fetch and add.
 	template<typename Y>
-	Value fetchAdd(Y a, AtomicMemoryOrder memOrd = MEMORY_ORDER)
+	Value fetchAdd(Y a, AtomicMemoryOrder memOrd = kDefaultMemoryOrder)
 	{
 		return m_att.fetch_add(a, static_cast<std::memory_order>(memOrd));
 	}
 
 	/// Fetch and subtract.
 	template<typename Y>
-	Value fetchSub(Y a, AtomicMemoryOrder memOrd = MEMORY_ORDER)
+	Value fetchSub(Y a, AtomicMemoryOrder memOrd = kDefaultMemoryOrder)
 	{
 		return m_att.fetch_sub(a, static_cast<std::memory_order>(memOrd));
 	}
 
 	/// Fetch and do bitwise or.
 	template<typename Y>
-	Value fetchOr(Y a, AtomicMemoryOrder memOrd = MEMORY_ORDER)
+	Value fetchOr(Y a, AtomicMemoryOrder memOrd = kDefaultMemoryOrder)
 	{
 		return m_att.fetch_or(a, static_cast<std::memory_order>(memOrd));
 	}
 
 	/// Fetch and do bitwise and.
 	template<typename Y>
-	Value fetchAnd(Y a, AtomicMemoryOrder memOrd = MEMORY_ORDER)
+	Value fetchAnd(Y a, AtomicMemoryOrder memOrd = kDefaultMemoryOrder)
 	{
 		return m_att.fetch_and(a, static_cast<std::memory_order>(memOrd));
 	}
@@ -106,15 +106,15 @@ public:
 	/// 	return false;
 	/// }
 	/// @endcode
-	Bool compareExchange(Value& expected, Value desired, AtomicMemoryOrder successMemOrd = MEMORY_ORDER,
-						 AtomicMemoryOrder failMemOrd = MEMORY_ORDER)
+	Bool compareExchange(Value& expected, Value desired, AtomicMemoryOrder successMemOrd = kDefaultMemoryOrder,
+						 AtomicMemoryOrder failMemOrd = kDefaultMemoryOrder)
 	{
 		return m_att.compare_exchange_weak(expected, desired, static_cast<std::memory_order>(successMemOrd),
 										   static_cast<std::memory_order>(failMemOrd));
 	}
 
 	/// Set @a a to the atomic and return the previous value.
-	Value exchange(Value a, AtomicMemoryOrder memOrd = MEMORY_ORDER)
+	Value exchange(Value a, AtomicMemoryOrder memOrd = kDefaultMemoryOrder)
 	{
 		return m_att.exchange(a, static_cast<std::memory_order>(memOrd));
 	}
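
A minimal usage sketch of the renamed API (assuming the usual anki U32 alias; the counter below is hypothetical):

Atomic<U32> counter = {0};

void bumpCounter()
{
	counter.fetchAdd(1); // relaxed, per kDefaultMemoryOrder
	counter.store(2, AtomicMemoryOrder::kRelease); // per-call override

	U32 expected = 2;
	// compareExchange wraps compare_exchange_weak, so it may fail
	// spuriously: retry in a loop.
	while(!counter.compareExchange(expected, expected + 1))
	{
	}
}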

+ 23 - 23
AnKi/Util/BitSet.h

@@ -24,10 +24,10 @@ private:
 	using ChunkType = TChunkType;
 
 	/// Number of bits a chunk holds.
-	static constexpr U32 CHUNK_BIT_COUNT = sizeof(ChunkType) * 8;
+	static constexpr U32 kChunkBitCount = sizeof(ChunkType) * 8;
 
 	/// Number of chunks.
-	static constexpr U32 CHUNK_COUNT = (N + (CHUNK_BIT_COUNT - 1)) / CHUNK_BIT_COUNT;
+	static constexpr U32 kChunkCount = (N + (kChunkBitCount - 1)) / kChunkBitCount;
 
 public:
 	/// Constructor. It will set all the bits or unset them.
@@ -61,7 +61,7 @@ public:
 	BitSet operator|(const BitSet& b) const
 	{
 		BitSet out;
-		for(U32 i = 0; i < CHUNK_COUNT; ++i)
+		for(U32 i = 0; i < kChunkCount; ++i)
 		{
 			out.m_chunks[i] = m_chunks[i] | b.m_chunks[i];
 		}
@@ -71,7 +71,7 @@ public:
 	/// Bitwise or between this and @a b sets.
 	BitSet& operator|=(const BitSet& b)
 	{
-		for(U32 i = 0; i < CHUNK_COUNT; ++i)
+		for(U32 i = 0; i < kChunkCount; ++i)
 		{
 			m_chunks[i] = m_chunks[i] | b.m_chunks[i];
 		}
@@ -82,7 +82,7 @@ public:
 	BitSet operator&(const BitSet& b) const
 	{
 		BitSet out;
-		for(U32 i = 0; i < CHUNK_COUNT; ++i)
+		for(U32 i = 0; i < kChunkCount; ++i)
 		{
 			out.m_chunks[i] = m_chunks[i] & b.m_chunks[i];
 		}
@@ -92,7 +92,7 @@ public:
 	/// Bitwise and between this and @a b sets.
 	BitSet& operator&=(const BitSet& b)
 	{
-		for(U32 i = 0; i < CHUNK_COUNT; ++i)
+		for(U32 i = 0; i < kChunkCount; ++i)
 		{
 			m_chunks[i] = m_chunks[i] & b.m_chunks[i];
 		}
@@ -103,7 +103,7 @@ public:
 	BitSet operator^(const BitSet& b) const
 	{
 		BitSet out;
-		for(U i = 0; i < CHUNK_COUNT; ++i)
+		for(U i = 0; i < kChunkCount; ++i)
 		{
 			out.m_chunks[i] = m_chunks[i] ^ b.m_chunks[i];
 		}
@@ -113,7 +113,7 @@ public:
 	/// Bitwise xor between this and @a b sets.
 	BitSet& operator^=(const BitSet& b)
 	{
-		for(U32 i = 0; i < CHUNK_COUNT; ++i)
+		for(U32 i = 0; i < kChunkCount; ++i)
 		{
 			m_chunks[i] = m_chunks[i] ^ b.m_chunks[i];
 		}
@@ -124,7 +124,7 @@ public:
 	BitSet operator~() const
 	{
 		BitSet out;
-		for(U32 i = 0; i < CHUNK_COUNT; ++i)
+		for(U32 i = 0; i < kChunkCount; ++i)
 		{
 			out.m_chunks[i] = TChunkType(~m_chunks[i]);
 		}
@@ -135,7 +135,7 @@ public:
 	Bool operator==(const BitSet& b) const
 	{
 		Bool same = m_chunks[0] == b.m_chunks[0];
-		for(U32 i = 1; i < CHUNK_COUNT; ++i)
+		for(U32 i = 1; i < kChunkCount; ++i)
 		{
 			same = same && (m_chunks[i] == b.m_chunks[i]);
 		}
@@ -240,7 +240,7 @@ public:
 	U32 getEnabledBitCount() const
 	{
 		U32 count = 0;
-		for(U i = 0; i < CHUNK_COUNT; ++i)
+		for(U i = 0; i < kChunkCount; ++i)
 		{
 			count += __builtin_popcountl(m_chunks[i]);
 		}
@@ -250,27 +250,27 @@ public:
 	/// Get the most significant bit that is enabled, or kMaxU32 if all bits are zero.
 	U32 getMostSignificantBit() const
 	{
-		U32 i = CHUNK_COUNT;
+		U32 i = kChunkCount;
 		while(i--)
 		{
 			const U64 bits = m_chunks[i];
 			if(bits != 0)
 			{
 				const U32 msb = U32(__builtin_clzll(bits));
-				return (63 - msb) + (i * CHUNK_BIT_COUNT);
+				return (63 - msb) + (i * kChunkBitCount);
 			}
 		}
 
 		return kMaxU32;
 	}
 
-	Array<TChunkType, CHUNK_COUNT> getData() const
+	Array<TChunkType, kChunkCount> getData() const
 	{
 		return m_chunks;
 	}
 
 private:
-	Array<ChunkType, CHUNK_COUNT> m_chunks;
+	Array<ChunkType, kChunkCount> m_chunks;
 
 	BitSet()
 	{
@@ -279,20 +279,20 @@ private:
 	static void position(U32 bit, U32& high, U32& low)
 	{
 		ANKI_ASSERT(bit < N);
-		high = bit / CHUNK_BIT_COUNT;
-		low = bit % CHUNK_BIT_COUNT;
-		ANKI_ASSERT(high < CHUNK_COUNT);
-		ANKI_ASSERT(low < CHUNK_BIT_COUNT);
+		high = bit / kChunkBitCount;
+		low = bit % kChunkBitCount;
+		ANKI_ASSERT(high < kChunkCount);
+		ANKI_ASSERT(low < kChunkBitCount);
 	}
 
 	/// Zero the unused bits.
 	void zeroUnusedBits()
 	{
-		const ChunkType UNUSED_BITS = CHUNK_COUNT * CHUNK_BIT_COUNT - N;
-		const ChunkType USED_BITMASK = std::numeric_limits<ChunkType>::max() >> UNUSED_BITS;
-		if(USED_BITMASK > 0)
+		constexpr ChunkType kUnusedBits = kChunkCount * kChunkBitCount - N;
+		constexpr ChunkType kUsedBitmask = std::numeric_limits<ChunkType>::max() >> kUnusedBits;
+		if(kUsedBitmask > 0)
 		{
-			m_chunks[CHUNK_COUNT - 1] &= USED_BITMASK;
+			m_chunks[kChunkCount - 1] &= kUsedBitmask;
 		}
 	}
 };
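
The chunk arithmetic in one concrete case (a sketch; the U64 chunk type is spelled out because the default template argument is not visible in this diff):

// BitSet<100, U64>: kChunkBitCount = 64, kChunkCount = (100 + 63) / 64 = 2.
// The last chunk has 2 * 64 - 100 = 28 unused bits, which zeroUnusedBits()
// masks off so popcount and comparisons stay correct.
const BitSet<100, U64> all(true);
const U32 enabled = all.getEnabledBitCount(); // 100, not 128
const U32 msb = all.getMostSignificantBit(); // 99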

+ 3 - 3
AnKi/Util/BuddyAllocatorBuilder.h

@@ -23,14 +23,14 @@ public:
 };
 
 /// This is a generic implementation of a buddy allocator.
-/// @tparam T_MAX_MEMORY_RANGE_LOG2 The max memory to allocate.
+/// @tparam kMaxMemoryRangeLog2 The log2 of the max memory to allocate.
 /// @tparam TLock This is an optional lock. Can be a Mutex or SpinLock or some dummy class.
-template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
+template<U32 kMaxMemoryRangeLog2, typename TLock>
 class BuddyAllocatorBuilder
 {
 public:
 	/// The type of the address.
-	using Address = std::conditional_t<(T_MAX_MEMORY_RANGE_LOG2 > 32), PtrSize, U32>;
+	using Address = std::conditional_t<(kMaxMemoryRangeLog2 > 32), PtrSize, U32>;
 
 	BuddyAllocatorBuilder()
 	{
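
The Address alias in action (a sketch; Mutex stands in for TLock, and both instances would still need init() before use):

// 2^28 = 256MiB max range: addresses fit in 32 bits.
BuddyAllocatorBuilder<28, Mutex> small; // Address == U32
// 2^36 = 64GiB max range: 32 bits are no longer enough.
BuddyAllocatorBuilder<36, Mutex> large; // Address == PtrSize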

+ 18 - 19
AnKi/Util/BuddyAllocatorBuilder.inl.h

@@ -7,11 +7,11 @@
 
 namespace anki {
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::init(GenericMemoryPoolAllocator<U8> alloc,
-																 U32 maxMemoryRangeLog2)
+template<U32 kMaxMemoryRangeLog2, typename TLock>
+void BuddyAllocatorBuilder<kMaxMemoryRangeLog2, TLock>::init(GenericMemoryPoolAllocator<U8> alloc,
+															 U32 maxMemoryRangeLog2)
 {
-	ANKI_ASSERT(maxMemoryRangeLog2 >= 1 && maxMemoryRangeLog2 <= T_MAX_MEMORY_RANGE_LOG2);
+	ANKI_ASSERT(maxMemoryRangeLog2 >= 1 && maxMemoryRangeLog2 <= kMaxMemoryRangeLog2);
 	ANKI_ASSERT(m_freeLists.getSize() == 0 && m_userAllocatedSize == 0 && m_realAllocatedSize == 0);
 
 	const U32 orderCount = maxMemoryRangeLog2 + 1;
@@ -21,8 +21,8 @@ void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::init(GenericMemoryPo
 	m_freeLists.create(m_alloc, orderCount);
 }
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::destroy()
+template<U32 kMaxMemoryRangeLog2, typename TLock>
+void BuddyAllocatorBuilder<kMaxMemoryRangeLog2, TLock>::destroy()
 {
 	ANKI_ASSERT(m_userAllocatedSize == 0 && "Forgot to free all memory");
 	m_freeLists.destroy(m_alloc);
@@ -31,9 +31,8 @@ void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::destroy()
 	m_realAllocatedSize = 0;
 }
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
-Bool BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::allocate(PtrSize size, PtrSize alignment,
-																	 Address& outAddress)
+template<U32 kMaxMemoryRangeLog2, typename TLock>
+Bool BuddyAllocatorBuilder<kMaxMemoryRangeLog2, TLock>::allocate(PtrSize size, PtrSize alignment, Address& outAddress)
 {
 	ANKI_ASSERT(size > 0 && size <= m_maxMemoryRange);
 
@@ -109,8 +108,8 @@ Bool BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::allocate(PtrSize siz
 	return true;
 }
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::free(Address address, PtrSize size, PtrSize alignment)
+template<U32 kMaxMemoryRangeLog2, typename TLock>
+void BuddyAllocatorBuilder<kMaxMemoryRangeLog2, TLock>::free(Address address, PtrSize size, PtrSize alignment)
 {
 	PtrSize alignedSize = nextPowerOfTwo(size);
 	U32 order = log2(alignedSize);
@@ -147,8 +146,8 @@ void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::free(Address address
 	}
 }
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::freeInternal(PtrSize address, PtrSize size)
+template<U32 kMaxMemoryRangeLog2, typename TLock>
+void BuddyAllocatorBuilder<kMaxMemoryRangeLog2, TLock>::freeInternal(PtrSize address, PtrSize size)
 {
 	ANKI_ASSERT(size);
 	ANKI_ASSERT(isPowerOfTwo(size));
@@ -196,13 +195,13 @@ void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::freeInternal(PtrSize
 	}
 }
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::debugPrint() const
+template<U32 kMaxMemoryRangeLog2, typename TLock>
+void BuddyAllocatorBuilder<kMaxMemoryRangeLog2, TLock>::debugPrint() const
 {
-	constexpr PtrSize MAX_MEMORY_RANGE = pow2<PtrSize>(T_MAX_MEMORY_RANGE_LOG2);
+	constexpr PtrSize kMaxMemoryRange = pow2<PtrSize>(kMaxMemoryRangeLog2);
 
 	// Allocate because we can't possibly have that on the stack
-	BitSet<MAX_MEMORY_RANGE>* freeBytes = m_alloc.newInstance<BitSet<MAX_MEMORY_RANGE>>(false);
+	BitSet<kMaxMemoryRange>* freeBytes = m_alloc.newInstance<BitSet<kMaxMemoryRange>>(false);
 
 	LockGuard<TLock> lock(m_mutex);
 
@@ -230,8 +229,8 @@ void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::debugPrint() const
 	m_alloc.deleteInstance(freeBytes);
 }
 
-template<U32 T_MAX_MEMORY_RANGE_LOG2, typename TLock>
-void BuddyAllocatorBuilder<T_MAX_MEMORY_RANGE_LOG2, TLock>::getStats(BuddyAllocatorBuilderStats& stats) const
+template<U32 kMaxMemoryRangeLog2, typename TLock>
+void BuddyAllocatorBuilder<kMaxMemoryRangeLog2, TLock>::getStats(BuddyAllocatorBuilderStats& stats) const
 {
 	LockGuard<TLock> lock(m_mutex);
 

+ 2 - 2
AnKi/Util/DynamicArray.h

@@ -29,8 +29,8 @@ public:
 	using ConstReference = const Value&;
 	using Size = TSize;
 
-	static constexpr F32 GROW_SCALE = 2.0f;
-	static constexpr F32 SHRINK_SCALE = 2.0f;
+	static constexpr F32 kGrowScale = 2.0f;
+	static constexpr F32 kShrinkScale = 2.0f;
 
 	DynamicArray()
 		: m_data(nullptr)

+ 2 - 2
AnKi/Util/DynamicArray.inl.h

@@ -29,7 +29,7 @@ void DynamicArray<T, TSize>::resizeStorage(TAllocator alloc, Size newSize)
 	{
 		// Need to grow
 
-		m_capacity = (newSize > Size(F64(m_capacity) * GROW_SCALE)) ? newSize : Size(F64(m_capacity) * GROW_SCALE);
+		m_capacity = (newSize > Size(F64(m_capacity) * kGrowScale)) ? newSize : Size(F64(m_capacity) * kGrowScale);
 		Value* newStorage =
 			static_cast<Value*>(alloc.getMemoryPool().allocate(m_capacity * sizeof(Value), alignof(Value)));
 
@@ -61,7 +61,7 @@ void DynamicArray<T, TSize>::resizeStorage(TAllocator alloc, Size newSize)
 
 		m_size = newSize;
 
-		if(newSize < Size(F64(m_capacity) / SHRINK_SCALE) || newSize == 0)
+		if(newSize < Size(F64(m_capacity) / kShrinkScale) || newSize == 0)
 		{
 			// Need to shrink
 

+ 3 - 3
AnKi/Util/FilesystemWindows.cpp

@@ -10,7 +10,7 @@
 
 namespace anki {
 
-static constexpr U MAX_PATH_LEN = MAX_PATH - 1;
+static constexpr U kMaxPathLen = MAX_PATH - 1;
 
 Bool fileExists(const CString& filename)
 {
@@ -97,7 +97,7 @@ static Error walkDirectoryTreeRecursive(const CString& dir, const Function<Error
 										U baseDirLen)
 {
 	// Append something to the path
-	if(dir.getLength() > MAX_PATH_LEN - 2)
+	if(dir.getLength() > kMaxPathLen - 2)
 	{
 		ANKI_UTIL_LOGE("Path too long");
 		return Error::kFunctionFailed;
@@ -141,7 +141,7 @@ static Error walkDirectoryTreeRecursive(const CString& dir, const Function<Error
 
 			// Compute new path
 			const PtrSize oldLen = strlen(&dir2[0]);
-			if(oldLen + filename.getLength() > MAX_PATH_LEN)
+			if(oldLen + filename.getLength() > kMaxPathLen)
 			{
 				ANKI_UTIL_LOGE("Path too long");
 				return Error::kFunctionFailed;

+ 3 - 3
AnKi/Util/Forward.h

@@ -17,7 +17,7 @@ class BitSet;
 template<typename T>
 class BitMask;
 
-template<typename, typename, typename>
+template<typename, typename, typename, typename>
 class HashMap;
 
 template<typename T>
@@ -29,7 +29,7 @@ class List;
 template<typename T>
 class ListAuto;
 
-template<typename T, typename TIndex>
+template<typename T, typename TConfig>
 class SparseArray;
 
 class CString;
@@ -38,7 +38,7 @@ class StringAuto;
 
 class ThreadHive;
 
-template<typename T, PtrSize T_PREALLOCATED_STORAGE = ANKI_SAFE_ALIGNMENT>
+template<typename T, PtrSize kPreallocatedStorage = ANKI_SAFE_ALIGNMENT>
 class Function;
 
 template<typename T, typename TSize = U32>

+ 30 - 31
AnKi/Util/Function.h

@@ -21,9 +21,9 @@ namespace anki {
 /// func.init(allocator, [&someInt](U32 u, F32 f) {someInt = xxx + u + f; return Error::kNone;});
 /// func.call(10, 1.2f);
 /// @endcode
-/// @tparam T_INLINE_STORAGE_SIZE Optional inline storage to avoid deallocations (small object optimization)
-template<typename TReturn, typename... TArgs, PtrSize T_INLINE_STORAGE_SIZE>
-class Function<TReturn(TArgs...), T_INLINE_STORAGE_SIZE>
+/// @tparam kTInlineStorageSize Optional inline storage to avoid deallocations (small object optimization)
+template<typename TReturn, typename... TArgs, PtrSize kTInlineStorageSize>
+class Function<TReturn(TArgs...), kTInlineStorageSize>
 {
 public:
 	Function() = default;
@@ -47,7 +47,7 @@ public:
 	// Does nothing important.
 	~Function()
 	{
-		ANKI_ASSERT(getState() == STATE_UNINITIALIZED && "Forgot to call destroy()");
+		ANKI_ASSERT(getState() == kStateUninitialized && "Forgot to call destroy()");
 	}
 
 	// Non-copyable.
@@ -56,9 +56,9 @@ public:
 	/// Move.
 	Function& operator=(Function&& b)
 	{
-		ANKI_ASSERT(getState() == STATE_UNINITIALIZED);
+		ANKI_ASSERT(getState() == kStateUninitialized);
 		m_state = b.m_state;
-		b.m_state = STATE_UNINITIALIZED;
+		b.m_state = kStateUninitialized;
 		memcpy(&m_callableInlineStorage[0], &b.m_callableInlineStorage[0], sizeof(m_callableInlineStorage));
 		return *this;
 	}
@@ -69,19 +69,19 @@ public:
 	template<typename TAlloc, typename T>
 	void init(TAlloc alloc, const T& func)
 	{
-		ANKI_ASSERT(getState() == STATE_UNINITIALIZED);
+		ANKI_ASSERT(getState() == kStateUninitialized);
 
 		// Init storage
-		constexpr Bool useInlineStorage = sizeof(T) <= INLINE_STORAGE_SIZE && std::is_trivially_copyable<T>::value
+		constexpr Bool useInlineStorage = sizeof(T) <= kInlineStorageSize && std::is_trivially_copyable<T>::value
 										  && std::is_trivially_destructible<T>::value;
 		if(useInlineStorage)
 		{
-			setState(STATE_INLINE_STORAGE);
+			setState(kStateInlineStorage);
 			memcpy(&m_callableInlineStorage[0], &func, sizeof(func));
 
 			setFunctionCallback([](const Function& self, TArgs... args) -> TReturn {
 				// Yes I know, a const_cast hack follows. If the T was in some pointer then all would be fine. Look
-				// at the setFunctionCallback() of the STATE_ALLOCATED. Only a static_cast there. It's unfair.
+				// at the setFunctionCallback() of the kStateAllocated. Only a static_cast there. It's unfair.
 				const T* t0 = reinterpret_cast<const T*>(&self.m_callableInlineStorage[0]);
 				T* t1 = const_cast<T*>(t0);
 				return (*t1)(args...);
@@ -89,7 +89,7 @@ public:
 		}
 		else
 		{
-			setState(STATE_ALLOCATED);
+			setState(kStateAllocated);
 			using CallableT = Callable<T>;
 			CallableT* callable = alloc.template newInstance<CallableT>(func);
 			m_callablePtr = callable;
@@ -115,14 +115,14 @@ public:
 	template<typename TAlloc>
 	void destroy(TAlloc alloc)
 	{
-		if(getState() == STATE_ALLOCATED)
+		if(getState() == kStateAllocated)
 		{
 			ANKI_ASSERT(m_callablePtr && m_callablePtr->m_destroyCallback);
 			m_callablePtr->m_destroyCallback(*m_callablePtr);
 			alloc.getMemoryPool().free(m_callablePtr);
 		}
 
-		m_state = STATE_UNINITIALIZED;
+		m_state = kStateUninitialized;
 	}
 
 	/// Call the Function with some arguments.
@@ -141,13 +141,13 @@ public:
 	template<typename TAlloc>
 	Function& copy(const Function& other, TAlloc alloc)
 	{
-		ANKI_ASSERT(getState() == STATE_UNINITIALIZED && "Need to destroy it first");
+		ANKI_ASSERT(getState() == kStateUninitialized && "Need to destroy it first");
 
-		if(other.getState() == STATE_UNINITIALIZED)
+		if(other.getState() == kStateUninitialized)
 		{
 			// Nothing to do
 		}
-		else if(other.getState() == STATE_INLINE_STORAGE)
+		else if(other.getState() == kStateInlineStorage)
 		{
 			// It should be trivially copyable, can use memcpy then
 			m_state = other.m_state;
@@ -155,7 +155,7 @@ public:
 		}
 		else
 		{
-			ANKI_ASSERT(other.getState() == STATE_ALLOCATED);
+			ANKI_ASSERT(other.getState() == kStateAllocated);
 			m_state = other.m_state;
 
 			// Allocate callable
@@ -212,10 +212,10 @@ private:
 		Callable& operator=(const Callable&) = delete; // You won't need it
 	};
 
-	static constexpr PtrSize STATE_UNINITIALIZED = PtrSize(0b1001) << PtrSize(60);
-	static constexpr PtrSize STATE_ALLOCATED = PtrSize(0b1101) << PtrSize(60);
-	static constexpr PtrSize STATE_INLINE_STORAGE = PtrSize(0b1011) << PtrSize(60);
-	static constexpr PtrSize STATE_ALL_BITS = PtrSize(0b1111) << PtrSize(60);
+	static constexpr PtrSize kStateUninitialized = PtrSize(0b1001) << PtrSize(60);
+	static constexpr PtrSize kStateAllocated = PtrSize(0b1101) << PtrSize(60);
+	static constexpr PtrSize kStateInlineStorage = PtrSize(0b1011) << PtrSize(60);
+	static constexpr PtrSize kStateAllBits = PtrSize(0b1111) << PtrSize(60);
 	static_assert(sizeof(void*) == 8, "Wrong assumption");
 
 	static constexpr PtrSize lmax(PtrSize a, PtrSize b)
@@ -223,44 +223,43 @@ private:
 		return (a > b) ? a : b;
 	}
 
-	static constexpr PtrSize INLINE_STORAGE_SIZE =
-		lmax(T_INLINE_STORAGE_SIZE, lmax(ANKI_SAFE_ALIGNMENT, sizeof(void*)));
+	static constexpr PtrSize kInlineStorageSize = lmax(kTInlineStorageSize, lmax(ANKI_SAFE_ALIGNMENT, sizeof(void*)));
 
 	union
 	{
 		CallableBase* m_callablePtr;
-		alignas(ANKI_SAFE_ALIGNMENT) Array<U8, INLINE_STORAGE_SIZE> m_callableInlineStorage;
+		alignas(ANKI_SAFE_ALIGNMENT) Array<U8, kInlineStorageSize> m_callableInlineStorage;
 	};
 
 	union
 	{
 		// Hide the state in the high bits of the m_functionCallback pointer.
-		PtrSize m_state = STATE_UNINITIALIZED;
+		PtrSize m_state = kStateUninitialized;
 
 		FunctionCallback m_functionCallback;
 	};
 
 	PtrSize getState() const
 	{
-		const PtrSize s = m_state & STATE_ALL_BITS;
-		ANKI_ASSERT(s == STATE_UNINITIALIZED || s == STATE_ALLOCATED || s == STATE_INLINE_STORAGE);
+		const PtrSize s = m_state & kStateAllBits;
+		ANKI_ASSERT(s == kStateUninitialized || s == kStateAllocated || s == kStateInlineStorage);
 		return s;
 	}
 
 	void setState(PtrSize s)
 	{
-		ANKI_ASSERT(s == STATE_UNINITIALIZED || s == STATE_ALLOCATED || s == STATE_INLINE_STORAGE);
-		m_state = (m_state & ~STATE_ALL_BITS) | s;
+		ANKI_ASSERT(s == kStateUninitialized || s == kStateAllocated || s == kStateInlineStorage);
+		m_state = (m_state & ~kStateAllBits) | s;
 	}
 
 	FunctionCallback getFunctionCallback() const
 	{
-		return numberToPtr<FunctionCallback>(m_state & ~STATE_ALL_BITS);
+		return numberToPtr<FunctionCallback>(m_state & ~kStateAllBits);
 	}
 
 	void setFunctionCallback(FunctionCallback f)
 	{
-		m_state = (m_state & STATE_ALL_BITS) | ptrToNumber(f);
+		m_state = (m_state & kStateAllBits) | ptrToNumber(f);
 		ANKI_ASSERT(f == getFunctionCallback());
 	}
 };
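
A standalone illustration of the high-bit tagging that the renamed kState* constants implement (a sketch; AnKi's numberToPtr/ptrToNumber are approximated with reinterpret_cast):

#include <cassert>
#include <cstdint>

using Callback = int (*)(int);

constexpr uintptr_t kTagAllBits = uintptr_t(0b1111) << 60;
constexpr uintptr_t kTagA = uintptr_t(0b1001) << 60;

int twice(int x)
{
	return x * 2;
}

int main()
{
	// On today's 64-bit ABIs code addresses leave the top bits clear, so a
	// 4-bit tag can ride along in the same word (hence the static_assert on
	// sizeof(void*) above).
	uintptr_t word = reinterpret_cast<uintptr_t>(&twice) | kTagA;

	assert((word & kTagAllBits) == kTagA); // recover the state
	Callback cb = reinterpret_cast<Callback>(word & ~kTagAllBits); // recover the pointer
	return cb(21) - 42; // 0
}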

+ 2 - 2
AnKi/Util/Functions.h

@@ -59,7 +59,7 @@ struct DummyType
 template<bool B>
 struct RequiresBool
 {
-	static constexpr bool VALUE = B;
+	static constexpr bool kValue = B;
 };
 
 template<typename T, int N>
@@ -76,7 +76,7 @@ struct PrivateEnum
 	};
 };
 
-#	define ANKI_REQUIRES_BOOL(line, ...) RequiresUnwrap<decltype(RequiresBool<(__VA_ARGS__)>{}), line>::VALUE
+#	define ANKI_REQUIRES_BOOL(line, ...) RequiresUnwrap<decltype(RequiresBool<(__VA_ARGS__)>{}), line>::kValue
 
 #	define ANKI_ENABLE_INTERNAL(line, ...) \
 		typename PrivateEnum<line>::Type ANKI_CONCATENATE( \

+ 11 - 11
AnKi/Util/Hash.cpp

@@ -8,8 +8,8 @@
 
 namespace anki {
 
-constexpr U64 HASH_M = 0xc6a4a7935bd1e995;
-constexpr U64 HASH_R = 47;
+constexpr U64 kHashM = 0xc6a4a7935bd1e995;
+constexpr U64 kHashR = 47;
 
 U64 appendHash(const void* buffer, PtrSize bufferSize, U64 h)
 {
@@ -20,12 +20,12 @@ U64 appendHash(const void* buffer, PtrSize bufferSize, U64 h)
 	{
 		U64 k = *data++;
 
-		k *= HASH_M;
-		k ^= k >> HASH_R;
-		k *= HASH_M;
+		k *= kHashM;
+		k ^= k >> kHashR;
+		k *= kHashM;
 
 		h ^= k;
-		h *= HASH_M;
+		h *= kHashM;
 	}
 
 	const U8* data2 = reinterpret_cast<const U8*>(data);
@@ -46,12 +46,12 @@ U64 appendHash(const void* buffer, PtrSize bufferSize, U64 h)
 		h ^= U64(data2[1]) << 8;
 	case 1:
 		h ^= U64(data2[0]);
-		h *= HASH_M;
+		h *= kHashM;
 	};
 
-	h ^= h >> HASH_R;
-	h *= HASH_M;
-	h ^= h >> HASH_R;
+	h ^= h >> kHashR;
+	h *= kHashM;
+	h ^= h >> kHashR;
 
 	ANKI_ASSERT(h != 0);
 	return h;
@@ -59,7 +59,7 @@ U64 appendHash(const void* buffer, PtrSize bufferSize, U64 h)
 
 U64 computeHash(const void* buffer, PtrSize bufferSize, U64 seed)
 {
-	const U64 h = seed ^ (bufferSize * HASH_M);
+	const U64 h = seed ^ (bufferSize * kHashM);
 	return appendHash(buffer, bufferSize, h);
 }
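
kHashM and kHashR are the multiplier and shift of the 64-bit MurmurHash2, which these functions implement incrementally. A usage sketch with the signatures shown above:

const char data[] = "hello";
U64 h = computeHash(data, sizeof(data) - 1, 123); // one-shot, seeded
h = appendHash(data, sizeof(data) - 1, h); // chain more data onto h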
 

+ 33 - 24
AnKi/Util/HashMap.h

@@ -63,34 +63,45 @@ public:
 	}
 };
 
+/// SparseArray configuration. See SparseArray docs for details.
+/// @memberof HashMap
+class HashMapSparseArrayConfig
+{
+public:
+	using Index = U64;
+
+	static constexpr Index getInitialStorageSize()
+	{
+		return 64;
+	}
+
+	static constexpr U32 getLinearProbingCount()
+	{
+		return 8;
+	}
+
+	static constexpr F32 getMaxLoadFactor()
+	{
+		return 0.8f;
+	}
+};
+
 /// Hash map template.
-template<typename TKey, typename TValue, typename THasher = DefaultHasher<TKey>>
+template<typename TKey, typename TValue, typename THasher = DefaultHasher<TKey>,
+		 typename TSparseArrayConfig = HashMapSparseArrayConfig>
 class HashMap
 {
 public:
 	// Typedefs
-	using SparseArrayType = SparseArray<TValue, U64>;
+	using SparseArrayType = SparseArray<TValue, TSparseArrayConfig>;
 	using Value = TValue;
 	using Key = TKey;
 	using Hasher = THasher;
 	using Iterator = typename SparseArrayType::Iterator;
 	using ConstIterator = typename SparseArrayType::ConstIterator;
 
-	// Consts
-	/// @see SparseArray::INITIAL_STORAGE_SIZE
-	static constexpr U32 INITIAL_STORAGE_SIZE = SparseArrayType::INITIAL_STORAGE_SIZE;
-	/// @see SparseArray::LINEAR_PROBING_COUNT
-	static constexpr U32 LINEAR_PROBING_COUNT = SparseArrayType::LINEAR_PROBING_COUNT;
-	/// @see SparseArray::MAX_LOAD_FACTOR
-	static constexpr F32 MAX_LOAD_FACTOR = SparseArrayType::MAX_LOAD_FACTOR;
-
 	/// Default constructor.
-	/// @copy doc SparseArray::SparseArray
-	HashMap(U32 initialStorageSize = INITIAL_STORAGE_SIZE, U32 probeCount = LINEAR_PROBING_COUNT,
-			F32 maxLoadFactor = MAX_LOAD_FACTOR)
-		: m_sparseArr(initialStorageSize, probeCount, maxLoadFactor)
-	{
-	}
+	HashMap() = default;
 
 	/// Move.
 	HashMap(HashMap&& b)
@@ -211,18 +222,16 @@ protected:
 };
 
 /// Hash map template with automatic cleanup.
-template<typename TKey, typename TValue, typename THasher = DefaultHasher<TKey>>
-class HashMapAuto : public HashMap<TKey, TValue, THasher>
+template<typename TKey, typename TValue, typename THasher = DefaultHasher<TKey>,
+		 typename TSparseArrayConfig = HashMapSparseArrayConfig>
+class HashMapAuto : public HashMap<TKey, TValue, THasher, TSparseArrayConfig>
 {
 public:
-	using Base = HashMap<TKey, TValue, THasher>;
+	using Base = HashMap<TKey, TValue, THasher, TSparseArrayConfig>;
 
 	/// Default constructor.
-	/// @copy doc SparseArray::SparseArray
-	HashMapAuto(const GenericMemoryPoolAllocator<U8>& alloc, U32 initialStorageSize = Base::INITIAL_STORAGE_SIZE,
-				U32 probeCount = Base::LINEAR_PROBING_COUNT, F32 maxLoadFactor = Base::MAX_LOAD_FACTOR)
-		: Base(initialStorageSize, probeCount, maxLoadFactor)
-		, m_alloc(alloc)
+	HashMapAuto(const GenericMemoryPoolAllocator<U8>& alloc)
+		: m_alloc(alloc)
 	{
 	}
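
With the tuning constructor gone, sizing now travels through the config type. A minimal sketch (MyValue is a placeholder):

class BigConfig
{
public:
	using Index = U64;

	static constexpr Index getInitialStorageSize()
	{
		return 256; // larger initial table
	}

	static constexpr U32 getLinearProbingCount()
	{
		return 8;
	}

	static constexpr F32 getMaxLoadFactor()
	{
		return 0.8f;
	}
};

HashMap<U64, MyValue, DefaultHasher<U64>, BigConfig> map;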
 

+ 3 - 4
AnKi/Util/Logger.cpp

@@ -173,7 +173,6 @@ void Logger::defaultSystemMessageHandler(void*, const LoggerMessageInfo& info)
 	}
 
 	static_assert(Thread::kThreadNameMaxLength == 15, "See file");
-	constexpr const Char* fmt = "%s[%s][%s][%-15s]%s%s %s (%s:%d %s)%s\n";
 	if(!runningFromATerminal())
 	{
 		terminalColor = "";
@@ -181,9 +180,9 @@ void Logger::defaultSystemMessageHandler(void*, const LoggerMessageInfo& info)
 		endTerminalColor = "";
 	}
 
-	fprintf(out, fmt, terminalColorBg, kMessageTypeTxt[U(info.m_type)], info.m_subsystem ? info.m_subsystem : "N/A ",
-			info.m_threadName, endTerminalColor, terminalColor, info.m_msg, info.m_file, info.m_line, info.m_func,
-			endTerminalColor);
+	fprintf(out, "%s[%s][%s][%-15s]%s%s %s (%s:%d %s)%s\n", terminalColorBg, kMessageTypeTxt[U(info.m_type)],
+			info.m_subsystem ? info.m_subsystem : "N/A ", info.m_threadName, endTerminalColor, terminalColor,
+			info.m_msg, info.m_file, info.m_line, info.m_func, endTerminalColor);
 #elif ANKI_OS_WINDOWS
 	WORD attribs = 0;
 	FILE* out = nullptr;

+ 9 - 9
AnKi/Util/Memory.cpp

@@ -34,8 +34,8 @@ public:
 	PoolSignature m_signature;
 };
 
-constexpr U32 MAX_ALIGNMENT = 64;
-constexpr U32 ALLOCATION_HEADER_SIZE = getAlignedRoundUp(MAX_ALIGNMENT, sizeof(AllocationHeader));
+constexpr U32 kMaxAlignment = 64;
+constexpr U32 kAllocationHeaderSize = getAlignedRoundUp(kMaxAlignment, sizeof(AllocationHeader));
 #endif
 
 #define ANKI_CREATION_OOM_ACTION() ANKI_UTIL_LOGF("Out of memory")
@@ -182,8 +182,8 @@ void* HeapMemoryPool::allocate(PtrSize size, PtrSize alignment)
 {
 	ANKI_ASSERT(size > 0);
 #if ANKI_MEM_EXTRA_CHECKS
-	ANKI_ASSERT(alignment <= MAX_ALIGNMENT && "Wrong assumption");
-	size += ALLOCATION_HEADER_SIZE;
+	ANKI_ASSERT(alignment <= kMaxAlignment && "Wrong assumption");
+	size += kAllocationHeaderSize;
 #endif
 
 	void* mem = m_allocCb(m_allocCbUserData, nullptr, size, alignment);
@@ -193,12 +193,12 @@ void* HeapMemoryPool::allocate(PtrSize size, PtrSize alignment)
 		m_allocationCount.fetchAdd(1);
 
 #if ANKI_MEM_EXTRA_CHECKS
-		memset(mem, 0, ALLOCATION_HEADER_SIZE);
+		memset(mem, 0, kAllocationHeaderSize);
 		AllocationHeader& header = *static_cast<AllocationHeader*>(mem);
 		header.m_signature = m_signature;
 		header.m_allocationSize = size;
 
-		mem = static_cast<void*>(static_cast<U8*>(mem) + ALLOCATION_HEADER_SIZE);
+		mem = static_cast<void*>(static_cast<U8*>(mem) + kAllocationHeaderSize);
 #endif
 	}
 	else
@@ -217,7 +217,7 @@ void HeapMemoryPool::free(void* ptr)
 	}
 
 #if ANKI_MEM_EXTRA_CHECKS
-	U8* memU8 = static_cast<U8*>(ptr) - ALLOCATION_HEADER_SIZE;
+	U8* memU8 = static_cast<U8*>(ptr) - kAllocationHeaderSize;
 	AllocationHeader& header = *reinterpret_cast<AllocationHeader*>(memU8);
 	if(header.m_signature != m_signature)
 	{
@@ -237,7 +237,7 @@ Error StackMemoryPool::StackAllocatorBuilderInterface::allocateChunk(PtrSize siz
 
 	const PtrSize fullChunkSize = offsetof(Chunk, m_memoryStart) + size;
 
-	void* mem = m_parent->m_allocCb(m_parent->m_allocCbUserData, nullptr, fullChunkSize, MAX_ALIGNMENT);
+	void* mem = m_parent->m_allocCb(m_parent->m_allocCbUserData, nullptr, fullChunkSize, kMaxAlignment);
 
 	if(ANKI_LIKELY(mem))
 	{
@@ -272,7 +272,7 @@ StackMemoryPool::StackMemoryPool(AllocAlignedCallback allocCb, void* allocCbUser
 {
 	ANKI_ASSERT(initialChunkSize > 0);
 	ANKI_ASSERT(nextChunkScale >= 1.0);
-	ANKI_ASSERT(alignmentBytes > 0 && alignmentBytes <= MAX_ALIGNMENT);
+	ANKI_ASSERT(alignmentBytes > 0 && alignmentBytes <= kMaxAlignment);
 
 	m_builder.getInterface().m_parent = this;
 	m_builder.getInterface().m_alignmentBytes = alignmentBytes;
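
A standalone restatement of the debug-header scheme above (a sketch; the real pool allocates through an aligned callback, malloc merely stands in):

#include <cstdint>
#include <cstdlib>
#include <cstring>

struct Header
{
	size_t m_allocationSize;
	uint32_t m_signature;
};

constexpr size_t kAlign = 64; // kMaxAlignment
constexpr size_t kHeaderSize = (sizeof(Header) + kAlign - 1) / kAlign * kAlign;

void* debugAlloc(size_t size)
{
	// Over-allocate, stamp the header, hand back the offset pointer. The
	// header is rounded up to kAlign so the user pointer keeps the
	// strictest supported alignment.
	uint8_t* mem = static_cast<uint8_t*>(std::malloc(size + kHeaderSize));
	const Header h = {size + kHeaderSize, 0xF00DF00Du};
	std::memcpy(mem, &h, sizeof(h));
	return mem + kHeaderSize;
}

bool debugFree(void* ptr)
{
	// Walk back to the header and validate the signature before freeing.
	uint8_t* mem = static_cast<uint8_t*>(ptr) - kHeaderSize;
	Header h;
	std::memcpy(&h, mem, sizeof(h));
	const bool ok = h.m_signature == 0xF00DF00Du;
	std::free(mem);
	return ok;
}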

+ 63 - 25
AnKi/Util/SparseArray.h

@@ -158,10 +158,38 @@ private:
 	}
 };
 
+/// Contains the default configuration for SparseArray.
+/// @memberof SparseArray
+class SparseArrayDefaultConfig
+{
+public:
+	/// Indicates the max size of the sparse indices it can accept. Can be U32 or U64.
+	using Index = U32;
+
+	/// The initial storage size of the array.
+	static constexpr Index getInitialStorageSize()
+	{
+		return 64;
+	}
+
+	/// The number of linear probes.
+	static constexpr U32 getLinearProbingCount()
+	{
+		return 8;
+	}
+
+	/// Load factor. If storage is loaded more than getMaxLoadFactor() then increase it.
+	static constexpr F32 getMaxLoadFactor()
+	{
+		return 0.8f;
+	}
+};
+
 /// Sparse array.
 /// @tparam T The type of the value it will hold.
-/// @tparam TIndex Indicates the max size of the sparse indices it can accept. Can be U32 or U64.
-template<typename T, typename TIndex = U32>
+/// @tparam TConfig A class that has configuration required by the SparseArray. See SparseArrayDefaultConfig for
+/// details.
+template<typename T, typename TConfig = SparseArrayDefaultConfig>
 class SparseArray
 {
 	template<typename, typename, typename>
@@ -169,29 +197,17 @@ class SparseArray
 
 public:
 	// Typedefs
+	using Config = TConfig;
 	using Value = T;
 	using Iterator = SparseArrayIterator<T*, T&, SparseArray*>;
 	using ConstIterator = SparseArrayIterator<const T*, const T&, const SparseArray*>;
-	using Index = TIndex;
+	using Index = typename Config::Index;
 
-	// Consts
-	static constexpr Index INITIAL_STORAGE_SIZE = 64; ///< The initial storage size of the array.
-	static constexpr U32 LINEAR_PROBING_COUNT = 8; ///< The number of linear probes.
-	static constexpr F32 MAX_LOAD_FACTOR = 0.8f; ///< Load factor.
+	SparseArray() = default;
 
-	/// Constructor.
-	/// @param initialStorageSize The initial size of the array.
-	/// @param probeCount         The number of probe queries. It's the linear probe count the sparse array is using.
-	/// @param maxLoadFactor      If storage is loaded more than maxLoadFactor then increase it.
-	SparseArray(Index initialStorageSize = INITIAL_STORAGE_SIZE, U32 probeCount = LINEAR_PROBING_COUNT,
-				F32 maxLoadFactor = MAX_LOAD_FACTOR)
-		: m_initialStorageSize(initialStorageSize)
-		, m_probeCount(probeCount)
-		, m_maxLoadFactor(maxLoadFactor)
+	SparseArray(const Config& config)
+		: m_config(config)
 	{
-		ANKI_ASSERT(initialStorageSize > 0 && isPowerOfTwo(initialStorageSize));
-		ANKI_ASSERT(probeCount > 0 && probeCount < initialStorageSize);
-		ANKI_ASSERT(maxLoadFactor > 0.5f && maxLoadFactor < 1.0f);
 	}
 
 	/// Non-copyable.
@@ -221,9 +237,7 @@ public:
 		m_metadata = b.m_metadata;
 		m_elementCount = b.m_elementCount;
 		m_capacity = b.m_capacity;
-		m_initialStorageSize = b.m_initialStorageSize;
-		m_probeCount = b.m_probeCount;
-		m_maxLoadFactor = b.m_maxLoadFactor;
+		m_config = std::move(b.m_config);
 #if ANKI_EXTRA_CHECKS
 		++m_iteratorVer;
 #endif
@@ -354,6 +368,11 @@ public:
 	template<typename TAlloc>
 	void clone(TAlloc& alloc, SparseArray& b) const;
 
+	const Config& getConfig() const
+	{
+		return m_config;
+	}
+
 protected:
 	/// Element metadata.
 	class Metadata
@@ -367,10 +386,8 @@ protected:
 	Metadata* m_metadata = nullptr;
 	Index m_elementCount = 0;
 	Index m_capacity = 0;
+	Config m_config;
 
-	Index m_initialStorageSize = 0;
-	U32 m_probeCount = 0;
-	F32 m_maxLoadFactor = 0.0f;
 #if ANKI_EXTRA_CHECKS
 	/// Iterators version. Used to check if iterators point to the newest storage. Needs to be changed whenever we need
 	/// to invalidate iterators.
@@ -482,6 +499,27 @@ protected:
 		++m_iteratorVer;
 #endif
 	}
+
+	F32 getMaxLoadFactor() const
+	{
+		const F32 f = m_config.getMaxLoadFactor();
+		ANKI_ASSERT(f > 0.0f && f < 1.0f);
+		return f;
+	}
+
+	U32 getLinearProbingCount() const
+	{
+		const U32 o = m_config.getLinearProbingCount();
+		ANKI_ASSERT(o > 0);
+		return o;
+	}
+
+	Index getInitialStorageSize() const
+	{
+		const Index o = m_config.getInitialStorageSize();
+		ANKI_ASSERT(o > 0);
+		return o;
+	}
 };
 /// @}
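
Configs may also carry state, since the getters are invoked on m_config. A sketch of a runtime-tunable variant (mirrored by the test code further down):

class RuntimeConfig
{
public:
	using Index = U32;

	Index m_initialStorageSize = 64;

	Index getInitialStorageSize() const
	{
		return m_initialStorageSize;
	}

	U32 getLinearProbingCount() const
	{
		return 8;
	}

	F32 getMaxLoadFactor() const
	{
		return 0.8f;
	}
};

SparseArray<int, RuntimeConfig> arr(RuntimeConfig{128});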
 

+ 24 - 26
AnKi/Util/SparseArray.inl.h

@@ -7,9 +7,9 @@
 
 namespace anki {
 
-template<typename T, typename TIndex>
+template<typename T, typename TConfig>
 template<typename TAlloc>
-void SparseArray<T, TIndex>::destroy(TAlloc& alloc)
+void SparseArray<T, TConfig>::destroy(TAlloc& alloc)
 {
 	if(m_elements)
 	{
@@ -30,11 +30,11 @@ void SparseArray<T, TIndex>::destroy(TAlloc& alloc)
 	resetMembers();
 }
 
-template<typename T, typename TIndex>
+template<typename T, typename TConfig>
 template<typename TAlloc, typename... TArgs>
-void SparseArray<T, TIndex>::emplaceInternal(TAlloc& alloc, Index idx, TArgs&&... args)
+void SparseArray<T, TConfig>::emplaceInternal(TAlloc& alloc, Index idx, TArgs&&... args)
 {
-	if(m_capacity == 0 || calcLoadFactor() > m_maxLoadFactor)
+	if(m_capacity == 0 || calcLoadFactor() > getMaxLoadFactor())
 	{
 		grow(alloc);
 	}
@@ -45,9 +45,9 @@ void SparseArray<T, TIndex>::emplaceInternal(TAlloc& alloc, Index idx, TArgs&&..
 	invalidateIterators();
 }
 
-template<typename T, typename TIndex>
+template<typename T, typename TConfig>
 template<typename TAlloc, typename... TArgs>
-typename SparseArray<T, TIndex>::Iterator SparseArray<T, TIndex>::emplace(TAlloc& alloc, Index idx, TArgs&&... args)
+typename SparseArray<T, TConfig>::Iterator SparseArray<T, TConfig>::emplace(TAlloc& alloc, Index idx, TArgs&&... args)
 {
 	emplaceInternal(alloc, idx, std::forward<TArgs>(args)...);
 
@@ -59,14 +59,14 @@ typename SparseArray<T, TIndex>::Iterator SparseArray<T, TIndex>::emplace(TAlloc
 	);
 }
 
-template<typename T, typename TIndex>
+template<typename T, typename TConfig>
 template<typename TAlloc>
-TIndex SparseArray<T, TIndex>::insert(TAlloc& alloc, Index idx, Value& val)
+typename TConfig::Index SparseArray<T, TConfig>::insert(TAlloc& alloc, Index idx, Value& val)
 {
 	while(true)
 	{
 		const Index desiredPos = mod(idx);
-		const Index endPos = mod(desiredPos + m_probeCount);
+		const Index endPos = mod(desiredPos + getLinearProbingCount());
 		Index pos = desiredPos;
 
 		while(pos != endPos)
@@ -118,14 +118,14 @@ TIndex SparseArray<T, TIndex>::insert(TAlloc& alloc, Index idx, Value& val)
 	return 0;
 }
 
-template<typename T, typename TIndex>
+template<typename T, typename TConfig>
 template<typename TAlloc>
-void SparseArray<T, TIndex>::grow(TAlloc& alloc)
+void SparseArray<T, TConfig>::grow(TAlloc& alloc)
 {
 	if(m_capacity == 0)
 	{
 		ANKI_ASSERT(m_elementCount == 0);
-		m_capacity = m_initialStorageSize;
+		m_capacity = getInitialStorageSize();
 		m_elements = static_cast<Value*>(alloc.getMemoryPool().allocate(m_capacity * sizeof(Value), alignof(Value)));
 
 		m_metadata =
@@ -190,9 +190,9 @@ void SparseArray<T, TIndex>::grow(TAlloc& alloc)
 	alloc.getMemoryPool().free(oldMetadata);
 }
 
-template<typename T, typename TIndex>
+template<typename T, typename TConfig>
 template<typename TAlloc>
-void SparseArray<T, TIndex>::erase(TAlloc& alloc, Iterator it)
+void SparseArray<T, TConfig>::erase(TAlloc& alloc, Iterator it)
 {
 	ANKI_ASSERT(it.m_array == this);
 	ANKI_ASSERT(it.m_elementIdx != getMaxNumericLimit<Index>());
@@ -248,8 +248,8 @@ void SparseArray<T, TIndex>::erase(TAlloc& alloc, Iterator it)
 	invalidateIterators();
 }
 
-template<typename T, typename TIndex>
-void SparseArray<T, TIndex>::validate() const
+template<typename T, typename TConfig>
+void SparseArray<T, TConfig>::validate() const
 {
 	if(m_capacity == 0)
 	{
@@ -284,7 +284,7 @@ void SparseArray<T, TIndex>::validate() const
 		if(m_metadata[pos].m_alive)
 		{
 			[[maybe_unused]] const Index myDesiredPos = mod(m_metadata[pos].m_idx);
-			ANKI_ASSERT(distanceFromDesired(pos, myDesiredPos) < m_probeCount);
+			ANKI_ASSERT(distanceFromDesired(pos, myDesiredPos) < getLinearProbingCount());
 
 			if(prevPos != ~Index(0))
 			{
@@ -306,8 +306,8 @@ void SparseArray<T, TIndex>::validate() const
 	ANKI_ASSERT(m_elementCount == elementCount);
 }
 
-template<typename T, typename TIndex>
-TIndex SparseArray<T, TIndex>::findInternal(Index idx) const
+template<typename T, typename TConfig>
+typename TConfig::Index SparseArray<T, TConfig>::findInternal(Index idx) const
 {
 	if(ANKI_UNLIKELY(m_elementCount == 0))
 	{
@@ -315,7 +315,7 @@ TIndex SparseArray<T, TIndex>::findInternal(Index idx) const
 	}
 
 	const Index desiredPos = mod(idx);
-	const Index endPos = mod(desiredPos + m_probeCount);
+	const Index endPos = mod(desiredPos + getLinearProbingCount());
 	Index pos = desiredPos;
 	while(pos != endPos)
 	{
@@ -330,9 +330,9 @@ TIndex SparseArray<T, TIndex>::findInternal(Index idx) const
 	return getMaxNumericLimit<Index>();
 }
 
-template<typename T, typename TIndex>
+template<typename T, typename TConfig>
 template<typename TAlloc>
-void SparseArray<T, TIndex>::clone(TAlloc& alloc, SparseArray& b) const
+void SparseArray<T, TConfig>::clone(TAlloc& alloc, SparseArray& b) const
 {
 	ANKI_ASSERT(b.m_elements == nullptr && b.m_metadata == nullptr);
 	if(m_capacity == 0)
@@ -357,9 +357,7 @@ void SparseArray<T, TIndex>::clone(TAlloc& alloc, SparseArray& b) const
 	// Set the rest
 	b.m_elementCount = m_elementCount;
 	b.m_capacity = m_capacity;
-	b.m_initialStorageSize = m_initialStorageSize;
-	b.m_probeCount = m_probeCount;
-	b.m_maxLoadFactor = m_maxLoadFactor;
+	b.m_config = m_config;
 	b.invalidateIterators();
 }
 

+ 1 - 1
AnKi/Util/StackAllocatorBuilder.h

@@ -81,7 +81,7 @@ public:
 
 private:
 	/// The current chunk. Choose the stricter memory order to avoid compiler re-ordering of instructions.
-	Atomic<TChunk*, AtomicMemoryOrder::SEQ_CST> m_crntChunk = {nullptr};
+	Atomic<TChunk*, AtomicMemoryOrder::kSeqCst> m_crntChunk = {nullptr};
 
 	/// The beginning of the chunk list.
 	TChunk* m_chunksListHead = nullptr;

+ 2 - 2
AnKi/Util/Thread.h

@@ -393,13 +393,13 @@ public:
 	/// Unlock.
 	void unlock()
 	{
-		m_lock.store(false, AtomicMemoryOrder::RELEASE);
+		m_lock.store(false, AtomicMemoryOrder::kRelease);
 	}
 
 	/// Try to lock.
 	Bool tryLock()
 	{
-		return !m_lock.load(AtomicMemoryOrder::RELAXED) && !m_lock.exchange(true, AtomicMemoryOrder::ACQUIRE);
+		return !m_lock.load(AtomicMemoryOrder::kRelaxed) && !m_lock.exchange(true, AtomicMemoryOrder::kAcquire);
 	}
 
 private:
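
tryLock() above is the classic test-and-test-and-set: a relaxed load filters out obviously-held locks without bouncing the cache line, and only then an acquire exchange claims it. The full pattern as a standalone sketch (plain std::atomic, not AnKi code):

#include <atomic>

class TtasSpinLock
{
public:
	void lock()
	{
		for(;;)
		{
			// Attempt the lock; acquire pairs with the release in unlock().
			if(!m_lock.exchange(true, std::memory_order_acquire))
			{
				return;
			}
			// Spin on cheap relaxed loads until the lock looks free.
			while(m_lock.load(std::memory_order_relaxed))
			{
			}
		}
	}

	void unlock()
	{
		m_lock.store(false, std::memory_order_release);
	}

private:
	std::atomic<bool> m_lock{false};
};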

+ 24 - 2
Tests/Util/HashMap.cpp

@@ -142,8 +142,30 @@ ANKI_TEST(Util, HashMap)
 
 	// Bench it
 	{
-		using AkMap = HashMap<int, int, Hasher>;
-		AkMap akMap(128, 32, 0.9f);
+		class Config
+		{
+		public:
+			using Index = U64;
+
+			static Index getInitialStorageSize()
+			{
+				return 128;
+			}
+
+			static U32 getLinearProbingCount()
+			{
+				return 32;
+			}
+
+			static F32 getMaxLoadFactor()
+			{
+				return 0.9f;
+			}
+		};
+
+		using AkMap = HashMap<int, int, Hasher, Config>;
+
+		AkMap akMap;
 		using StlMap =
 			std::unordered_map<int, int, std::hash<int>, std::equal_to<int>, HeapAllocator<std::pair<const int, int>>>;
 		StlMap stdMap(10, std::hash<int>(), std::equal_to<int>(), alloc);

+ 43 - 15
Tests/Util/SparseArray.cpp

@@ -87,6 +87,32 @@ public:
 			moveCount = 0;
 	}
 };
+
+template<typename TIndex>
+class Config
+{
+public:
+	using Index = TIndex;
+
+	Index m_initialStorage = 0;
+	U32 m_linearProbingCount = 0;
+	F32 m_maxLoadFactor = 0.0f;
+
+	Index getInitialStorageSize() const
+	{
+		return m_initialStorage;
+	}
+
+	U32 getLinearProbingCount() const
+	{
+		return m_linearProbingCount;
+	}
+
+	F32 getMaxLoadFactor() const
+	{
+		return m_maxLoadFactor;
+	}
+};
 } // namespace
 } // namespace anki
 
@@ -107,7 +133,7 @@ ANKI_TEST(Util, SparseArray)
 
 	// Check destroy and grow
 	{
-		SparseArray<SAFoo> arr(64, 2);
+		SparseArray<SAFoo, Config<U32>> arr(Config<U32>{64, 2, 0.8f});
 
 		arr.emplace(alloc, 64 * 1, 123);
 		arr.emplace(alloc, 64 * 2, 124);
@@ -117,15 +143,17 @@ ANKI_TEST(Util, SparseArray)
 		ANKI_TEST_EXPECT_EQ(arr.find(64 * 2)->m_x, 124);
 		ANKI_TEST_EXPECT_EQ(arr.find(64 * 3)->m_x, 125);
 
-		arr.destroy(alloc);
+		SparseArray<SAFoo, Config<U32>> arr2(std::move(arr));
+
+		arr2.destroy(alloc);
 		SAFoo::checkCalls();
 	}
 
 	// Do complex insertions
 	{
-		SparseArray<SAFoo, U32> arr(64, 3);
+		SparseArray<SAFoo, Config<U32>> arr(Config<U32>{64, 3, 0.8f});
 
-		arr.emplace(alloc, 64 * 0 - 1, 1);
+		arr.emplace(alloc, 64u * 0 - 1, 1);
 		// Linear probing to 0
 		arr.emplace(alloc, 64 * 1 - 1, 2);
 		// Linear probing to 1
@@ -143,14 +171,14 @@ ANKI_TEST(Util, SparseArray)
 
 	// Fuzzy test
 	{
-		const U MAX = 10000;
-		SparseArray<SAFoo, U32> arr;
+		constexpr U kMax = 10000;
+		SparseArray<SAFoo> arr;
 		std::vector<int> numbers;
 
 		srand(U32(time(nullptr)));
 
 		// Insert random
-		for(U i = 0; i < MAX; ++i)
+		for(U i = 0; i < kMax; ++i)
 		{
 			I32 num;
 			while(1)
@@ -176,10 +204,10 @@ ANKI_TEST(Util, SparseArray)
 			arr.validate();
 		}
 
-		ANKI_TEST_EXPECT_EQ(arr.getSize(), MAX);
+		ANKI_TEST_EXPECT_EQ(arr.getSize(), kMax);
 
 		// Remove randomly
-		U count = MAX;
+		U count = kMax;
 		while(count--)
 		{
 			U idx = rand() % (count + 1);
@@ -197,13 +225,13 @@ ANKI_TEST(Util, SparseArray)
 
 	// Fuzzy test #2: Do random insertions and removals
 	{
-		const U MAX = 10000;
-		SparseArray<SAFoo, U64> arr;
+		constexpr U kMax = 10000;
+		SparseArray<SAFoo, Config<U64>> arr(Config<U64>{64, 8, 0.8f});
 		using StlMap =
 			std::unordered_map<int, int, std::hash<int>, std::equal_to<int>, HeapAllocator<std::pair<const int, int>>>;
 		StlMap map(10, std::hash<int>(), std::equal_to<int>(), alloc);
 
-		for(U i = 0; i < MAX; ++i)
+		for(U i = 0; i < kMax; ++i)
 		{
 			const Bool insert = (rand() & 1) || arr.getSize() == 0;
 
@@ -324,8 +352,8 @@ ANKI_TEST(Util, SparseArrayBench)
 		std::unordered_map<int, int, std::hash<int>, std::equal_to<int>, HeapAllocator<std::pair<const int, int>>>;
 	StlMap stdMap(10, std::hash<int>(), std::equal_to<int>(), allocStl);
 
-	using AkMap = SparseArray<int, U32>;
-	AkMap akMap(256, U32(log2(256.0f)), 0.90f);
+	using AkMap = SparseArray<int, Config<U32>>;
+	AkMap akMap(Config<U32>{256, U32(log2(256.0f)), 0.9f});
 
 	HighRezTimer timer;
 
@@ -439,7 +467,7 @@ ANKI_TEST(Util, SparseArrayBench)
 			stlTime += timer.getElapsedTime();
 		}
 
-		ANKI_TEST_LOGI("Deleting bench: STL %f AnKi %f | %f%%\n", stlTime, akTime, stlTime / akTime * 100.0);
+		ANKI_TEST_LOGI("Deleting bench: STL %f AnKi %f | %f%%", stlTime, akTime, stlTime / akTime * 100.0);
 	}
 
 	akMap.destroy(allocAk);