// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

///////////////////////////////////////////////////////////////////////////////////
// LFHMAllocator
///////////////////////////////////////////////////////////////////////////////////

inline LFHMAllocator::~LFHMAllocator()
{
	AlignedFree(mObjectStore);
}

inline void LFHMAllocator::Init(uint inObjectStoreSizeBytes)
{
	JPH_ASSERT(mObjectStore == nullptr);

	mObjectStoreSizeBytes = inObjectStoreSizeBytes;
	mObjectStore = reinterpret_cast<uint8 *>(AlignedAllocate(inObjectStoreSizeBytes, 16));
}

inline void LFHMAllocator::Clear()
{
	mWriteOffset = 0;
}

inline void LFHMAllocator::Allocate(uint32 inBlockSize, uint32 &ioBegin, uint32 &ioEnd)
{
	// If we're already beyond the end of our buffer then don't do an atomic add.
	// It's possible that many keys are inserted after the allocator is full; if we kept
	// adding, mWriteOffset (a uint32) could wrap around to zero, which would corrupt memory.
	// With this early out, the write offset can exceed the size of the buffer by at most
	// <CPU count> * inBlockSize in the worst case.
	if (mWriteOffset.load(memory_order_relaxed) >= mObjectStoreSizeBytes)
		return;

	// Atomically fetch a block from the pool
	uint32 begin = mWriteOffset.fetch_add(inBlockSize, memory_order_relaxed);
	uint32 end = min(begin + inBlockSize, mObjectStoreSizeBytes);

	if (ioEnd == begin)
	{
		// Block is allocated straight after our previous block
		begin = ioBegin;
	}
	else
	{
		// Block is a new block
		begin = min(begin, mObjectStoreSizeBytes);
	}

	// Store the begin and end of the resulting block
	ioBegin = begin;
	ioEnd = end;
}
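
// Note on the ioBegin/ioEnd contract above: when the freshly fetched block starts exactly
// where the caller's previous block ended, the two blocks are merged so that the leftover
// bytes of the previous block are not wasted. Illustrative sketch (the values and the
// single-threaded timing are assumptions, not guarantees of the library):
//
//	uint32 begin = 0, end = 0;
//	allocator.Allocate(256, begin, end);	// e.g. begin = 0, end = 256
//	allocator.Allocate(256, begin, end);	// if no other thread allocated in between,
//											// the blocks merge: begin = 0, end = 512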

template <class T>
inline uint32 LFHMAllocator::ToOffset(const T *inData) const
{
	const uint8 *data = reinterpret_cast<const uint8 *>(inData);
	JPH_ASSERT(data >= mObjectStore && data < mObjectStore + mObjectStoreSizeBytes);
	return uint32(data - mObjectStore);
}

template <class T>
inline T *LFHMAllocator::FromOffset(uint32 inOffset) const
{
	JPH_ASSERT(inOffset < mObjectStoreSizeBytes);
	return reinterpret_cast<T *>(mObjectStore + inOffset);
}
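
// ToOffset / FromOffset convert between pointers into the object store and 32-bit offsets,
// which is what lets the map store compact uint32 handles instead of full pointers.
// Illustrative round trip (the uint64 payload and the `allocator`/`offset` names are
// assumptions for the example):
//
//	uint64 *object = allocator.FromOffset<uint64>(offset);
//	JPH_ASSERT(allocator.ToOffset(object) == offset);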

///////////////////////////////////////////////////////////////////////////////////
// LFHMAllocatorContext
///////////////////////////////////////////////////////////////////////////////////

inline LFHMAllocatorContext::LFHMAllocatorContext(LFHMAllocator &inAllocator, uint32 inBlockSize) :
	mAllocator(inAllocator),
	mBlockSize(inBlockSize)
{
}

inline bool LFHMAllocatorContext::Allocate(uint32 inSize, uint32 inAlignment, uint32 &outWriteOffset)
{
	// Calculate needed bytes for alignment
	JPH_ASSERT(IsPowerOf2(inAlignment));
	uint32 alignment_mask = inAlignment - 1;
	uint32 alignment = (inAlignment - (mBegin & alignment_mask)) & alignment_mask;

	// Check if we have space
	if (mEnd - mBegin < inSize + alignment)
	{
		// Allocate a new block
		mAllocator.Allocate(mBlockSize, mBegin, mEnd);

		// Update alignment
		alignment = (inAlignment - (mBegin & alignment_mask)) & alignment_mask;

		// Check if we have space again
		if (mEnd - mBegin < inSize + alignment)
			return false;
	}

	// Make the allocation
	mBegin += alignment;
	outWriteOffset = mBegin;
	mBegin += inSize;
	return true;
}
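
// The alignment computation above rounds mBegin up to the next multiple of inAlignment:
// e.g. with mBegin = 13 and inAlignment = 8, alignment = (8 - (13 & 7)) & 7 = 3, so the
// allocation starts at offset 16. Illustrative per-thread usage (the block size and the
// `allocator` name are assumptions for the example):
//
//	LFHMAllocatorContext context(allocator, 4096);	// this thread grabs 4 KiB blocks
//	uint32 write_offset;
//	if (context.Allocate(sizeof(uint64), alignof(uint64), write_offset))
//		*allocator.FromOffset<uint64>(write_offset) = 42;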

///////////////////////////////////////////////////////////////////////////////////
// LockFreeHashMap
///////////////////////////////////////////////////////////////////////////////////

template <class Key, class Value>
void LockFreeHashMap<Key, Value>::Init(uint32 inMaxBuckets)
{
	JPH_ASSERT(inMaxBuckets >= 4 && IsPowerOf2(inMaxBuckets));
	JPH_ASSERT(mBuckets == nullptr);

	mNumBuckets = inMaxBuckets;
	mMaxBuckets = inMaxBuckets;

	mBuckets = reinterpret_cast<atomic<uint32> *>(AlignedAllocate(inMaxBuckets * sizeof(atomic<uint32>), 16));

	Clear();
}

template <class Key, class Value>
LockFreeHashMap<Key, Value>::~LockFreeHashMap()
{
	AlignedFree(mBuckets);
}

template <class Key, class Value>
void LockFreeHashMap<Key, Value>::Clear()
{
#ifdef JPH_ENABLE_ASSERTS
	// Reset number of key/value pairs
	mNumKeyValues = 0;
#endif // JPH_ENABLE_ASSERTS

	// Reset buckets 4 at a time
	static_assert(sizeof(atomic<uint32>) == sizeof(uint32));
	UVec4 invalid_handle = UVec4::sReplicate(cInvalidHandle);
	uint32 *start = reinterpret_cast<uint32 *>(mBuckets);
	const uint32 *end = start + mNumBuckets;
	JPH_ASSERT(IsAligned(start, 16));
	while (start < end)
	{
		invalid_handle.StoreInt4Aligned(start);
		start += 4;
	}
}

template <class Key, class Value>
void LockFreeHashMap<Key, Value>::SetNumBuckets(uint32 inNumBuckets)
{
	JPH_ASSERT(mNumKeyValues == 0);
	JPH_ASSERT(inNumBuckets <= mMaxBuckets);
	JPH_ASSERT(inNumBuckets >= 4 && IsPowerOf2(inNumBuckets));

	mNumBuckets = inNumBuckets;
}

template <class Key, class Value>
template <class... Params>
inline typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::Create(LFHMAllocatorContext &ioContext, const Key &inKey, uint64 inKeyHash, int inExtraBytes, Params &&... inConstructorParams)
{
	// This is not a multi-map, so check that the key hasn't been inserted yet
	JPH_ASSERT(Find(inKey, inKeyHash) == nullptr);

	// Calculate total size
	uint size = sizeof(KeyValue) + inExtraBytes;

	// Get the write offset for this key/value pair
	uint32 write_offset;
	if (!ioContext.Allocate(size, alignof(KeyValue), write_offset))
		return nullptr;

#ifdef JPH_ENABLE_ASSERTS
	// Increment the number of entries in the map
	mNumKeyValues.fetch_add(1, memory_order_relaxed);
#endif // JPH_ENABLE_ASSERTS

	// Construct the key/value pair
	KeyValue *kv = mAllocator.template FromOffset<KeyValue>(write_offset);
	JPH_ASSERT(intptr_t(kv) % alignof(KeyValue) == 0);
#ifdef _DEBUG
	memset(kv, 0xcd, size);
#endif
	kv->mKey = inKey;
	new (&kv->mValue) Value(std::forward<Params>(inConstructorParams)...);

	// Get the offset to the first object from the bucket with corresponding hash
	atomic<uint32> &offset = mBuckets[inKeyHash & (mNumBuckets - 1)];

	// Add this entry as the first element in the linked list
	uint32 old_offset = offset.load(memory_order_relaxed);
	for (;;)
	{
		kv->mNextOffset = old_offset;
		if (offset.compare_exchange_weak(old_offset, write_offset, memory_order_release))
			break;
	}

	return kv;
}
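
// Illustrative insertion sketch: each bucket heads an intrusive singly linked list and the
// compare_exchange_weak loop above pushes the new entry at the head without taking a lock.
// In the example below, the map/allocator setup and the Hash64 helper are assumptions for
// the example, not part of this file:
//
//	LockFreeHashMap<uint32, float> map(allocator);
//	map.Init(1024);					// power-of-2 bucket count
//	LFHMAllocatorContext context(allocator, 4096);
//	uint32 key = 123;
//	map.Create(context, key, Hash64(key), 0, 1.0f);	// value constructed in place as 1.0f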

template <class Key, class Value>
inline const typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::Find(const Key &inKey, uint64 inKeyHash) const
{
	// Get the offset to the key/value object from the bucket with corresponding hash
	uint32 offset = mBuckets[inKeyHash & (mNumBuckets - 1)].load(memory_order_acquire);
	while (offset != cInvalidHandle)
	{
		// Loop through the linked list of values until the right one is found
		const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
		if (kv->mKey == inKey)
			return kv;
		offset = kv->mNextOffset;
	}

	// Not found
	return nullptr;
}
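
// Illustrative lookup sketch: the caller supplies the same hash that was used for Create,
// so the hash can be computed once and shared between insert and lookup. Names follow the
// Create example above and are assumptions for the example:
//
//	if (const LockFreeHashMap<uint32, float>::KeyValue *kv = map.Find(key, Hash64(key)))
//		Trace("Found value: %g", double(kv->GetValue()));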

template <class Key, class Value>
inline uint32 LockFreeHashMap<Key, Value>::ToHandle(const KeyValue *inKeyValue) const
{
	return mAllocator.ToOffset(inKeyValue);
}

template <class Key, class Value>
inline const typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::FromHandle(uint32 inHandle) const
{
	return mAllocator.template FromOffset<const KeyValue>(inHandle);
}

template <class Key, class Value>
inline void LockFreeHashMap<Key, Value>::GetAllKeyValues(Array<const KeyValue *> &outAll) const
{
	for (const atomic<uint32> *bucket = mBuckets; bucket < mBuckets + mNumBuckets; ++bucket)
	{
		uint32 offset = *bucket;
		while (offset != cInvalidHandle)
		{
			const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
			outAll.push_back(kv);
			offset = kv->mNextOffset;
		}
	}
}

template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::Iterator LockFreeHashMap<Key, Value>::begin()
{
	// Start with the first bucket
	Iterator it { this, 0, mBuckets[0] };

	// If it doesn't contain a valid entry, use the ++ operator to find the first valid entry
	if (it.mOffset == cInvalidHandle)
		++it;

	return it;
}

template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::Iterator LockFreeHashMap<Key, Value>::end()
{
	return { this, mNumBuckets, cInvalidHandle };
}

template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::KeyValue &LockFreeHashMap<Key, Value>::Iterator::operator* ()
{
	JPH_ASSERT(mOffset != cInvalidHandle);

	return *mMap->mAllocator.template FromOffset<KeyValue>(mOffset);
}

template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::Iterator &LockFreeHashMap<Key, Value>::Iterator::operator++ ()
{
	JPH_ASSERT(mBucket < mMap->mNumBuckets);

	// Find the next key/value in this bucket
	if (mOffset != cInvalidHandle)
	{
		const KeyValue *kv = mMap->mAllocator.template FromOffset<const KeyValue>(mOffset);
		mOffset = kv->mNextOffset;
		if (mOffset != cInvalidHandle)
			return *this;
	}

	// Loop over next buckets
	for (;;)
	{
		// Next bucket
		++mBucket;
		if (mBucket >= mMap->mNumBuckets)
			return *this;

		// Fetch the first entry in the bucket
		mOffset = mMap->mBuckets[mBucket];
		if (mOffset != cInvalidHandle)
			return *this;
	}
}
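
// Illustrative iteration sketch: begin()/end() walk every bucket and follow each bucket's
// linked list, so a range-based for visits all entries. Map type and accessor names follow
// the examples above and are assumptions for the example:
//
//	for (LockFreeHashMap<uint32, float>::KeyValue &kv : map)
//		Trace("%u -> %g", kv.GetKey(), double(kv.GetValue()));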

#ifdef _DEBUG
template <class Key, class Value>
void LockFreeHashMap<Key, Value>::TraceStats() const
{
	const int cMaxPerBucket = 256;

	int max_objects_per_bucket = 0;
	int num_objects = 0;
	int histogram[cMaxPerBucket];
	for (int i = 0; i < cMaxPerBucket; ++i)
		histogram[i] = 0;

	for (atomic<uint32> *bucket = mBuckets, *bucket_end = mBuckets + mNumBuckets; bucket < bucket_end; ++bucket)
	{
		int objects_in_bucket = 0;
		uint32 offset = *bucket;
		while (offset != cInvalidHandle)
		{
			const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
			offset = kv->mNextOffset;
			++objects_in_bucket;
			++num_objects;
		}
		max_objects_per_bucket = max(objects_in_bucket, max_objects_per_bucket);
		histogram[min(objects_in_bucket, cMaxPerBucket - 1)]++;
	}

	Trace("max_objects_per_bucket = %d, num_buckets = %d, num_objects = %d", max_objects_per_bucket, mNumBuckets, num_objects);

	for (int i = 0; i < cMaxPerBucket; ++i)
		if (histogram[i] != 0)
			Trace("%d: %d", i, histogram[i]);
}
#endif // _DEBUG

JPH_NAMESPACE_END