LockFreeHashMap.inl 9.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333
  1. // SPDX-FileCopyrightText: 2021 Jorrit Rouwe
  2. // SPDX-License-Identifier: MIT
  3. #pragma once
  4. JPH_NAMESPACE_BEGIN
  5. ///////////////////////////////////////////////////////////////////////////////////
  6. // LFHMAllocator
  7. ///////////////////////////////////////////////////////////////////////////////////
inline LFHMAllocator::~LFHMAllocator()
{
	// Release the backing store allocated in Init() (deleting nullptr is a no-op, so this is safe if Init was never called)
	delete [] mObjectStore;
}
  12. inline void LFHMAllocator::Init(uint inObjectStoreSizeBytes)
  13. {
  14. JPH_ASSERT(mObjectStore == nullptr);
  15. mObjectStoreSizeBytes = inObjectStoreSizeBytes;
  16. mObjectStore = new uint8 [inObjectStoreSizeBytes];
  17. }
inline void LFHMAllocator::Clear()
{
	// Rewind the bump pointer to the start of the store; all previously handed out
	// blocks become invalid (the memory itself is not freed or cleared)
	mWriteOffset = 0;
}
inline void LFHMAllocator::Allocate(uint32 inBlockSize, uint32 &ioBegin, uint32 &ioEnd)
{
	// Carve a block of inBlockSize bytes out of the shared store.
	// On input ioBegin/ioEnd describe the caller's previous block; on output they describe the
	// new usable range. Note that mWriteOffset is allowed to grow past mObjectStoreSizeBytes
	// (fetch_add is unconditional); the min() clamps below turn an exhausted store into an
	// empty [begin, end) range rather than an out-of-bounds one.
	// memory_order_relaxed suffices here: the contents written into a block are published to
	// readers via the release/acquire pair on the hash map buckets, not via this counter.

	// Atomically fetch a block from the pool
	uint32 begin = mWriteOffset.fetch_add(inBlockSize, memory_order_relaxed);
	uint32 end = min(begin + inBlockSize, mObjectStoreSizeBytes);

	if (ioEnd == begin)
	{
		// Block is allocated straight after our previous block:
		// merge them so any unused bytes at the end of the old block are not wasted
		begin = ioBegin;
	}
	else
	{
		// Block is a new block (clamp in case the store is already exhausted)
		begin = min(begin, mObjectStoreSizeBytes);
	}

	// Store the begin and end of the resulting block
	ioBegin = begin;
	ioEnd = end;
}
  41. template <class T>
  42. inline uint32 LFHMAllocator::ToOffset(const T *inData) const
  43. {
  44. const uint8 *data = reinterpret_cast<const uint8 *>(inData);
  45. JPH_ASSERT(data >= mObjectStore && data < mObjectStore + mObjectStoreSizeBytes);
  46. return uint32(data - mObjectStore);
  47. }
  48. template <class T>
  49. inline T *LFHMAllocator::FromOffset(uint32 inOffset) const
  50. {
  51. JPH_ASSERT(inOffset < mObjectStoreSizeBytes);
  52. return reinterpret_cast<T *>(mObjectStore + inOffset);
  53. }
  54. ///////////////////////////////////////////////////////////////////////////////////
  55. // LFHMAllocatorContext
  56. ///////////////////////////////////////////////////////////////////////////////////
// Per-thread context: hands out small allocations from a private block so that
// only one atomic fetch_add on the shared allocator is needed per inBlockSize bytes
inline LFHMAllocatorContext::LFHMAllocatorContext(LFHMAllocator &inAllocator, uint32 inBlockSize) :
	mAllocator(inAllocator),
	mBlockSize(inBlockSize)
{
}
  62. inline bool LFHMAllocatorContext::Allocate(uint32 inSize, uint32 &outWriteOffset)
  63. {
  64. // Check if we have space
  65. if (mEnd - mBegin < inSize)
  66. {
  67. // Allocate a new block
  68. mAllocator.Allocate(mBlockSize, mBegin, mEnd);
  69. // Check if we have space again
  70. if (mEnd - mBegin < inSize)
  71. return false;
  72. }
  73. // Make the allocation
  74. outWriteOffset = mBegin;
  75. mBegin += inSize;
  76. return true;
  77. }
  78. ///////////////////////////////////////////////////////////////////////////////////
  79. // LockFreeHashMap
  80. ///////////////////////////////////////////////////////////////////////////////////
  81. template <class Key, class Value>
  82. void LockFreeHashMap<Key, Value>::Init(uint32 inMaxBuckets)
  83. {
  84. JPH_ASSERT(inMaxBuckets >= 4 && IsPowerOf2(inMaxBuckets));
  85. JPH_ASSERT(mBuckets == nullptr);
  86. mNumBuckets = inMaxBuckets;
  87. mMaxBuckets = inMaxBuckets;
  88. mBuckets = new atomic<uint32> [inMaxBuckets];
  89. Clear();
  90. }
template <class Key, class Value>
LockFreeHashMap<Key, Value>::~LockFreeHashMap()
{
	// Free the bucket array; the key/value objects themselves live in the external
	// LFHMAllocator, which owns (and frees) that memory separately
	delete [] mBuckets;
}
template <class Key, class Value>
void LockFreeHashMap<Key, Value>::Clear()
{
	// Empties the map by resetting every bucket head to cInvalidHandle.
	// NOTE(review): the stores below bypass the atomics (plain SIMD writes), so this
	// assumes no other thread is accessing the map concurrently — confirm at call sites.
#ifdef JPH_ENABLE_ASSERTS
	// Reset number of key value pairs
	mNumKeyValues = 0;
#endif // JPH_ENABLE_ASSERTS

	// Reset buckets 4 at a time; mNumBuckets is always a multiple of 4
	// (Init/SetNumBuckets assert >= 4 and power of 2)
	static_assert(sizeof(atomic<uint32>) == sizeof(uint32));
	UVec4 invalid_handle = UVec4::sReplicate(cInvalidHandle);
	uint32 *start = reinterpret_cast<uint32 *>(mBuckets);
	const uint32 *end = start + mNumBuckets;
	JPH_ASSERT(IsAligned(start, 16));
	while (start < end)
	{
		invalid_handle.StoreInt4Aligned(start);
		start += 4;
	}
}
template <class Key, class Value>
void LockFreeHashMap<Key, Value>::SetNumBuckets(uint32 inNumBuckets)
{
	// Shrink/grow the active bucket count; only allowed while the map is empty,
	// since existing entries are linked from the current buckets
	JPH_ASSERT(mNumKeyValues == 0);
	// Cannot exceed the array allocated in Init
	JPH_ASSERT(inNumBuckets <= mMaxBuckets);
	// Same invariants as Init: power of 2 for hash masking, >= 4 for Clear's 4-wide stores
	JPH_ASSERT(inNumBuckets >= 4 && IsPowerOf2(inNumBuckets));

	mNumBuckets = inNumBuckets;
}
template <class Key, class Value>
template <class... Params>
inline typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::Create(LFHMAllocatorContext &ioContext, const Key &inKey, size_t inKeyHash, int inExtraBytes, Params &&... inConstructorParams)
{
	// Inserts a new key/value pair, allocating its storage (plus inExtraBytes of trailing
	// space) from ioContext and constructing Value in place from inConstructorParams.
	// Returns nullptr when the allocator is out of space.

	// This is not a multi map, test the key hasn't been inserted yet
	JPH_ASSERT(Find(inKey, inKeyHash) == nullptr);

	// Calculate total size
	uint size = sizeof(KeyValue) + inExtraBytes;

	// Get the write offset for this key value pair
	uint32 write_offset;
	if (!ioContext.Allocate(size, write_offset))
		return nullptr;

#ifdef JPH_ENABLE_ASSERTS
	// Increment amount of entries in map (relaxed: only used for assert bookkeeping)
	mNumKeyValues.fetch_add(1, memory_order_relaxed);
#endif // JPH_ENABLE_ASSERTS

	// Construct the key/value pair
	KeyValue *kv = mAllocator.template FromOffset<KeyValue>(write_offset);
	JPH_ASSERT(intptr_t(kv) % alignof(KeyValue) == 0);
#ifdef _DEBUG
	// Fill with a recognizable pattern so use of uninitialized bytes is visible in debug builds
	memset(kv, 0xcd, size);
#endif
	kv->mKey = inKey;
	// Placement new: the byte store is raw memory, so Value must be constructed in place.
	// NOTE(review): no matching destructor call is visible in this file (Clear only resets
	// buckets) — confirm Value is trivially destructible or destroyed elsewhere.
	new (&kv->mValue) Value(forward<Params>(inConstructorParams)...);

	// Get the offset to the first object from the bucket with corresponding hash
	atomic<uint32> &offset = mBuckets[inKeyHash & (mNumBuckets - 1)];

	// Add this entry as the first element in the linked list.
	// The release CAS publishes the writes to *kv above; Find's acquire load pairs with it.
	// compare_exchange_weak may fail spuriously or because another thread pushed first;
	// either way old_offset is reloaded and we retry.
	for (;;)
	{
		uint32 old_offset = offset.load(memory_order_relaxed);
		kv->mNextOffset = old_offset;
		if (offset.compare_exchange_weak(old_offset, write_offset, memory_order_release))
			break;
	}

	return kv;
}
  159. template <class Key, class Value>
  160. inline const typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::Find(const Key &inKey, size_t inKeyHash) const
  161. {
  162. // Get the offset to the keyvalue object from the bucket with corresponding hash
  163. uint32 offset = mBuckets[inKeyHash & (mNumBuckets - 1)].load(memory_order_acquire);
  164. while (offset != cInvalidHandle)
  165. {
  166. // Loop through linked list of values until the right one is found
  167. const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
  168. if (kv->mKey == inKey)
  169. return kv;
  170. offset = kv->mNextOffset;
  171. }
  172. // Not found
  173. return nullptr;
  174. }
template <class Key, class Value>
inline uint32 LockFreeHashMap<Key, Value>::ToHandle(const KeyValue *inKeyValue) const
{
	// A handle is simply the entry's byte offset inside the allocator's object store
	return mAllocator.ToOffset(inKeyValue);
}
template <class Key, class Value>
inline const typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::FromHandle(uint32 inHandle) const
{
	// Inverse of ToHandle: map the byte offset back to the entry in the object store
	return mAllocator.template FromOffset<const KeyValue>(inHandle);
}
  185. template <class Key, class Value>
  186. inline void LockFreeHashMap<Key, Value>::GetAllKeyValues(vector<const KeyValue *> &outAll) const
  187. {
  188. for (const atomic<uint32> *bucket = mBuckets; bucket < mBuckets + mNumBuckets; ++bucket)
  189. {
  190. uint32 offset = *bucket;
  191. while (offset != cInvalidHandle)
  192. {
  193. const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
  194. outAll.push_back(kv);
  195. offset = kv->mNextOffset;
  196. }
  197. }
  198. }
  199. template <class Key, class Value>
  200. typename LockFreeHashMap<Key, Value>::Iterator LockFreeHashMap<Key, Value>::begin()
  201. {
  202. // Start with the first bucket
  203. Iterator it { this, 0, mBuckets[0] };
  204. // If it doesn't contain a valid entry, use the ++ operator to find the first valid entry
  205. if (it.mOffset == cInvalidHandle)
  206. ++it;
  207. return it;
  208. }
template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::Iterator LockFreeHashMap<Key, Value>::end()
{
	// One past the last bucket with an invalid offset; operator++ produces exactly this
	// state when it runs off the final bucket
	return { this, mNumBuckets, cInvalidHandle };
}
template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::KeyValue &LockFreeHashMap<Key, Value>::Iterator::operator* ()
{
	// Dereferencing the end iterator (or an exhausted one) is invalid
	JPH_ASSERT(mOffset != cInvalidHandle);

	// Resolve the stored offset to the key/value pair in the allocator's object store
	return *mMap->mAllocator.template FromOffset<KeyValue>(mOffset);
}
template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::Iterator &LockFreeHashMap<Key, Value>::Iterator::operator++ ()
{
	// Advance to the next entry: first along the current bucket's chain,
	// then across buckets until a non-empty one (or the end state) is reached.
	JPH_ASSERT(mBucket < mMap->mNumBuckets);

	// Find the next key value in this bucket
	if (mOffset != cInvalidHandle)
	{
		const KeyValue *kv = mMap->mAllocator.template FromOffset<const KeyValue>(mOffset);
		mOffset = kv->mNextOffset;
		if (mOffset != cInvalidHandle)
			return *this;
	}

	// Loop over next buckets
	for (;;)
	{
		// Next bucket
		++mBucket;
		if (mBucket >= mMap->mNumBuckets)
			return *this;	// Reached the end state: mBucket == mNumBuckets, mOffset == cInvalidHandle

		// Fetch the first entry in the bucket
		mOffset = mMap->mBuckets[mBucket];
		if (mOffset != cInvalidHandle)
			return *this;
	}
}
  245. #ifdef _DEBUG
  246. template <class Key, class Value>
  247. void LockFreeHashMap<Key, Value>::TraceStats() const
  248. {
  249. const int cMaxPerBucket = 256;
  250. int max_objects_per_bucket = 0;
  251. int num_objects = 0;
  252. int histogram[cMaxPerBucket];
  253. for (int i = 0; i < cMaxPerBucket; ++i)
  254. histogram[i] = 0;
  255. for (atomic<uint32> *bucket = mBuckets, *bucket_end = mBuckets + mNumBuckets; bucket < bucket_end; ++bucket)
  256. {
  257. int objects_in_bucket = 0;
  258. uint32 offset = *bucket;
  259. while (offset != cInvalidHandle)
  260. {
  261. const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
  262. offset = kv->mNextOffset;
  263. ++objects_in_bucket;
  264. ++num_objects;
  265. }
  266. max_objects_per_bucket = max(objects_in_bucket, max_objects_per_bucket);
  267. histogram[min(objects_in_bucket, cMaxPerBucket - 1)]++;
  268. }
  269. Trace("max_objects_per_bucket = %d, num_buckets = %d, num_objects = %d", max_objects_per_bucket, mNumBuckets, num_objects);
  270. for (int i = 0; i < cMaxPerBucket; ++i)
  271. if (histogram[i] != 0)
  272. Trace("%d: %d", i, histogram[i]);
  273. }
  274. #endif
  275. JPH_NAMESPACE_END