// LockFreeHashMap.inl
  1. // SPDX-FileCopyrightText: 2021 Jorrit Rouwe
  2. // SPDX-License-Identifier: MIT
  3. #pragma once
  4. namespace JPH {
  5. ///////////////////////////////////////////////////////////////////////////////////
  6. // LFHMAllocator
  7. ///////////////////////////////////////////////////////////////////////////////////
  8. inline LFHMAllocator::~LFHMAllocator()
  9. {
  10. delete [] mObjectStore;
  11. }
  12. inline void LFHMAllocator::Init(uint inObjectStoreSizeBytes)
  13. {
  14. JPH_ASSERT(mObjectStore == nullptr);
  15. mObjectStoreSizeBytes = inObjectStoreSizeBytes;
  16. mObjectStore = new uint8 [inObjectStoreSizeBytes];
  17. }
  18. inline void LFHMAllocator::Clear()
  19. {
  20. mWriteOffset = 0;
  21. }
  22. inline void LFHMAllocator::Allocate(uint32 inBlockSize, uint32 &ioBegin, uint32 &ioEnd)
  23. {
  24. // Atomically fetch a block from the pool
  25. uint32 begin = mWriteOffset.fetch_add(inBlockSize, memory_order_relaxed);
  26. uint32 end = min(begin + inBlockSize, mObjectStoreSizeBytes);
  27. if (ioEnd == begin)
  28. {
  29. // Block is allocated straight after our previous block
  30. begin = ioBegin;
  31. }
  32. else
  33. {
  34. // Block is a new block
  35. begin = min(begin, mObjectStoreSizeBytes);
  36. }
  37. // Store the begin and end of the resulting block
  38. ioBegin = begin;
  39. ioEnd = end;
  40. }
  41. template <class T>
  42. inline uint32 LFHMAllocator::ToOffset(const T *inData) const
  43. {
  44. const uint8 *data = reinterpret_cast<const uint8 *>(inData);
  45. JPH_ASSERT(data >= mObjectStore && data < mObjectStore + mObjectStoreSizeBytes);
  46. return uint32(data - mObjectStore);
  47. }
  48. template <class T>
  49. inline T *LFHMAllocator::FromOffset(uint32 inOffset) const
  50. {
  51. JPH_ASSERT(inOffset < mObjectStoreSizeBytes);
  52. return reinterpret_cast<T *>(mObjectStore + inOffset);
  53. }
  54. ///////////////////////////////////////////////////////////////////////////////////
  55. // LFHMAllocatorContext
  56. ///////////////////////////////////////////////////////////////////////////////////
  57. inline LFHMAllocatorContext::LFHMAllocatorContext(LFHMAllocator &inAllocator, uint32 inBlockSize) :
  58. mAllocator(inAllocator),
  59. mBlockSize(inBlockSize)
  60. {
  61. }
  62. inline bool LFHMAllocatorContext::Allocate(uint32 inSize, uint32 &outWriteOffset)
  63. {
  64. // Check if we have space
  65. if (mEnd - mBegin < inSize)
  66. {
  67. // Allocate a new block
  68. mAllocator.Allocate(mBlockSize, mBegin, mEnd);
  69. // Check if we have space again
  70. if (mEnd - mBegin < inSize)
  71. return false;
  72. }
  73. // Make the allocation
  74. outWriteOffset = mBegin;
  75. mBegin += inSize;
  76. return true;
  77. }
  78. ///////////////////////////////////////////////////////////////////////////////////
  79. // LockFreeHashMap
  80. ///////////////////////////////////////////////////////////////////////////////////
  81. template <class Key, class Value>
  82. void LockFreeHashMap<Key, Value>::Init(uint32 inMaxBuckets)
  83. {
  84. JPH_ASSERT(inMaxBuckets >= 4 && IsPowerOf2(inMaxBuckets));
  85. JPH_ASSERT(mBuckets == nullptr);
  86. mNumBuckets = inMaxBuckets;
  87. mMaxBuckets = inMaxBuckets;
  88. mBuckets = new atomic<uint32> [inMaxBuckets];
  89. Clear();
  90. }
  91. template <class Key, class Value>
  92. LockFreeHashMap<Key, Value>::~LockFreeHashMap()
  93. {
  94. delete [] mBuckets;
  95. }
  96. template <class Key, class Value>
  97. void LockFreeHashMap<Key, Value>::Clear()
  98. {
  99. #ifdef JPH_ENABLE_ASSERTS
  100. // Reset number of key value pairs
  101. mNumKeyValues = 0;
  102. #endif // JPH_ENABLE_ASSERTS
  103. // Reset buckets 4 at a time
  104. static_assert(sizeof(atomic<uint32>) == sizeof(uint32));
  105. UVec4 invalid_handle = UVec4::sReplicate(cInvalidHandle);
  106. uint32 *start = reinterpret_cast<uint32 *>(mBuckets), *end = start + mNumBuckets;
  107. JPH_ASSERT(IsAligned(start, 16));
  108. while (start < end)
  109. {
  110. invalid_handle.StoreInt4Aligned(start);
  111. start += 4;
  112. }
  113. }
  114. template <class Key, class Value>
  115. void LockFreeHashMap<Key, Value>::SetNumBuckets(uint32 inNumBuckets)
  116. {
  117. JPH_ASSERT(mNumKeyValues == 0);
  118. JPH_ASSERT(inNumBuckets <= mMaxBuckets);
  119. JPH_ASSERT(inNumBuckets >= 4 && IsPowerOf2(inNumBuckets));
  120. mNumBuckets = inNumBuckets;
  121. }
  122. template <class Key, class Value>
  123. template <class... Params>
  124. inline typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::Create(LFHMAllocatorContext &ioContext, const Key &inKey, size_t inKeyHash, int inExtraBytes, Params &&... inConstructorParams)
  125. {
  126. // This is not a multi map, test the key hasn't been inserted yet
  127. JPH_ASSERT(Find(inKey, inKeyHash) == nullptr);
  128. // Calculate total size
  129. uint size = sizeof(KeyValue) + inExtraBytes;
  130. // Get the write offset for this key value pair
  131. uint32 write_offset;
  132. if (!ioContext.Allocate(size, write_offset))
  133. return nullptr;
  134. #ifdef JPH_ENABLE_ASSERTS
  135. // Increment amount of entries in map
  136. mNumKeyValues.fetch_add(1, memory_order_relaxed);
  137. #endif // JPH_ENABLE_ASSERTS
  138. // Construct the key/value pair
  139. KeyValue *kv = mAllocator.template FromOffset<KeyValue>(write_offset);
  140. JPH_ASSERT(intptr_t(kv) % alignof(KeyValue) == 0);
  141. #ifdef _DEBUG
  142. memset(kv, 0xcd, size);
  143. #endif
  144. kv->mKey = inKey;
  145. new (&kv->mValue) Value(forward<Params>(inConstructorParams)...);
  146. // Get the offset to the first object from the bucket with corresponding hash
  147. atomic<uint32> &offset = mBuckets[inKeyHash & (mNumBuckets - 1)];
  148. // Add this entry as the first element in the linked list
  149. for (;;)
  150. {
  151. uint32 old_offset = offset.load(memory_order_relaxed);
  152. kv->mNextOffset = old_offset;
  153. if (offset.compare_exchange_weak(old_offset, write_offset, memory_order_release))
  154. break;
  155. }
  156. return kv;
  157. }
  158. template <class Key, class Value>
  159. inline const typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::Find(const Key &inKey, size_t inKeyHash) const
  160. {
  161. // Get the offset to the keyvalue object from the bucket with corresponding hash
  162. uint32 offset = mBuckets[inKeyHash & (mNumBuckets - 1)].load(memory_order_acquire);
  163. while (offset != cInvalidHandle)
  164. {
  165. // Loop through linked list of values until the right one is found
  166. const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
  167. if (kv->mKey == inKey)
  168. return kv;
  169. offset = kv->mNextOffset;
  170. }
  171. // Not found
  172. return nullptr;
  173. }
  174. template <class Key, class Value>
  175. inline uint32 LockFreeHashMap<Key, Value>::ToHandle(const KeyValue *inKeyValue) const
  176. {
  177. return mAllocator.ToOffset(inKeyValue);
  178. }
  179. template <class Key, class Value>
  180. inline const typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::FromHandle(uint32 inHandle) const
  181. {
  182. return mAllocator.template FromOffset<const KeyValue>(inHandle);
  183. }
  184. template <class Key, class Value>
  185. inline void LockFreeHashMap<Key, Value>::GetAllKeyValues(vector<const KeyValue *> &outAll) const
  186. {
  187. for (atomic<uint32> *bucket = mBuckets; bucket < mBuckets + mNumBuckets; ++bucket)
  188. {
  189. uint32 offset = *bucket;
  190. while (offset != cInvalidHandle)
  191. {
  192. const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
  193. outAll.push_back(kv);
  194. offset = kv->mNextOffset;
  195. }
  196. }
  197. }
  198. template <class Key, class Value>
  199. typename LockFreeHashMap<Key, Value>::Iterator LockFreeHashMap<Key, Value>::begin()
  200. {
  201. // Start with the first bucket
  202. Iterator it { this, 0, mBuckets[0] };
  203. // If it doesn't contain a valid entry, use the ++ operator to find the first valid entry
  204. if (it.mOffset == cInvalidHandle)
  205. ++it;
  206. return it;
  207. }
  208. template <class Key, class Value>
  209. typename LockFreeHashMap<Key, Value>::Iterator LockFreeHashMap<Key, Value>::end()
  210. {
  211. return { this, mNumBuckets, cInvalidHandle };
  212. }
  213. template <class Key, class Value>
  214. typename LockFreeHashMap<Key, Value>::KeyValue &LockFreeHashMap<Key, Value>::Iterator::operator* ()
  215. {
  216. JPH_ASSERT(mOffset != cInvalidHandle);
  217. return *mMap->mAllocator.template FromOffset<KeyValue>(mOffset);
  218. }
  219. template <class Key, class Value>
  220. typename LockFreeHashMap<Key, Value>::Iterator &LockFreeHashMap<Key, Value>::Iterator::operator++ ()
  221. {
  222. JPH_ASSERT(mBucket < mMap->mNumBuckets);
  223. // Find the next key value in this bucket
  224. if (mOffset != cInvalidHandle)
  225. {
  226. const KeyValue *kv = mMap->mAllocator.template FromOffset<const KeyValue>(mOffset);
  227. mOffset = kv->mNextOffset;
  228. if (mOffset != cInvalidHandle)
  229. return *this;
  230. }
  231. // Loop over next buckets
  232. for (;;)
  233. {
  234. // Next bucket
  235. ++mBucket;
  236. if (mBucket >= mMap->mNumBuckets)
  237. return *this;
  238. // Fetch the first entry in the bucket
  239. mOffset = mMap->mBuckets[mBucket];
  240. if (mOffset != cInvalidHandle)
  241. return *this;
  242. }
  243. }
  244. #ifdef _DEBUG
  245. template <class Key, class Value>
  246. void LockFreeHashMap<Key, Value>::TraceStats() const
  247. {
  248. const int cMaxPerBucket = 256;
  249. int max_objects_per_bucket = 0;
  250. int num_objects = 0;
  251. int histogram[cMaxPerBucket];
  252. for (int i = 0; i < cMaxPerBucket; ++i)
  253. histogram[i] = 0;
  254. for (atomic<uint32> *bucket = mBuckets, *bucket_end = mBuckets + mNumBuckets; bucket < bucket_end; ++bucket)
  255. {
  256. int objects_in_bucket = 0;
  257. uint32 offset = *bucket;
  258. while (offset != cInvalidHandle)
  259. {
  260. const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
  261. offset = kv->mNextOffset;
  262. ++objects_in_bucket;
  263. ++num_objects;
  264. }
  265. max_objects_per_bucket = max(objects_in_bucket, max_objects_per_bucket);
  266. histogram[min(objects_in_bucket, cMaxPerBucket - 1)]++;
  267. }
  268. Trace("max_objects_per_bucket = %d, num_buckets = %d, num_objects = %d", max_objects_per_bucket, mNumBuckets, num_objects);
  269. for (int i = 0; i < cMaxPerBucket; ++i)
  270. if (histogram[i] != 0)
  271. Trace("%d: %d", i, histogram[i]);
  272. }
  273. #endif
  274. } // JPH