@@ -7,7 +7,7 @@ template <typename Object>
 FixedSizeFreeList<Object>::~FixedSizeFreeList()
 {
 	// Ensure everything is freed before the freelist is destructed
-	JPH_ASSERT(mNumFreeObjects == mNumPages * mPageSize);
+	JPH_ASSERT(mNumFreeObjects.load(memory_order_relaxed) == mNumPages * mPageSize);
 
 	// Free memory for pages
 	uint32 num_pages = mNumObjectsAllocated / mPageSize;
@@ -51,12 +51,12 @@ uint32 FixedSizeFreeList<Object>::ConstructObject(Parameters &&... inParameters)
 	for (;;)
 	{
 		// Get first object from the linked list
-		uint64 first_free_object_and_tag = mFirstFreeObjectAndTag;
+		uint64 first_free_object_and_tag = mFirstFreeObjectAndTag.load(memory_order_acquire);
 		uint32 first_free = uint32(first_free_object_and_tag);
 		if (first_free == cInvalidObjectIndex)
 		{
 			// The free list is empty, we take an object from the page that has never been used before
-			first_free = mFirstFreeObjectInNewPage++;
+			first_free = mFirstFreeObjectInNewPage.fetch_add(1, memory_order_relaxed);
 			if (first_free >= mNumObjectsAllocated)
 			{
 				// Allocate new page
@@ -72,28 +72,28 @@ uint32 FixedSizeFreeList<Object>::ConstructObject(Parameters &&... inParameters)
 			}
 
 			// Allocation successful
-			JPH_IF_ENABLE_ASSERTS(--mNumFreeObjects;)
+			JPH_IF_ENABLE_ASSERTS(mNumFreeObjects.fetch_sub(1, memory_order_relaxed);)
 			ObjectStorage &storage = GetStorage(first_free);
 			new (&storage.mData) Object(forward<Parameters>(inParameters)...);
-			storage.mNextFreeObject = first_free;
+			storage.mNextFreeObject.store(first_free, memory_order_release);
 			return first_free;
 		}
 		else
 		{
 			// Load next pointer
-			uint32 new_first_free = GetStorage(first_free).mNextFreeObject;
+			uint32 new_first_free = GetStorage(first_free).mNextFreeObject.load(memory_order_acquire);
 
 			// Construct a new first free object tag
-			uint64 new_first_free_object_and_tag = uint64(new_first_free) + (uint64(mAllocationTag++) << 32);
+			uint64 new_first_free_object_and_tag = uint64(new_first_free) + (uint64(mAllocationTag.fetch_add(1, memory_order_relaxed)) << 32);
 
 			// Compare and swap
-			if (mFirstFreeObjectAndTag.compare_exchange_strong(first_free_object_and_tag, new_first_free_object_and_tag))
+			if (mFirstFreeObjectAndTag.compare_exchange_weak(first_free_object_and_tag, new_first_free_object_and_tag, memory_order_release))
			{
 				// Allocation successful
-				JPH_IF_ENABLE_ASSERTS(--mNumFreeObjects;)
+				JPH_IF_ENABLE_ASSERTS(mNumFreeObjects.fetch_sub(1, memory_order_relaxed);)
 				ObjectStorage &storage = GetStorage(first_free);
 				new (&storage.mData) Object(forward<Parameters>(inParameters)...);
-				storage.mNextFreeObject = first_free;
+				storage.mNextFreeObject.store(first_free, memory_order_release);
 				return first_free;
 			}
 		}
@@ -103,14 +103,14 @@ uint32 FixedSizeFreeList<Object>::ConstructObject(Parameters &&... inParameters)
 template <typename Object>
 void FixedSizeFreeList<Object>::AddObjectToBatch(Batch &ioBatch, uint32 inObjectIndex)
 {
-	JPH_ASSERT(GetStorage(inObjectIndex).mNextFreeObject == inObjectIndex, "Trying to add a object to the batch that is already in a free list");
+	JPH_ASSERT(GetStorage(inObjectIndex).mNextFreeObject.load(memory_order_relaxed) == inObjectIndex, "Trying to add an object to the batch that is already in a free list");
 	JPH_ASSERT(ioBatch.mNumObjects != uint32(-1), "Trying to reuse a batch that has already been freed");
 
 	// Link object in batch to free list
 	if (ioBatch.mFirstObjectIndex == cInvalidObjectIndex)
 		ioBatch.mFirstObjectIndex = inObjectIndex;
 	else
-		GetStorage(ioBatch.mLastObjectIndex).mNextFreeObject = inObjectIndex;
+		GetStorage(ioBatch.mLastObjectIndex).mNextFreeObject.store(inObjectIndex, memory_order_release);
 	ioBatch.mLastObjectIndex = inObjectIndex;
 	ioBatch.mNumObjects++;
 }
@@ -128,29 +128,30 @@ void FixedSizeFreeList<Object>::DestructObjectBatch(Batch &ioBatch)
 			{
 				ObjectStorage &storage = GetStorage(object_idx);
 				reinterpret_cast<Object &>(storage.mData).~Object();
-				object_idx = storage.mNextFreeObject;
+				object_idx = storage.mNextFreeObject.load(memory_order_relaxed);
 			}
 			while (object_idx != cInvalidObjectIndex);
 		}
 
 		// Add to objects free list
+		ObjectStorage &storage = GetStorage(ioBatch.mLastObjectIndex);
 		for (;;)
 		{
 			// Get first object from the list
-			uint64 first_free_object_and_tag = mFirstFreeObjectAndTag;
+			uint64 first_free_object_and_tag = mFirstFreeObjectAndTag.load(memory_order_acquire);
 			uint32 first_free = uint32(first_free_object_and_tag);
 
 			// Make it the next pointer of the last object in the batch that is to be freed
-			GetStorage(ioBatch.mLastObjectIndex).mNextFreeObject = first_free;
+			storage.mNextFreeObject.store(first_free, memory_order_release);
 
 			// Construct a new first free object tag
-			uint64 new_first_free_object_and_tag = uint64(ioBatch.mFirstObjectIndex) + (uint64(mAllocationTag++) << 32);
+			uint64 new_first_free_object_and_tag = uint64(ioBatch.mFirstObjectIndex) + (uint64(mAllocationTag.fetch_add(1, memory_order_relaxed)) << 32);
 
 			// Compare and swap
-			if (mFirstFreeObjectAndTag.compare_exchange_strong(first_free_object_and_tag, new_first_free_object_and_tag))
+			if (mFirstFreeObjectAndTag.compare_exchange_weak(first_free_object_and_tag, new_first_free_object_and_tag, memory_order_release))
 			{
 				// Free successful
-				JPH_IF_ENABLE_ASSERTS(mNumFreeObjects += ioBatch.mNumObjects;)
+				JPH_IF_ENABLE_ASSERTS(mNumFreeObjects.fetch_add(ioBatch.mNumObjects, memory_order_relaxed);)
 
 				// Mark the batch as freed
 #ifdef JPH_ENABLE_ASSERTS
@@ -175,20 +176,20 @@ void FixedSizeFreeList<Object>::DestructObject(uint32 inObjectIndex)
 	for (;;)
 	{
 		// Get first object from the list
-		uint64 first_free_object_and_tag = mFirstFreeObjectAndTag;
+		uint64 first_free_object_and_tag = mFirstFreeObjectAndTag.load(memory_order_acquire);
 		uint32 first_free = uint32(first_free_object_and_tag);
 
 		// Make it the next pointer of the last object in the batch that is to be freed
-		storage.mNextFreeObject = first_free;
+		storage.mNextFreeObject.store(first_free, memory_order_release);
 
 		// Construct a new first free object tag
-		uint64 new_first_free_object_and_tag = uint64(inObjectIndex) + (uint64(mAllocationTag++) << 32);
+		uint64 new_first_free_object_and_tag = uint64(inObjectIndex) + (uint64(mAllocationTag.fetch_add(1, memory_order_relaxed)) << 32);
 
 		// Compare and swap
-		if (mFirstFreeObjectAndTag.compare_exchange_strong(first_free_object_and_tag, new_first_free_object_and_tag))
+		if (mFirstFreeObjectAndTag.compare_exchange_weak(first_free_object_and_tag, new_first_free_object_and_tag, memory_order_release))
 		{
 			// Free successful
-			JPH_IF_ENABLE_ASSERTS(mNumFreeObjects++;)
+			JPH_IF_ENABLE_ASSERTS(mNumFreeObjects.fetch_add(1, memory_order_relaxed);)
 			return;
 		}
 	}
@@ -197,7 +198,7 @@ void FixedSizeFreeList<Object>::DestructObject(uint32 inObjectIndex)
 template<typename Object>
 inline void FixedSizeFreeList<Object>::DestructObject(Object *inObject)
 {
-	uint32 index = reinterpret_cast<ObjectStorage *>(inObject)->mNextFreeObject;
+	uint32 index = reinterpret_cast<ObjectStorage *>(inObject)->mNextFreeObject.load(memory_order_relaxed);
 	JPH_ASSERT(index < mNumObjectsAllocated);
 	DestructObject(index);
 }
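
For context, the pattern this change makes explicit is a lock-free "tagged head" free list: the head packs a 32-bit slot index in the low word and a generation tag in the high word of one 64-bit atomic, so a compare-exchange fails when another thread pops and re-pushes the same index in between (the ABA problem). The move from compare_exchange_strong to compare_exchange_weak is safe because every CAS here already sits in a retry loop; weak may fail spuriously, but can compile to cheaper code on LL/SC architectures. Below is a minimal standalone sketch of that pattern, not Jolt code: every name in it (PopFreeSlot, PushFreeSlot, sNextFreeSlot, sFirstFreeAndTag, cSketchCapacity, cSketchInvalid) is hypothetical, and it bumps the tag stored in the packed high word, where the diff instead draws tags from a separate mAllocationTag counter.

#include <atomic>
#include <cstdint>

using namespace std;

// Hypothetical capacity and invalid marker, for this sketch only
static constexpr uint32_t cSketchCapacity = 1024;
static constexpr uint32_t cSketchInvalid = 0xffffffff;

// Per-slot next links; in FixedSizeFreeList these live in ObjectStorage::mNextFreeObject
static atomic<uint32_t> sNextFreeSlot[cSketchCapacity];

// Slot index in the low 32 bits, generation tag in the high 32 bits
static atomic<uint64_t> sFirstFreeAndTag { uint64_t(cSketchInvalid) };

static uint32_t PopFreeSlot()
{
	for (;;)
	{
		// Acquire pairs with the release in PushFreeSlot so the slot's next link is visible
		uint64_t head_and_tag = sFirstFreeAndTag.load(memory_order_acquire);
		uint32_t head = uint32_t(head_and_tag);
		if (head == cSketchInvalid)
			return cSketchInvalid; // Free list is empty

		uint32_t next = sNextFreeSlot[head].load(memory_order_acquire);

		// Put the new head in the low word and bump the tag in the high word, so a thread
		// that slept through a pop + re-push of the same index fails the CAS (ABA)
		uint64_t new_head_and_tag = uint64_t(next) | (((head_and_tag >> 32) + 1) << 32);

		// Weak CAS suffices inside a retry loop: a spurious failure costs one iteration
		if (sFirstFreeAndTag.compare_exchange_weak(head_and_tag, new_head_and_tag, memory_order_release))
			return head;
	}
}

static void PushFreeSlot(uint32_t inSlot)
{
	for (;;)
	{
		uint64_t head_and_tag = sFirstFreeAndTag.load(memory_order_acquire);

		// Release so a consumer that acquires the new head also sees this link
		sNextFreeSlot[inSlot].store(uint32_t(head_and_tag), memory_order_release);

		uint64_t new_head_and_tag = uint64_t(inSlot) | (((head_and_tag >> 32) + 1) << 32);
		if (sFirstFreeAndTag.compare_exchange_weak(head_and_tag, new_head_and_tag, memory_order_release))
			return;
	}
}

In the diff itself, the relaxed orderings on mAllocationTag and mNumFreeObjects follow the same logic: the tag only needs to be unique per CAS attempt and the counter is only read by asserts, while the acquire/release pairs on the head and the next links are what actually publish the list structure between threads.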