| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607 |
- // Copyright (C) 2009-present, Panagiotis Christopoulos Charitos and contributors.
- // All rights reserved.
- // Code licensed under the BSD License.
- // http://www.anki3d.org/LICENSE
- #include <AnKi/Util/SegregatedListsAllocatorBuilder.h>
- namespace anki {
- template<typename TChunk, typename TInterface, typename TLock, typename TMemoryPool>
- SegregatedListsAllocatorBuilder<TChunk, TInterface, TLock, TMemoryPool>::~SegregatedListsAllocatorBuilder()
- {
- if(!m_chunks.isEmpty())
- {
- ANKI_UTIL_LOGE("Forgot to free memory");
- }
- }
- template<typename TChunk, typename TInterface, typename TLock, typename TMemoryPool>
- U32 SegregatedListsAllocatorBuilder<TChunk, TInterface, TLock, TMemoryPool>::findClass(PtrSize size, PtrSize alignment) const
- {
- ANKI_ASSERT(size > 0 && alignment > 0);
- for(U32 i = 0; i < m_interface.getClassCount(); ++i)
- {
- PtrSize maxSize;
- m_interface.getClassInfo(i, maxSize);
- if(size <= maxSize)
- {
- if(alignment <= maxSize)
- {
- // Found the class
- return i;
- }
- else
- {
- // The class found doesn't have the proper alignment. Need to go higher
- }
- }
- }
- // Not found
- return kMaxU32;
- }
- template<typename TChunk, typename TInterface, typename TLock, typename TMemoryPool>
- Bool SegregatedListsAllocatorBuilder<TChunk, TInterface, TLock, TMemoryPool>::chooseBestFit(PtrSize allocSize, PtrSize allocAlignment,
- FreeBlock* blockA, FreeBlock* blockB,
- FreeBlock*& bestBlock)
- {
- ANKI_ASSERT(allocSize > 0 && allocAlignment > 0);
- ANKI_ASSERT(blockA || blockB);
- bestBlock = nullptr;
- PtrSize remainingSizeA = 0;
- if(blockA)
- {
- const PtrSize alignedAddress = getAlignedRoundUp(allocAlignment, blockA->m_address);
- const Bool fits = alignedAddress + allocSize <= blockA->m_address + blockA->m_size;
- if(fits)
- {
- bestBlock = blockA;
- remainingSizeA = blockA->m_size - allocSize - (alignedAddress - blockA->m_address);
- }
- }
- if(blockB)
- {
- const PtrSize alignedAddress = getAlignedRoundUp(allocAlignment, blockB->m_address);
- const Bool fits = alignedAddress + allocSize <= blockB->m_address + blockB->m_size;
- if(fits)
- {
- if(bestBlock)
- {
- const PtrSize remainingSizeB = blockB->m_size - allocSize - (alignedAddress - blockB->m_address);
- if(remainingSizeB < remainingSizeA)
- {
- bestBlock = blockB;
- }
- }
- else
- {
- bestBlock = blockB;
- }
- }
- }
- if(bestBlock)
- {
- return allocSize == bestBlock->m_size;
- }
- else
- {
- return false;
- }
- }
template<typename TChunk, typename TInterface, typename TLock, typename TMemoryPool>
void SegregatedListsAllocatorBuilder<TChunk, TInterface, TLock, TMemoryPool>::placeFreeBlock(PtrSize address, PtrSize size, ChunksIterator chunkIt)
{
	// Inserts the free range [address, address + size) into the given chunk: merges (coalesces) it with any free
	// blocks that touch it on either side, files the merged block under the size class matching its final size, and
	// deletes the chunk entirely if it ends up completely free. Callers are expected to hold m_lock.
	ANKI_ASSERT(size > 0 && isAligned(m_interface.getMinSizeAlignment(), size));
	ANKI_ASSERT(chunkIt != m_chunks.getEnd());
	TChunk& chunk = *(*chunkIt);
	ANKI_ASSERT(address + size <= chunk.m_totalSize);

	// First search everything to find other coalesced free blocks. The left/right neighbours may live in ANY class
	// (their sizes differ from the new block's), so every class list has to be scanned. kMaxU32 == not found.
	U32 leftBlock = kMaxU32; // Index of the block ending exactly at `address`
	U32 leftClass = kMaxU32;
	U32 rightBlock = kMaxU32; // Index of the block starting exactly at `address + size`
	U32 rightClass = kMaxU32;
	for(U32 classIdx = 0; classIdx < m_interface.getClassCount(); ++classIdx)
	{
		const DynamicArray<FreeBlock, TMemoryPool>& freeLists = chunk.m_freeLists[classIdx];
		if(freeLists.getSize() == 0)
		{
			continue;
		}

		FreeBlock newBlock;
		newBlock.m_address = address;
		newBlock.m_size = size;

		// Each list is kept sorted by address (see the std::sort below), so binary-search for the first block that is
		// not entirely to the left of the new one. Only that block (and possibly its successor) can be adjacent.
		auto it = std::lower_bound(freeLists.getBegin(), freeLists.getEnd(), newBlock, [](const FreeBlock& a, const FreeBlock& b) {
			return a.m_address + a.m_size < b.m_address;
		});

		if(it == freeLists.getEnd())
		{
			continue;
		}

		const U32 pos = U32(it - freeLists.getBegin());
		const FreeBlock& freeBlock = freeLists[pos];

		if(freeBlock.m_address + freeBlock.m_size == address)
		{
			// Free block is in the left of the new
			ANKI_ASSERT(leftBlock == kMaxU32);
			leftBlock = pos;
			leftClass = classIdx;

			// The right neighbour may be the very next block in this same list, check it now to save scanning
			if(pos + 1 < freeLists.getSize())
			{
				const FreeBlock& freeBlock2 = freeLists[pos + 1];
				if(address + size == freeBlock2.m_address)
				{
					ANKI_ASSERT(rightBlock == kMaxU32);
					rightBlock = pos + 1;
					rightClass = classIdx;
				}
			}
		}
		else if(address + size == freeBlock.m_address)
		{
			// Free block is in the right of the new
			ANKI_ASSERT(rightBlock == kMaxU32);
			rightBlock = pos;
			rightClass = classIdx;

			// Similarly, the left neighbour may be the previous block in this same list
			if(pos > 0)
			{
				const FreeBlock& freeBlock2 = freeLists[pos - 1];
				if(freeBlock2.m_address + freeBlock2.m_size == address)
				{
					ANKI_ASSERT(leftBlock == kMaxU32);
					leftBlock = pos - 1;
					leftClass = classIdx;
				}
			}
		}
		else
		{
			// Do nothing, this class has no block adjacent to the new one
		}

		if(leftBlock != kMaxU32 && rightBlock != kMaxU32)
		{
			// Best case, found both neighbours, stop searching
			break;
		}
	}

	// Merge the new block with whatever neighbours were found
	FreeBlock newBlock;
	newBlock.m_address = address;
	newBlock.m_size = size;
	if(leftBlock != kMaxU32)
	{
		// Absorb the left neighbour: the merged block starts where it started
		const FreeBlock& lblock = chunk.m_freeLists[leftClass][leftBlock];
		newBlock.m_address = lblock.m_address;
		newBlock.m_size += lblock.m_size;

		chunk.m_freeLists[leftClass].erase(chunk.m_freeLists[leftClass].getBegin() + leftBlock);

		if(rightBlock != kMaxU32 && rightClass == leftClass)
		{
			// Both right and left blocks live in the same dynamic array. Due to the erase() above the rights's index is no longer valid, adjust it
			ANKI_ASSERT(rightBlock > leftBlock);
			--rightBlock;
		}
	}

	if(rightBlock != kMaxU32)
	{
		// Absorb the right neighbour: only its size matters, the start address doesn't move
		const FreeBlock& rblock = chunk.m_freeLists[rightClass][rightBlock];
		newBlock.m_size += rblock.m_size;

		chunk.m_freeLists[rightClass].erase(chunk.m_freeLists[rightClass].getBegin() + rightBlock);
	}

	// Store the new block under the class matching its (possibly grown) size, then restore the address ordering that
	// the lower_bound search above depends on
	const U32 newClassIdx = findClass(newBlock.m_size, 1);
	ANKI_ASSERT(newClassIdx != kMaxU32);
	chunk.m_freeLists[newClassIdx].emplaceBack(newBlock);
	std::sort(chunk.m_freeLists[newClassIdx].getBegin(), chunk.m_freeLists[newClassIdx].getEnd(), [](const FreeBlock& a, const FreeBlock& b) {
		return a.m_address < b.m_address;
	});

	// Adjust chunk free size
	chunk.m_freeSize += size;
	ANKI_ASSERT(chunk.m_freeSize <= chunk.m_totalSize);

	if(chunk.m_freeSize == chunk.m_totalSize)
	{
		// Chunk completely free, delete it. After full coalescing a completely free chunk must contain exactly one
		// free block, which the blockCount assertion below verifies
		U32 blockCount = 0;
		for(U32 classIdx = 0; classIdx < m_interface.getClassCount(); ++classIdx)
		{
			blockCount += chunk.m_freeLists[classIdx].getSize();
			chunk.m_freeLists[classIdx].destroy();
		}
		chunk.m_freeLists.destroy();
		ANKI_ASSERT(blockCount == 1);

		m_chunks.erase(chunkIt);
		m_interface.deleteChunk(&chunk);
	}
}
- template<typename TChunk, typename TInterface, typename TLock, typename TMemoryPool>
- Error SegregatedListsAllocatorBuilder<TChunk, TInterface, TLock, TMemoryPool>::allocate(PtrSize origSize, PtrSize origAlignment, TChunk*& outChunk,
- PtrSize& outOffset)
- {
- ANKI_ASSERT(origSize > 0 && origAlignment > 0);
- const PtrSize size = getAlignedRoundUp(m_interface.getMinSizeAlignment(), origSize);
- const PtrSize alignment = max<PtrSize>(m_interface.getMinSizeAlignment(), origAlignment);
- // Find starting class
- const U32 startingClassIdx = findClass(size, alignment);
- if(startingClassIdx == kMaxU32) [[unlikely]]
- {
- ANKI_UTIL_LOGE("Couldn't find class for allocation of size %zu", origSize);
- return Error::kOutOfMemory;
- }
- LockGuard<TLock> lock(m_lock);
- // Sort the chunks because we want to try allocate from the least empty first
- std::sort(m_chunks.getBegin(), m_chunks.getEnd(), [](const TChunk* a, const TChunk* b) {
- return a->m_freeSize < b->m_freeSize;
- });
- // Find a free block, its class and its chunk
- FreeBlock* freeBlock = nullptr;
- ChunksIterator chunkIt = m_chunks.getBegin();
- U32 classIdx = kMaxU32;
- for(; chunkIt != m_chunks.getEnd(); ++chunkIt)
- {
- classIdx = startingClassIdx;
- while(classIdx < m_interface.getClassCount())
- {
- // Find the best fit
- for(FreeBlock& block : (*chunkIt)->m_freeLists[classIdx])
- {
- const Bool theBestBlock = chooseBestFit(size, alignment, freeBlock, &block, freeBlock);
- if(theBestBlock)
- {
- break;
- }
- }
- if(freeBlock)
- {
- // Done
- break;
- }
- else
- {
- // Check for free blocks in the next class
- ++classIdx;
- }
- }
- if(freeBlock)
- {
- break;
- }
- }
- if(freeBlock == nullptr)
- {
- // No free blocks, allocate new chunk
- // Init the new chunk
- PtrSize chunkSize;
- TChunk* chunk;
- ANKI_CHECK(m_interface.allocateChunk(chunk, chunkSize));
- chunk->m_freeLists.resize(m_interface.getClassCount());
- if(chunkSize < size)
- {
- ANKI_UTIL_LOGE("Chunk allocated can't fit the current allocation of %zu", origSize);
- chunk->m_freeLists.destroy();
- m_interface.deleteChunk(chunk);
- return Error::kOutOfMemory;
- }
- chunk->m_totalSize = chunkSize;
- chunk->m_freeSize = 0;
- m_chunks.emplaceBack(chunk);
- placeFreeBlock(size, chunkSize - size, m_chunks.getBegin() + m_chunks.getSize() - 1);
- // Allocate
- outChunk = chunk;
- outOffset = 0;
- }
- else
- {
- // Have a free block, allocate from it
- ANKI_ASSERT(chunkIt != m_chunks.getEnd() && freeBlock);
- TChunk& chunk = *(*chunkIt);
- const FreeBlock fBlock = *freeBlock;
- chunk.m_freeLists[classIdx].erase(freeBlock);
- freeBlock = nullptr;
- ANKI_ASSERT(chunk.m_freeSize >= fBlock.m_size);
- chunk.m_freeSize -= fBlock.m_size;
- const PtrSize alignedAddress = getAlignedRoundUp(alignment, fBlock.m_address);
- if(alignedAddress != fBlock.m_address)
- {
- // Add a free block because of alignment missmatch
- placeFreeBlock(fBlock.m_address, alignedAddress - fBlock.m_address, chunkIt);
- }
- const PtrSize allocationEnd = alignedAddress + size;
- const PtrSize freeBlockEnd = fBlock.m_address + fBlock.m_size;
- if(allocationEnd < freeBlockEnd)
- {
- // Add what remains
- placeFreeBlock(allocationEnd, freeBlockEnd - allocationEnd, chunkIt);
- }
- // Allocate
- outChunk = &chunk;
- outOffset = alignedAddress;
- }
- ANKI_ASSERT(outChunk);
- ANKI_ASSERT(isAligned(alignment, outOffset));
- return Error::kNone;
- }
- template<typename TChunk, typename TInterface, typename TLock, typename TMemoryPool>
- void SegregatedListsAllocatorBuilder<TChunk, TInterface, TLock, TMemoryPool>::free(TChunk* chunk, PtrSize offset, PtrSize size)
- {
- ANKI_ASSERT(chunk && size);
- LockGuard<TLock> lock(m_lock);
- ChunksIterator it = m_chunks.getBegin();
- for(; it != m_chunks.getEnd(); ++it)
- {
- if(*it == chunk)
- {
- break;
- }
- }
- ANKI_ASSERT(it != m_chunks.getEnd());
- size = getAlignedRoundUp(m_interface.getMinSizeAlignment(), size);
- placeFreeBlock(offset, size, it);
- }
template<typename TChunk, typename TInterface, typename TLock, typename TMemoryPool>
Error SegregatedListsAllocatorBuilder<TChunk, TInterface, TLock, TMemoryPool>::validate() const
{
	// Full consistency check of the allocator state. Logs, breaks into the debugger and returns
	// Error::kFunctionFailed on the first violated invariant; returns Error::kNone when everything is consistent.

// Check helper: on failure log the message, break and bail out of validate()
#define ANKI_SLAB_ASSERT(x, ...) \
	do \
	{ \
		if(!(x)) [[unlikely]] \
		{ \
			ANKI_UTIL_LOGE(__VA_ARGS__); \
			ANKI_DEBUG_BREAK(); \
			return Error::kFunctionFailed; \
		} \
	} while(0)

	LockGuard<TLock> lock(m_lock);

	// Pass 1: per-chunk size bookkeeping, class membership and intra-list ordering
	U32 chunkIdx = 0;
	for(const TChunk* chunk : m_chunks)
	{
		ANKI_SLAB_ASSERT(chunk->m_totalSize > 0, "Chunk %u: Total size can't be 0", chunkIdx);
		// Strictly less: a completely free chunk should have been deleted by placeFreeBlock()
		ANKI_SLAB_ASSERT(chunk->m_freeSize < chunk->m_totalSize, "Chunk %u: Free size (%zu) should be less than total size (%zu)", chunkIdx,
						 chunk->m_freeSize, chunk->m_totalSize);

		PtrSize freeSize = 0;
		for(U32 c = 0; c < m_interface.getClassCount(); ++c)
		{
			for(U32 i = 0; i < chunk->m_freeLists[c].getSize(); ++i)
			{
				const FreeBlock& crnt = chunk->m_freeLists[c][i];
				ANKI_SLAB_ASSERT(crnt.m_size > 0, "Chunk %u class %u block %u: Block size can't be 0", chunkIdx, c, i);
				freeSize += crnt.m_size;

				// Every free block must be filed under the class its size maps to
				const U32 classIdx = findClass(crnt.m_size, 1);
				ANKI_SLAB_ASSERT(classIdx == c, "Chunk %u class %u block %u: Free block not in the correct class", chunkIdx, c, i);

				if(i > 0)
				{
					// Lists must stay address-sorted. The 2nd check is strict: touching blocks should have been merged
					const FreeBlock& prev = chunk->m_freeLists[c][i - 1];
					ANKI_SLAB_ASSERT(prev.m_address < crnt.m_address, "Chunk %u class %u block %u: Previous block should have lower address",
									 chunkIdx, c, i);
					ANKI_SLAB_ASSERT(prev.m_address + prev.m_size < crnt.m_address, "Chunk %u class %u block %u: Block overlaps with previous",
									 chunkIdx, c, i);
				}
			}
		}

		// The per-block sum must match the cached counter maintained by allocate()/placeFreeBlock()
		ANKI_SLAB_ASSERT(freeSize == chunk->m_freeSize, "Chunk %u: Free size calculated doesn't match chunk's free size", chunkIdx);
		++chunkIdx;
	}

	// Now do an expensive check that blocks never overlap with other blocks of other classes
	chunkIdx = 0;
	for(const TChunk* chunk : m_chunks)
	{
		for(U32 c = 0; c < m_interface.getClassCount(); ++c)
		{
			for(U32 i = 0; i < chunk->m_freeLists[c].getSize(); ++i)
			{
				const FreeBlock& crnt = chunk->m_freeLists[c][i];
				// Compare against every other block of every class (O(n^2) per chunk)
				for(U32 c2 = 0; c2 < m_interface.getClassCount(); ++c2)
				{
					for(U32 j = 0; j < chunk->m_freeLists[c2].getSize(); ++j)
					{
						if(c == c2 && i == j)
						{
							// Skip "crnt"
							continue;
						}

						const FreeBlock& other = chunk->m_freeLists[c2][j];

						// Check coalescing. Strict comparisons: adjacent blocks should have been merged
						if(crnt.m_address < other.m_address)
						{
							ANKI_SLAB_ASSERT(crnt.m_address + crnt.m_size < other.m_address,
											 "Chunk %u class %u block %u: Block overlaps or should have been merged "
											 "with block: class %u block %u",
											 chunkIdx, c, i, c2, j);
						}
						else if(crnt.m_address > other.m_address)
						{
							ANKI_SLAB_ASSERT(other.m_address + other.m_size < crnt.m_address,
											 "Chunk %u class %u block %u: Block overlaps or should have been merged "
											 "with block: class %u block %u",
											 chunkIdx, c, i, c2, j);
						}
						else
						{
							ANKI_SLAB_ASSERT(false,
											 "Chunk %u class %u block %u: Block shouldn't be having the same address "
											 "with: class %u block %u",
											 chunkIdx, c, i, c2, j);
						}
					}
				}
			}
		}
		++chunkIdx;
	}

#undef ANKI_SLAB_ASSERT

	return Error::kNone;
}
- template<typename TChunk, typename TInterface, typename TLock, typename TMemoryPool>
- void SegregatedListsAllocatorBuilder<TChunk, TInterface, TLock, TMemoryPool>::printFreeBlocks(StringList& strList) const
- {
- LockGuard<TLock> lock(m_lock);
- U32 chunkCount = 0;
- for(const TChunk* chunk : m_chunks)
- {
- strList.pushBackSprintf("Chunk #%u, total size %zu, free size %zu\n", chunkCount, chunk->m_totalSize, chunk->m_freeSize);
- for(U32 c = 0; c < m_interface.getClassCount(); ++c)
- {
- if(chunk->m_freeLists[c].getSize())
- {
- strList.pushBackSprintf(" Class #%u\n ", c);
- }
- for(U32 i = 0; i < chunk->m_freeLists[c].getSize(); ++i)
- {
- const FreeBlock& blk = chunk->m_freeLists[c][i];
- strList.pushBackSprintf("| %zu-%zu(%zu) ", blk.m_address, blk.m_address + blk.m_size - 1, blk.m_size);
- }
- if(chunk->m_freeLists[c].getSize())
- {
- strList.pushBack("|\n");
- }
- }
- ++chunkCount;
- }
- }
- template<typename TChunk, typename TInterface, typename TLock, typename TMemoryPool>
- F32 SegregatedListsAllocatorBuilder<TChunk, TInterface, TLock, TMemoryPool>::computeExternalFragmentation(PtrSize baseSize) const
- {
- ANKI_ASSERT(baseSize > 0);
- LockGuard<TLock> lock(m_lock);
- F32 maxFragmentation = 0.0f;
- for(const TChunk* chunk : m_chunks)
- {
- PtrSize largestFreeBlockSize = 0;
- for(U32 c = 0; c < m_interface.getClassCount(); ++c)
- {
- for(const FreeBlock& block : chunk->m_freeLists[c])
- {
- largestFreeBlockSize = max(largestFreeBlockSize, block.m_size / baseSize);
- }
- }
- const F32 frag = F32(1.0 - F64(largestFreeBlockSize) / F64(chunk->m_freeSize / baseSize));
- maxFragmentation = max(maxFragmentation, frag);
- }
- return maxFragmentation;
- }
- template<typename TChunk, typename TInterface, typename TLock, typename TMemoryPool>
- F32 SegregatedListsAllocatorBuilder<TChunk, TInterface, TLock, TMemoryPool>::computeExternalFragmentationSawicki(PtrSize baseSize) const
- {
- ANKI_ASSERT(baseSize > 0);
- LockGuard<TLock> lock(m_lock);
- F32 maxFragmentation = 0.0f;
- for(const TChunk* chunk : m_chunks)
- {
- F64 quality = 0.0;
- for(U32 c = 0; c < m_interface.getClassCount(); ++c)
- {
- for(const FreeBlock& block : chunk->m_freeLists[c])
- {
- const F64 size = F64(block.m_size / baseSize);
- quality += size * size;
- }
- }
- quality = sqrt(quality) / F64(chunk->m_freeSize / baseSize);
- const F32 frag = 1.0f - F32(quality * quality);
- maxFragmentation = max(maxFragmentation, frag);
- }
- return maxFragmentation;
- }
- } // end namespace anki
|