StackGpuAllocator.cpp

// Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
// All rights reserved.
// Code licensed under the BSD License.
// http://www.anki3d.org/LICENSE

#include <AnKi/Gr/Utils/StackGpuAllocator.h>
namespace anki {

/// A single chunk in the allocator's linked list of GPU memory blocks.
class StackGpuAllocatorChunk
{
public:
	StackGpuAllocatorChunk* m_next; ///< Next chunk in the list.
	StackGpuAllocatorMemory* m_mem; ///< The GPU memory backing this chunk.
	Atomic<PtrSize> m_offset; ///< Current stack top, bumped atomically on allocate.
	PtrSize m_size; ///< Total size of the chunk in bytes.
};
StackGpuAllocator::~StackGpuAllocator()
{
	// Free every chunk's GPU memory through the interface, then the chunk objects themselves.
	Chunk* chunk = m_chunkListHead;
	while(chunk)
	{
		if(chunk->m_mem)
		{
			m_iface->free(chunk->m_mem);
		}

		Chunk* next = chunk->m_next;
		m_alloc.deleteInstance(chunk);
		chunk = next;
	}
}
void StackGpuAllocator::init(GenericMemoryPoolAllocator<U8> alloc, StackGpuAllocatorInterface* iface)
{
	ANKI_ASSERT(iface);
	m_alloc = alloc;
	m_iface = iface;

	// Chunk growth policy: every new chunk is (previous size * m_scale + m_bias) bytes.
	iface->getChunkGrowInfo(m_scale, m_bias, m_initialSize);
	ANKI_ASSERT(m_scale >= 1.0);
	ANKI_ASSERT(m_initialSize > 0);

	// Sizes and offsets are always rounded up to the interface's max alignment.
	m_alignment = iface->getMaxAlignment();
	ANKI_ASSERT(m_alignment > 0);
	alignRoundUp(m_alignment, m_initialSize);
}
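
// Note on the algorithm below: the fast path is lock-free. Threads race with an atomic
// fetch-add on the current chunk's offset; a thread whose resulting range still fits the
// chunk is done without taking a lock. Threads that overshoot fall into the locked slow
// path, where exactly one of them either appends a bigger chunk to the list or recycles
// the already-allocated next chunk, and then everyone retries.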
Error StackGpuAllocator::allocate(PtrSize size, StackGpuAllocatorHandle& handle)
{
	alignRoundUp(m_alignment, size);
	ANKI_ASSERT(size > 0);
	ANKI_ASSERT(size <= m_initialSize && "The chunks should have enough space to hold at least one allocation");

	Chunk* crntChunk;
	Bool retry = true;
	do
	{
		crntChunk = m_crntChunk.load();

		PtrSize offset;
		if(crntChunk && ((offset = crntChunk->m_offset.fetchAdd(size)) + size) <= crntChunk->m_size)
		{
			// All is fine, there is enough space in the chunk
			handle.m_memory = crntChunk->m_mem;
			handle.m_offset = offset;
			retry = false;
		}
		else
		{
			// Need new chunk
			LockGuard<Mutex> lock(m_lock);

			// Make sure that only one thread will create a new chunk
			if(m_crntChunk.load() == crntChunk)
			{
				// We can create a new chunk
				if(crntChunk == nullptr || crntChunk->m_next == nullptr)
				{
					// Need to create a new chunk
					Chunk* newChunk = m_alloc.newInstance<Chunk>();

					if(crntChunk)
					{
						crntChunk->m_next = newChunk;
						newChunk->m_size = PtrSize(F32(crntChunk->m_size) * m_scale + F32(m_bias));
					}
					else
					{
						newChunk->m_size = m_initialSize;

						if(m_chunkListHead == nullptr)
						{
							m_chunkListHead = newChunk;
						}
					}

					alignRoundUp(m_alignment, newChunk->m_size);
					newChunk->m_next = nullptr;
					newChunk->m_offset.setNonAtomically(0);

					ANKI_CHECK(m_iface->allocate(newChunk->m_size, newChunk->m_mem));

					m_crntChunk.store(newChunk);
				}
				else
				{
					// Need to recycle one
					crntChunk->m_next->m_offset.setNonAtomically(0);
					m_crntChunk.store(crntChunk->m_next);
				}
			}
		}
	} while(retry);

	return Error::NONE;
}
void StackGpuAllocator::reset()
{
	// Rewind to the first chunk. No memory is freed; later allocations recycle the chunks.
	m_crntChunk.setNonAtomically(m_chunkListHead);

	if(m_chunkListHead)
	{
		m_chunkListHead->m_offset.setNonAtomically(0);
	}
}
  109. } // end namespace anki
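
// A minimal usage sketch, not part of the original file. `pool` and `iface` are assumptions:
// a valid GenericMemoryPoolAllocator<U8> and some implementation of
// StackGpuAllocatorInterface, respectively. Typical per-frame flow:
//
//   StackGpuAllocator galloc;
//   galloc.init(pool, &iface);
//
//   StackGpuAllocatorHandle handle;
//   ANKI_CHECK(galloc.allocate(256, handle)); // handle.m_memory + handle.m_offset locate the block
//
//   galloc.reset(); // end of frame: rewind to the first chunk, GPU memory stays allocated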