TransferGpuAllocator.h

// Copyright (C) 2009-present, Panagiotis Christopoulos Charitos and contributors.
// All rights reserved.
// Code licensed under the BSD License.
// http://www.anki3d.org/LICENSE

#pragma once

#include <AnKi/Resource/Common.h>
#include <AnKi/Util/StackAllocatorBuilder.h>
#include <AnKi/Util/List.h>
#include <AnKi/Gr/Buffer.h>

namespace anki {

/// @addtogroup resource
/// @{
/// Handle to a block of transfer memory. It's filled by TransferGpuAllocator::allocate() and given back with
/// TransferGpuAllocator::release().
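///
/// A minimal sketch of writing through a valid handle (srcData and srcSize stand in for the caller's own data and are
/// not part of this API):
/// @code
/// memcpy(handle.getMappedMemory(), srcData, srcSize); // Write through the persistently mapped pointer.
/// BufferView view = handle; // Implicit conversion. Covers getRange() bytes at getOffset() of getBuffer().
/// @endcode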
class TransferGpuAllocatorHandle
{
	friend class TransferGpuAllocator;

public:
	TransferGpuAllocatorHandle()
	{
	}

	TransferGpuAllocatorHandle(const TransferGpuAllocatorHandle&) = delete;

	TransferGpuAllocatorHandle(TransferGpuAllocatorHandle&& b)
	{
		*this = std::move(b);
	}

	~TransferGpuAllocatorHandle()
	{
		ANKI_ASSERT(!valid() && "Forgot to release");
	}

	TransferGpuAllocatorHandle& operator=(TransferGpuAllocatorHandle&& b)
	{
		m_buffer = b.m_buffer;
		m_mappedMemory = b.m_mappedMemory;
		m_offsetInBuffer = b.m_offsetInBuffer;
		m_range = b.m_range;
		m_pool = b.m_pool;
		b.invalidate();
		return *this;
	}

	operator BufferView() const
	{
		return {m_buffer.get(), m_offsetInBuffer, m_range};
	}

	Buffer& getBuffer() const
	{
		return *m_buffer;
	}

	void* getMappedMemory() const
	{
		ANKI_ASSERT(m_mappedMemory);
		return m_mappedMemory;
	}

	PtrSize getOffset() const
	{
		ANKI_ASSERT(m_offsetInBuffer != kMaxPtrSize);
		return m_offsetInBuffer;
	}

	PtrSize getRange() const
	{
		ANKI_ASSERT(m_range != 0);
		return m_range;
	}

private:
	BufferPtr m_buffer;
	void* m_mappedMemory = nullptr;
	PtrSize m_offsetInBuffer = kMaxPtrSize;
	PtrSize m_range = 0;
	U8 m_pool = kMaxU8;

	Bool valid() const
	{
		return m_range != 0 && m_pool < kMaxU8;
	}

	void invalidate()
	{
		m_buffer.reset(nullptr);
		m_mappedMemory = nullptr;
		m_offsetInBuffer = kMaxPtrSize;
		m_range = 0;
		m_pool = kMaxU8;
	}
};
/// GPU memory allocator for GPU buffers used in transfer operations.
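///
/// A minimal usage sketch. getSingleton() is assumed to come from the MakeSingleton mixin; the copy recording and
/// submission helpers (recordUploadCopy(), submitAndGetFence()) and the data, dataSize, cmdb and destination variables
/// are hypothetical application-side code, not part of this class:
/// @code
/// TransferGpuAllocatorHandle handle;
/// ANKI_CHECK(TransferGpuAllocator::getSingleton().allocate(dataSize, handle));
///
/// // Fill the staging memory through the persistently mapped pointer.
/// memcpy(handle.getMappedMemory(), data, dataSize);
///
/// // Record a GPU copy that reads from the handle's buffer range.
/// recordUploadCopy(cmdb, BufferView(handle), destination);
///
/// // Hand the memory back. It won't be recycled before the fence is signaled.
/// FencePtr fence = submitAndGetFence(cmdb);
/// TransferGpuAllocator::getSingleton().release(handle, fence);
/// @endcode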
class TransferGpuAllocator : public MakeSingleton<TransferGpuAllocator>
{
	friend class TransferGpuAllocatorHandle;

public:
	/// Choose an alignment that satisfies both 16-byte and 3-byte alignment. RGB8 formats require 3-byte alignment
	/// for the source of buffer-to-image copies.
	static constexpr U32 kGpuBufferAlignment = 16 * 3;
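	// Compile-time sanity check of the arithmetic above: 16 * 3 = 48 is a multiple of both 16 and 3.
	static_assert(kGpuBufferAlignment % 16 == 0 && kGpuBufferAlignment % 3 == 0);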

	static constexpr U32 kPoolCount = 2;
	static constexpr PtrSize kChunkInitialSize = 64_MB;
	static constexpr Second kMaxFenceWaitTime = 500.0_ms;

	TransferGpuAllocator();
	~TransferGpuAllocator();

	Error init(PtrSize maxSize);

	/// Allocate some transfer memory. If there is not enough memory it will block until some is released. It's
	/// thread-safe.
	Error allocate(PtrSize size, TransferGpuAllocatorHandle& handle);

	/// Release the memory. It will not be recycled before the fence is signaled. It's thread-safe.
	void release(TransferGpuAllocatorHandle& handle, FencePtr fence);

private:
	/// This is the chunk the StackAllocatorBuilder will be allocating.
	class Chunk
	{
	public:
		/// Required by StackAllocatorBuilder.
		Chunk* m_nextChunk;

		/// Required by StackAllocatorBuilder.
		Atomic<PtrSize> m_offsetInChunk;

		/// Required by StackAllocatorBuilder.
		PtrSize m_chunkSize;

		/// The GPU memory.
		BufferPtr m_buffer;

		/// Points to the mapped m_buffer.
		void* m_mappedBuffer;
	};

	/// Implements the StackAllocatorBuilder TInterface.
	class StackAllocatorBuilderInterface
	{
	public:
		// The rest of the functions implement the StackAllocatorBuilder TInterface.
		static constexpr PtrSize getInitialChunkSize()
		{
			return kChunkInitialSize;
		}

		static constexpr F64 getNextChunkGrowScale()
		{
			return 1.0;
		}

		static constexpr PtrSize getNextChunkGrowBias()
		{
			return 0;
		}

		static constexpr Bool ignoreDeallocationErrors()
		{
			return false;
		}

		static constexpr PtrSize getMaxChunkSize()
		{
			return kChunkInitialSize;
		}

		Error allocateChunk(PtrSize size, Chunk*& out);

		void freeChunk(Chunk* chunk);

		void recycleChunk([[maybe_unused]] Chunk& chunk)
		{
			// Do nothing.
		}

		constexpr Atomic<U32>* getAllocationCount()
		{
			return nullptr;
		}
	};

	class Pool
	{
	public:
		StackAllocatorBuilder<Chunk, StackAllocatorBuilderInterface, DummyMutex> m_stackAlloc;
		ResourceList<FencePtr> m_fences;
		U32 m_pendingReleases = 0;
	};

	PtrSize m_maxAllocSize = 0;

	Mutex m_mtx; ///< Protects all members below.
	ConditionVariable m_condVar;
	Array<Pool, kPoolCount> m_pools;
	U8 m_crntPool = 0;
	PtrSize m_crntPoolAllocatedSize = 0;
};
/// @}

} // end namespace anki