// TransferGpuAllocator.cpp
  1. // Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/Resource/TransferGpuAllocator.h>
  6. #include <AnKi/Gr/Fence.h>
  7. #include <AnKi/Gr/Buffer.h>
  8. #include <AnKi/Gr/GrManager.h>
  9. #include <AnKi/Util/Tracer.h>
  10. namespace anki
  11. {
  12. class TransferGpuAllocator::Memory : public StackGpuAllocatorMemory
  13. {
  14. public:
  15. BufferPtr m_buffer;
  16. void* m_mappedMemory;
  17. };
  18. class TransferGpuAllocator::Interface : public StackGpuAllocatorInterface
  19. {
  20. public:
  21. GrManager* m_gr;
  22. ResourceAllocator<U8> m_alloc;
  23. ResourceAllocator<U8> getAllocator() const
  24. {
  25. return m_alloc;
  26. }
  27. ANKI_USE_RESULT Error allocate(PtrSize size, StackGpuAllocatorMemory*& mem) final
  28. {
  29. TransferGpuAllocator::Memory* mm = m_alloc.newInstance<TransferGpuAllocator::Memory>();
  30. mm->m_buffer = m_gr->newBuffer(
  31. BufferInitInfo(size, BufferUsageBit::TRANSFER_SOURCE, BufferMapAccessBit::WRITE, "Transfer"));
  32. mm->m_mappedMemory = mm->m_buffer->map(0, size, BufferMapAccessBit::WRITE);
  33. mem = mm;
  34. return Error::NONE;
  35. }
  36. void free(StackGpuAllocatorMemory* mem) final
  37. {
  38. ANKI_ASSERT(mem);
  39. TransferGpuAllocator::Memory* mm = static_cast<TransferGpuAllocator::Memory*>(mem);
  40. if(mm->m_mappedMemory)
  41. {
  42. mm->m_buffer->unmap();
  43. }
  44. m_alloc.deleteInstance(mm);
  45. }
  46. void getChunkGrowInfo(F32& scale, PtrSize& bias, PtrSize& initialSize) final
  47. {
  48. scale = 1.5;
  49. bias = 0;
  50. initialSize = TransferGpuAllocator::CHUNK_INITIAL_SIZE;
  51. }
  52. U32 getMaxAlignment() final
  53. {
  54. return TransferGpuAllocator::GPU_BUFFER_ALIGNMENT;
  55. }
  56. };
  57. BufferPtr TransferGpuAllocatorHandle::getBuffer() const
  58. {
  59. ANKI_ASSERT(m_handle.m_memory);
  60. const TransferGpuAllocator::Memory* mm = static_cast<const TransferGpuAllocator::Memory*>(m_handle.m_memory);
  61. ANKI_ASSERT(mm->m_buffer);
  62. return mm->m_buffer;
  63. }
  64. void* TransferGpuAllocatorHandle::getMappedMemory() const
  65. {
  66. ANKI_ASSERT(m_handle.m_memory);
  67. const TransferGpuAllocator::Memory* mm = static_cast<const TransferGpuAllocator::Memory*>(m_handle.m_memory);
  68. ANKI_ASSERT(mm->m_mappedMemory);
  69. return static_cast<U8*>(mm->m_mappedMemory) + m_handle.m_offset;
  70. }
  71. TransferGpuAllocator::TransferGpuAllocator()
  72. {
  73. }
  74. TransferGpuAllocator::~TransferGpuAllocator()
  75. {
  76. for(Frame& frame : m_frames)
  77. {
  78. ANKI_ASSERT(frame.m_pendingReleases == 0);
  79. frame.m_fences.destroy(m_alloc);
  80. }
  81. }
  82. Error TransferGpuAllocator::init(PtrSize maxSize, GrManager* gr, ResourceAllocator<U8> alloc)
  83. {
  84. m_alloc = alloc;
  85. m_gr = gr;
  86. m_maxAllocSize = getAlignedRoundUp(CHUNK_INITIAL_SIZE * FRAME_COUNT, maxSize);
  87. ANKI_RESOURCE_LOGI("Will use %luMB of memory for transfer scratch", m_maxAllocSize / 1024 / 1024);
  88. m_interface.reset(m_alloc.newInstance<Interface>());
  89. m_interface->m_gr = gr;
  90. m_interface->m_alloc = alloc;
  91. for(Frame& frame : m_frames)
  92. {
  93. frame.m_stackAlloc.init(m_alloc, m_interface.get());
  94. }
  95. return Error::NONE;
  96. }
/// Allocate `size` bytes of transfer scratch and fill `handle`.
/// Blocks (under m_mtx) when the current frame's share of the budget is
/// exhausted: it rotates to the next frame and waits until all of that
/// frame's handles are released and its GPU fences have signaled.
Error TransferGpuAllocator::allocate(PtrSize size, TransferGpuAllocatorHandle& handle)
{
	ANKI_TRACE_SCOPED_EVENT(RSRC_ALLOCATE_TRANSFER);
	// Each of the FRAME_COUNT frames gets an equal slice of the total budget.
	const PtrSize frameSize = m_maxAllocSize / FRAME_COUNT;
	LockGuard<Mutex> lock(m_mtx);
	Frame* frame;
	if(m_crntFrameAllocatedSize + size <= frameSize)
	{
		// Have enough space in the frame
		frame = &m_frames[m_frameCount];
	}
	else
	{
		// Don't have enough space. Wait for next frame
		m_frameCount = U8((m_frameCount + 1) % FRAME_COUNT);
		Frame& nextFrame = m_frames[m_frameCount];
		// Wait for all memory to be released. release() decrements
		// m_pendingReleases and signals m_condVar; the wait releases m_mtx
		// while blocked.
		while(nextFrame.m_pendingReleases != 0)
		{
			m_condVar.wait(m_mtx);
		}
		// Wait all fences
		// NOTE(review): if clientWait() times out (done == false) the fence is
		// retried forever while holding m_mtx — presumably intentional
		// best-effort waiting, but confirm there is no timeout-abort policy.
		while(!nextFrame.m_fences.isEmpty())
		{
			FencePtr fence = nextFrame.m_fences.getFront();
			const Bool done = fence->clientWait(MAX_FENCE_WAIT_TIME);
			if(done)
			{
				nextFrame.m_fences.popFront(m_alloc);
			}
		}
		// Frame fully idle: recycle its stack memory and restart accounting.
		nextFrame.m_stackAlloc.reset();
		m_crntFrameAllocatedSize = 0;
		frame = &nextFrame;
	}
	ANKI_CHECK(frame->m_stackAlloc.allocate(size, handle.m_handle));
	handle.m_range = size;
	// Store the owning frame's index so release() can find it again.
	handle.m_frame = U8(frame - &m_frames[0]);
	m_crntFrameAllocatedSize += size;
	// Balanced by release(); the next-frame wait above depends on this count.
	++frame->m_pendingReleases;
	return Error::NONE;
}
/// Release a handle obtained from allocate(). The memory is not reusable
/// immediately: `fence` is queued on the handle's frame and allocate() waits
/// on it before recycling that frame's scratch.
void TransferGpuAllocator::release(TransferGpuAllocatorHandle& handle, FencePtr fence)
{
	ANKI_ASSERT(fence);
	ANKI_ASSERT(handle.valid());
	Frame& frame = m_frames[handle.m_frame];
	{
		LockGuard<Mutex> lock(m_mtx);
		frame.m_fences.pushBack(m_alloc, fence);
		ANKI_ASSERT(frame.m_pendingReleases > 0);
		--frame.m_pendingReleases;
		// Wake an allocate() call that may be blocked waiting for this
		// frame's pending releases to reach zero. Done while holding m_mtx.
		m_condVar.notifyOne();
	}
	// Outside the lock: only touches the caller's handle, not shared state.
	handle.invalidate();
}
  153. } // end namespace anki