TransferGpuAllocator.cpp 4.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193
  1. // Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/Resource/TransferGpuAllocator.h>
  6. #include <AnKi/Gr/Fence.h>
  7. #include <AnKi/Gr/Buffer.h>
  8. #include <AnKi/Gr/GrManager.h>
  9. #include <AnKi/Util/Tracer.h>
  10. namespace anki {
  11. class TransferGpuAllocator::Memory : public StackGpuAllocatorMemory
  12. {
  13. public:
  14. BufferPtr m_buffer;
  15. void* m_mappedMemory;
  16. };
  17. class TransferGpuAllocator::Interface : public StackGpuAllocatorInterface
  18. {
  19. public:
  20. GrManager* m_gr;
  21. ResourceAllocator<U8> m_alloc;
  22. ResourceAllocator<U8> getAllocator() const
  23. {
  24. return m_alloc;
  25. }
  26. ANKI_USE_RESULT Error allocate(PtrSize size, StackGpuAllocatorMemory*& mem) final
  27. {
  28. TransferGpuAllocator::Memory* mm = m_alloc.newInstance<TransferGpuAllocator::Memory>();
  29. mm->m_buffer = m_gr->newBuffer(
  30. BufferInitInfo(size, BufferUsageBit::TRANSFER_SOURCE, BufferMapAccessBit::WRITE, "Transfer"));
  31. mm->m_mappedMemory = mm->m_buffer->map(0, size, BufferMapAccessBit::WRITE);
  32. mem = mm;
  33. return Error::NONE;
  34. }
  35. void free(StackGpuAllocatorMemory* mem) final
  36. {
  37. ANKI_ASSERT(mem);
  38. TransferGpuAllocator::Memory* mm = static_cast<TransferGpuAllocator::Memory*>(mem);
  39. if(mm->m_mappedMemory)
  40. {
  41. mm->m_buffer->unmap();
  42. }
  43. m_alloc.deleteInstance(mm);
  44. }
  45. void getChunkGrowInfo(F32& scale, PtrSize& bias, PtrSize& initialSize) final
  46. {
  47. scale = 1.5;
  48. bias = 0;
  49. initialSize = TransferGpuAllocator::CHUNK_INITIAL_SIZE;
  50. }
  51. U32 getMaxAlignment() final
  52. {
  53. return TransferGpuAllocator::GPU_BUFFER_ALIGNMENT;
  54. }
  55. };
  56. BufferPtr TransferGpuAllocatorHandle::getBuffer() const
  57. {
  58. ANKI_ASSERT(m_handle.m_memory);
  59. const TransferGpuAllocator::Memory* mm = static_cast<const TransferGpuAllocator::Memory*>(m_handle.m_memory);
  60. ANKI_ASSERT(mm->m_buffer);
  61. return mm->m_buffer;
  62. }
  63. void* TransferGpuAllocatorHandle::getMappedMemory() const
  64. {
  65. ANKI_ASSERT(m_handle.m_memory);
  66. const TransferGpuAllocator::Memory* mm = static_cast<const TransferGpuAllocator::Memory*>(m_handle.m_memory);
  67. ANKI_ASSERT(mm->m_mappedMemory);
  68. return static_cast<U8*>(mm->m_mappedMemory) + m_handle.m_offset;
  69. }
  70. TransferGpuAllocator::TransferGpuAllocator()
  71. {
  72. }
  73. TransferGpuAllocator::~TransferGpuAllocator()
  74. {
  75. for(Frame& frame : m_frames)
  76. {
  77. ANKI_ASSERT(frame.m_pendingReleases == 0);
  78. frame.m_fences.destroy(m_alloc);
  79. }
  80. }
/// Initialize the allocator: store dependencies, size the total budget and
/// set up one stack allocator per in-flight frame.
Error TransferGpuAllocator::init(PtrSize maxSize, GrManager* gr, ResourceAllocator<U8> alloc)
{
	m_alloc = alloc;
	m_gr = gr;
	// NOTE(review): assumes this getAlignedRoundUp overload returns `maxSize`
	// rounded up to a multiple of CHUNK_INITIAL_SIZE * FRAME_COUNT, so the
	// budget divides evenly across frames — confirm against the Util header.
	m_maxAllocSize = getAlignedRoundUp(CHUNK_INITIAL_SIZE * FRAME_COUNT, maxSize);
	ANKI_RESOURCE_LOGI("Will use %luMB of memory for transfer scratch", m_maxAllocSize / 1024 / 1024);
	// One Interface instance is shared by all per-frame stack allocators.
	m_interface.reset(m_alloc.newInstance<Interface>());
	m_interface->m_gr = gr;
	m_interface->m_alloc = alloc;
	for(Frame& frame : m_frames)
	{
		frame.m_stackAlloc.init(m_alloc, m_interface.get());
	}
	return Error::NONE;
}
/// Allocate `size` bytes of transfer scratch memory. May block: when the
/// current frame's budget is exhausted it advances to the next frame and
/// waits until all of that frame's previous allocations are released and
/// their fences have signaled, then recycles the frame's memory.
Error TransferGpuAllocator::allocate(PtrSize size, TransferGpuAllocatorHandle& handle)
{
	ANKI_TRACE_SCOPED_EVENT(RSRC_ALLOCATE_TRANSFER);
	// Each of the FRAME_COUNT frames gets an equal share of the total budget.
	const PtrSize frameSize = m_maxAllocSize / FRAME_COUNT;
	LockGuard<Mutex> lock(m_mtx);
	Frame* frame;
	if(m_crntFrameAllocatedSize + size <= frameSize)
	{
		// Have enough space in the frame
		frame = &m_frames[m_frameCount];
	}
	else
	{
		// Don't have enough space. Wait for next frame
		m_frameCount = U8((m_frameCount + 1) % FRAME_COUNT);
		Frame& nextFrame = m_frames[m_frameCount];
		// Wait for all memory to be released
		while(nextFrame.m_pendingReleases != 0)
		{
			// release() notifies m_condVar after decrementing the counter.
			m_condVar.wait(m_mtx);
		}
		// Wait all fences
		while(!nextFrame.m_fences.isEmpty())
		{
			FencePtr fence = nextFrame.m_fences.getFront();
			const Bool done = fence->clientWait(MAX_FENCE_WAIT_TIME);
			if(done)
			{
				nextFrame.m_fences.popFront(m_alloc);
			}
			// NOTE(review): when clientWait() times out (done == false) the
			// loop retries the same fence indefinitely; a fence that never
			// signals stalls allocation forever — confirm this is intended.
		}
		// Frame is fully drained: rewind its stack and restart the budget.
		nextFrame.m_stackAlloc.reset();
		m_crntFrameAllocatedSize = 0;
		frame = &nextFrame;
	}
	ANKI_CHECK(frame->m_stackAlloc.allocate(size, handle.m_handle));
	handle.m_range = size;
	handle.m_frame = U8(frame - &m_frames[0]);
	m_crntFrameAllocatedSize += size;
	// Balanced by release(); allocate() on a future frame waits on this count.
	++frame->m_pendingReleases;
	return Error::NONE;
}
  138. void TransferGpuAllocator::release(TransferGpuAllocatorHandle& handle, FencePtr fence)
  139. {
  140. ANKI_ASSERT(fence);
  141. ANKI_ASSERT(handle.valid());
  142. Frame& frame = m_frames[handle.m_frame];
  143. {
  144. LockGuard<Mutex> lock(m_mtx);
  145. frame.m_fences.pushBack(m_alloc, fence);
  146. ANKI_ASSERT(frame.m_pendingReleases > 0);
  147. --frame.m_pendingReleases;
  148. m_condVar.notifyOne();
  149. }
  150. handle.invalidate();
  151. }
  152. } // end namespace anki