// StackGpuAllocator.cpp
  1. // Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/Gr/Utils/StackGpuAllocator.h>
  6. #include <AnKi/Util/ThreadHive.h>
  7. #include <Tests/Framework/Framework.h>
  8. #include <algorithm>
  9. using namespace anki;
  10. namespace {
  11. const U ALLOCATION_COUNT = 1024;
  12. const U THREAD_COUNT = 4;
  13. const U32 MIN_ALLOCATION_SIZE = 256;
  14. const U32 MAX_ALLOCATION_SIZE = 1024 * 10;
  15. const U32 ALIGNMENT = 256;
  16. class Mem : public StackGpuAllocatorMemory
  17. {
  18. public:
  19. void* m_mem = nullptr;
  20. PtrSize m_size = 0;
  21. };
  22. class Interface final : public StackGpuAllocatorInterface
  23. {
  24. public:
  25. ANKI_USE_RESULT Error allocate(PtrSize size, StackGpuAllocatorMemory*& mem)
  26. {
  27. Mem* m = new Mem();
  28. m->m_mem = mallocAligned(size, ALIGNMENT);
  29. m->m_size = size;
  30. mem = m;
  31. return Error::NONE;
  32. }
  33. void free(StackGpuAllocatorMemory* mem)
  34. {
  35. Mem* m = static_cast<Mem*>(mem);
  36. freeAligned(m->m_mem);
  37. delete m;
  38. }
  39. void getChunkGrowInfo(F32& scale, PtrSize& bias, PtrSize& initialSize)
  40. {
  41. scale = 2.0;
  42. bias = 0;
  43. initialSize = ALIGNMENT * 1024;
  44. }
  45. U32 getMaxAlignment()
  46. {
  47. return ALIGNMENT;
  48. }
  49. };
  50. class Allocation
  51. {
  52. public:
  53. StackGpuAllocatorHandle m_handle;
  54. PtrSize m_size;
  55. };
  56. class TestContext
  57. {
  58. public:
  59. StackGpuAllocator* m_salloc;
  60. Array<Allocation, ALLOCATION_COUNT> m_allocs;
  61. Atomic<U32> m_allocCount;
  62. };
  63. static void doAllocation(void* arg, U32 threadId, ThreadHive& hive, ThreadHiveSemaphore* sem)
  64. {
  65. TestContext* ctx = static_cast<TestContext*>(arg);
  66. U allocCount = ctx->m_allocCount.fetchAdd(1);
  67. PtrSize allocSize = getRandomRange(MIN_ALLOCATION_SIZE, MAX_ALLOCATION_SIZE);
  68. ctx->m_allocs[allocCount].m_size = allocSize;
  69. ANKI_TEST_EXPECT_NO_ERR(ctx->m_salloc->allocate(allocSize, ctx->m_allocs[allocCount].m_handle));
  70. }
  71. } // end anonymous namespace
  72. ANKI_TEST(Gr, StackGpuAllocator)
  73. {
  74. HeapAllocator<U8> alloc(allocAligned, nullptr);
  75. Interface iface;
  76. StackGpuAllocator salloc;
  77. salloc.init(alloc, &iface);
  78. ThreadHive hive(THREAD_COUNT, alloc);
  79. for(U i = 0; i < 1024; ++i)
  80. {
  81. TestContext ctx;
  82. memset(&ctx, 0, sizeof(ctx));
  83. ctx.m_salloc = &salloc;
  84. ThreadHiveTask task;
  85. task.m_callback = doAllocation;
  86. task.m_argument = &ctx;
  87. for(U i = 0; i < ALLOCATION_COUNT; ++i)
  88. {
  89. hive.submitTasks(&task, 1);
  90. }
  91. hive.waitAllTasks();
  92. // Make sure memory doesn't overlap
  93. std::sort(ctx.m_allocs.getBegin(), ctx.m_allocs.getEnd(), [](const Allocation& a, const Allocation& b) {
  94. if(a.m_handle.m_memory != b.m_handle.m_memory)
  95. {
  96. return a.m_handle.m_memory < b.m_handle.m_memory;
  97. }
  98. if(a.m_handle.m_offset != b.m_handle.m_offset)
  99. {
  100. return a.m_handle.m_offset <= b.m_handle.m_offset;
  101. }
  102. ANKI_TEST_EXPECT_EQ(1, 0);
  103. return true;
  104. });
  105. for(U i = 1; i < ALLOCATION_COUNT; ++i)
  106. {
  107. const Allocation& a = ctx.m_allocs[i - 1];
  108. const Allocation& b = ctx.m_allocs[i];
  109. if(a.m_handle.m_memory == b.m_handle.m_memory)
  110. {
  111. ANKI_TEST_EXPECT_LEQ(a.m_handle.m_offset + a.m_size, b.m_handle.m_offset);
  112. }
  113. }
  114. salloc.reset();
  115. }
  116. }