// StackGpuAllocator.cpp — unit test for anki::StackGpuAllocator.
// (Scrape artifacts removed: file-size banner and concatenated line-number run.)
  1. // Copyright (C) 2009-2021, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <anki/gr/utils/StackGpuAllocator.h>
  6. #include <anki/util/ThreadHive.h>
  7. #include <Tests/Framework/Framework.h>
  8. #include <algorithm>
  9. using namespace anki;
  10. namespace
  11. {
  12. const U ALLOCATION_COUNT = 1024;
  13. const U THREAD_COUNT = 4;
  14. const U32 MIN_ALLOCATION_SIZE = 256;
  15. const U32 MAX_ALLOCATION_SIZE = 1024 * 10;
  16. const U32 ALIGNMENT = 256;
  17. class Mem : public StackGpuAllocatorMemory
  18. {
  19. public:
  20. void* m_mem = nullptr;
  21. PtrSize m_size = 0;
  22. };
  23. class Interface final : public StackGpuAllocatorInterface
  24. {
  25. public:
  26. ANKI_USE_RESULT Error allocate(PtrSize size, StackGpuAllocatorMemory*& mem)
  27. {
  28. Mem* m = new Mem();
  29. m->m_mem = mallocAligned(size, ALIGNMENT);
  30. m->m_size = size;
  31. mem = m;
  32. return Error::NONE;
  33. }
  34. void free(StackGpuAllocatorMemory* mem)
  35. {
  36. Mem* m = static_cast<Mem*>(mem);
  37. freeAligned(m->m_mem);
  38. delete m;
  39. }
  40. void getChunkGrowInfo(F32& scale, PtrSize& bias, PtrSize& initialSize)
  41. {
  42. scale = 2.0;
  43. bias = 0;
  44. initialSize = ALIGNMENT * 1024;
  45. }
  46. U32 getMaxAlignment()
  47. {
  48. return ALIGNMENT;
  49. }
  50. };
  51. class Allocation
  52. {
  53. public:
  54. StackGpuAllocatorHandle m_handle;
  55. PtrSize m_size;
  56. };
  57. class TestContext
  58. {
  59. public:
  60. StackGpuAllocator* m_salloc;
  61. Array<Allocation, ALLOCATION_COUNT> m_allocs;
  62. Atomic<U32> m_allocCount;
  63. };
  64. static void doAllocation(void* arg, U32 threadId, ThreadHive& hive, ThreadHiveSemaphore* sem)
  65. {
  66. TestContext* ctx = static_cast<TestContext*>(arg);
  67. U allocCount = ctx->m_allocCount.fetchAdd(1);
  68. PtrSize allocSize = getRandomRange(MIN_ALLOCATION_SIZE, MAX_ALLOCATION_SIZE);
  69. ctx->m_allocs[allocCount].m_size = allocSize;
  70. ANKI_TEST_EXPECT_NO_ERR(ctx->m_salloc->allocate(allocSize, ctx->m_allocs[allocCount].m_handle));
  71. }
  72. } // end anonymous namespace
  73. ANKI_TEST(Gr, StackGpuAllocator)
  74. {
  75. HeapAllocator<U8> alloc(allocAligned, nullptr);
  76. Interface iface;
  77. StackGpuAllocator salloc;
  78. salloc.init(alloc, &iface);
  79. ThreadHive hive(THREAD_COUNT, alloc);
  80. for(U i = 0; i < 1024; ++i)
  81. {
  82. TestContext ctx;
  83. memset(&ctx, 0, sizeof(ctx));
  84. ctx.m_salloc = &salloc;
  85. ThreadHiveTask task;
  86. task.m_callback = doAllocation;
  87. task.m_argument = &ctx;
  88. for(U i = 0; i < ALLOCATION_COUNT; ++i)
  89. {
  90. hive.submitTasks(&task, 1);
  91. }
  92. hive.waitAllTasks();
  93. // Make sure memory doesn't overlap
  94. std::sort(ctx.m_allocs.getBegin(), ctx.m_allocs.getEnd(), [](const Allocation& a, const Allocation& b) {
  95. if(a.m_handle.m_memory != b.m_handle.m_memory)
  96. {
  97. return a.m_handle.m_memory < b.m_handle.m_memory;
  98. }
  99. if(a.m_handle.m_offset != b.m_handle.m_offset)
  100. {
  101. return a.m_handle.m_offset <= b.m_handle.m_offset;
  102. }
  103. ANKI_TEST_EXPECT_EQ(1, 0);
  104. return true;
  105. });
  106. for(U i = 1; i < ALLOCATION_COUNT; ++i)
  107. {
  108. const Allocation& a = ctx.m_allocs[i - 1];
  109. const Allocation& b = ctx.m_allocs[i];
  110. if(a.m_handle.m_memory == b.m_handle.m_memory)
  111. {
  112. ANKI_TEST_EXPECT_LEQ(a.m_handle.m_offset + a.m_size, b.m_handle.m_offset);
  113. }
  114. }
  115. salloc.reset();
  116. }
  117. }