// heap.cpp
  1. // zlib open source license
  2. //
  3. // Copyright (c) 2024 David Forsgren Piuva
  4. //
  5. // This software is provided 'as-is', without any express or implied
  6. // warranty. In no event will the authors be held liable for any damages
  7. // arising from the use of this software.
  8. //
  9. // Permission is granted to anyone to use this software for any purpose,
  10. // including commercial applications, and to alter it and redistribute it
  11. // freely, subject to the following restrictions:
  12. //
  13. // 1. The origin of this software must not be misrepresented; you must not
  14. // claim that you wrote the original software. If you use this software
  15. // in a product, an acknowledgment in the product documentation would be
  16. // appreciated but is not required.
  17. //
  18. // 2. Altered source versions must be plainly marked as such, and must not be
  19. // misrepresented as being the original software.
  20. //
  21. // 3. This notice may not be removed or altered from any source
  22. // distribution.
  23. #include "heap.h"
  24. #include <mutex>
  25. #include <thread>
  26. // TODO: Avoid dynamic memory allocation in error messages from failed memory allocation.
  27. #include "../api/stringAPI.h"
  28. // Get settings from here.
  29. #include "../settings.h"
namespace dsr {

// The framework's maximum memory alignment is either the largest float SIMD vector or the thread safe alignment.
static const uintptr_t heapAlignment = DSR_MAXIMUM_ALIGNMENT;
// Mask used to round addresses down to a multiple of heapAlignment.
// (assumes memory_createAlignmentAndMask returns ~(alignment - 1) — TODO confirm against its declaration)
static const uintptr_t heapAlignmentAndMask = memory_createAlignmentAndMask(heapAlignment);
  34. // Calculates the largest power of two allocation size that does not overflow a pointer on the target platform.
  35. constexpr int calculateBinCount() {
  36. int largestBinIndex = 0;
  37. intptr_t p = 0;
  38. while (true) {
  39. uintptr_t result = ((uintptr_t)1 << p) * heapAlignment;
  40. // Once the value overflows in uintptr_t we have found the index that can not be used, which is also the bin count when including index zero.
  41. if (result == 0) {
  42. return p;
  43. }
  44. p++;
  45. }
  46. }
  47. static const int MAX_BIN_COUNT = calculateBinCount();
  48. static int32_t getBinIndex(uintptr_t minimumSize) {
  49. for (intptr_t p = 0; p < MAX_BIN_COUNT; p++) {
  50. uintptr_t result = ((uintptr_t)1 << p) * heapAlignment;
  51. if (result >= minimumSize) {
  52. return p;
  53. }
  54. }
  55. // Failed to match any bin.
  56. return -1;
  57. }
// Flag bit set while an allocation sits in a recycling bin, used to detect double free.
static const uint16_t heapFlag_recycled = 1 << 0;
// Header stored in front of each heap allocation, extending the framework's AllocationHeader.
struct HeapHeader : public AllocationHeader {
	// Intrusive singly linked list pointer to the next recycled allocation in the same bin.
	HeapHeader *nextRecycled = nullptr;
	//uint64_t useCount;
	// Bit mask of heapFlag_* values.
	uint16_t flags = 0;
	// Index into HeapPool::recyclingBin, identifying the power of two size class of this allocation.
	uint8_t binIndex;
	HeapHeader(uintptr_t totalSize, uint8_t binIndex)
	: AllocationHeader(totalSize, false), binIndex(binIndex) {}
};
  67. static const uintptr_t heapHeaderPaddedSize = memory_getPaddedSize(sizeof(HeapHeader), heapAlignment);
  68. inline HeapHeader *headerFromAllocation(uint8_t* allocation) {
  69. return (HeapHeader*)(allocation - heapHeaderPaddedSize);
  70. }
  71. inline uint8_t *allocationFromHeader(HeapHeader* header) {
  72. return (uint8_t*)header + heapHeaderPaddedSize;
  73. }
  74. uint64_t heap_getAllocationSize(uint8_t* allocation) {
  75. return headerFromAllocation(allocation)->totalSize - heapHeaderPaddedSize;
  76. }
  77. // A block of memory where heap data can be allocated.
  78. struct HeapMemory {
  79. // TODO: Recycle memory using groups of allocations in power of two bytes.
  80. HeapMemory *prevHeap = nullptr;
  81. uint8_t *top = nullptr; // The start of the arena, where the allocation pointer is when full.
  82. uint8_t *allocationPointer = nullptr; // The allocation pointer that moves from bottom to top when filling the arena.
  83. uint8_t *bottom = nullptr; // The end of the arena, where the allocation pointer is when empty.
  84. HeapMemory(uintptr_t size) {
  85. this->top = (uint8_t*)malloc(size);
  86. this->bottom = this->top + size;
  87. this->allocationPointer = this->bottom;
  88. }
  89. ~HeapMemory() {
  90. if (this->top != nullptr) {
  91. free(this->top);
  92. this->top = nullptr;
  93. }
  94. this->allocationPointer = nullptr;
  95. this->bottom = nullptr;
  96. }
  97. };
  98. // The heap can have memory freed after its own destruction by telling the remaining allocations to clean up after themselves.
  99. struct HeapPool {
  100. std::mutex poolLock;
  101. HeapMemory *lastHeap = nullptr;
  102. HeapHeader *recyclingBin[MAX_BIN_COUNT] = {};
  103. bool terminating = false;
  104. HeapPool() {}
  105. ~HeapPool() {
  106. this->poolLock.lock();
  107. this->terminating = true;
  108. HeapMemory *nextHeap = this->lastHeap;
  109. while (nextHeap != nullptr) {
  110. HeapMemory *currentHeap = nextHeap;
  111. nextHeap = currentHeap->prevHeap;
  112. delete currentHeap;
  113. }
  114. this->poolLock.unlock();
  115. }
  116. };
  117. static UnsafeAllocation tryToAllocate(HeapMemory &heap, uintptr_t paddedSize, uintptr_t alignmentAndMask, uint8_t binIndex) {
  118. UnsafeAllocation result(nullptr, nullptr);
  119. uint8_t *dataPointer = (uint8_t*)(((uintptr_t)(heap.allocationPointer) - paddedSize) & alignmentAndMask);
  120. AllocationHeader *headerPointer = (AllocationHeader*)(dataPointer - heapHeaderPaddedSize);
  121. if ((uint8_t*)headerPointer >= heap.top) {
  122. // There is enough space, so confirm the allocation.
  123. result = UnsafeAllocation(dataPointer, headerPointer);
  124. // Write data to the header.
  125. *headerPointer = HeapHeader((uintptr_t)heap.allocationPointer - (uintptr_t)headerPointer, binIndex);
  126. // Reserve the data in the heap by moving the allocation pointer.
  127. heap.allocationPointer = (uint8_t*)headerPointer;
  128. }
  129. return result;
  130. }
  131. static UnsafeAllocation tryToAllocate(HeapPool &pool, uintptr_t paddedSize, uintptr_t alignmentAndMask, uint8_t binIndex) {
  132. // Start with the most recent heap, which is most likely to have enough space.
  133. HeapMemory *currentHeap = pool.lastHeap;
  134. while (currentHeap != nullptr) {
  135. UnsafeAllocation result = tryToAllocate(*currentHeap, paddedSize, heapAlignmentAndMask, binIndex);
  136. if (result.data != nullptr) {
  137. return result;
  138. }
  139. currentHeap = currentHeap->prevHeap;
  140. }
  141. // If no free space could be found, allocate a new memory block.
  142. uintptr_t allocationSize = 16777216;
  143. uintptr_t usefulSize = paddedSize << 4; // TODO: Handle integer overflow.
  144. if (usefulSize > allocationSize) allocationSize = usefulSize;
  145. // Append to the linked list of memory.
  146. HeapMemory *previousHeap = pool.lastHeap;
  147. pool.lastHeap = new HeapMemory(allocationSize);
  148. pool.lastHeap->prevHeap = previousHeap;
  149. // Make one last attempt at allocating the memory.
  150. return tryToAllocate(*(pool.lastHeap), paddedSize, heapAlignmentAndMask, binIndex);
  151. }
  152. static HeapPool defaultHeap;
  153. UnsafeAllocation heap_allocate(uintptr_t minimumSize) {
  154. int32_t binIndex = getBinIndex(minimumSize);
  155. UnsafeAllocation result(nullptr, nullptr);
  156. if (binIndex == -1) {
  157. // If the requested allocation is so big that there is no power of two that can contain it without overflowing the address space, then it can not be allocated.
  158. throwError(U"Exceeded the maximum size when trying to allocate ", minimumSize, U" bytes in heap_allocate!.\n");
  159. } else {
  160. uintptr_t paddedSize = ((uintptr_t)1 << binIndex) * heapAlignment;
  161. defaultHeap.poolLock.lock();
  162. if (!(defaultHeap.terminating)) {
  163. // Look for pre-existing allocations in the recycling bins.
  164. HeapHeader *binHeader = defaultHeap.recyclingBin[binIndex];
  165. if (binHeader != nullptr) {
  166. // Make the recycled allocation's tail into the new head.
  167. defaultHeap.recyclingBin[binIndex] = binHeader->nextRecycled;
  168. // Mark the allocation as not recycled. (assume that it was recycled when found in the bin)
  169. binHeader->flags &= ~heapFlag_recycled;
  170. result = UnsafeAllocation(allocationFromHeader(binHeader), binHeader);
  171. } else {
  172. // Look for a heap with enough space for a new allocation.
  173. result = tryToAllocate(defaultHeap, paddedSize, heapAlignmentAndMask, binIndex);
  174. if (result.data == nullptr) {
  175. throwError(U"Failed to allocate ", minimumSize, U" bytes of data with heap_allocate!\n");
  176. }
  177. }
  178. }
  179. defaultHeap.poolLock.unlock();
  180. }
  181. return result;
  182. }
  183. void heap_free(uint8_t *allocation) {
  184. defaultHeap.poolLock.lock();
  185. if (!(defaultHeap.terminating)) {
  186. HeapHeader *header = (HeapHeader*)(allocation - heapHeaderPaddedSize);
  187. // Get the recycled allocation's header and its bin index.
  188. HeapHeader *newHeader = headerFromAllocation(allocation);
  189. if (newHeader->flags & heapFlag_recycled) {
  190. throwError(U"A heap allocation was freed twice!\n");
  191. //#ifdef SAFE_POINTER_CHECKS
  192. // TODO: Print more information when possible.
  193. //#endif
  194. } else {
  195. int binIndex = header->binIndex;
  196. if (binIndex >= MAX_BIN_COUNT) {
  197. throwError(U"Out of bound recycling bin index in corrupted head of freed allocation!\n");
  198. } else {
  199. // Make any previous head from the bin into the new tail.
  200. HeapHeader *oldHeader = defaultHeap.recyclingBin[binIndex];
  201. newHeader->nextRecycled = oldHeader;
  202. // Mark the allocation as recycled.
  203. newHeader->flags |= heapFlag_recycled;
  204. // Store the newly recycled allocation in the bin.
  205. defaultHeap.recyclingBin[binIndex] = newHeader;
  206. newHeader->nextRecycled = oldHeader;
  207. }
  208. }
  209. }
  210. defaultHeap.poolLock.unlock();
  211. }
  212. }