// heap.cpp
// zlib open source license
//
// Copyright (c) 2024 to 2025 David Forsgren Piuva
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
//
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
//
// 3. This notice may not be removed or altered from any source
// distribution.
#include "heap.h"
#include "../api/stringAPI.h"
// Get settings from here.
#include "../settings.h"
#include <stdio.h>
#include <cassert> // assert in heap_free
#include <cstdint> // UINTPTR_MAX in tryToAllocate
#include <cstring> // memset in heap_allocate
#include <mutex>
#include <new>
#include <thread>
  31. namespace dsr {
// Bit flags stored in each heap allocation's header. Flags use the heapFlag_ prefix.
using HeapFlag = uint16_t;
// Index into the power-of-two recycling bins.
using BinIndex = uint16_t;
// The framework's maximum memory alignment is either the largest float SIMD vector or the thread safe alignment.
static const uintptr_t heapAlignment = DSR_MAXIMUM_ALIGNMENT;
// And-mask that rounds an address down to heapAlignment.
static const uintptr_t heapAlignmentAndMask = memory_createAlignmentAndMask(heapAlignment);
// Because locking is recursive, it is safest to just have one global mutex for allocating, freeing and manipulating use counters.
// Otherwise each use counter would need to store the thread identity and recursive depth for each heap allocation.
// lockDepth > 0 means that the current thread already holds memoryLock, so nested calls must not lock it again.
static thread_local intptr_t lockDepth = 0;
std::mutex memoryLock;
// The free function is hidden, because all allocations are forced to use reference counting,
// so that a hard exit can disable recursive calls to heap_free by incrementing all reference counters.
static void heap_free(void * const allocation);
  44. // Calculates the largest power of two allocation size that does not overflow a pointer on the target platform.
  45. constexpr int calculateBinCount() {
  46. intptr_t p = 0;
  47. while (true) {
  48. uintptr_t result = ((uintptr_t)1 << p) * heapAlignment;
  49. // Once the value overflows in uintptr_t we have found the index that can not be used, which is also the bin count when including index zero.
  50. if (result == 0) {
  51. return p;
  52. }
  53. p++;
  54. }
  55. }
// TODO: Leave a few bins empty in the beginning until reaching heapAlignment from a minimum alignment.
// NOTE(review): LOWEST_BIN_INDEX is not referenced in this file yet — presumably reserved for the TODO above.
static const int LOWEST_BIN_INDEX = 0;
// Number of recycling bins; bin i holds allocations of (1 << i) * heapAlignment bytes.
static const int MAX_BIN_COUNT = calculateBinCount();
  59. inline uintptr_t getBinSize(BinIndex index) {
  60. return (((uintptr_t)1) << ((uintptr_t)index)) * heapAlignment;
  61. }
  62. static BinIndex getBinIndex(uintptr_t minimumSize) {
  63. for (intptr_t p = 0; p < MAX_BIN_COUNT; p++) {
  64. uintptr_t result = getBinSize(p);
  65. if (result >= minimumSize) {
  66. //printf("getBinIndex %i from %i\n", (int)p, (int)minimumSize);
  67. return p;
  68. }
  69. }
  70. // Failed to match any bin.
  71. return -1;
  72. }
// Flag bit: set while the allocation sits in a recycling bin.
static const HeapFlag heapFlag_recycled = 1 << 0;
// Header written in front of every payload, padded up to heapHeaderPaddedSize bytes.
struct HeapHeader : public AllocationHeader {
	// Because nextRecycled and usedSize have mutually exclusive lifetimes, they can share memory location.
	union {
		// When allocated
		uintptr_t usedSize; // The actual size requested.
		// When recycled
		HeapHeader *nextRecycled = nullptr; // Next element in the recycling bin's singly linked list.
	};
	// Optional callback and external resource, invoked once by heap_free before recycling.
	HeapDestructor destructor;
	uintptr_t useCount = 0; // How many handles that point to the data.
	// TODO: Allow placing a pointer to a singleton in another heap allocation, which will simply be freed using heap_free when the owner is freed.
	//       This allow accessing any amount of additional information shared between all handles to the same payload.
	//       Useful when you in hindsight realize that you need more information attached to something that is already created and shared, like a value allocated image needing a texture.
	//       Check if it already exists. If it does not exist, lock the allocation mutex and check again if it still does not exist, before allocating the singleton.
	//       Then there will only be a mutex overhead for accessing the singleton when accessed for the first time.
	//       Once created, it can not go away until the allocation that knows about it is gone.
	//       If using the reference counting, the singleton could also be reused between different allocations.
	// void *singleton = nullptr;
	// TODO: Allow the caller to access custom bit flags in allocations.
	// uint32_t userFlags = 0;
	HeapFlag flags = 0; // Flags use the heapFlag_ prefix.
	BinIndex binIndex = 0; // Recycling bin index to use when freeing the allocation.
	// totalSize spans from this header to the end of the reserved memory, including alignment padding.
	HeapHeader(uintptr_t totalSize)
	: AllocationHeader(totalSize, false, "Nameless heap allocation") {}
	// Returns the full capacity of this allocation's bin, which may exceed the requested size.
	inline uintptr_t getAllocationSize() {
		return getBinSize(this->binIndex);
	}
	// Returns the requested payload size, or zero while recycled (the union slot then holds nextRecycled).
	inline uintptr_t getUsedSize() {
		if (this->isRecycled()) {
			return 0;
		} else {
			return this->usedSize;
		}
	}
	// Stores a new used size, clamped to the bin's capacity.
	// Returns the size actually applied, or zero when the allocation is recycled.
	inline uintptr_t setUsedSize(uintptr_t size) {
		if (!(this->isRecycled())) {
			uintptr_t allocationSize = getAllocationSize();
			// Silently clamp requests that exceed the bin's capacity.
			if (size > allocationSize) {
				size = allocationSize;
			}
			this->usedSize = size;
			return size;
		} else {
			return 0;
		}
	}
	// True while the allocation sits in a recycling bin.
	inline bool isRecycled() const {
		return (this->flags & heapFlag_recycled) != 0;
	}
	inline void makeRecycled() {
		this->flags |= heapFlag_recycled;
	}
	inline void makeUsed() {
		this->flags &= ~heapFlag_recycled;
	}
};
// Size of HeapHeader rounded up to heapAlignment, so that payloads after it stay aligned.
static const uintptr_t heapHeaderPaddedSize = memory_getPaddedSize(sizeof(HeapHeader), heapAlignment);
  137. AllocationHeader *heap_getHeader(void * const allocation) {
  138. return (AllocationHeader*)((uint8_t*)allocation - heapHeaderPaddedSize);
  139. }
  140. inline HeapHeader *headerFromAllocation(void const * const allocation) {
  141. return (HeapHeader *)((uint8_t*)allocation - heapHeaderPaddedSize);
  142. }
  143. inline void *allocationFromHeader(HeapHeader * const header) {
  144. return ((uint8_t*)header) + heapHeaderPaddedSize;
  145. }
  146. inline void const *allocationFromHeader(HeapHeader const * const header) {
  147. return ((uint8_t const *)header) + heapHeaderPaddedSize;
  148. }
  149. #ifdef SAFE_POINTER_CHECKS
  150. void heap_setAllocationName(void * const allocation, const char *name) {
  151. if (allocation != nullptr) {
  152. HeapHeader *header = headerFromAllocation(allocation);
  153. header->name = name;
  154. }
  155. }
  156. const char * heap_getAllocationName(void * const allocation) {
  157. if (allocation == nullptr) {
  158. return "none";
  159. } else {
  160. HeapHeader *header = headerFromAllocation(allocation);
  161. return header->name;
  162. }
  163. }
  164. uintptr_t heap_getPaddedSize(void const * const allocation) {
  165. if (allocation == nullptr) {
  166. return 0;
  167. } else {
  168. HeapHeader *header = headerFromAllocation(allocation);
  169. return memory_getPaddedSize_usingAndMask(header->getUsedSize(), heapAlignmentAndMask);
  170. }
  171. }
  172. #endif
  173. uintptr_t heap_getAllocationSize(void const * const allocation) {
  174. HeapHeader *header = headerFromAllocation(allocation);
  175. return getBinSize(header->binIndex);
  176. }
  177. uintptr_t heap_getUsedSize(void const * const allocation) {
  178. uintptr_t result = 0;
  179. if (allocation != nullptr) {
  180. HeapHeader *header = headerFromAllocation(allocation);
  181. result = header->getUsedSize();
  182. //printf(" heap_getUsedSize: get %i\n", (int)result);
  183. }
  184. return result;
  185. }
  186. uintptr_t heap_setUsedSize(void * const allocation, uintptr_t size) {
  187. uintptr_t result = 0;
  188. if (allocation != nullptr) {
  189. //uintptr_t allocationSize = heap_getAllocationSize(allocation);
  190. HeapHeader *header = headerFromAllocation(allocation);
  191. result = header->setUsedSize(size);
  192. //printf(" heap_setUsedSize: try %i get %i\n", (int)size, (int)result);
  193. }
  194. return result;
  195. }
  196. void heap_increaseUseCount(void const * const allocation) {
  197. if (allocation != nullptr) {
  198. HeapHeader *header = headerFromAllocation(allocation);
  199. if (lockDepth == 0) memoryLock.lock();
  200. //printf("heap_increaseUseCount called for allocation @ %ld\n", (uintptr_t)allocation);
  201. //printf(" Use count: %ld -> %ld\n", header->useCount, header->useCount + 1);
  202. //#ifdef SAFE_POINTER_CHECKS
  203. // printf(" ID: %lu\n", header->allocationIdentity);
  204. // printf(" Name: %s\n", header->name ? header->name : "<nameless>");
  205. //#endif
  206. header->useCount++;
  207. if (lockDepth == 0) memoryLock.unlock();
  208. }
  209. }
// Removes one reference from the allocation and frees it when the count reaches zero.
// Null input is ignored. Throws when the count is already zero.
void heap_decreaseUseCount(void const * const allocation) {
	if (allocation != nullptr) {
		HeapHeader *header = headerFromAllocation(allocation);
		// Only take the global lock when this thread does not already hold it.
		if (lockDepth == 0) memoryLock.lock();
		if (header->useCount == 0) {
			// NOTE(review): throwError is reached while memoryLock may still be held — confirm it terminates rather than unwinds.
			throwError(U"Heap error: Decreasing a count that is already zero!\n");
		} else {
			header->useCount--;
			if (header->useCount == 0) {
				// Raise lockDepth so that heap_free and any destructors it triggers reuse
				// this thread's hold on memoryLock instead of deadlocking on a second lock.
				lockDepth++;
				heap_free((void*)allocation);
				lockDepth--;
			}
		}
		if (lockDepth == 0) memoryLock.unlock();
	}
}
  233. uintptr_t heap_getUseCount(void const * const allocation) {
  234. return headerFromAllocation(allocation)->useCount;
  235. }
  236. // A block of memory where heap data can be allocated.
  237. struct HeapMemory {
  238. HeapMemory *prevHeap = nullptr;
  239. uint8_t *top = nullptr; // The start of the arena, where the allocation pointer is when full.
  240. uint8_t *allocationPointer = nullptr; // The allocation pointer that moves from bottom to top when filling the arena.
  241. uint8_t *bottom = nullptr; // The end of the arena, where the allocation pointer is when empty.
  242. HeapMemory(uintptr_t size) {
  243. this->top = (uint8_t*)(operator new (size));
  244. this->bottom = this->top + size;
  245. this->allocationPointer = this->bottom;
  246. }
  247. ~HeapMemory() {
  248. if (this->top != nullptr) {
  249. delete this->top;
  250. this->top = nullptr;
  251. }
  252. this->allocationPointer = nullptr;
  253. this->bottom = nullptr;
  254. }
  255. };
  256. // The total number of used heap allocations, excluding recycled memory.
  257. // Only accessed when defaultHeap.poolLock is locked.
  258. static intptr_t allocationCount = 0;
  259. // The heap can have memory freed after its own destruction by telling the remaining allocations to clean up after themselves.
  260. struct HeapPool {
  261. HeapMemory *lastHeap = nullptr;
  262. HeapHeader *recyclingBin[MAX_BIN_COUNT] = {};
  263. bool terminating = false;
  264. void cleanUp() {
  265. // If memory safety checks are enabled, then we should indicate that everything is fine with the memory once cleaning up.
  266. // There is however no way to distinguish between leaking memory and not yet having terminated everything, so there is no leak warning to print.
  267. #ifdef SAFE_POINTER_CHECKS
  268. // Can't allocate more memory after freeing all memory.
  269. printf("All heap memory was freed without leaks.\n");
  270. #endif
  271. HeapMemory *nextHeap = this->lastHeap;
  272. while (nextHeap != nullptr) {
  273. HeapMemory *currentHeap = nextHeap;
  274. nextHeap = currentHeap->prevHeap;
  275. operator delete(currentHeap);
  276. }
  277. this->lastHeap = nullptr;
  278. }
  279. HeapPool() {}
  280. ~HeapPool() {
  281. memoryLock.lock();
  282. this->terminating = true;
  283. if (allocationCount == 0) {
  284. this->cleanUp();
  285. }
  286. memoryLock.unlock();
  287. }
  288. };
// Tries to carve out paddedSize bytes plus a header from the arena's remaining space.
// Returns pointers to the payload and header on success, or null pointers when the arena is full.
// alignmentAndMask rounds an address down to the heap's alignment (as produced by memory_createAlignmentAndMask).
static UnsafeAllocation tryToAllocate(HeapMemory &heap, uintptr_t paddedSize, uintptr_t alignmentAndMask) {
	UnsafeAllocation result(nullptr, nullptr);
	// Move down from the previous allocation and align the payload's start address downwards.
	uint8_t *dataPointer = (uint8_t*)(((uintptr_t)(heap.allocationPointer) - paddedSize) & alignmentAndMask);
	// The header is placed just before the payload.
	AllocationHeader *headerPointer = (AllocationHeader*)(dataPointer - heapHeaderPaddedSize);
	if ((uint8_t*)headerPointer >= heap.top) {
		// There is enough space, so confirm the allocation.
		result = UnsafeAllocation(dataPointer, headerPointer);
		// Write data to the header. totalSize spans from the header to the previous
		// allocation pointer, so it includes any alignment padding after the payload.
		*headerPointer = HeapHeader((uintptr_t)heap.allocationPointer - (uintptr_t)headerPointer);
		// Reserve the data in the heap by moving the allocation pointer.
		heap.allocationPointer = (uint8_t*)headerPointer;
	}
	return result;
}
  303. static UnsafeAllocation tryToAllocate(HeapPool &pool, uintptr_t paddedSize, uintptr_t alignmentAndMask) {
  304. // Start with the most recent heap, which is most likely to have enough space.
  305. HeapMemory *currentHeap = pool.lastHeap;
  306. while (currentHeap != nullptr) {
  307. UnsafeAllocation result = tryToAllocate(*currentHeap, paddedSize, heapAlignmentAndMask);
  308. if (result.data != nullptr) {
  309. return result;
  310. }
  311. currentHeap = currentHeap->prevHeap;
  312. }
  313. // If no free space could be found, allocate a new memory block.
  314. uintptr_t allocationSize = 16777216;
  315. uintptr_t usefulSize = paddedSize << 4; // TODO: Handle integer overflow.
  316. if (usefulSize > allocationSize) allocationSize = usefulSize;
  317. // Append to the linked list of memory.
  318. HeapMemory *previousHeap = pool.lastHeap;
  319. pool.lastHeap = new HeapMemory(allocationSize);
  320. pool.lastHeap->prevHeap = previousHeap;
  321. // Make one last attempt at allocating the memory.
  322. return tryToAllocate(*(pool.lastHeap), paddedSize, heapAlignmentAndMask);
  323. }
// The process-wide pool that all heap_allocate calls draw from.
static HeapPool defaultHeap;
// Allocates at least minimumSize bytes from the default heap, reusing a recycled allocation when possible.
// The payload is aligned to heapAlignment; when zeroed is true, the whole bin capacity is cleared.
// Returns pointers to the payload and its header, or throws when the request can not be satisfied.
UnsafeAllocation heap_allocate(uintptr_t minimumSize, bool zeroed) {
	UnsafeAllocation result(nullptr, nullptr);
	// binIndex is -1 when no power-of-two bin can hold the request.
	int32_t binIndex = getBinIndex(minimumSize);
	if (binIndex == -1) {
		// If the requested allocation is so big that there is no power of two that can contain it without overflowing the address space, then it can not be allocated.
		throwError(U"Heap error: Exceeded the maximum size when trying to allocate!\n");
	} else {
		// Full capacity of the selected bin, used for both the arena reservation and the optional zeroing.
		uintptr_t paddedSize = ((uintptr_t)1 << binIndex) * heapAlignment;
		// Only take the global lock when this thread does not already hold it.
		if (lockDepth == 0) memoryLock.lock();
		allocationCount++;
		// Look for pre-existing allocations in the recycling bins.
		HeapHeader *binHeader = defaultHeap.recyclingBin[binIndex];
		if (binHeader != nullptr) {
			// Make the recycled allocation's tail into the new head.
			defaultHeap.recyclingBin[binIndex] = binHeader->nextRecycled;
			// Clear the pointer to make room for the allocation's size in the union.
			binHeader->nextRecycled = nullptr;
			// Mark the allocation as not recycled. (assume that it was recycled when found in the bin)
			binHeader->makeUsed();
			binHeader->reuse(false, "Nameless reused allocation");
			result = UnsafeAllocation((uint8_t*)allocationFromHeader(binHeader), binHeader);
		} else {
			// Look for a heap with enough space for a new allocation.
			result = tryToAllocate(defaultHeap, paddedSize, heapAlignmentAndMask);
			if (result.data == nullptr) {
				// NOTE(review): throwError is reached while memoryLock is still held — confirm it terminates rather than unwinds.
				throwError(U"Heap error: Failed to allocate more memory!\n");
			}
		}
		if (lockDepth == 0) memoryLock.unlock();
		if (zeroed && result.data != nullptr) {
			// Zero the whole bin capacity, not just minimumSize, so reused memory never exposes old content.
			memset(result.data, 0, paddedSize);
		}
	}
	if (result.data != nullptr) {
		// Get the header.
		HeapHeader *head = (HeapHeader*)(result.header);
		// Tell the allocation where it should be recycled when freed.
		head->binIndex = binIndex;
		// Tell the allocation how many of the bytes that are used.
		head->setUsedSize(minimumSize);
	}
	return result;
}
  377. void heap_setAllocationDestructor(void * const allocation, const HeapDestructor &destructor) {
  378. HeapHeader *header = headerFromAllocation(allocation);
  379. header->destructor = destructor;
  380. }
  381. static void heap_free(void * const allocation) {
  382. if (lockDepth == 0) memoryLock.lock();
  383. // Get the recycled allocation's header.
  384. HeapHeader *header = headerFromAllocation(allocation);
  385. if (header->isRecycled()) {
  386. throwError(U"Heap error: A heap allocation was freed twice!\n");
  387. } else {
  388. // Call the destructor without using the mutex (lockDepth > 0).
  389. lockDepth++;
  390. //printf("heap_free called for allocation @ %ld\n", (uintptr_t)allocation);
  391. //printf(" Use count: %ld\n", header->useCount);
  392. //#ifdef SAFE_POINTER_CHECKS
  393. // printf(" ID: %lu\n", header->allocationIdentity);
  394. // printf(" Name: %s\n", header->name ? header->name : "<nameless>");
  395. //#endif
  396. //printf(" Calling destructor\n");
  397. // Call the destructor provided with any external resource that also needs to be freed.
  398. if (header->destructor.destructor) {
  399. header->destructor.destructor(allocation, header->destructor.externalResource);
  400. }
  401. //printf(" Finished destructor\n");
  402. lockDepth--;
  403. assert(lockDepth >= 0);
  404. // Remove the destructor so that it is not called again for the next allocation.
  405. header->destructor = HeapDestructor();
  406. int binIndex = header->binIndex;
  407. if (binIndex >= MAX_BIN_COUNT) {
  408. throwError(U"Heap error: Out of bound recycling bin index in corrupted head of freed allocation!\n");
  409. } else {
  410. // Make any previous head from the bin into the new tail.
  411. HeapHeader *oldHeader = defaultHeap.recyclingBin[binIndex];
  412. header->nextRecycled = oldHeader;
  413. // Mark the allocation as recycled.
  414. header->makeRecycled();
  415. #ifdef SAFE_POINTER_CHECKS
  416. // Remove the allocation identity, so that use of freed memory can be detected in SafePointer and Handle.
  417. header->allocationIdentity = 0;
  418. header->threadHash = 0;
  419. #endif
  420. // Store the newly recycled allocation in the bin.
  421. defaultHeap.recyclingBin[binIndex] = header;
  422. header->nextRecycled = oldHeader;
  423. }
  424. }
  425. // By decreasing the count after recursive calls to destructors, we can make sure that the arena is freed last.
  426. // If a destructor allocates new memory, it will have to allocate a new arena and then clean it up again.
  427. allocationCount--;
  428. // If the heap has been told to terminate and we reached zero allocations, we can tell it to clean up.
  429. if (defaultHeap.terminating && allocationCount == 0) {
  430. defaultHeap.cleanUp();
  431. }
  432. if (lockDepth == 0) memoryLock.unlock();
  433. }
  434. static void forAllHeapAllocations(HeapMemory &heap, std::function<void(AllocationHeader * header, void * allocation)> callback) {
  435. uint8_t * current = heap.allocationPointer;
  436. while (current < heap.bottom) {
  437. HeapHeader *header = (HeapHeader*)current;
  438. void *payload = allocationFromHeader(header);
  439. if (!(header->isRecycled())) {
  440. callback(header, payload);
  441. }
  442. current += header->totalSize;
  443. }
  444. }
  445. void heap_forAllHeapAllocations(std::function<void(AllocationHeader * header, void * allocation)> callback) {
  446. HeapMemory *currentHeap = defaultHeap.lastHeap;
  447. while (currentHeap != nullptr) {
  448. forAllHeapAllocations(*currentHeap, callback);
  449. currentHeap = currentHeap->prevHeap;
  450. }
  451. }
  452. void heap_hardExitCleaning() {
  453. // TODO:
  454. // * Implement a function that iterates over all heap allocations.
  455. // * Increment the use count for each allocation, to prevent recursive freeing of resources.
  456. // * Call the destructor on each allocation without freeing any memory, while all memory is still available.
  457. // Then the arenas can safely be deallocated without looking at individual allocations again.
  458. allocationCount = 0;
  459. defaultHeap.terminating = true;
  460. defaultHeap.cleanUp();
  461. }
// Throws the standard error for a failed object allocation.
void impl_throwAllocationFailure() {
	throwError(U"Failed to allocate memory for a new object!\n");
}
// Throws the standard error for dereferencing a null handle.
void impl_throwNullException() {
	throwError(U"Null handle exception!\n");
}
// Throws when a pointer's remembered allocation identity no longer matches the allocation
// it points to, which means the memory was freed or reused since the pointer was created.
void impl_throwIdentityMismatch(uint64_t allocationIdentity, uint64_t pointerIdentity) {
	throwError(U"Identity mismatch! The allocation pointed to had identity ", allocationIdentity, U" but ", pointerIdentity, U" was expected by the pointer from when it was allocated.\n");
}
  471. }