// common_memory.cpp
  1. gb_inline void zero_size(void *ptr, isize len) {
  2. memset(ptr, 0, len);
  3. }
// Zero a single object through a pointer, sized from the pointer's static type.
#define zero_item(ptr) zero_size((ptr), gb_size_of(ptr))

// Reinterpret the bits of `v` as a value of type `U` without any conversion.
// NOTE: implemented as a reinterpret_cast on a reference, i.e. classic type
// punning — relies on the compilers this codebase targets tolerating it
// (strict-aliasing caveat applies).
template <typename U, typename V>
gb_inline U bit_cast(V &v) { return reinterpret_cast<U &>(v); }

// Const-reference overload of bit_cast: view `v`'s storage as a `U const &`.
template <typename U, typename V>
gb_inline U const &bit_cast(V const &v) { return reinterpret_cast<U const &>(v); }
  9. gb_inline i64 align_formula(i64 size, i64 align) {
  10. if (align > 0) {
  11. i64 result = size + align-1;
  12. return result - result%align;
  13. }
  14. return size;
  15. }
  16. gb_inline isize align_formula_isize(isize size, isize align) {
  17. if (align > 0) {
  18. isize result = size + align-1;
  19. return result - result%align;
  20. }
  21. return size;
  22. }
  23. gb_inline void *align_formula_ptr(void *ptr, isize align) {
  24. if (align > 0) {
  25. uintptr result = (cast(uintptr)ptr) + align-1;
  26. return (void *)(result - result%align);
  27. }
  28. return ptr;
  29. }
// Guards the doubly-linked list of live PlatformMemoryBlocks (see virtual_memory_alloc).
gb_global BlockingMutex global_memory_block_mutex;
// Guards arena allocation for arenas that do not set `ignore_mutex` (see arena_alloc).
gb_global BlockingMutex global_memory_allocator_mutex;

// Per-OS setup (sentinel list, page size); defined in the platform sections below.
void platform_virtual_memory_init(void);
  33. void virtual_memory_init(void) {
  34. mutex_init(&global_memory_block_mutex);
  35. mutex_init(&global_memory_allocator_mutex);
  36. platform_virtual_memory_init();
  37. }
// One contiguous chunk of arena memory, linked to the block allocated before it.
struct MemoryBlock {
	MemoryBlock *prev; // previously active block of the owning Arena (freed chain)
	u8 *         base; // first usable byte
	isize        size; // usable capacity in bytes
	isize        used; // bytes handed out so far (offset of the next allocation)
};
// Growing linear allocator backed by virtual-memory blocks.
struct Arena {
	MemoryBlock *curr_block;         // block allocations are served from (newest)
	isize minimum_block_size;        // lower bound for new block sizes
	bool ignore_mutex;               // skip the global allocator mutex (e.g. thread-local arenas)
};
// Default granularity for arena growth: 8 MiB per block.
enum { DEFAULT_MINIMUM_BLOCK_SIZE = 8ll*1024ll*1024ll };
// Raised to the OS page size in platform_virtual_memory_init; 4096 is the floor.
gb_global isize DEFAULT_PAGE_SIZE = 4096;

MemoryBlock *virtual_memory_alloc(isize size);
void virtual_memory_dealloc(MemoryBlock *block);
void *arena_alloc(Arena *arena, isize min_size, isize alignment);
void arena_free_all(Arena *arena);
  55. isize arena_align_forward_offset(Arena *arena, isize alignment) {
  56. isize alignment_offset = 0;
  57. isize ptr = cast(isize)(arena->curr_block->base + arena->curr_block->used);
  58. isize mask = alignment-1;
  59. if (ptr & mask) {
  60. alignment_offset = alignment - (ptr & mask);
  61. }
  62. return alignment_offset;
  63. }
// Allocate `min_size` bytes from `arena`, aligned to `alignment` (power of two).
// Thread-safe via global_memory_allocator_mutex unless arena->ignore_mutex is
// set. Grows the arena with a new virtual-memory block when the current block
// cannot fit the request. Returned memory is zeroed (fresh virtual pages).
void *arena_alloc(Arena *arena, isize min_size, isize alignment) {
	GB_ASSERT(gb_is_power_of_two(alignment));
	BlockingMutex *mutex = &global_memory_allocator_mutex;
	if (!arena->ignore_mutex) {
		mutex_lock(mutex);
	}
	// Bytes this allocation will consume: request + padding to align the cursor.
	isize size = 0;
	if (arena->curr_block != nullptr) {
		size = min_size + arena_align_forward_offset(arena, alignment);
	}
	if (arena->curr_block == nullptr || (arena->curr_block->used + size) > arena->curr_block->size) {
		// Need a fresh block. Round the request itself up to the alignment;
		// a new block's base is assumed to be suitably aligned for it
		// (NOTE(review): holds because block sizes are alignment multiples — verify
		// against virtual_memory_alloc's base_offset math if alignments grow).
		size = align_formula_isize(min_size, alignment);
		arena->minimum_block_size = gb_max(DEFAULT_MINIMUM_BLOCK_SIZE, arena->minimum_block_size);
		isize block_size = gb_max(size, arena->minimum_block_size);
		MemoryBlock *new_block = virtual_memory_alloc(block_size);
		new_block->prev = arena->curr_block;
		arena->curr_block = new_block;
	}
	MemoryBlock *curr_block = arena->curr_block;
	GB_ASSERT((curr_block->used + size) <= curr_block->size);
	// Hand out the aligned cursor, then advance by the full padded size.
	u8 *ptr = curr_block->base + curr_block->used;
	ptr += arena_align_forward_offset(arena, alignment);
	curr_block->used += size;
	GB_ASSERT(curr_block->used <= curr_block->size);
	if (!arena->ignore_mutex) {
		mutex_unlock(mutex);
	}
	// NOTE(bill): memory will be zeroed by default due to virtual memory
	return ptr;
}
  94. void arena_free_all(Arena *arena) {
  95. while (arena->curr_block != nullptr) {
  96. MemoryBlock *free_block = arena->curr_block;
  97. arena->curr_block = free_block->prev;
  98. virtual_memory_dealloc(free_block);
  99. }
  100. }
// OS-level allocation header living at the very start of each mapping.
// The MemoryBlock must stay the first member so MemoryBlock* and
// PlatformMemoryBlock* can be cast into each other (see virtual_memory_dealloc).
struct PlatformMemoryBlock {
	MemoryBlock block; // IMPORTANT NOTE: must be at the start
	isize total_size;                  // full mapping size, including header/guard pages
	PlatformMemoryBlock *prev, *next;  // intrusive list through the global sentinel
};
// Running total of bytes currently mapped; atomic so diagnostics stay coherent.
gb_global std::atomic<isize> global_platform_memory_total_usage;
// Sentinel node of the circular doubly-linked list of live mappings.
gb_global PlatformMemoryBlock global_platform_memory_block_sentinel;

PlatformMemoryBlock *platform_virtual_memory_alloc(isize total_size);
void platform_virtual_memory_free(PlatformMemoryBlock *block);
void platform_virtual_memory_protect(void *memory, isize size);
  111. #if defined(GB_SYSTEM_WINDOWS)
  112. void platform_virtual_memory_init(void) {
  113. global_platform_memory_block_sentinel.prev = &global_platform_memory_block_sentinel;
  114. global_platform_memory_block_sentinel.next = &global_platform_memory_block_sentinel;
  115. SYSTEM_INFO sys_info = {};
  116. GetSystemInfo(&sys_info);
  117. DEFAULT_PAGE_SIZE = gb_max(DEFAULT_PAGE_SIZE, cast(isize)sys_info.dwPageSize);
  118. GB_ASSERT(gb_is_power_of_two(DEFAULT_PAGE_SIZE));
  119. }
// Windows: reserve+commit `total_size` zeroed bytes. Prints diagnostics and
// asserts on failure (VirtualAlloc reports failure with nullptr).
PlatformMemoryBlock *platform_virtual_memory_alloc(isize total_size) {
	PlatformMemoryBlock *pmblock = (PlatformMemoryBlock *)VirtualAlloc(0, total_size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
	if (pmblock == nullptr) {
		gb_printf_err("Out of Virtual memory, oh no...\n");
		gb_printf_err("Requested: %lld bytes\n", cast(long long)total_size);
		gb_printf_err("Total Usage: %lld bytes\n", cast(long long)global_platform_memory_total_usage);
		GB_ASSERT_MSG(pmblock != nullptr, "Out of Virtual Memory, oh no...");
	}
	global_platform_memory_total_usage += total_size;
	return pmblock;
}
  131. void platform_virtual_memory_free(PlatformMemoryBlock *block) {
  132. global_platform_memory_total_usage -= block->total_size;
  133. GB_ASSERT(VirtualFree(block, 0, MEM_RELEASE));
  134. }
  135. void platform_virtual_memory_protect(void *memory, isize size) {
  136. DWORD old_protect = 0;
  137. BOOL is_protected = VirtualProtect(memory, size, PAGE_NOACCESS, &old_protect);
  138. GB_ASSERT(is_protected);
  139. }
  140. #else
  141. void platform_virtual_memory_init(void) {
  142. global_platform_memory_block_sentinel.prev = &global_platform_memory_block_sentinel;
  143. global_platform_memory_block_sentinel.next = &global_platform_memory_block_sentinel;
  144. DEFAULT_PAGE_SIZE = gb_max(DEFAULT_PAGE_SIZE, cast(isize)sysconf(_SC_PAGE_SIZE));
  145. GB_ASSERT(gb_is_power_of_two(DEFAULT_PAGE_SIZE));
  146. }
  147. PlatformMemoryBlock *platform_virtual_memory_alloc(isize total_size) {
  148. PlatformMemoryBlock *pmblock = (PlatformMemoryBlock *)mmap(nullptr, total_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  149. if (pmblock == nullptr) {
  150. gb_printf_err("Out of Virtual memory, oh no...\n");
  151. gb_printf_err("Requested: %lld bytes\n", cast(long long)total_size);
  152. gb_printf_err("Total Usage: %lld bytes\n", cast(long long)global_platform_memory_total_usage);
  153. GB_ASSERT_MSG(pmblock != nullptr, "Out of Virtual Memory, oh no...");
  154. }
  155. global_platform_memory_total_usage += total_size;
  156. return pmblock;
  157. }
  158. void platform_virtual_memory_free(PlatformMemoryBlock *block) {
  159. isize size = block->total_size;
  160. global_platform_memory_total_usage -= size;
  161. munmap(block, size);
  162. }
  163. void platform_virtual_memory_protect(void *memory, isize size) {
  164. int err = mprotect(memory, size, PROT_NONE);
  165. GB_ASSERT(err == 0);
  166. }
  167. #endif
// Allocate a MemoryBlock of `size` usable bytes backed by fresh virtual pages.
// Layout: [header page(s) | padding | size usable bytes | PROT_NONE guard page].
// The usable region is placed flush against the guard page so any overrun
// faults immediately. The block is linked into the global sentinel list.
MemoryBlock *virtual_memory_alloc(isize size) {
	isize const page_size = DEFAULT_PAGE_SIZE;
	isize total_size = size + gb_size_of(PlatformMemoryBlock);
	isize base_offset = gb_size_of(PlatformMemoryBlock);
	isize protect_offset = 0;
	bool do_protection = false;
	{ // overflow protection
		// One leading page for the header, then the request rounded up to whole
		// pages, then one trailing page that gets protected below.
		isize rounded_size = align_formula_isize(size, page_size);
		total_size = rounded_size + 2*page_size;
		// Push `base` right so base+size ends exactly at the guard page.
		base_offset = page_size + rounded_size - size;
		protect_offset = page_size + rounded_size;
		do_protection = true;
	}
	PlatformMemoryBlock *pmblock = platform_virtual_memory_alloc(total_size);
	GB_ASSERT_MSG(pmblock != nullptr, "Out of Virtual Memory, oh no...");
	pmblock->block.base = cast(u8 *)pmblock + base_offset;
	// Should be zeroed
	GB_ASSERT(pmblock->block.used == 0);
	GB_ASSERT(pmblock->block.prev == nullptr);
	if (do_protection) {
		platform_virtual_memory_protect(cast(u8 *)pmblock + protect_offset, page_size);
	}
	pmblock->block.size = size;
	pmblock->total_size = total_size;
	// Splice the new block in front of the sentinel (i.e. at the list tail).
	PlatformMemoryBlock *sentinel = &global_platform_memory_block_sentinel;
	mutex_lock(&global_memory_block_mutex);
	pmblock->next = sentinel;
	pmblock->prev = sentinel->prev;
	pmblock->prev->next = pmblock;
	pmblock->next->prev = pmblock;
	mutex_unlock(&global_memory_block_mutex);
	return &pmblock->block;
}
  201. void virtual_memory_dealloc(MemoryBlock *block_to_free) {
  202. PlatformMemoryBlock *block = cast(PlatformMemoryBlock *)block_to_free;
  203. if (block != nullptr) {
  204. mutex_lock(&global_memory_block_mutex);
  205. block->prev->next = block->next;
  206. block->next->prev = block->prev;
  207. mutex_unlock(&global_memory_block_mutex);
  208. platform_virtual_memory_free(block);
  209. }
  210. }
  211. GB_ALLOCATOR_PROC(arena_allocator_proc);
  212. gbAllocator arena_allocator(Arena *arena) {
  213. gbAllocator a;
  214. a.proc = arena_allocator_proc;
  215. a.data = arena;
  216. return a;
  217. }
// gbAllocator procedure backed by an Arena (parameters `allocator_data`,
// `type`, `size`, `alignment`, `old_memory`, `old_size` come from the
// GB_ALLOCATOR_PROC macro signature).
// - Alloc:   linear-allocate from the arena.
// - Free:    no-op — arena memory is only reclaimed via arena_free_all.
// - Resize:  grow-by-copy; the old allocation is never released (arena).
// - FreeAll: panics — callers must use arena_free_all directly.
GB_ALLOCATOR_PROC(arena_allocator_proc) {
	void *ptr = nullptr;
	Arena *arena = cast(Arena *)allocator_data;
	GB_ASSERT_NOT_NULL(arena);
	switch (type) {
	case gbAllocation_Alloc:
		ptr = arena_alloc(arena, size, alignment);
		break;
	case gbAllocation_Free:
		// Individual frees are intentionally ignored for arena storage.
		break;
	case gbAllocation_Resize:
		if (size == 0) {
			ptr = nullptr;
		} else if (size <= old_size) {
			// Shrinking (or same size): reuse the allocation in place.
			ptr = old_memory;
		} else {
			// Growing: fresh allocation + copy; old memory stays in the arena.
			ptr = arena_alloc(arena, size, alignment);
			gb_memmove(ptr, old_memory, old_size);
		}
		break;
	case gbAllocation_FreeAll:
		GB_PANIC("use arena_free_all directly");
		arena_free_all(arena);
		break;
	}
	return ptr;
}
// Per-thread arena that lives for the whole process. ignore_mutex=true is safe
// because gb_thread_local gives every thread its own instance.
gb_global gb_thread_local Arena permanent_arena = {nullptr, DEFAULT_MINIMUM_BLOCK_SIZE, true};

// Allocator over the calling thread's permanent arena; allocations are never
// individually freed.
gbAllocator permanent_allocator() {
	return arena_allocator(&permanent_arena);
}
// "Temporary" allocations are currently backed by the same permanent arena,
// so they too are never individually reclaimed.
gbAllocator temporary_allocator() {
	return permanent_allocator();
}
  252. GB_ALLOCATOR_PROC(heap_allocator_proc);
  253. gbAllocator heap_allocator(void) {
  254. gbAllocator a;
  255. a.proc = heap_allocator_proc;
  256. a.data = nullptr;
  257. return a;
  258. }
// gbAllocator procedure over the native heap. Three platform bodies are
// selected by the preprocessor; all return zeroed memory on Alloc and on the
// grown tail of a Resize. Parameters come from the GB_ALLOCATOR_PROC macro.
GB_ALLOCATOR_PROC(heap_allocator_proc) {
	void *ptr = nullptr;
	gb_unused(allocator_data);
	gb_unused(old_size);
	// TODO(bill): Throughly test!
	switch (type) {
#if defined(GB_COMPILER_MSVC)
	case gbAllocation_Alloc:
		if (size == 0) {
			return NULL;
		} else {
			// HEAP_ZERO_MEMORY gives the same zeroed-memory guarantee as the
			// arena/virtual-memory paths.
			isize aligned_size = align_formula_isize(size, alignment);
			// TODO(bill): Make sure this is aligned correctly
			ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
		}
		break;
	case gbAllocation_Free:
		if (old_memory != nullptr) {
			HeapFree(GetProcessHeap(), 0, old_memory);
		}
		break;
	case gbAllocation_Resize:
		if (old_memory != nullptr && size > 0) {
			isize aligned_size = align_formula_isize(size, alignment);
			ptr = HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, old_memory, aligned_size);
		} else if (old_memory != nullptr) {
			// size == 0: treat resize-to-zero as free.
			HeapFree(GetProcessHeap(), 0, old_memory);
		} else if (size != 0) {
			// old_memory == nullptr: treat as a fresh allocation.
			isize aligned_size = align_formula_isize(size, alignment);
			// TODO(bill): Make sure this is aligned correctly
			ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
		}
		break;
#elif defined(GB_SYSTEM_LINUX)
	// TODO(bill): *nix version that's decent
	case gbAllocation_Alloc: {
		// C11 aligned_alloc requires size to be a multiple of alignment,
		// hence the round-up. aligned_alloc does not zero, so do it manually.
		// NOTE(review): unlike the Resize case below, alignment is NOT bumped
		// to gb_align_of(max_align_t) here — confirm tiny alignments are valid
		// for this libc's aligned_alloc.
		ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
		gb_zero_size(ptr, size);
	} break;
	case gbAllocation_Free:
		if (old_memory != nullptr) {
			free(old_memory);
		}
		break;
	case gbAllocation_Resize:
		// resize-to-zero behaves as free
		if (size == 0) {
			if (old_memory != nullptr) {
				free(old_memory);
			}
			break;
		}
		alignment = gb_max(alignment, gb_align_of(max_align_t));
		// no previous allocation: behaves as a fresh zeroed alloc
		if (old_memory == nullptr) {
			ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
			gb_zero_size(ptr, size);
			break;
		}
		// shrink (or same size): reuse in place
		if (size <= old_size) {
			ptr = old_memory;
			break;
		}
		// grow: allocate, copy, free the old block, zero the new tail
		ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
		gb_memmove(ptr, old_memory, old_size);
		free(old_memory);
		gb_zero_size(cast(u8 *)ptr + old_size, gb_max(size-old_size, 0));
		break;
#else
	// TODO(bill): *nix version that's decent
	case gbAllocation_Alloc: {
		int err = 0;
		// posix_memalign requires alignment to be a multiple of sizeof(void*).
		alignment = gb_max(alignment, gb_align_of(max_align_t));
		err = posix_memalign(&ptr, alignment, size);
		GB_ASSERT_MSG(err == 0, "posix_memalign err: %d", err);
		// posix_memalign does not zero, so do it manually.
		gb_zero_size(ptr, size);
	} break;
	case gbAllocation_Free:
		if (old_memory != nullptr) {
			free(old_memory);
		}
		break;
	case gbAllocation_Resize: {
		int err = 0;
		// resize-to-zero behaves as free (free(nullptr) is a no-op)
		if (size == 0) {
			free(old_memory);
			break;
		}
		alignment = gb_max(alignment, gb_align_of(max_align_t));
		// no previous allocation: behaves as a fresh zeroed alloc
		if (old_memory == nullptr) {
			err = posix_memalign(&ptr, alignment, size);
			GB_ASSERT_MSG(err == 0, "posix_memalign err: %d", err);
			GB_ASSERT(ptr != nullptr);
			gb_zero_size(ptr, size);
			break;
		}
		// shrink (or same size): reuse in place
		if (size <= old_size) {
			ptr = old_memory;
			break;
		}
		// grow: allocate, copy, free the old block, zero the new tail
		err = posix_memalign(&ptr, alignment, size);
		GB_ASSERT_MSG(err == 0, "posix_memalign err: %d", err);
		GB_ASSERT(ptr != nullptr);
		gb_memmove(ptr, old_memory, old_size);
		free(old_memory);
		gb_zero_size(cast(u8 *)ptr + old_size, gb_max(size-old_size, 0));
	} break;
#endif
	case gbAllocation_FreeAll:
		break;
	}
	return ptr;
}
  370. template <typename T>
  371. void resize_array_raw(T **array, gbAllocator const &a, isize old_count, isize new_count) {
  372. GB_ASSERT(new_count >= 0);
  373. if (new_count == 0) {
  374. gb_free(a, *array);
  375. *array = nullptr;
  376. return;
  377. }
  378. if (new_count < old_count) {
  379. return;
  380. }
  381. isize old_size = old_count * gb_size_of(T);
  382. isize new_size = new_count * gb_size_of(T);
  383. isize alignment = gb_align_of(T);
  384. auto new_data = cast(T *)gb_resize_align(a, *array, old_size, new_size, alignment);
  385. GB_ASSERT(new_data != nullptr);
  386. *array = new_data;
  387. }