// common_memory.cpp

gb_inline void zero_size(void *ptr, isize len) {
    memset(ptr, 0, len);
}
#define zero_item(ptr) zero_size((ptr), gb_size_of(ptr))

template <typename U, typename V>
gb_inline U bit_cast(V &v) { return reinterpret_cast<U &>(v); }

template <typename U, typename V>
gb_inline U const &bit_cast(V const &v) { return reinterpret_cast<U const &>(v); }

gb_inline i64 align_formula(i64 size, i64 align) {
    if (align > 0) {
        i64 result = size + align-1;
        return result - result%align;
    }
    return size;
}
gb_inline isize align_formula_isize(isize size, isize align) {
    if (align > 0) {
        isize result = size + align-1;
        return result - result%align;
    }
    return size;
}
gb_inline void *align_formula_ptr(void *ptr, isize align) {
    if (align > 0) {
        uintptr result = (cast(uintptr)ptr) + align-1;
        return (void *)(result - result%align);
    }
    return ptr;
}
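
// Worked example (illustrative): align_formula(13, 8) computes result = 13 + 7 = 20,
// then 20 - 20%8 = 16, i.e. the next multiple of 8 at or above the input size.
// align_formula_ptr applies the same arithmetic to the pointer's integer value, so a
// pointer at address 0x1005 aligned to 16 becomes 0x1010.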

gb_global BlockingMutex global_memory_block_mutex;
gb_global BlockingMutex global_memory_allocator_mutex;

void platform_virtual_memory_init(void);

void virtual_memory_init(void) {
    mutex_init(&global_memory_block_mutex);
    mutex_init(&global_memory_allocator_mutex);
    platform_virtual_memory_init();
}

struct MemoryBlock {
    u8 *         base;
    isize        size;
    isize        used;
    MemoryBlock *prev;
};

struct Arena {
    MemoryBlock *curr_block;
    isize        minimum_block_size;
};

enum { DEFAULT_MINIMUM_BLOCK_SIZE = 8ll*1024ll*1024ll };

gb_global isize DEFAULT_PAGE_SIZE = 4096;
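
// NOTE: An Arena is a singly linked list of virtually allocated MemoryBlocks;
// `curr_block` is the newest block and each block points at its predecessor via `prev`.
// New blocks are at least DEFAULT_MINIMUM_BLOCK_SIZE (8 MiB); a single allocation that
// needs more gets a block sized to fit it (see arena_alloc below).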

MemoryBlock *virtual_memory_alloc(isize size);
void virtual_memory_dealloc(MemoryBlock *block);
void arena_free_all(Arena *arena);

isize arena_align_forward_offset(Arena *arena, isize alignment) {
    isize alignment_offset = 0;
    isize ptr = cast(isize)(arena->curr_block->base + arena->curr_block->used);
    isize mask = alignment-1;
    if (ptr & mask) {
        alignment_offset = alignment - (ptr & mask);
    }
    return alignment_offset;
}
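
// Worked example (illustrative): if the current write cursor sits at an address ending
// in 0x...5 and alignment is 8, then (ptr & mask) == 5 and the returned offset is 3,
// which moves the cursor to the next 8-byte boundary. The mask trick is only valid for
// power-of-two alignments, which arena_alloc asserts below.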

void *arena_alloc(Arena *arena, isize min_size, isize alignment) {
    GB_ASSERT(gb_is_power_of_two(alignment));

    isize size = 0;

    // TODO(bill): make it so that this can be done lock free (if possible)
    mutex_lock(&global_memory_allocator_mutex);

    if (arena->curr_block != nullptr) {
        size = min_size + arena_align_forward_offset(arena, alignment);
    }
    if (arena->curr_block == nullptr || (arena->curr_block->used + size) > arena->curr_block->size) {
        size = align_formula_isize(min_size, alignment);
        arena->minimum_block_size = gb_max(DEFAULT_MINIMUM_BLOCK_SIZE, arena->minimum_block_size);

        isize block_size = gb_max(size, arena->minimum_block_size);

        MemoryBlock *new_block = virtual_memory_alloc(block_size);
        new_block->prev = arena->curr_block;
        arena->curr_block = new_block;
    }

    MemoryBlock *curr_block = arena->curr_block;
    GB_ASSERT((curr_block->used + size) <= curr_block->size);

    u8 *ptr = curr_block->base + curr_block->used;
    ptr += arena_align_forward_offset(arena, alignment);

    curr_block->used += size;
    GB_ASSERT(curr_block->used <= curr_block->size);

    mutex_unlock(&global_memory_allocator_mutex);

    // NOTE(bill): memory will be zeroed by default due to virtual memory
    return ptr;
}
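
// NOTE: `size` deliberately includes the alignment slop in front of the returned
// pointer, so advancing `used` by `size` keeps the block's cursor consistent with the
// aligned pointer handed back. When the current block cannot fit the request, a fresh
// block is chained in and the allocation starts at its base instead.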

void arena_free_all(Arena *arena) {
    while (arena->curr_block != nullptr) {
        MemoryBlock *free_block = arena->curr_block;
        arena->curr_block = free_block->prev;
        virtual_memory_dealloc(free_block);
    }
}

struct PlatformMemoryBlock {
    MemoryBlock          block; // IMPORTANT NOTE: must be at the start
    isize                total_size;
    PlatformMemoryBlock *prev, *next;
};

gb_global PlatformMemoryBlock global_platform_memory_block_sentinel;
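
// NOTE: Every live PlatformMemoryBlock is linked into a circular doubly linked list
// anchored at this sentinel (see virtual_memory_alloc/virtual_memory_dealloc below),
// guarded by global_memory_block_mutex. Because `block` is the first member, a
// MemoryBlock * can be cast straight back to its owning PlatformMemoryBlock *.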

PlatformMemoryBlock *platform_virtual_memory_alloc(isize total_size);
void platform_virtual_memory_free(PlatformMemoryBlock *block);
void platform_virtual_memory_protect(void *memory, isize size);

#if defined(GB_SYSTEM_WINDOWS)

void platform_virtual_memory_init(void) {
    global_platform_memory_block_sentinel.prev = &global_platform_memory_block_sentinel;
    global_platform_memory_block_sentinel.next = &global_platform_memory_block_sentinel;

    SYSTEM_INFO sys_info = {};
    GetSystemInfo(&sys_info);
    DEFAULT_PAGE_SIZE = gb_max(DEFAULT_PAGE_SIZE, cast(isize)sys_info.dwPageSize);
    GB_ASSERT(gb_is_power_of_two(DEFAULT_PAGE_SIZE));
}

PlatformMemoryBlock *platform_virtual_memory_alloc(isize total_size) {
    PlatformMemoryBlock *pmblock = (PlatformMemoryBlock *)VirtualAlloc(0, total_size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
    GB_ASSERT_MSG(pmblock != nullptr, "Out of Virtual Memory, oh no...");
    return pmblock;
}

void platform_virtual_memory_free(PlatformMemoryBlock *block) {
    GB_ASSERT(VirtualFree(block, 0, MEM_RELEASE));
}

void platform_virtual_memory_protect(void *memory, isize size) {
    DWORD old_protect = 0;
    BOOL is_protected = VirtualProtect(memory, size, PAGE_NOACCESS, &old_protect);
    GB_ASSERT(is_protected);
}

#else

void platform_virtual_memory_init(void) {
    global_platform_memory_block_sentinel.prev = &global_platform_memory_block_sentinel;
    global_platform_memory_block_sentinel.next = &global_platform_memory_block_sentinel;

    DEFAULT_PAGE_SIZE = gb_max(DEFAULT_PAGE_SIZE, cast(isize)sysconf(_SC_PAGE_SIZE));
    GB_ASSERT(gb_is_power_of_two(DEFAULT_PAGE_SIZE));
}

PlatformMemoryBlock *platform_virtual_memory_alloc(isize total_size) {
    PlatformMemoryBlock *pmblock = (PlatformMemoryBlock *)mmap(nullptr, total_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    // NOTE: mmap reports failure with MAP_FAILED, not nullptr
    GB_ASSERT_MSG(pmblock != cast(PlatformMemoryBlock *)MAP_FAILED, "Out of Virtual Memory, oh no...");
    return pmblock;
}

void platform_virtual_memory_free(PlatformMemoryBlock *block) {
    isize size = block->total_size;
    munmap(block, size);
}

void platform_virtual_memory_protect(void *memory, isize size) {
    // NOTE: mprotect returns 0 on success
    int err = mprotect(memory, size, PROT_NONE);
    GB_ASSERT(err == 0);
}

#endif
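
// NOTE: Both backends hand back zero-initialized pages: VirtualAlloc with MEM_COMMIT
// and anonymous private mmap mappings are guaranteed zero-filled by the OS, which is
// what lets arena_alloc skip an explicit memset on freshly allocated blocks.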

MemoryBlock *virtual_memory_alloc(isize size) {
    isize const page_size = DEFAULT_PAGE_SIZE;

    isize total_size     = size + gb_size_of(PlatformMemoryBlock);
    isize base_offset    = gb_size_of(PlatformMemoryBlock);
    isize protect_offset = 0;

    bool do_protection = false;
    { // overflow protection
        isize rounded_size = align_formula_isize(size, page_size);
        total_size     = rounded_size + 2*page_size;
        base_offset    = page_size + rounded_size - size;
        protect_offset = page_size + rounded_size;
        do_protection  = true;
    }

    PlatformMemoryBlock *pmblock = platform_virtual_memory_alloc(total_size);
    GB_ASSERT_MSG(pmblock != nullptr, "Out of Virtual Memory, oh no...");

    pmblock->block.base = cast(u8 *)pmblock + base_offset;
    // Should be zeroed
    GB_ASSERT(pmblock->block.used == 0);
    GB_ASSERT(pmblock->block.prev == nullptr);

    if (do_protection) {
        platform_virtual_memory_protect(cast(u8 *)pmblock + protect_offset, page_size);
    }

    pmblock->block.size = size;
    pmblock->total_size = total_size;

    PlatformMemoryBlock *sentinel = &global_platform_memory_block_sentinel;
    mutex_lock(&global_memory_block_mutex);
    pmblock->next = sentinel;
    pmblock->prev = sentinel->prev;
    pmblock->prev->next = pmblock;
    pmblock->next->prev = pmblock;
    mutex_unlock(&global_memory_block_mutex);

    return &pmblock->block;
}
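
// NOTE: Resulting layout of one block (illustrative):
//     [ header page | padding | `size` user bytes | guard page ]
// base_offset right-aligns the user region against the guard page, which is marked
// PAGE_NOACCESS / PROT_NONE, so writing even one byte past the requested size faults
// immediately; that is the "overflow protection" referred to above.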

void virtual_memory_dealloc(MemoryBlock *block_to_free) {
    PlatformMemoryBlock *block = cast(PlatformMemoryBlock *)block_to_free;
    if (block != nullptr) {
        mutex_lock(&global_memory_block_mutex);
        block->prev->next = block->next;
        block->next->prev = block->prev;
        mutex_unlock(&global_memory_block_mutex);

        platform_virtual_memory_free(block);
    }
}

GB_ALLOCATOR_PROC(arena_allocator_proc);

gbAllocator arena_allocator(Arena *arena) {
    gbAllocator a;
    a.proc = arena_allocator_proc;
    a.data = arena;
    return a;
}

GB_ALLOCATOR_PROC(arena_allocator_proc) {
    void *ptr = nullptr;
    Arena *arena = cast(Arena *)allocator_data;
    GB_ASSERT_NOT_NULL(arena);

    switch (type) {
    case gbAllocation_Alloc:
        ptr = arena_alloc(arena, size, alignment);
        break;
    case gbAllocation_Free:
        break;
    case gbAllocation_Resize:
        if (size == 0) {
            ptr = nullptr;
        } else if (size <= old_size) {
            ptr = old_memory;
        } else {
            ptr = arena_alloc(arena, size, alignment);
            gb_memmove(ptr, old_memory, old_size);
        }
        break;
    case gbAllocation_FreeAll:
        arena_free_all(arena);
        break;
    }

    return ptr;
}

gb_global Arena permanent_arena = {};
gbAllocator permanent_allocator() {
    return arena_allocator(&permanent_arena);
}

gb_global Arena temporary_arena = {};
gbAllocator temporary_allocator() {
    return arena_allocator(&temporary_arena);
}
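
// Example usage (illustrative sketch, not part of this file; assumes the gb_alloc_array
// macro and gb_free from gb.h):
//
//     gbAllocator a = permanent_allocator();
//     u8 *buf = gb_alloc_array(a, u8, 4096); // zeroed, bump-allocated from the arena
//     gb_free(a, buf);                       // no-op: arena memory lives until FreeAll
//
// Individual frees are intentionally ignored; an arena is reclaimed wholesale via
// arena_free_all (gbAllocation_FreeAll).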

GB_ALLOCATOR_PROC(heap_allocator_proc);

gbAllocator heap_allocator(void) {
    gbAllocator a;
    a.proc = heap_allocator_proc;
    a.data = nullptr;
    return a;
}

GB_ALLOCATOR_PROC(heap_allocator_proc) {
    void *ptr = nullptr;
    gb_unused(allocator_data);
    gb_unused(old_size);

    // TODO(bill): Thoroughly test!
    switch (type) {
#if defined(GB_COMPILER_MSVC)
    case gbAllocation_Alloc: {
        isize aligned_size = align_formula_isize(size, alignment);
        // TODO(bill): Make sure this is aligned correctly
        ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
    } break;
    case gbAllocation_Free:
        HeapFree(GetProcessHeap(), 0, old_memory);
        break;
    case gbAllocation_Resize: {
        isize aligned_size = align_formula_isize(size, alignment);
        ptr = HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, old_memory, aligned_size);
    } break;

#elif defined(GB_SYSTEM_LINUX)
    // TODO(bill): *nix version that's decent
    case gbAllocation_Alloc: {
        ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
        gb_zero_size(ptr, size);
    } break;
    case gbAllocation_Free: {
        free(old_memory);
    } break;
    case gbAllocation_Resize:
        if (size == 0) {
            free(old_memory);
            break;
        }
        if (!old_memory) {
            ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
            gb_zero_size(ptr, size);
            break;
        }
        if (size <= old_size) {
            ptr = old_memory;
            break;
        }
        ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
        gb_memmove(ptr, old_memory, old_size);
        free(old_memory); // release the old allocation once its contents have been copied
        gb_zero_size(cast(u8 *)ptr + old_size, gb_max(size-old_size, 0));
        break;

#else
    // TODO(bill): *nix version that's decent
    case gbAllocation_Alloc:
        posix_memalign(&ptr, alignment, size);
        gb_zero_size(ptr, size);
        break;
    case gbAllocation_Free:
        free(old_memory);
        break;
    case gbAllocation_Resize:
        if (size == 0) {
            free(old_memory);
            break;
        }
        if (!old_memory) {
            posix_memalign(&ptr, alignment, size);
            gb_zero_size(ptr, size);
            break;
        }
        if (size <= old_size) {
            ptr = old_memory;
            break;
        }
        posix_memalign(&ptr, alignment, size);
        gb_memmove(ptr, old_memory, old_size);
        free(old_memory); // release the old allocation once its contents have been copied
        gb_zero_size(cast(u8 *)ptr + old_size, gb_max(size-old_size, 0));
        break;
#endif

    case gbAllocation_FreeAll:
        break;
    }

    return ptr;
}
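
// Example usage (illustrative sketch, not part of this file):
//
//     gbAllocator a = heap_allocator();
//     void *p = gb_alloc_align(a, 256, 16);     // zero-initialized on every backend above
//     p = gb_resize_align(a, p, 256, 512, 16);  // grows in place or copies, new tail zeroed
//     gb_free(a, p);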

template <typename T>
void resize_array_raw(T **array, gbAllocator const &a, isize old_count, isize new_count) {
    GB_ASSERT(new_count >= 0);
    if (new_count == 0) {
        gb_free(a, *array);
        *array = nullptr;
        return;
    }
    if (new_count < old_count) {
        return;
    }
    isize old_size = old_count * gb_size_of(T);
    isize new_size = new_count * gb_size_of(T);
    isize alignment = gb_align_of(T);
    auto new_data = cast(T *)gb_resize_align(a, *array, old_size, new_size, alignment);
    GB_ASSERT(new_data != nullptr);
    *array = new_data;
}
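
// Example usage (illustrative sketch, not part of this file): growing a raw array
// without a manual realloc-and-copy. Note that shrinking to a smaller non-zero count
// is a no-op by design.
//
//     isize *values = nullptr;
//     resize_array_raw(&values, heap_allocator(), 0, 16);
//     resize_array_raw(&values, heap_allocator(), 16, 64); // contents preserved
//     resize_array_raw(&values, heap_allocator(), 64, 0);  // frees and nulls the pointer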