// common_memory.cpp

gb_internal gb_inline void zero_size(void *ptr, isize len) {
	memset(ptr, 0, len);
}
#define zero_item(ptr) zero_size((ptr), gb_size_of(ptr))

template <typename U, typename V>
gb_internal gb_inline U bit_cast(V &v) { return reinterpret_cast<U &>(v); }

template <typename U, typename V>
gb_internal gb_inline U const &bit_cast(V const &v) { return reinterpret_cast<U const &>(v); }

gb_internal gb_inline i64 align_formula(i64 size, i64 align) {
	i64 result = size + align-1;
	return result - (i64)((u64)result%(u64)align);
}
gb_internal gb_inline isize align_formula_isize(isize size, isize align) {
	isize result = size + align-1;
	return result - (isize)((usize)result%(usize)align);
}
gb_internal gb_inline void *align_formula_ptr(void *ptr, isize align) {
	uintptr result = (cast(uintptr)ptr) + align-1;
	return (void *)(result - result%align);
}
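// A quick worked example of the rounding formula above:
//   align_formula(13, 8) -> (13 + 7) - ((13 + 7) % 8) = 20 - 4 = 16
// i.e. the smallest multiple of `align` that is >= `size`.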
gb_global BlockingMutex global_memory_block_mutex;

gb_internal void platform_virtual_memory_init(void);

gb_internal void virtual_memory_init(void) {
	platform_virtual_memory_init();
}

struct MemoryBlock {
	MemoryBlock *prev;
	u8 *         base;
	isize        size;
	isize        used;
};

struct Arena {
	MemoryBlock * curr_block;
	isize         minimum_block_size;
	BlockingMutex mutex;
};
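// Design note: an `Arena` is a growable bump allocator. Allocations only
// advance `curr_block->used`; when the current block cannot satisfy a
// request, a fresh block is chained in via `MemoryBlock::prev`, and
// `arena_free_all` walks that chain to release everything at once.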
enum { DEFAULT_MINIMUM_BLOCK_SIZE = 8ll*1024ll*1024ll };

gb_global isize DEFAULT_PAGE_SIZE = 4096;

gb_internal MemoryBlock *virtual_memory_alloc(isize size);
gb_internal void virtual_memory_dealloc(MemoryBlock *block);
gb_internal void *arena_alloc(Arena *arena, isize min_size, isize alignment);
gb_internal void arena_free_all(Arena *arena);

gb_internal isize arena_align_forward_offset(Arena *arena, isize alignment) {
	isize alignment_offset = 0;
	isize ptr = cast(isize)(arena->curr_block->base + arena->curr_block->used);
	isize mask = alignment-1;
	if (ptr & mask) {
		alignment_offset = alignment - (ptr & mask);
	}
	return alignment_offset;
}
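// Worked example: with the cursor at an address ending in 0x...05 and
// alignment 8, `ptr & mask` is 5, so the offset returned is 8 - 5 = 3 and
// the next allocation starts at 0x...08. The mask trick relies on
// `alignment` being a power of two (asserted in arena_alloc below).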
gb_internal void *arena_alloc(Arena *arena, isize min_size, isize alignment) {
	GB_ASSERT(gb_is_power_of_two(alignment));

	mutex_lock(&arena->mutex);

	isize size = 0;
	if (arena->curr_block != nullptr) {
		size = min_size + arena_align_forward_offset(arena, alignment);
	}

	if (arena->curr_block == nullptr || (arena->curr_block->used + size) > arena->curr_block->size) {
		size = align_formula_isize(min_size, alignment);
		arena->minimum_block_size = gb_max(DEFAULT_MINIMUM_BLOCK_SIZE, arena->minimum_block_size);

		isize block_size = gb_max(size, arena->minimum_block_size);

		MemoryBlock *new_block = virtual_memory_alloc(block_size);
		new_block->prev = arena->curr_block;
		arena->curr_block = new_block;
	}

	MemoryBlock *curr_block = arena->curr_block;
	GB_ASSERT((curr_block->used + size) <= curr_block->size);

	u8 *ptr = curr_block->base + curr_block->used;
	ptr += arena_align_forward_offset(arena, alignment);

	curr_block->used += size;
	GB_ASSERT(curr_block->used <= curr_block->size);

	mutex_unlock(&arena->mutex);

	// NOTE(bill): memory will be zeroed by default due to virtual memory
	return ptr;
}
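// Usage sketch (hypothetical caller, not part of this file): allocate a
// zeroed, 16-byte-aligned buffer from an arena and release everything later.
//
//     Arena arena = {};
//     arena.minimum_block_size = DEFAULT_MINIMUM_BLOCK_SIZE;
//     u8 *buf = cast(u8 *)arena_alloc(&arena, 4096, 16);
//     // ... use buf; individual frees are no-ops for arenas ...
//     arena_free_all(&arena);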
gb_internal void arena_free_all(Arena *arena) {
	while (arena->curr_block != nullptr) {
		MemoryBlock *free_block = arena->curr_block;
		arena->curr_block = free_block->prev;
		virtual_memory_dealloc(free_block);
	}
}

struct PlatformMemoryBlock {
	MemoryBlock block; // IMPORTANT NOTE: must be at the start
	isize total_size;
	PlatformMemoryBlock *prev, *next;
};
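// The "must be at the start" requirement exists because
// virtual_memory_dealloc recovers the PlatformMemoryBlock from a
// MemoryBlock pointer with a plain cast; a zero offset between the two
// types is what makes that cast valid.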
gb_global std::atomic<isize> global_platform_memory_total_usage;
gb_global PlatformMemoryBlock global_platform_memory_block_sentinel;

gb_internal PlatformMemoryBlock *platform_virtual_memory_alloc(isize total_size);
gb_internal void platform_virtual_memory_free(PlatformMemoryBlock *block);
gb_internal void platform_virtual_memory_protect(void *memory, isize size);
#if defined(GB_SYSTEM_WINDOWS)

gb_internal void platform_virtual_memory_init(void) {
	global_platform_memory_block_sentinel.prev = &global_platform_memory_block_sentinel;
	global_platform_memory_block_sentinel.next = &global_platform_memory_block_sentinel;

	SYSTEM_INFO sys_info = {};
	GetSystemInfo(&sys_info);
	DEFAULT_PAGE_SIZE = gb_max(DEFAULT_PAGE_SIZE, cast(isize)sys_info.dwPageSize);
	GB_ASSERT(gb_is_power_of_two(DEFAULT_PAGE_SIZE));
}

gb_internal PlatformMemoryBlock *platform_virtual_memory_alloc(isize total_size) {
	PlatformMemoryBlock *pmblock = (PlatformMemoryBlock *)VirtualAlloc(0, total_size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
	if (pmblock == nullptr) {
		gb_printf_err("Out of Virtual memory, oh no...\n");
		gb_printf_err("Requested: %lld bytes\n", cast(long long)total_size);
		gb_printf_err("Total Usage: %lld bytes\n", cast(long long)global_platform_memory_total_usage);
		GB_ASSERT_MSG(pmblock != nullptr, "Out of Virtual Memory, oh no...");
	}
	global_platform_memory_total_usage.fetch_add(total_size);
	return pmblock;
}

gb_internal void platform_virtual_memory_free(PlatformMemoryBlock *block) {
	global_platform_memory_total_usage.fetch_sub(block->total_size);
	GB_ASSERT(VirtualFree(block, 0, MEM_RELEASE));
}

gb_internal void platform_virtual_memory_protect(void *memory, isize size) {
	DWORD old_protect = 0;
	BOOL is_protected = VirtualProtect(memory, size, PAGE_NOACCESS, &old_protect);
	GB_ASSERT(is_protected);
}
#else

gb_internal void platform_virtual_memory_init(void) {
	global_platform_memory_block_sentinel.prev = &global_platform_memory_block_sentinel;
	global_platform_memory_block_sentinel.next = &global_platform_memory_block_sentinel;

	DEFAULT_PAGE_SIZE = gb_max(DEFAULT_PAGE_SIZE, cast(isize)sysconf(_SC_PAGE_SIZE));
	GB_ASSERT(gb_is_power_of_two(DEFAULT_PAGE_SIZE));
}

gb_internal PlatformMemoryBlock *platform_virtual_memory_alloc(isize total_size) {
	PlatformMemoryBlock *pmblock = (PlatformMemoryBlock *)mmap(nullptr, total_size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	// NOTE: mmap reports failure with MAP_FAILED ((void *)-1), not nullptr
	if (pmblock == cast(PlatformMemoryBlock *)MAP_FAILED) {
		gb_printf_err("Out of Virtual memory, oh no...\n");
		gb_printf_err("Requested: %lld bytes\n", cast(long long)total_size);
		gb_printf_err("Total Usage: %lld bytes\n", cast(long long)global_platform_memory_total_usage);
		GB_ASSERT_MSG(pmblock != cast(PlatformMemoryBlock *)MAP_FAILED, "Out of Virtual Memory, oh no...");
	}
	global_platform_memory_total_usage += total_size;
	return pmblock;
}

gb_internal void platform_virtual_memory_free(PlatformMemoryBlock *block) {
	isize size = block->total_size;
	global_platform_memory_total_usage -= size;
	munmap(block, size);
}

gb_internal void platform_virtual_memory_protect(void *memory, isize size) {
	int err = mprotect(memory, size, PROT_NONE);
	GB_ASSERT(err == 0);
}

#endif
gb_internal MemoryBlock *virtual_memory_alloc(isize size) {
	isize const page_size = DEFAULT_PAGE_SIZE;

	isize total_size     = size + gb_size_of(PlatformMemoryBlock);
	isize base_offset    = gb_size_of(PlatformMemoryBlock);
	isize protect_offset = 0;

	bool do_protection = false;
	{ // overflow protection
		isize rounded_size = align_formula_isize(size, page_size);
		total_size     = rounded_size + 2*page_size;
		base_offset    = page_size + rounded_size - size;
		protect_offset = page_size + rounded_size;
		do_protection  = true;
	}
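	// Resulting layout (one extra page on each side of the user data):
	//
	//   [header page: PlatformMemoryBlock ...][user data, right-aligned][guard page, PROT_NONE]
	//   ^ pmblock                             ^ base_offset             ^ protect_offset
	//
	// Right-aligning the user data against the guard page means a write
	// past the end of the block faults immediately.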
	PlatformMemoryBlock *pmblock = platform_virtual_memory_alloc(total_size);
	GB_ASSERT_MSG(pmblock != nullptr, "Out of Virtual Memory, oh no...");

	pmblock->block.base = cast(u8 *)pmblock + base_offset;
	// Should be zeroed
	GB_ASSERT(pmblock->block.used == 0);
	GB_ASSERT(pmblock->block.prev == nullptr);

	if (do_protection) {
		platform_virtual_memory_protect(cast(u8 *)pmblock + protect_offset, page_size);
	}

	pmblock->block.size = size;
	pmblock->total_size = total_size;

	PlatformMemoryBlock *sentinel = &global_platform_memory_block_sentinel;
	mutex_lock(&global_memory_block_mutex);
	pmblock->next = sentinel;
	pmblock->prev = sentinel->prev;
	pmblock->prev->next = pmblock;
	pmblock->next->prev = pmblock;
	mutex_unlock(&global_memory_block_mutex);

	return &pmblock->block;
}
gb_internal void virtual_memory_dealloc(MemoryBlock *block_to_free) {
	PlatformMemoryBlock *block = cast(PlatformMemoryBlock *)block_to_free;
	if (block != nullptr) {
		mutex_lock(&global_memory_block_mutex);
		block->prev->next = block->next;
		block->next->prev = block->prev;
		mutex_unlock(&global_memory_block_mutex);

		platform_virtual_memory_free(block);
	}
}
gb_internal GB_ALLOCATOR_PROC(arena_allocator_proc);

gb_internal gbAllocator arena_allocator(Arena *arena) {
	gbAllocator a;
	a.proc = arena_allocator_proc;
	a.data = arena;
	return a;
}
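// Usage sketch (hypothetical, assuming gb.h's gb_alloc_array helper):
//
//     gbAllocator a = arena_allocator(&permanent_arena);
//     int *xs = gb_alloc_array(a, int, 128); // served by arena_alloc via the proc below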
gb_internal GB_ALLOCATOR_PROC(arena_allocator_proc) {
	void *ptr = nullptr;
	Arena *arena = cast(Arena *)allocator_data;
	GB_ASSERT_NOT_NULL(arena);

	switch (type) {
	case gbAllocation_Alloc:
		ptr = arena_alloc(arena, size, alignment);
		break;
	case gbAllocation_Free:
		break;
	case gbAllocation_Resize:
		if (size == 0) {
			ptr = nullptr;
		} else if (size <= old_size) {
			ptr = old_memory;
		} else {
			ptr = arena_alloc(arena, size, alignment);
			gb_memmove(ptr, old_memory, old_size);
		}
		break;
	case gbAllocation_FreeAll:
		GB_PANIC("use arena_free_all directly");
		arena_free_all(arena);
		break;
	}

	return ptr;
}
gb_global gb_thread_local Arena permanent_arena = {nullptr, DEFAULT_MINIMUM_BLOCK_SIZE};

gb_internal gbAllocator permanent_allocator() {
	return arena_allocator(&permanent_arena);
}

gb_internal gbAllocator temporary_allocator() {
	return permanent_allocator();
}
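// Note: temporary_allocator currently just aliases the thread-local
// permanent arena, so "temporary" allocations live until arena_free_all
// is called on permanent_arena.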
gb_internal GB_ALLOCATOR_PROC(heap_allocator_proc);

gb_internal gbAllocator heap_allocator(void) {
	gbAllocator a;
	a.proc = heap_allocator_proc;
	a.data = nullptr;
	return a;
}

gb_internal GB_ALLOCATOR_PROC(heap_allocator_proc) {
	void *ptr = nullptr;
	gb_unused(allocator_data);
	gb_unused(old_size);

	// TODO(bill): Thoroughly test!
	switch (type) {
#if defined(GB_COMPILER_MSVC)
	case gbAllocation_Alloc:
		if (size == 0) {
			return NULL;
		} else {
			isize aligned_size = align_formula_isize(size, alignment);
			// TODO(bill): Make sure this is aligned correctly
			ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
		}
		break;
	case gbAllocation_Free:
		if (old_memory != nullptr) {
			HeapFree(GetProcessHeap(), 0, old_memory);
		}
		break;
	case gbAllocation_Resize:
		if (old_memory != nullptr && size > 0) {
			isize aligned_size = align_formula_isize(size, alignment);
			ptr = HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, old_memory, aligned_size);
		} else if (old_memory != nullptr) {
			HeapFree(GetProcessHeap(), 0, old_memory);
		} else if (size != 0) {
			isize aligned_size = align_formula_isize(size, alignment);
			// TODO(bill): Make sure this is aligned correctly
			ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
		}
		break;
#elif defined(GB_SYSTEM_LINUX)
	// TODO(bill): *nix version that's decent
	case gbAllocation_Alloc: {
		// aligned_alloc requires `size` to be a multiple of `alignment`,
		// hence the round-up; clamp alignment as the Resize case does
		alignment = gb_max(alignment, gb_align_of(max_align_t));
		ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
		gb_zero_size(ptr, size);
	} break;
	case gbAllocation_Free:
		if (old_memory != nullptr) {
			free(old_memory);
		}
		break;
	case gbAllocation_Resize:
		if (size == 0) {
			if (old_memory != nullptr) {
				free(old_memory);
			}
			break;
		}

		alignment = gb_max(alignment, gb_align_of(max_align_t));
		if (old_memory == nullptr) {
			ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
			gb_zero_size(ptr, size);
			break;
		}
		if (size <= old_size) {
			ptr = old_memory;
			break;
		}

		ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
		gb_memmove(ptr, old_memory, old_size);
		free(old_memory);
		gb_zero_size(cast(u8 *)ptr + old_size, gb_max(size-old_size, 0));
		break;
#else
	// TODO(bill): *nix version that's decent
	case gbAllocation_Alloc: {
		int err = 0;
		alignment = gb_max(alignment, gb_align_of(max_align_t));
		err = posix_memalign(&ptr, alignment, size);
		GB_ASSERT_MSG(err == 0, "posix_memalign err: %d", err);
		gb_zero_size(ptr, size);
	} break;
	case gbAllocation_Free:
		if (old_memory != nullptr) {
			free(old_memory);
		}
		break;
	case gbAllocation_Resize: {
		int err = 0;
		if (size == 0) {
			free(old_memory);
			break;
		}

		alignment = gb_max(alignment, gb_align_of(max_align_t));
		if (old_memory == nullptr) {
			err = posix_memalign(&ptr, alignment, size);
			GB_ASSERT_MSG(err == 0, "posix_memalign err: %d", err);
			GB_ASSERT(ptr != nullptr);
			gb_zero_size(ptr, size);
			break;
		}
		if (size <= old_size) {
			ptr = old_memory;
			break;
		}

		err = posix_memalign(&ptr, alignment, size);
		GB_ASSERT_MSG(err == 0, "posix_memalign err: %d", err);
		GB_ASSERT(ptr != nullptr);
		gb_memmove(ptr, old_memory, old_size);
		free(old_memory);
		gb_zero_size(cast(u8 *)ptr + old_size, gb_max(size-old_size, 0));
	} break;
#endif
	case gbAllocation_FreeAll:
		break;
	}

	return ptr;
}
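// Usage sketch (hypothetical, assuming gb.h's gb_alloc_align/gb_free):
// unlike the arena, heap allocations must be freed individually.
//
//     gbAllocator h = heap_allocator();
//     void *p = gb_alloc_align(h, 256, 16);
//     gb_free(h, p);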
template <typename T>
gb_internal void resize_array_raw(T **array, gbAllocator const &a, isize old_count, isize new_count) {
	GB_ASSERT(new_count >= 0);
	if (new_count == 0) {
		gb_free(a, *array);
		*array = nullptr;
		return;
	}
	if (new_count < old_count) {
		// shrinking requests keep the existing allocation
		return;
	}
	isize old_size = old_count * gb_size_of(T);
	isize new_size = new_count * gb_size_of(T);
	isize alignment = gb_align_of(T);
	auto new_data = cast(T *)gb_resize_align(a, *array, old_size, new_size, alignment);
	GB_ASSERT(new_data != nullptr);
	*array = new_data;
}
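// Usage sketch (hypothetical): grow a raw array from 8 to 32 elements.
//
//     int *items = nullptr;
//     resize_array_raw(&items, heap_allocator(), 0, 8);
//     resize_array_raw(&items, heap_allocator(), 8, 32); // old data preserved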