// allocators.odin

package mem

nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                           size, alignment: int,
                           old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
	return nil;
}

nil_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = nil_allocator_proc,
		data = nil,
	};
}

// Custom allocators

Arena :: struct {
	data:       []byte,
	offset:     int,
	peak_used:  int,
	temp_count: int,
}

Arena_Temp_Memory :: struct {
	arena:       ^Arena,
	prev_offset: int,
}

init_arena :: proc(a: ^Arena, data: []byte) {
	a.data       = data;
	a.offset     = 0;
	a.peak_used  = 0;
	a.temp_count = 0;
}

arena_allocator :: proc(arena: ^Arena) -> Allocator {
	return Allocator{
		procedure = arena_allocator_proc,
		data = arena,
	};
}

arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, flags: u64, location := #caller_location) -> rawptr {
	arena := cast(^Arena)allocator_data;

	switch mode {
	case .Alloc:
		total_size := size + alignment;

		if arena.offset + total_size > len(arena.data) {
			return nil;
		}

		#no_bounds_check end := &arena.data[arena.offset];

		ptr := align_forward(end, uintptr(alignment));
		arena.offset += total_size;
		arena.peak_used = max(arena.peak_used, arena.offset);
		return zero(ptr, size);

	case .Free:
		// NOTE(bill): Free all at once
		// Use Arena_Temp_Memory if you want to free a block

	case .Free_All:
		arena.offset = 0;

	case .Resize:
		return default_resize_align(old_memory, old_size, size, alignment, arena_allocator(arena));
	}

	return nil;
}

begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	tmp: Arena_Temp_Memory;
	tmp.arena = a;
	tmp.prev_offset = a.offset;
	a.temp_count += 1;
	return tmp;
}

end_arena_temp_memory :: proc(using tmp: Arena_Temp_Memory) {
	assert(arena.offset >= prev_offset);
	assert(arena.temp_count > 0);
	arena.offset = prev_offset;
	arena.temp_count -= 1;
}
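
// Usage sketch (illustrative only, not part of the original file): how an Arena
// is typically combined with Arena_Temp_Memory to free a block of allocations at
// once, as the NOTE in arena_allocator_proc suggests. The procedure name
// `example_arena_usage` and the 1 KiB backing buffer are assumptions for the example.
/*
example_arena_usage :: proc() {
	backing: [1024]byte;
	arena: Arena;
	init_arena(&arena, backing[:]);

	context.allocator = arena_allocator(&arena);

	tmp := begin_arena_temp_memory(&arena);
	xs := make([]int, 16);      // allocated from the arena's backing buffer
	_ = xs;
	end_arena_temp_memory(tmp); // rolls arena.offset back, releasing xs
}
*/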

Scratch_Allocator :: struct {
	data:                 []byte,
	curr_offset:          int,
	prev_offset:          int,
	backup_allocator:     Allocator,
	leaked_allocations:   [dynamic]rawptr,
	default_to_default_allocator: bool,
}

scratch_allocator_init :: proc(scratch: ^Scratch_Allocator, data: []byte, backup_allocator := context.allocator) {
	scratch.data = data;
	scratch.curr_offset = 0;
	scratch.prev_offset = 0;
	scratch.backup_allocator = backup_allocator;
}

scratch_allocator_destroy :: proc(using scratch: ^Scratch_Allocator) {
	if scratch == nil {
		return;
	}
	for ptr in leaked_allocations {
		free(ptr, backup_allocator);
	}
	delete(leaked_allocations);
	delete(data, backup_allocator);
	scratch^ = {};
}

scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                               size, alignment: int,
                               old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
	scratch := (^Scratch_Allocator)(allocator_data);

	if scratch.data == nil {
		DEFAULT_SCRATCH_BACKING_SIZE :: 1<<22;
		if !(context.allocator.procedure != scratch_allocator_proc &&
		     context.allocator.data != allocator_data) {
			panic("cyclic initialization of the scratch allocator with itself");
		}
		scratch_allocator_init(scratch, make([]byte, DEFAULT_SCRATCH_BACKING_SIZE));
	}

	switch mode {
	case .Alloc:
		switch {
		case scratch.curr_offset+size <= len(scratch.data):
			offset := align_forward_uintptr(uintptr(scratch.curr_offset), uintptr(alignment));
			ptr := &scratch.data[offset];
			zero(ptr, size);
			scratch.prev_offset = int(offset);
			scratch.curr_offset = int(offset) + size;
			return ptr;

		case size <= len(scratch.data):
			// Does not fit at the current offset but fits in the buffer: wrap around to the start
			offset := align_forward_uintptr(uintptr(0), uintptr(alignment));
			ptr := &scratch.data[offset];
			zero(ptr, size);
			scratch.prev_offset = int(offset);
			scratch.curr_offset = int(offset) + size;
			return ptr;
		}

		// TODO(bill): Should leaks be notified about? Should probably use a logging system that is built into the context system
		// The request does not fit in the backing buffer at all: fall back to the backup allocator
		a := scratch.backup_allocator;
		if a.procedure == nil {
			a = context.allocator;
			scratch.backup_allocator = a;
		}

		ptr := alloc(size, alignment, a, loc);
		if scratch.leaked_allocations == nil {
			scratch.leaked_allocations = make([dynamic]rawptr, a);
		}
		append(&scratch.leaked_allocations, ptr);
		return ptr;

	case .Free:
		// Only the most recent allocation can be undone
		last_ptr := rawptr(&scratch.data[scratch.prev_offset]);
		if old_memory == last_ptr {
			full_size := scratch.curr_offset - scratch.prev_offset;
			scratch.curr_offset = scratch.prev_offset;
			zero(last_ptr, full_size);
			return nil;
		}
		// NOTE(bill): It's scratch memory, don't worry about freeing

	case .Free_All:
		scratch.curr_offset = 0;
		scratch.prev_offset = 0;
		for ptr in scratch.leaked_allocations {
			free(ptr, scratch.backup_allocator);
		}
		clear(&scratch.leaked_allocations);

	case .Resize:
		last_ptr := rawptr(&scratch.data[scratch.prev_offset]);
		if old_memory == last_ptr && len(scratch.data)-scratch.prev_offset >= size {
			// The most recent allocation can be grown or shrunk in place
			scratch.curr_offset = scratch.prev_offset+size;
			return old_memory;
		}
		return scratch_allocator_proc(allocator_data, Allocator_Mode.Alloc, size, alignment, old_memory, old_size, flags, loc);
	}

	return nil;
}

scratch_allocator :: proc(scratch: ^Scratch_Allocator) -> Allocator {
	return Allocator{
		procedure = scratch_allocator_proc,
		data = scratch,
	};
}
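
// Usage sketch (illustrative only): a Scratch_Allocator used for short-lived
// allocations, with oversized requests spilling to the backup allocator and
// being tracked in leaked_allocations. The name `example_scratch_usage` and the
// buffer sizes are assumptions; `make` comes from the language's builtins.
/*
example_scratch_usage :: proc() {
	scratch: Scratch_Allocator;
	scratch_allocator_init(&scratch, make([]byte, 4*1024));
	defer scratch_allocator_destroy(&scratch); // frees any spilled allocations and the buffer

	context.allocator = scratch_allocator(&scratch);

	tmp := make([]byte, 256);     // served from the scratch buffer
	_ = tmp;

	big := make([]byte, 64*1024); // larger than the buffer: goes to the backup allocator
	_ = big;
}
*/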

Stack_Allocation_Header :: struct {
	prev_offset: int,
	padding:     int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
	data:        []byte,
	prev_offset: int,
	curr_offset: int,
	peak_used:   int,
}

init_stack :: proc(s: ^Stack, data: []byte) {
	s.data = data;
	s.prev_offset = 0;
	s.curr_offset = 0;
	s.peak_used = 0;
}

stack_allocator :: proc(stack: ^Stack) -> Allocator {
	return Allocator{
		procedure = stack_allocator_proc,
		data = stack,
	};
}

stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, flags: u64, location := #caller_location) -> rawptr {
	s := cast(^Stack)allocator_data;

	if s.data == nil {
		return nil;
	}

	raw_alloc :: proc(s: ^Stack, size, alignment: int) -> rawptr {
		curr_addr := uintptr(&s.data[0]) + uintptr(s.curr_offset);
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header));
		if s.curr_offset + padding + size > len(s.data) {
			return nil;
		}
		s.prev_offset = s.curr_offset;
		s.curr_offset += padding;

		next_addr := curr_addr + uintptr(padding);
		header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header));
		header.padding = auto_cast padding;
		header.prev_offset = auto_cast s.prev_offset;

		s.curr_offset += size;
		s.peak_used = max(s.peak_used, s.curr_offset);

		return zero(rawptr(next_addr), size);
	}

	switch mode {
	case .Alloc:
		return raw_alloc(s, size, alignment);

	case .Free:
		if old_memory == nil {
			return nil;
		}
		start := uintptr(&s.data[0]);
		end := start + uintptr(len(s.data));
		curr_addr := uintptr(old_memory);

		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (free)");
			return nil;
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil;
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header));
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(&s.data[0]));

		if old_offset != int(header.prev_offset) {
			panic("Out of order stack allocator free");
			return nil;
		}

		s.curr_offset = int(old_offset);
		s.prev_offset = int(header.prev_offset);

	case .Free_All:
		s.prev_offset = 0;
		s.curr_offset = 0;

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, alignment);
		}
		if size == 0 {
			return nil;
		}

		start := uintptr(&s.data[0]);
		end := start + uintptr(len(s.data));
		curr_addr := uintptr(old_memory);
		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (resize)");
			return nil;
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil;
		}

		if old_size == size {
			return old_memory;
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header));
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(&s.data[0]));

		if old_offset != int(header.prev_offset) {
			// Not the most recent allocation: allocate a new block and copy
			ptr := raw_alloc(s, size, alignment);
			copy(ptr, old_memory, min(old_size, size));
			return ptr;
		}

		old_memory_size := uintptr(s.curr_offset) - (curr_addr - start);
		assert(old_memory_size == uintptr(old_size));

		diff := size - old_size;
		s.curr_offset += diff; // works for smaller sizes too
		if diff > 0 {
			// Zero the newly exposed bytes at the end of the allocation
			zero(rawptr(curr_addr + uintptr(old_size)), diff);
		}
		return old_memory;
	}

	return nil;
}
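
// Usage sketch (illustrative only): Stack expects allocations to be released in
// reverse (LIFO) order; freeing an earlier allocation also rewinds the stack past
// everything allocated after it. The procedure name `example_stack_usage` and the
// sizes are assumptions; `alloc`/`free` are the helpers from this package.
/*
example_stack_usage :: proc() {
	backing: [4096]byte;
	s: Stack;
	init_stack(&s, backing[:]);

	a := stack_allocator(&s);

	x := alloc(128, 16, a);
	y := alloc(64, 16, a);

	free(y, a); // most recent allocation first
	free(x, a); // then the one before it
	// Freeing x first would have rewound curr_offset past y as well
}
*/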

Small_Stack_Allocation_Header :: struct {
	padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order
Small_Stack :: struct {
	data:      []byte,
	offset:    int,
	peak_used: int,
}

init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data;
	s.offset = 0;
	s.peak_used = 0;
}

small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
	return Allocator{
		procedure = small_stack_allocator_proc,
		data = stack,
	};
}

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                   size, alignment: int,
                                   old_memory: rawptr, old_size: int, flags: u64, location := #caller_location) -> rawptr {
	s := cast(^Small_Stack)allocator_data;

	if s.data == nil {
		return nil;
	}

	align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2);

	raw_alloc :: proc(s: ^Small_Stack, size, alignment: int) -> rawptr {
		curr_addr := uintptr(&s.data[0]) + uintptr(s.offset);
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header));
		if s.offset + padding + size > len(s.data) {
			return nil;
		}
		s.offset += padding;

		next_addr := curr_addr + uintptr(padding);
		header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header));
		header.padding = auto_cast padding;

		s.offset += size;
		s.peak_used = max(s.peak_used, s.offset);

		return zero(rawptr(next_addr), size);
	}

	switch mode {
	case .Alloc:
		return raw_alloc(s, size, align);

	case .Free:
		if old_memory == nil {
			return nil;
		}
		start := uintptr(&s.data[0]);
		end := start + uintptr(len(s.data));
		curr_addr := uintptr(old_memory);

		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (free)");
			return nil;
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Allow double frees
			return nil;
		}

		header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header));
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(&s.data[0]));

		s.offset = int(old_offset);

	case .Free_All:
		s.offset = 0;

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, align);
		}
		if size == 0 {
			return nil;
		}

		start := uintptr(&s.data[0]);
		end := start + uintptr(len(s.data));
		curr_addr := uintptr(old_memory);
		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (resize)");
			return nil;
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Treat as a double free
			return nil;
		}

		if old_size == size {
			return old_memory;
		}

		ptr := raw_alloc(s, size, align);
		copy(ptr, old_memory, min(old_size, size));
		return ptr;
	}

	return nil;
}
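
// Usage sketch (illustrative only): Small_Stack stores a single padding byte per
// allocation and tolerates out-of-order frees; freeing an earlier allocation simply
// snaps `offset` back to that allocation's start, reclaiming anything allocated
// after it too. The name `example_small_stack_usage` and the sizes are assumptions.
/*
example_small_stack_usage :: proc() {
	backing: [4096]byte;
	s: Small_Stack;
	init_small_stack(&s, backing[:]);

	a := small_stack_allocator(&s);

	x := alloc(128, 8, a);
	y := alloc(64, 8, a);
	_ = y;

	free(x, a); // out of order: allowed, but rewinds offset past y as well
}
*/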

Dynamic_Pool :: struct {
	block_size:    int,
	out_band_size: int,
	alignment:     int,

	unused_blocks:        [dynamic]rawptr,
	used_blocks:          [dynamic]rawptr,
	out_band_allocations: [dynamic]rawptr,

	current_block: rawptr,
	current_pos:   rawptr,
	bytes_left:    int,

	block_allocator: Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT       :: 65536;
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554;

dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int,
                                    flags: u64 = 0, loc := #caller_location) -> rawptr {
	pool := (^Dynamic_Pool)(allocator_data);

	switch mode {
	case .Alloc:
		return dynamic_pool_alloc(pool, size);
	case .Free:
		panic("Allocator_Mode.Free is not supported for a pool");
	case .Free_All:
		dynamic_pool_free_all(pool);
	case .Resize:
		panic("Allocator_Mode.Resize is not supported for a pool");
		if old_size >= size {
			return old_memory;
		}
		ptr := dynamic_pool_alloc(pool, size);
		copy(ptr, old_memory, old_size);
		return ptr;
	}
	return nil;
}

dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
	return Allocator{
		procedure = dynamic_pool_allocator_proc,
		data = pool,
	};
}

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
                          block_allocator := context.allocator,
                          array_allocator := context.allocator,
                          block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
                          out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
                          alignment := 8) {
	pool.block_size      = block_size;
	pool.out_band_size   = out_band_size;
	pool.alignment       = alignment;
	pool.block_allocator = block_allocator;
	pool.out_band_allocations.allocator = array_allocator;
	pool.unused_blocks.allocator        = array_allocator;
	pool.used_blocks.allocator          = array_allocator;
}

dynamic_pool_destroy :: proc(using pool: ^Dynamic_Pool) {
	dynamic_pool_free_all(pool);
	delete(unused_blocks);
	delete(used_blocks);

	zero(pool, size_of(pool^));
}

dynamic_pool_alloc :: proc(using pool: ^Dynamic_Pool, bytes: int) -> rawptr {
	cycle_new_block :: proc(using pool: ^Dynamic_Pool) {
		if block_allocator.procedure == nil {
			panic("You must call pool_init on a Pool before using it");
		}

		if current_block != nil {
			append(&used_blocks, current_block);
		}

		new_block: rawptr;
		if len(unused_blocks) > 0 {
			new_block = pop(&unused_blocks);
		} else {
			new_block = block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
			                                      block_size, alignment,
			                                      nil, 0);
		}

		bytes_left = block_size;
		current_pos = new_block;
		current_block = new_block;
	}

	n := bytes;
	extra := alignment - (n % alignment);
	n += extra;

	if n >= out_band_size {
		// Requests at or above the out-of-band threshold are served directly by the
		// block allocator with their own (aligned) size and tracked separately
		assert(block_allocator.procedure != nil);
		memory := block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
		                                    n, alignment,
		                                    nil, 0);
		if memory != nil {
			append(&out_band_allocations, (^byte)(memory));
		}
		return memory;
	}

	if bytes_left < n {
		cycle_new_block(pool);
		if current_block == nil {
			return nil;
		}
	}

	memory := current_pos;
	current_pos = ptr_offset((^byte)(current_pos), n);
	bytes_left -= n;
	return memory;
}

dynamic_pool_reset :: proc(using pool: ^Dynamic_Pool) {
	if current_block != nil {
		append(&unused_blocks, current_block);
		current_block = nil;
	}

	for block in used_blocks {
		append(&unused_blocks, block);
	}
	clear(&used_blocks);

	for a in out_band_allocations {
		free(a, block_allocator);
	}
	clear(&out_band_allocations);
}

dynamic_pool_free_all :: proc(using pool: ^Dynamic_Pool) {
	dynamic_pool_reset(pool);

	for block in unused_blocks {
		free(block, block_allocator);
	}
	clear(&unused_blocks);
}
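
// Usage sketch (illustrative only): a Dynamic_Pool grows in fixed-size blocks
// obtained from block_allocator, serves small requests by bumping current_pos,
// and routes requests at or above out_band_size to separately tracked allocations.
// The procedure name `example_pool_usage` and the request sizes are assumptions.
/*
example_pool_usage :: proc() {
	pool: Dynamic_Pool;
	dynamic_pool_init(&pool); // defaults: 65536-byte blocks, 6554-byte out-of-band threshold, 8-byte alignment
	defer dynamic_pool_destroy(&pool);

	p := dynamic_pool_alloc(&pool, 100);    // bump-allocated from the current block
	q := dynamic_pool_alloc(&pool, 10_000); // above out_band_size: allocated out of band
	_, _ = p, q;

	dynamic_pool_free_all(&pool); // returns every block and out-of-band allocation to block_allocator
}
*/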