allocators.odin

package mem

import "intrinsics"
import "core:runtime"

nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                           size, alignment: int,
                           old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
    return nil;
}

nil_allocator :: proc() -> Allocator {
    return Allocator{
        procedure = nil_allocator_proc,
        data = nil,
    };
}
// Custom allocators

Arena :: struct {
    data:       []byte,
    offset:     int,
    peak_used:  int,
    temp_count: int,
}

Arena_Temp_Memory :: struct {
    arena:       ^Arena,
    prev_offset: int,
}

init_arena :: proc(a: ^Arena, data: []byte) {
    a.data       = data;
    a.offset     = 0;
    a.peak_used  = 0;
    a.temp_count = 0;
}

arena_allocator :: proc(arena: ^Arena) -> Allocator {
    return Allocator{
        procedure = arena_allocator_proc,
        data = arena,
    };
}

arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, flags: u64, location := #caller_location) -> rawptr {
    arena := cast(^Arena)allocator_data;

    switch mode {
    case .Alloc:
        total_size := size + alignment;
        if arena.offset + total_size > len(arena.data) {
            return nil;
        }
        #no_bounds_check end := &arena.data[arena.offset];
        ptr := align_forward(end, uintptr(alignment));
        arena.offset += total_size;
        arena.peak_used = max(arena.peak_used, arena.offset);
        return zero(ptr, size);

    case .Free:
        // NOTE(bill): Free all at once
        // Use Arena_Temp_Memory if you want to free a block

    case .Free_All:
        arena.offset = 0;

    case .Resize:
        return default_resize_align(old_memory, old_size, size, alignment, arena_allocator(arena));

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory);
        if set != nil {
            set^ = {.Alloc, .Free_All, .Resize, .Query_Features};
        }
        return set;

    case .Query_Info:
        return nil;
    }

    return nil;
}

begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
    tmp: Arena_Temp_Memory;
    tmp.arena = a;
    tmp.prev_offset = a.offset;
    a.temp_count += 1;
    return tmp;
}

end_arena_temp_memory :: proc(using tmp: Arena_Temp_Memory) {
    assert(arena.offset >= prev_offset);
    assert(arena.temp_count > 0);
    arena.offset = prev_offset;
    arena.temp_count -= 1;
}
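
// Usage sketch (editor's example, hypothetical; not part of the original file):
// hand the arena a fixed backing buffer, install it as context.allocator, and
// use Arena_Temp_Memory to roll the offset back in one step.
example_arena_usage :: proc() {
    backing: [1024]byte;
    arena: Arena;
    init_arena(&arena, backing[:]);

    context.allocator = arena_allocator(&arena);

    tmp := begin_arena_temp_memory(&arena);
    _ = make([]byte, 128);      // allocated from the arena via context.allocator
    end_arena_temp_memory(tmp); // rewinds the arena offset past the slice above

    free_all(); // .Free_All resets the whole arena at once
}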
Scratch_Allocator :: struct {
    data:               []byte,
    curr_offset:        int,
    prev_offset:        int,
    backup_allocator:   Allocator,
    leaked_allocations: [dynamic]rawptr,
    default_to_default_allocator: bool,
}

scratch_allocator_init :: proc(scratch: ^Scratch_Allocator, data: []byte, backup_allocator := context.allocator) {
    scratch.data = data;
    scratch.curr_offset = 0;
    scratch.prev_offset = 0;
    scratch.backup_allocator = backup_allocator;
}

scratch_allocator_destroy :: proc(using scratch: ^Scratch_Allocator) {
    if scratch == nil {
        return;
    }
    for ptr in leaked_allocations {
        free(ptr, backup_allocator);
    }
    delete(leaked_allocations);
    delete(data, backup_allocator);
    scratch^ = {};
}
scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                               size, alignment: int,
                               old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
    scratch := (^Scratch_Allocator)(allocator_data);

    if scratch.data == nil {
        DEFAULT_SCRATCH_BACKING_SIZE :: 1<<22;
        if context.allocator.procedure == scratch_allocator_proc ||
           context.allocator.data == allocator_data {
            panic("cyclic initialization of the scratch allocator with itself");
        }
        scratch_allocator_init(scratch, make([]byte, DEFAULT_SCRATCH_BACKING_SIZE));
    }
    switch mode {
    case .Alloc:
        switch {
        case scratch.curr_offset+size <= len(scratch.data):
            offset := align_forward_uintptr(uintptr(scratch.curr_offset), uintptr(alignment));
            ptr := &scratch.data[offset];
            zero(ptr, size);
            scratch.prev_offset = int(offset);
            scratch.curr_offset = int(offset) + size;
            return ptr;

        case size <= len(scratch.data):
            // Wrap around to the start of the backing buffer
            offset := align_forward_uintptr(uintptr(0), uintptr(alignment));
            ptr := &scratch.data[offset];
            zero(ptr, size);
            scratch.prev_offset = int(offset);
            scratch.curr_offset = int(offset) + size;
            return ptr;
        }

        // The request does not fit in the backing buffer at all: spill to the
        // backup allocator and remember the pointer so it can be reclaimed in
        // .Free_All or on destroy.
        // TODO(bill): Should leaks be notified about? Should probably use a logging system that is built into the context system
        a := scratch.backup_allocator;
        if a.procedure == nil {
            a = context.allocator;
            scratch.backup_allocator = a;
        }

        ptr := alloc(size, alignment, a, loc);
        if scratch.leaked_allocations == nil {
            scratch.leaked_allocations = make([dynamic]rawptr, a);
        }
        append(&scratch.leaked_allocations, ptr);
        return ptr;

    case .Free:
        last_ptr := rawptr(&scratch.data[scratch.prev_offset]);
        if old_memory == last_ptr {
            full_size := scratch.curr_offset - scratch.prev_offset;
            scratch.curr_offset = scratch.prev_offset;
            zero(last_ptr, full_size);
            return nil;
        }
        // NOTE(bill): It's scratch memory, don't worry about freeing

    case .Free_All:
        scratch.curr_offset = 0;
        scratch.prev_offset = 0;
        for ptr in scratch.leaked_allocations {
            free(ptr, scratch.backup_allocator);
        }
        clear(&scratch.leaked_allocations);

    case .Resize:
        last_ptr := rawptr(&scratch.data[scratch.prev_offset]);
        if old_memory == last_ptr && len(scratch.data)-scratch.prev_offset >= size {
            scratch.curr_offset = scratch.prev_offset+size;
            return old_memory;
        }
        return scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, flags, loc);

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory);
        if set != nil {
            set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features};
        }
        return set;

    case .Query_Info:
        return nil;
    }

    return nil;
}
scratch_allocator :: proc(scratch: ^Scratch_Allocator) -> Allocator {
    return Allocator{
        procedure = scratch_allocator_proc,
        data = scratch,
    };
}
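
// Usage sketch (editor's example, hypothetical; not part of the original file):
// a scratch allocator for transient, frequently recycled allocations.
// Requests that do not fit in the ring buffer spill to the backup allocator
// and are reclaimed on .Free_All.
example_scratch_usage :: proc() {
    scratch: Scratch_Allocator;
    scratch_allocator_init(&scratch, make([]byte, 4*1024));
    defer scratch_allocator_destroy(&scratch);

    context.temp_allocator = scratch_allocator(&scratch);

    for i := 0; i < 100; i += 1 {
        _ = make([]byte, 256, context.temp_allocator);
        free_all(context.temp_allocator); // recycle the buffer each pass
    }
}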
Stack_Allocation_Header :: struct {
    prev_offset: int,
    padding:     int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
    data:        []byte,
    prev_offset: int,
    curr_offset: int,
    peak_used:   int,
}

init_stack :: proc(s: ^Stack, data: []byte) {
    s.data = data;
    s.prev_offset = 0;
    s.curr_offset = 0;
    s.peak_used = 0;
}

stack_allocator :: proc(stack: ^Stack) -> Allocator {
    return Allocator{
        procedure = stack_allocator_proc,
        data = stack,
    };
}
stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, flags: u64, location := #caller_location) -> rawptr {
    s := cast(^Stack)allocator_data;

    if s.data == nil {
        return nil;
    }

    raw_alloc :: proc(s: ^Stack, size, alignment: int) -> rawptr {
        curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset);
        padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header));
        if s.curr_offset + padding + size > len(s.data) {
            return nil;
        }
        s.prev_offset = s.curr_offset;
        s.curr_offset += padding;

        next_addr := curr_addr + uintptr(padding);
        header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header));
        header.padding = auto_cast padding;
        header.prev_offset = auto_cast s.prev_offset;

        s.curr_offset += size;
        s.peak_used = max(s.peak_used, s.curr_offset);

        return zero(rawptr(next_addr), size);
    }

    switch mode {
    case .Alloc:
        return raw_alloc(s, size, alignment);

    case .Free:
        if old_memory == nil {
            return nil;
        }
        start := uintptr(raw_data(s.data));
        end := start + uintptr(len(s.data));
        curr_addr := uintptr(old_memory);

        if !(start <= curr_addr && curr_addr < end) {
            panic("Out of bounds memory address passed to stack allocator (free)");
        }

        if curr_addr >= start+uintptr(s.curr_offset) {
            // NOTE(bill): Allow double frees
            return nil;
        }

        header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header));
        old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)));

        if old_offset != int(header.prev_offset) {
            panic("Out of order stack allocator free");
        }

        s.curr_offset = int(old_offset);
        s.prev_offset = int(header.prev_offset);

    case .Free_All:
        s.prev_offset = 0;
        s.curr_offset = 0;

    case .Resize:
        if old_memory == nil {
            return raw_alloc(s, size, alignment);
        }
        if size == 0 {
            return nil;
        }

        start := uintptr(raw_data(s.data));
        end := start + uintptr(len(s.data));
        curr_addr := uintptr(old_memory);
        if !(start <= curr_addr && curr_addr < end) {
            panic("Out of bounds memory address passed to stack allocator (resize)");
        }

        if curr_addr >= start+uintptr(s.curr_offset) {
            // NOTE(bill): Allow double frees
            return nil;
        }

        if old_size == size {
            return old_memory;
        }

        header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header));
        old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)));

        if old_offset != int(header.prev_offset) {
            // Not the most recent allocation: allocate fresh memory and copy
            ptr := raw_alloc(s, size, alignment);
            copy(ptr, old_memory, min(old_size, size));
            return ptr;
        }

        // The most recent allocation can be grown or shrunk in place
        old_memory_size := uintptr(s.curr_offset) - (curr_addr - start);
        assert(old_memory_size == uintptr(old_size));

        diff := size - old_size;
        s.curr_offset += diff; // works for smaller sizes too
        if diff > 0 {
            // Zero only the newly exposed bytes past the old allocation
            zero(rawptr(curr_addr + uintptr(old_size)), diff);
        }

        return old_memory;

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory);
        if set != nil {
            set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features};
        }
        return set;

    case .Query_Info:
        return nil;
    }

    return nil;
}
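
// Usage sketch (editor's example, hypothetical; not part of the original file):
// LIFO usage of the Stack allocator. Frees must happen in reverse allocation
// order; an out-of-order free panics, which is what the per-allocation header
// is there to detect.
example_stack_usage :: proc() {
    backing: [512]byte;
    s: Stack;
    init_stack(&s, backing[:]);

    context.allocator = stack_allocator(&s);

    a := alloc(64);
    b := alloc(64);
    free(b); // strict LIFO: most recent allocation must be freed first
    free(a);
}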
Small_Stack_Allocation_Header :: struct {
    padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order
Small_Stack :: struct {
    data:      []byte,
    offset:    int,
    peak_used: int,
}

init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
    s.data = data;
    s.offset = 0;
    s.peak_used = 0;
}

small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
    return Allocator{
        procedure = small_stack_allocator_proc,
        data = stack,
    };
}

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                   size, alignment: int,
                                   old_memory: rawptr, old_size: int, flags: u64, location := #caller_location) -> rawptr {
    s := cast(^Small_Stack)allocator_data;

    if s.data == nil {
        return nil;
    }

    // Cap the alignment so the computed padding always fits in the header's
    // u8 padding field
    align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2);

    raw_alloc :: proc(s: ^Small_Stack, size, alignment: int) -> rawptr {
        curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset);
        padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header));
        if s.offset + padding + size > len(s.data) {
            return nil;
        }
        s.offset += padding;

        next_addr := curr_addr + uintptr(padding);
        header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header));
        header.padding = auto_cast padding;

        s.offset += size;
        s.peak_used = max(s.peak_used, s.offset);

        return zero(rawptr(next_addr), size);
    }
    switch mode {
    case .Alloc:
        return raw_alloc(s, size, align);

    case .Free:
        if old_memory == nil {
            return nil;
        }
        start := uintptr(raw_data(s.data));
        end := start + uintptr(len(s.data));
        curr_addr := uintptr(old_memory);

        if !(start <= curr_addr && curr_addr < end) {
            panic("Out of bounds memory address passed to stack allocator (free)");
        }

        if curr_addr >= start+uintptr(s.offset) {
            // NOTE(bill): Allow double frees
            return nil;
        }

        header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header));
        old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)));
        s.offset = int(old_offset);

    case .Free_All:
        s.offset = 0;

    case .Resize:
        if old_memory == nil {
            return raw_alloc(s, size, align);
        }
        if size == 0 {
            return nil;
        }

        start := uintptr(raw_data(s.data));
        end := start + uintptr(len(s.data));
        curr_addr := uintptr(old_memory);
        if !(start <= curr_addr && curr_addr < end) {
            panic("Out of bounds memory address passed to stack allocator (resize)");
        }

        if curr_addr >= start+uintptr(s.offset) {
            // NOTE(bill): Treat as a double free
            return nil;
        }

        if old_size == size {
            return old_memory;
        }

        ptr := raw_alloc(s, size, align);
        copy(ptr, old_memory, min(old_size, size));
        return ptr;

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory);
        if set != nil {
            set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features};
        }
        return set;

    case .Query_Info:
        return nil;
    }

    return nil;
}
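
// Usage sketch (editor's example, hypothetical; not part of the original file):
// unlike Stack, Small_Stack tolerates out-of-order frees. A free simply
// rewinds the offset to the freed allocation, implicitly discarding anything
// allocated after it.
example_small_stack_usage :: proc() {
    backing: [512]byte;
    s: Small_Stack;
    init_small_stack(&s, backing[:]);

    context.allocator = small_stack_allocator(&s);

    a := alloc(64);
    _ = alloc(64); // allocated after `a`
    free(a);       // rewinds past the later allocation as well
}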
Dynamic_Pool :: struct {
    block_size:    int,
    out_band_size: int,
    alignment:     int,

    unused_blocks:        [dynamic]rawptr,
    used_blocks:          [dynamic]rawptr,
    out_band_allocations: [dynamic]rawptr,

    current_block: rawptr,
    current_pos:   rawptr,
    bytes_left:    int,

    block_allocator: Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: 65536;
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554;
dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int,
                                    flags: u64 = 0, loc := #caller_location) -> rawptr {
    pool := (^Dynamic_Pool)(allocator_data);

    switch mode {
    case .Alloc:
        return dynamic_pool_alloc(pool, size);

    case .Free:
        // Individual frees are not supported; use .Free_All

    case .Free_All:
        dynamic_pool_free_all(pool);

    case .Resize:
        if old_size >= size {
            return old_memory;
        }
        ptr := dynamic_pool_alloc(pool, size);
        copy(ptr, old_memory, old_size);
        return ptr;

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory);
        if set != nil {
            set^ = {.Alloc, .Free_All, .Resize, .Query_Features, .Query_Info};
        }
        return set;

    case .Query_Info:
        info := (^Allocator_Query_Info)(old_memory);
        if info != nil && info.pointer != nil {
            info.size = pool.block_size;
            info.alignment = pool.alignment;
            return info;
        }
        return nil;
    }

    return nil;
}
dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
    return Allocator{
        procedure = dynamic_pool_allocator_proc,
        data = pool,
    };
}

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
                          block_allocator := context.allocator,
                          array_allocator := context.allocator,
                          block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
                          out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
                          alignment := 8) {
    pool.block_size = block_size;
    pool.out_band_size = out_band_size;
    pool.alignment = alignment;
    pool.block_allocator = block_allocator;
    pool.out_band_allocations.allocator = array_allocator;
    pool.unused_blocks.allocator = array_allocator;
    pool.used_blocks.allocator = array_allocator;
}
dynamic_pool_destroy :: proc(using pool: ^Dynamic_Pool) {
    dynamic_pool_free_all(pool);
    delete(unused_blocks);
    delete(used_blocks);

    zero(pool, size_of(pool^));
}

dynamic_pool_alloc :: proc(using pool: ^Dynamic_Pool, bytes: int) -> rawptr {
    cycle_new_block :: proc(using pool: ^Dynamic_Pool) {
        if block_allocator.procedure == nil {
            panic("You must call dynamic_pool_init on a Dynamic_Pool before using it");
        }

        if current_block != nil {
            append(&used_blocks, current_block);
        }

        new_block: rawptr;
        if len(unused_blocks) > 0 {
            new_block = pop(&unused_blocks);
        } else {
            new_block = block_allocator.procedure(block_allocator.data, .Alloc,
                                                  block_size, alignment,
                                                  nil, 0);
        }

        bytes_left = block_size;
        current_pos = new_block;
        current_block = new_block;
    }

    n := bytes;
    extra := alignment - (n % alignment);
    n += extra;

    if n >= out_band_size {
        // Requests at or above out_band_size bypass the block pool and are
        // serviced directly by the block allocator for the full n bytes
        assert(block_allocator.procedure != nil);
        memory := block_allocator.procedure(block_allocator.data, .Alloc,
                                            n, alignment,
                                            nil, 0);
        if memory != nil {
            append(&out_band_allocations, memory);
        }
        return memory;
    }

    if bytes_left < n {
        cycle_new_block(pool);
        if current_block == nil {
            return nil;
        }
    }

    memory := current_pos;
    current_pos = ptr_offset((^byte)(current_pos), n);
    bytes_left -= n;
    return memory;
}
dynamic_pool_reset :: proc(using pool: ^Dynamic_Pool) {
    if current_block != nil {
        append(&unused_blocks, current_block);
        current_block = nil;
    }

    for block in used_blocks {
        append(&unused_blocks, block);
    }
    clear(&used_blocks);

    for a in out_band_allocations {
        free(a, block_allocator);
    }
    clear(&out_band_allocations);
}

dynamic_pool_free_all :: proc(using pool: ^Dynamic_Pool) {
    dynamic_pool_reset(pool);

    for block in unused_blocks {
        free(block, block_allocator);
    }
    clear(&unused_blocks);
}
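
// Usage sketch (editor's example, hypothetical; not part of the original file):
// a growing pool for many small allocations with a single bulk release.
// dynamic_pool_reset recycles the blocks for reuse; dynamic_pool_free_all
// (called by dynamic_pool_destroy here) returns them to the block allocator.
example_dynamic_pool_usage :: proc() {
    pool: Dynamic_Pool;
    dynamic_pool_init(&pool);
    defer dynamic_pool_destroy(&pool);

    context.allocator = dynamic_pool_allocator(&pool);

    for i := 0; i < 1000; i += 1 {
        _ = alloc(48); // bump-allocated out of 64 KiB blocks
    }

    dynamic_pool_reset(&pool); // keep the blocks, reuse the memory
}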
panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
    switch mode {
    case .Alloc:
        if size > 0 {
            panic("mem: panic allocator, .Alloc called");
        }
    case .Resize:
        if size > 0 {
            panic("mem: panic allocator, .Resize called");
        }
    case .Free:
        if old_memory != nil {
            panic("mem: panic allocator, .Free called");
        }
    case .Free_All:
        panic("mem: panic allocator, .Free_All called");

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory);
        if set != nil {
            set^ = {.Query_Features};
        }
        return set;

    case .Query_Info:
        return nil;
    }

    return nil;
}

panic_allocator :: proc() -> Allocator {
    return Allocator{
        procedure = panic_allocator_proc,
        data = nil,
    };
}
alloca_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                              size, alignment: int,
                              old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
    switch mode {
    case .Alloc:
        // Dispatch to a constant alignment for the alloca intrinsic.
        // NOTE: alloca memory is scoped to the enclosing stack frame; treat
        // allocations from this allocator as very short-lived.
        switch alignment {
        case 0:     return intrinsics.alloca(size, 0);
        case 1:     return intrinsics.alloca(size, 1);
        case 2:     return intrinsics.alloca(size, 2);
        case 4:     return intrinsics.alloca(size, 4);
        case 8:     return intrinsics.alloca(size, 8);
        case 16:    return intrinsics.alloca(size, 16);
        case 32:    return intrinsics.alloca(size, 32);
        case 64:    return intrinsics.alloca(size, 64);
        case 128:   return intrinsics.alloca(size, 128);
        case 256:   return intrinsics.alloca(size, 256);
        case 512:   return intrinsics.alloca(size, 512);
        case 1024:  return intrinsics.alloca(size, 1024);
        case 2048:  return intrinsics.alloca(size, 2048);
        case 4096:  return intrinsics.alloca(size, 4096);
        case 8192:  return intrinsics.alloca(size, 8192);
        case 16384: return intrinsics.alloca(size, 16384);
        case 32768: return intrinsics.alloca(size, 32768);
        case 65536: return intrinsics.alloca(size, 65536);
        case:       return intrinsics.alloca(size, 2*align_of(uintptr));
        }

    case .Resize:
        return default_resize_align(old_memory, old_size, size, alignment, alloca_allocator());

    case .Free:
        // Do nothing

    case .Free_All:
        // Do nothing

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory);
        if set != nil {
            set^ = {.Alloc, .Resize, .Query_Features};
        }
        return set;

    case .Query_Info:
        return nil;
    }

    return nil;
}

alloca_allocator :: proc() -> Allocator {
    return Allocator{
        procedure = alloca_allocator_proc,
        data = nil,
    };
}
Tracking_Allocator_Entry :: struct {
    memory:    rawptr,
    size:      int,
    alignment: int,
    location:  runtime.Source_Code_Location,
}

Tracking_Allocator :: struct {
    backing:           Allocator,
    allocation_map:    map[rawptr]Tracking_Allocator_Entry,
    clear_on_free_all: bool,
}

tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, allocation_map_allocator := context.allocator) {
    t.backing = backing_allocator;
    t.allocation_map.allocator = allocation_map_allocator;
}

tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
    delete(t.allocation_map);
}

tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
    return Allocator{
        data = data,
        procedure = tracking_allocator_proc,
    };
}
tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                size, alignment: int,
                                old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
    data := (^Tracking_Allocator)(allocator_data);

    if mode == .Query_Info {
        info := (^Allocator_Query_Info)(old_memory);
        if info != nil && info.pointer != nil {
            if entry, ok := data.allocation_map[info.pointer]; ok {
                info.size = entry.size;
                info.alignment = entry.alignment;
                return info;
            }
            info.pointer = nil;
        }
        return nil;
    }

    result := data.backing.procedure(data.backing.data, mode, size, alignment, old_memory, old_size, flags, loc);

    if data.allocation_map.allocator.procedure == nil {
        data.allocation_map.allocator = context.allocator;
    }

    switch mode {
    case .Alloc:
        data.allocation_map[result] = Tracking_Allocator_Entry{
            memory = result,
            size = size,
            alignment = alignment,
            location = loc,
        };

    case .Free:
        delete_key(&data.allocation_map, old_memory);

    case .Resize:
        if old_memory != result {
            delete_key(&data.allocation_map, old_memory);
        }
        data.allocation_map[result] = Tracking_Allocator_Entry{
            memory = result,
            size = size,
            alignment = alignment,
            location = loc,
        };

    case .Free_All:
        if data.clear_on_free_all {
            clear_map(&data.allocation_map);
        }

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory);
        if set != nil {
            set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features, .Query_Info};
        }
        return set;

    case .Query_Info:
        unreachable(); // handled before the backing call above
    }

    return result;
}
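
// Usage sketch (editor's example, hypothetical; not part of the original file):
// wrap the current allocator in a Tracking_Allocator, then inspect
// allocation_map at shutdown to find leaks; each entry records the size,
// alignment, and source location of the allocation.
example_tracking_usage :: proc() -> int {
    track: Tracking_Allocator;
    tracking_allocator_init(&track, context.allocator);
    defer tracking_allocator_destroy(&track);

    context.allocator = tracking_allocator(&track);

    _ = alloc(32); // never freed: stays in track.allocation_map

    leak_count := len(track.allocation_map);
    return leak_count;
}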