allocators.odin

package mem

import "intrinsics"
import "core:runtime"

nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                           size, alignment: int,
                           old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	return nil, nil;
}

nil_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = nil_allocator_proc,
		data = nil,
	};
}

// Custom allocators

Arena :: struct {
	data:       []byte,
	offset:     int,
	peak_used:  int,
	temp_count: int,
}

Arena_Temp_Memory :: struct {
	arena:       ^Arena,
	prev_offset: int,
}

init_arena :: proc(a: ^Arena, data: []byte) {
	a.data       = data;
	a.offset     = 0;
	a.peak_used  = 0;
	a.temp_count = 0;
}

arena_allocator :: proc(arena: ^Arena) -> Allocator {
	return Allocator{
		procedure = arena_allocator_proc,
		data = arena,
	};
}

arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	arena := cast(^Arena)allocator_data;

	switch mode {
	case .Alloc:
		total_size := size + alignment;
		if arena.offset + total_size > len(arena.data) {
			return nil, .Out_Of_Memory;
		}

		#no_bounds_check end := &arena.data[arena.offset];
		ptr := align_forward(end, uintptr(alignment));
		arena.offset += total_size;
		arena.peak_used = max(arena.peak_used, arena.offset);
		zero(ptr, size);
		return byte_slice(ptr, size), nil;

	case .Free:
		// NOTE(bill): Free all at once
		// Use Arena_Temp_Memory if you want to free a block

	case .Free_All:
		arena.offset = 0;

	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena));

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory);
		if set != nil {
			set^ = {.Alloc, .Free_All, .Resize, .Query_Features};
		}
		return nil, nil;

	case .Query_Info:
		return nil, nil;
	}

	return nil, nil;
}

begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	tmp: Arena_Temp_Memory;
	tmp.arena = a;
	tmp.prev_offset = a.offset;
	a.temp_count += 1;
	return tmp;
}

end_arena_temp_memory :: proc(using tmp: Arena_Temp_Memory) {
	assert(arena.offset >= prev_offset);
	assert(arena.temp_count > 0);
	arena.offset = prev_offset;
	arena.temp_count -= 1;
}
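
// Example (illustrative sketch, not part of the original file): backing an Arena
// with a fixed buffer and using Arena_Temp_Memory to roll back a batch of
// temporary allocations, since .Free is a no-op for this allocator.
//
//	backing: [4096]byte;
//	arena: Arena;
//	init_arena(&arena, backing[:]);
//	context.allocator = arena_allocator(&arena);
//
//	tmp := begin_arena_temp_memory(&arena);
//	scratch := make([]int, 64);   // allocated from the arena
//	end_arena_temp_memory(tmp);   // rolls the arena offset back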

Scratch_Allocator :: struct {
	data: []byte,
	curr_offset: int,
	prev_allocation: rawptr,
	backup_allocator: Allocator,
	leaked_allocations: [dynamic][]byte,
}

scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) {
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator);
	s.curr_offset = 0;
	s.prev_allocation = nil;
	s.backup_allocator = backup_allocator;
	s.leaked_allocations.allocator = backup_allocator;
}

scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
	if s == nil {
		return;
	}
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator);
	}
	delete(s.leaked_allocations);
	delete(s.data, s.backup_allocator);
	s^ = {};
}

scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                               size, alignment: int,
                               old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	s := (^Scratch_Allocator)(allocator_data);

	if s.data == nil {
		DEFAULT_BACKING_SIZE :: 1<<22;
		if !(context.allocator.procedure != scratch_allocator_proc &&
		     context.allocator.data != allocator_data) {
			panic("cyclic initialization of the scratch allocator with itself");
		}
		scratch_allocator_init(s, DEFAULT_BACKING_SIZE);
	}

	size := size;

	switch mode {
	case .Alloc:
		size = align_forward_int(size, alignment);

		switch {
		case s.curr_offset+size <= len(s.data):
			start := uintptr(raw_data(s.data));
			ptr := start + uintptr(s.curr_offset);
			ptr = align_forward_uintptr(ptr, uintptr(alignment));
			zero(rawptr(ptr), size);

			s.prev_allocation = rawptr(ptr);
			offset := int(ptr - start);
			s.curr_offset = offset + size;
			return byte_slice(rawptr(ptr), size), nil;

		case size <= len(s.data):
			start := uintptr(raw_data(s.data));
			ptr := align_forward_uintptr(start, uintptr(alignment));
			zero(rawptr(ptr), size);

			s.prev_allocation = rawptr(ptr);
			offset := int(ptr - start);
			s.curr_offset = offset + size;
			return byte_slice(rawptr(ptr), size), nil;
		}

		a := s.backup_allocator;
		if a.procedure == nil {
			a = context.allocator;
			s.backup_allocator = a;
		}

		ptr, err := alloc_bytes(size, alignment, a, loc);
		if err != nil {
			return ptr, err;
		}
		if s.leaked_allocations == nil {
			s.leaked_allocations = make([dynamic][]byte, a);
		}
		append(&s.leaked_allocations, ptr);

		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc);
			}
		}

		return ptr, err;

	case .Free:
		start := uintptr(raw_data(s.data));
		end := start + uintptr(len(s.data));
		old_ptr := uintptr(old_memory);

		if s.prev_allocation == old_memory {
			s.curr_offset = int(uintptr(s.prev_allocation) - start);
			s.prev_allocation = nil;
			return nil, nil;
		}

		if start <= old_ptr && old_ptr < end {
			// NOTE(bill): Cannot free this pointer but it is valid
			return nil, nil;
		}

		if len(s.leaked_allocations) != 0 {
			for data, i in s.leaked_allocations {
				ptr := raw_data(data);
				if ptr == old_memory {
					free_bytes(data, s.backup_allocator);
					ordered_remove(&s.leaked_allocations, i);
					return nil, nil;
				}
			}
		}
		return nil, .Invalid_Pointer;
		// panic("invalid pointer passed to default_temp_allocator");

	case .Free_All:
		s.curr_offset = 0;
		s.prev_allocation = nil;
		for ptr in s.leaked_allocations {
			free_bytes(ptr, s.backup_allocator);
		}
		clear(&s.leaked_allocations);

	case .Resize:
		begin := uintptr(raw_data(s.data));
		end := begin + uintptr(len(s.data));
		old_ptr := uintptr(old_memory);
		if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
			s.curr_offset = int(old_ptr-begin)+size;
			return byte_slice(old_memory, size), nil;
		}
		data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc);
		if err != nil {
			return data, err;
		}
		runtime.copy(data, byte_slice(old_memory, old_size));
		_, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc);
		return data, err;

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory);
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features};
		}
		return nil, nil;

	case .Query_Info:
		return nil, nil;
	}

	return nil, nil;
}

scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
	return Allocator{
		procedure = scratch_allocator_proc,
		data = allocator,
	};
}
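
// Example (illustrative sketch, not part of the original file): a scratch
// allocator for short-lived allocations, backed by context.allocator both for
// its own buffer and for any overflow ("leaked") allocations, all of which are
// reclaimed by .Free_All.
//
//	scratch: Scratch_Allocator;
//	scratch_allocator_init(&scratch, 1<<20);
//	defer scratch_allocator_destroy(&scratch);
//	a := scratch_allocator(&scratch);
//
//	line, _ := alloc_bytes(256, 16, a); // served from the scratch buffer
//	free_all(a);                        // resets the buffer and frees any overflow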

Stack_Allocation_Header :: struct {
	prev_offset: int,
	padding:     int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
	data:        []byte,
	prev_offset: int,
	curr_offset: int,
	peak_used:   int,
}

init_stack :: proc(s: ^Stack, data: []byte) {
	s.data = data;
	s.prev_offset = 0;
	s.curr_offset = 0;
	s.peak_used = 0;
}

stack_allocator :: proc(stack: ^Stack) -> Allocator {
	return Allocator{
		procedure = stack_allocator_proc,
		data = stack,
	};
}
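
// Example (illustrative sketch, not part of the original file): the stack
// allocator expects frees in reverse order of allocation, as the comment above
// describes.
//
//	buffer: [2048]byte;
//	stack: Stack;
//	init_stack(&stack, buffer[:]);
//	a := stack_allocator(&stack);
//
//	x, _ := alloc_bytes(128, 16, a);
//	y, _ := alloc_bytes(256, 16, a);
//	free_bytes(y, a); // most recent allocation first
//	free_bytes(x, a);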

stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Stack)allocator_data;

	if s.data == nil {
		return nil, .Invalid_Argument;
	}

	raw_alloc :: proc(s: ^Stack, size, alignment: int) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset);
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header));
		if s.curr_offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory;
		}
		s.prev_offset = s.curr_offset;
		s.curr_offset += padding;

		next_addr := curr_addr + uintptr(padding);
		header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header));
		header.padding = padding;
		header.prev_offset = s.prev_offset;

		s.curr_offset += size;
		s.peak_used = max(s.peak_used, s.curr_offset);

		zero(rawptr(next_addr), size);
		return byte_slice(rawptr(next_addr), size), nil;
	}

	switch mode {
	case .Alloc:
		return raw_alloc(s, size, alignment);

	case .Free:
		if old_memory == nil {
			return nil, nil;
		}
		start := uintptr(raw_data(s.data));
		end := start + uintptr(len(s.data));
		curr_addr := uintptr(old_memory);

		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (free)");
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil;
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header));
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)));

		if old_offset != header.prev_offset {
			// panic("Out of order stack allocator free");
			return nil, .Invalid_Pointer;
		}

		s.curr_offset = old_offset;
		s.prev_offset = header.prev_offset;

	case .Free_All:
		s.prev_offset = 0;
		s.curr_offset = 0;

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, alignment);
		}
		if size == 0 {
			return nil, nil;
		}

		start := uintptr(raw_data(s.data));
		end := start + uintptr(len(s.data));
		curr_addr := uintptr(old_memory);
		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (resize)");
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil;
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil;
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header));
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)));

		if old_offset != header.prev_offset {
			data, err := raw_alloc(s, size, alignment);
			if err == nil {
				runtime.copy(data, byte_slice(old_memory, old_size));
			}
			return data, err;
		}

		old_memory_size := uintptr(s.curr_offset) - (curr_addr - start);
		assert(old_memory_size == uintptr(old_size));

		diff := size - old_size;
		s.curr_offset += diff; // works for smaller sizes too
		if diff > 0 {
			zero(rawptr(curr_addr + uintptr(diff)), diff);
		}

		return byte_slice(old_memory, size), nil;

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory);
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features};
		}
		return nil, nil;

	case .Query_Info:
		return nil, nil;
	}

	return nil, nil;
}

Small_Stack_Allocation_Header :: struct {
	padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order
Small_Stack :: struct {
	data:      []byte,
	offset:    int,
	peak_used: int,
}

init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data;
	s.offset = 0;
	s.peak_used = 0;
}

small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
	return Allocator{
		procedure = small_stack_allocator_proc,
		data = stack,
	};
}
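
// Example (illustrative sketch, not part of the original file): Small_Stack
// trades the strict LIFO check for a one-byte header; freeing an allocation
// rewinds the offset to that allocation's start, implicitly releasing anything
// allocated after it.
//
//	buffer: [1024]byte;
//	ss: Small_Stack;
//	init_small_stack(&ss, buffer[:]);
//	a := small_stack_allocator(&ss);
//
//	x, _ := alloc_bytes(64, 8, a);
//	y, _ := alloc_bytes(64, 8, a);
//	free_bytes(x, a); // rewinds to x's offset, so y is released as well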

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                   size, alignment: int,
                                   old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Small_Stack)allocator_data;

	if s.data == nil {
		return nil, .Invalid_Argument;
	}

	align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2);

	raw_alloc :: proc(s: ^Small_Stack, size, alignment: int) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset);
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header));
		if s.offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory;
		}
		s.offset += padding;

		next_addr := curr_addr + uintptr(padding);
		header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header));
		header.padding = auto_cast padding;

		s.offset += size;
		s.peak_used = max(s.peak_used, s.offset);

		zero(rawptr(next_addr), size);
		return byte_slice(rawptr(next_addr), size), nil;
	}

	switch mode {
	case .Alloc:
		return raw_alloc(s, size, align);

	case .Free:
		if old_memory == nil {
			return nil, nil;
		}
		start := uintptr(raw_data(s.data));
		end := start + uintptr(len(s.data));
		curr_addr := uintptr(old_memory);

		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (free)");
			return nil, .Invalid_Pointer;
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Allow double frees
			return nil, nil;
		}

		header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header));
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)));
		s.offset = old_offset;

	case .Free_All:
		s.offset = 0;

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, align);
		}
		if size == 0 {
			return nil, nil;
		}

		start := uintptr(raw_data(s.data));
		end := start + uintptr(len(s.data));
		curr_addr := uintptr(old_memory);
		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (resize)");
			return nil, .Invalid_Pointer;
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Treat as a double free
			return nil, nil;
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil;
		}

		data, err := raw_alloc(s, size, align);
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size));
		}
		return data, err;

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory);
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features};
		}
		return nil, nil;

	case .Query_Info:
		return nil, nil;
	}

	return nil, nil;
}

Dynamic_Pool :: struct {
	block_size:           int,
	out_band_size:        int,
	alignment:            int,
	unused_blocks:        [dynamic]rawptr,
	used_blocks:          [dynamic]rawptr,
	out_band_allocations: [dynamic]rawptr,
	current_block:        rawptr,
	current_pos:          rawptr,
	bytes_left:           int,
	block_allocator:      Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: 65536;
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554;

dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	pool := (^Dynamic_Pool)(allocator_data);

	switch mode {
	case .Alloc:
		return dynamic_pool_alloc_bytes(pool, size);

	case .Free:
		return nil, nil;

	case .Free_All:
		dynamic_pool_free_all(pool);
		return nil, nil;

	case .Resize:
		if old_size >= size {
			return byte_slice(old_memory, size), nil;
		}
		data, err := dynamic_pool_alloc_bytes(pool, size);
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size));
		}
		return data, err;

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory);
		if set != nil {
			set^ = {.Alloc, .Free_All, .Resize, .Query_Features, .Query_Info};
		}
		return nil, nil;

	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory);
		if info != nil && info.pointer != nil {
			info.size = pool.block_size;
			info.alignment = pool.alignment;
			return byte_slice(info, size_of(info^)), nil;
		}
		return nil, nil;
	}

	return nil, nil;
}

dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
	return Allocator{
		procedure = dynamic_pool_allocator_proc,
		data = pool,
	};
}

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
                          block_allocator := context.allocator,
                          array_allocator := context.allocator,
                          block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
                          out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
                          alignment := 8) {
	pool.block_size = block_size;
	pool.out_band_size = out_band_size;
	pool.alignment = alignment;
	pool.block_allocator = block_allocator;
	pool.out_band_allocations.allocator = array_allocator;
	pool.unused_blocks.allocator = array_allocator;
	pool.used_blocks.allocator = array_allocator;
}
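
// Example (illustrative sketch, not part of the original file): a pool that
// grows in fixed-size blocks; individual frees are no-ops and memory is
// reclaimed in bulk with dynamic_pool_free_all.
//
//	pool: Dynamic_Pool;
//	dynamic_pool_init(&pool);
//	defer dynamic_pool_destroy(&pool);
//
//	ptr := dynamic_pool_alloc(&pool, 100);           // carved out of the current block
//	data, err := dynamic_pool_alloc_bytes(&pool, 4096);
//	dynamic_pool_free_all(&pool);                    // returns every block to block_allocator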

dynamic_pool_destroy :: proc(using pool: ^Dynamic_Pool) {
	dynamic_pool_free_all(pool);
	delete(unused_blocks);
	delete(used_blocks);
	zero(pool, size_of(pool^));
}

dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> rawptr {
	data, err := dynamic_pool_alloc_bytes(pool, bytes);
	assert(err == nil);
	return raw_data(data);
}

dynamic_pool_alloc_bytes :: proc(using pool: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
	cycle_new_block :: proc(using pool: ^Dynamic_Pool) -> (err: Allocator_Error) {
		if block_allocator.procedure == nil {
			panic("You must call pool_init on a Pool before using it");
		}

		if current_block != nil {
			append(&used_blocks, current_block);
		}

		new_block: rawptr;
		if len(unused_blocks) > 0 {
			new_block = pop(&unused_blocks);
		} else {
			data: []byte;
			data, err = block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
			                                      block_size, alignment,
			                                      nil, 0);
			new_block = raw_data(data);
		}

		bytes_left = block_size;
		current_pos = new_block;
		current_block = new_block;
		return;
	}

	n := bytes;
	extra := alignment - (n % alignment);
	n += extra;

	if n >= out_band_size {
		assert(block_allocator.procedure != nil);
		memory, err := block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
		                                         block_size, alignment,
		                                         nil, 0);
		if memory != nil {
			append(&out_band_allocations, raw_data(memory));
		}
		return memory, err;
	}

	if bytes_left < n {
		err := cycle_new_block(pool);
		if err != nil {
			return nil, err;
		}
		if current_block == nil {
			return nil, .Out_Of_Memory;
		}
	}

	memory := current_pos;
	current_pos = ptr_offset((^byte)(current_pos), n);
	bytes_left -= n;
	return byte_slice(memory, bytes), nil;
}

dynamic_pool_reset :: proc(using pool: ^Dynamic_Pool) {
	if current_block != nil {
		append(&unused_blocks, current_block);
		current_block = nil;
	}

	for block in used_blocks {
		append(&unused_blocks, block);
	}
	clear(&used_blocks);

	for a in out_band_allocations {
		free(a, block_allocator);
	}
	clear(&out_band_allocations);
}

dynamic_pool_free_all :: proc(using pool: ^Dynamic_Pool) {
	dynamic_pool_reset(pool);

	for block in unused_blocks {
		free(block, block_allocator);
	}
	clear(&unused_blocks);
}

panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		if size > 0 {
			panic("mem: panic allocator, .Alloc called");
		}
	case .Resize:
		if size > 0 {
			panic("mem: panic allocator, .Resize called");
		}
	case .Free:
		if old_memory != nil {
			panic("mem: panic allocator, .Free called");
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called");
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory);
		if set != nil {
			set^ = {.Query_Features};
		}
		return nil, nil;
	case .Query_Info:
		return nil, nil;
	}

	return nil, nil;
}

panic_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = panic_allocator_proc,
		data = nil,
	};
}

Tracking_Allocator_Entry :: struct {
	memory:    rawptr,
	size:      int,
	alignment: int,
	err:       Allocator_Error,
	location:  runtime.Source_Code_Location,
}

Tracking_Allocator_Bad_Free_Entry :: struct {
	memory:   rawptr,
	location: runtime.Source_Code_Location,
}

Tracking_Allocator :: struct {
	backing:           Allocator,
	allocation_map:    map[rawptr]Tracking_Allocator_Entry,
	bad_free_array:    [dynamic]Tracking_Allocator_Bad_Free_Entry,
	clear_on_free_all: bool,
}

tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
	t.backing = backing_allocator;
	t.allocation_map.allocator = internals_allocator;
	t.bad_free_array.allocator = internals_allocator;
}

tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
	delete(t.allocation_map);
	delete(t.bad_free_array);
}

tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
	return Allocator{
		data = data,
		procedure = tracking_allocator_proc,
	};
}
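
// Example (illustrative sketch, not part of the original file; assumes "core:fmt"
// is imported as fmt): wrap the default allocator to record live allocations and
// frees of untracked pointers, then report leaks before exit.
//
//	track: Tracking_Allocator;
//	tracking_allocator_init(&track, context.allocator);
//	defer tracking_allocator_destroy(&track);
//	context.allocator = tracking_allocator(&track);
//
//	// ... run the program ...
//
//	for _, entry in track.allocation_map {
//		fmt.printf("%v leaked %v bytes\n", entry.location, entry.size);
//	}
//	for bad in track.bad_free_array {
//		fmt.printf("%v freed an untracked pointer %v\n", bad.location, bad.memory);
//	}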

tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                size, alignment: int,
                                old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	data := (^Tracking_Allocator)(allocator_data);

	if mode == .Query_Info {
		info := (^Allocator_Query_Info)(old_memory);
		if info != nil && info.pointer != nil {
			if entry, ok := data.allocation_map[info.pointer]; ok {
				info.size = entry.size;
				info.alignment = entry.alignment;
			}
			info.pointer = nil;
		}
		return nil, nil;
	}

	result: []byte;
	err: Allocator_Error;

	if mode == .Free && old_memory not_in data.allocation_map {
		append(&data.bad_free_array, Tracking_Allocator_Bad_Free_Entry{
			memory = old_memory,
			location = loc,
		});
	} else {
		result, err = data.backing.procedure(data.backing.data, mode, size, alignment, old_memory, old_size, loc);
		if err != nil {
			return result, err;
		}
	}
	result_ptr := raw_data(result);

	if data.allocation_map.allocator.procedure == nil {
		data.allocation_map.allocator = context.allocator;
	}

	switch mode {
	case .Alloc:
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			alignment = alignment,
			err = err,
			location = loc,
		};
	case .Free:
		delete_key(&data.allocation_map, old_memory);
	case .Resize:
		if old_memory != result_ptr {
			delete_key(&data.allocation_map, old_memory);
		}
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			alignment = alignment,
			err = err,
			location = loc,
		};
	case .Free_All:
		if data.clear_on_free_all {
			clear_map(&data.allocation_map);
		}
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory);
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features, .Query_Info};
		}
		return nil, nil;
	case .Query_Info:
		return nil, nil;
	}

	return result, err;
}

// Small_Allocator primarily allocates memory from its local buffer of size BUFFER_SIZE.
// If that buffer's memory is exhausted, it will use the backing allocator (a scratch allocator is recommended).
// Memory allocated with Small_Allocator cannot be freed individually using 'free' and must be freed using 'free_all'.
Small_Allocator :: struct(BUFFER_SIZE: int)
	where
		BUFFER_SIZE >= 2*size_of(uintptr),
		BUFFER_SIZE & (BUFFER_SIZE-1) == 0 {
	buffer:     [BUFFER_SIZE]byte,
	backing:    Allocator,
	start:      uintptr,
	curr:       uintptr,
	end:        uintptr,
	chunk_size: int,
}

small_allocator :: proc(s: ^$S/Small_Allocator, backing := context.allocator) -> (a: Allocator) {
	if s.backing.procedure == nil {
		s.backing = backing;
	}

	a.data = s;
	a.procedure = proc(allocator_data: rawptr, mode: Allocator_Mode,
	                   size, alignment: int,
	                   old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
		s := (^S)(allocator_data);
		if s.chunk_size <= 0 {
			s.chunk_size = 4*1024;
		}
		if s.start == 0 {
			// First use: the buffer's leading word is the head of the chain of backing chunks
			s.start = uintptr(&s.buffer[0]);
			s.curr = s.start;
			s.end = s.start + uintptr(S.BUFFER_SIZE);
			(^rawptr)(s.start)^ = nil;
			s.curr += size_of(rawptr);
		}

		switch mode {
		case .Alloc:
			s.curr = align_forward_uintptr(s.curr, uintptr(alignment));
			if size > int(s.end - s.curr) {
				// Out of local space: grab a new chunk from the backing allocator and
				// link it into the chain so .Free_All can walk and free it later
				to_allocate := size_of(rawptr) + size + alignment;
				if to_allocate < s.chunk_size {
					to_allocate = s.chunk_size;
				}
				s.chunk_size *= 2;

				p := alloc(to_allocate, 16, s.backing, loc);
				(^rawptr)(s.start)^ = p;

				s.start = uintptr(p);
				s.curr = s.start;
				s.end = s.start + uintptr(to_allocate);
				(^rawptr)(s.start)^ = nil;
				s.curr += size_of(rawptr);
				s.curr = align_forward_uintptr(s.curr, uintptr(alignment));
			}
			p := rawptr(s.curr);
			s.curr += uintptr(size);
			zero(p, size);
			return byte_slice(p, size), nil;

		case .Free:
			// NOP
			return nil, nil;

		case .Resize:
			// No need to duplicate the allocation logic here
			return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, small_allocator(s, s.backing), loc);

		case .Free_All:
			// Walk the chain of backing chunks and free each one
			p := (^rawptr)(&s.buffer[0])^;
			for p != nil {
				next := (^rawptr)(p)^;
				free(p, s.backing, loc);
				p = next;
			}
			// Reset to default
			s.start = uintptr(&s.buffer[0]);
			s.curr = s.start;
			s.end = s.start + uintptr(S.BUFFER_SIZE);
			(^rawptr)(s.start)^ = nil;
			s.curr += size_of(rawptr);

		case .Query_Features:
			return nil, nil;

		case .Query_Info:
			return nil, nil;
		}
		return nil, nil;
	};
	return a;
}
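
// Example (illustrative sketch, not part of the original file): a Small_Allocator
// with a 4096-byte local buffer that spills into a scratch allocator once the
// buffer is exhausted; individual frees are no-ops, so everything is released
// with free_all.
//
//	scratch: Scratch_Allocator;
//	scratch_allocator_init(&scratch, 1<<20);
//	defer scratch_allocator_destroy(&scratch);
//
//	small: Small_Allocator(4096);
//	a := small_allocator(&small, scratch_allocator(&scratch));
//
//	buf, _ := alloc_bytes(512, 16, a); // served from the local buffer
//	free_all(a);                       // frees any chained backing chunks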