package mem

import "core:intrinsics"
import "core:runtime"

nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                           size, alignment: int,
                           old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
    return nil, nil
}

nil_allocator :: proc() -> Allocator {
    return Allocator{
        procedure = nil_allocator_proc,
        data = nil,
    }
}
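
// Illustrative sketch (not part of the original file): the nil allocator
// accepts every request and does nothing, which suits APIs that require an
// Allocator value when no allocation should ever happen.
//
//     bytes, err := alloc_bytes(16, 16, nil_allocator())
//     assert(bytes == nil && err == nil)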

// Custom allocators

Arena :: struct {
    data: []byte,
    offset: int,
    peak_used: int,
    temp_count: int,
}

Arena_Temp_Memory :: struct {
    arena: ^Arena,
    prev_offset: int,
}

init_arena :: proc(a: ^Arena, data: []byte) {
    a.data = data
    a.offset = 0
    a.peak_used = 0
    a.temp_count = 0
}

arena_allocator :: proc(arena: ^Arena) -> Allocator {
    return Allocator{
        procedure = arena_allocator_proc,
        data = arena,
    }
}

arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
    arena := cast(^Arena)allocator_data

    switch mode {
    case .Alloc:
        #no_bounds_check end := &arena.data[arena.offset]
        ptr := align_forward(end, uintptr(alignment))
        total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))
        if arena.offset + total_size > len(arena.data) {
            return nil, .Out_Of_Memory
        }
        arena.offset += total_size
        arena.peak_used = max(arena.peak_used, arena.offset)
        zero(ptr, size)
        return byte_slice(ptr, size), nil

    case .Free:
        return nil, .Mode_Not_Implemented

    case .Free_All:
        arena.offset = 0

    case .Resize:
        return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory)
        if set != nil {
            set^ = {.Alloc, .Free_All, .Resize, .Query_Features}
        }
        return nil, nil

    case .Query_Info:
        return nil, .Mode_Not_Implemented
    }

    return nil, nil
}

begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
    tmp: Arena_Temp_Memory
    tmp.arena = a
    tmp.prev_offset = a.offset
    a.temp_count += 1
    return tmp
}

end_arena_temp_memory :: proc(using tmp: Arena_Temp_Memory) {
    assert(arena.offset >= prev_offset)
    assert(arena.temp_count > 0)
    arena.offset = prev_offset
    arena.temp_count -= 1
}
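
// Illustrative sketch (not part of the original file): a typical Arena
// lifetime. The temp-memory pair rolls the offset back, discarding anything
// allocated in between.
//
//     buf: [1024]byte
//     arena: Arena
//     init_arena(&arena, buf[:])
//
//     tmp := begin_arena_temp_memory(&arena)
//     data, _ := alloc_bytes(128, 16, arena_allocator(&arena))
//     // ... use data ...
//     end_arena_temp_memory(tmp) // offset restored; `data` must not be used again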

Scratch_Allocator :: struct {
    data: []byte,
    curr_offset: int,
    prev_allocation: rawptr,
    backup_allocator: Allocator,
    leaked_allocations: [dynamic][]byte,
}

scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) -> Allocator_Error {
    s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
    s.curr_offset = 0
    s.prev_allocation = nil
    s.backup_allocator = backup_allocator
    s.leaked_allocations.allocator = backup_allocator
    return nil
}

scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
    if s == nil {
        return
    }
    for ptr in s.leaked_allocations {
        free_bytes(ptr, s.backup_allocator)
    }
    delete(s.leaked_allocations)
    delete(s.data, s.backup_allocator)
    s^ = {}
}

scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                               size, alignment: int,
                               old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
    s := (^Scratch_Allocator)(allocator_data)

    if s.data == nil {
        DEFAULT_BACKING_SIZE :: 1<<22
        if !(context.allocator.procedure != scratch_allocator_proc &&
             context.allocator.data != allocator_data) {
            panic("cyclic initialization of the scratch allocator with itself")
        }
        scratch_allocator_init(s, DEFAULT_BACKING_SIZE)
    }

    size := size

    switch mode {
    case .Alloc:
        size = align_forward_int(size, alignment)

        switch {
        case s.curr_offset+size <= len(s.data):
            // Fits in the remaining space: bump-allocate from the current offset
            start := uintptr(raw_data(s.data))
            ptr := start + uintptr(s.curr_offset)
            ptr = align_forward_uintptr(ptr, uintptr(alignment))
            zero(rawptr(ptr), size)

            s.prev_allocation = rawptr(ptr)
            offset := int(ptr - start)
            s.curr_offset = offset + size
            return byte_slice(rawptr(ptr), size), nil

        case size <= len(s.data):
            // No room at the current offset, but the request fits in the
            // buffer: wrap around and start again from the beginning
            start := uintptr(raw_data(s.data))
            ptr := align_forward_uintptr(start, uintptr(alignment))
            zero(rawptr(ptr), size)

            s.prev_allocation = rawptr(ptr)
            offset := int(ptr - start)
            s.curr_offset = offset + size
            return byte_slice(rawptr(ptr), size), nil
        }

        // Too big for the backing buffer: fall back to the backup allocator
        // and remember the allocation so it can be freed later
        a := s.backup_allocator
        if a.procedure == nil {
            a = context.allocator
            s.backup_allocator = a
        }

        ptr, err := alloc_bytes(size, alignment, a, loc)
        if err != nil {
            return ptr, err
        }
        if s.leaked_allocations == nil {
            s.leaked_allocations, err = make([dynamic][]byte, a)
        }
        append(&s.leaked_allocations, ptr)

        if logger := context.logger; logger.lowest_level <= .Warning {
            if logger.procedure != nil {
                logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc)
            }
        }

        return ptr, err

    case .Free:
        start := uintptr(raw_data(s.data))
        end := start + uintptr(len(s.data))
        old_ptr := uintptr(old_memory)

        if s.prev_allocation == old_memory {
            s.curr_offset = int(uintptr(s.prev_allocation) - start)
            s.prev_allocation = nil
            return nil, nil
        }

        if start <= old_ptr && old_ptr < end {
            // NOTE(bill): Cannot free this pointer but it is valid
            return nil, nil
        }

        if len(s.leaked_allocations) != 0 {
            for data, i in s.leaked_allocations {
                ptr := raw_data(data)
                if ptr == old_memory {
                    free_bytes(data, s.backup_allocator)
                    ordered_remove(&s.leaked_allocations, i)
                    return nil, nil
                }
            }
        }
        return nil, .Invalid_Pointer
        // panic("invalid pointer passed to default_temp_allocator")

    case .Free_All:
        s.curr_offset = 0
        s.prev_allocation = nil
        for ptr in s.leaked_allocations {
            free_bytes(ptr, s.backup_allocator)
        }
        clear(&s.leaked_allocations)

    case .Resize:
        begin := uintptr(raw_data(s.data))
        end := begin + uintptr(len(s.data))
        old_ptr := uintptr(old_memory)
        if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
            s.curr_offset = int(old_ptr-begin)+size
            return byte_slice(old_memory, size), nil
        }
        data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc)
        if err != nil {
            return data, err
        }
        runtime.copy(data, byte_slice(old_memory, old_size))
        _, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc)
        return data, err

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory)
        if set != nil {
            set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
        }
        return nil, nil

    case .Query_Info:
        return nil, .Mode_Not_Implemented
    }

    return nil, nil
}

scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
    return Allocator{
        procedure = scratch_allocator_proc,
        data = allocator,
    }
}
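
// Illustrative sketch (not part of the original file): a Scratch_Allocator
// used as a ring-style temporary allocator. Oversized requests silently fall
// back to the backup allocator and are tracked in `leaked_allocations`.
//
//     scratch: Scratch_Allocator
//     _ = scratch_allocator_init(&scratch, 4*Kilobyte)
//     defer scratch_allocator_destroy(&scratch)
//
//     context.temp_allocator = scratch_allocator(&scratch)
//     s := make([]byte, 256, context.temp_allocator)
//     // ... use s ...
//     free_all(context.temp_allocator) // reclaim everything at a frame boundary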

Stack_Allocation_Header :: struct {
    prev_offset: int,
    padding: int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
    data: []byte,
    prev_offset: int,
    curr_offset: int,
    peak_used: int,
}

init_stack :: proc(s: ^Stack, data: []byte) {
    s.data = data
    s.prev_offset = 0
    s.curr_offset = 0
    s.peak_used = 0
}

stack_allocator :: proc(stack: ^Stack) -> Allocator {
    return Allocator{
        procedure = stack_allocator_proc,
        data = stack,
    }
}

stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
    s := cast(^Stack)allocator_data

    if s.data == nil {
        return nil, .Invalid_Argument
    }

    raw_alloc :: proc(s: ^Stack, size, alignment: int) -> ([]byte, Allocator_Error) {
        curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
        padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header))
        if s.curr_offset + padding + size > len(s.data) {
            return nil, .Out_Of_Memory
        }
        s.prev_offset = s.curr_offset
        s.curr_offset += padding

        next_addr := curr_addr + uintptr(padding)
        header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
        header.padding = padding
        header.prev_offset = s.prev_offset

        s.curr_offset += size
        s.peak_used = max(s.peak_used, s.curr_offset)

        zero(rawptr(next_addr), size)
        return byte_slice(rawptr(next_addr), size), nil
    }

    switch mode {
    case .Alloc:
        return raw_alloc(s, size, alignment)

    case .Free:
        if old_memory == nil {
            return nil, nil
        }
        start := uintptr(raw_data(s.data))
        end := start + uintptr(len(s.data))
        curr_addr := uintptr(old_memory)

        if !(start <= curr_addr && curr_addr < end) {
            panic("Out of bounds memory address passed to stack allocator (free)")
        }

        if curr_addr >= start+uintptr(s.curr_offset) {
            // NOTE(bill): Allow double frees
            return nil, nil
        }

        header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
        old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

        if old_offset != header.prev_offset {
            // panic("Out of order stack allocator free")
            return nil, .Invalid_Pointer
        }

        s.curr_offset = old_offset
        s.prev_offset = header.prev_offset

    case .Free_All:
        s.prev_offset = 0
        s.curr_offset = 0

    case .Resize:
        if old_memory == nil {
            return raw_alloc(s, size, alignment)
        }
        if size == 0 {
            return nil, nil
        }

        start := uintptr(raw_data(s.data))
        end := start + uintptr(len(s.data))
        curr_addr := uintptr(old_memory)
        if !(start <= curr_addr && curr_addr < end) {
            panic("Out of bounds memory address passed to stack allocator (resize)")
        }

        if curr_addr >= start+uintptr(s.curr_offset) {
            // NOTE(bill): Allow double frees
            return nil, nil
        }

        if old_size == size {
            return byte_slice(old_memory, size), nil
        }

        header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
        old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

        if old_offset != header.prev_offset {
            // Not the most recent allocation: allocate fresh space and copy
            data, err := raw_alloc(s, size, alignment)
            if err == nil {
                runtime.copy(data, byte_slice(old_memory, old_size))
            }
            return data, err
        }

        old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
        assert(old_memory_size == uintptr(old_size))

        diff := size - old_size
        s.curr_offset += diff // works for smaller sizes too
        if diff > 0 {
            // Zero only the newly grown region, which begins at the old end
            // of the allocation (the original zeroed from `curr_addr + diff`,
            // which is wrong whenever diff != old_size)
            zero(rawptr(curr_addr + uintptr(old_size)), diff)
        }
        return byte_slice(old_memory, size), nil

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory)
        if set != nil {
            set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
        }
        return nil, nil

    case .Query_Info:
        return nil, .Mode_Not_Implemented
    }

    return nil, nil
}
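
// Illustrative sketch (not part of the original file): Stack enforces LIFO
// freeing; freeing out of order yields .Invalid_Pointer.
//
//     buf: [512]byte
//     stack: Stack
//     init_stack(&stack, buf[:])
//     a := stack_allocator(&stack)
//
//     x, _ := alloc_bytes(64, 16, a)
//     y, _ := alloc_bytes(64, 16, a)
//     free_bytes(y, a) // OK: most recent allocation
//     free_bytes(x, a) // OK: x is now on top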

Small_Stack_Allocation_Header :: struct {
    padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible
// header, at the cost of a non-strict memory freeing order
Small_Stack :: struct {
    data: []byte,
    offset: int,
    peak_used: int,
}

init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
    s.data = data
    s.offset = 0
    s.peak_used = 0
}

small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
    return Allocator{
        procedure = small_stack_allocator_proc,
        data = stack,
    }
}

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                   size, alignment: int,
                                   old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
    s := cast(^Small_Stack)allocator_data

    if s.data == nil {
        return nil, .Invalid_Argument
    }

    // Clamp the alignment so the computed padding always fits in the
    // one-byte header
    align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)

    raw_alloc :: proc(s: ^Small_Stack, size, alignment: int) -> ([]byte, Allocator_Error) {
        curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
        padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
        if s.offset + padding + size > len(s.data) {
            return nil, .Out_Of_Memory
        }
        s.offset += padding

        next_addr := curr_addr + uintptr(padding)
        header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
        header.padding = auto_cast padding

        s.offset += size
        s.peak_used = max(s.peak_used, s.offset)

        zero(rawptr(next_addr), size)
        return byte_slice(rawptr(next_addr), size), nil
    }

    switch mode {
    case .Alloc:
        return raw_alloc(s, size, align)

    case .Free:
        if old_memory == nil {
            return nil, nil
        }
        start := uintptr(raw_data(s.data))
        end := start + uintptr(len(s.data))
        curr_addr := uintptr(old_memory)

        if !(start <= curr_addr && curr_addr < end) {
            // panic("Out of bounds memory address passed to stack allocator (free)")
            return nil, .Invalid_Pointer
        }

        if curr_addr >= start+uintptr(s.offset) {
            // NOTE(bill): Allow double frees
            return nil, nil
        }

        header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
        old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
        s.offset = old_offset

    case .Free_All:
        s.offset = 0

    case .Resize:
        if old_memory == nil {
            return raw_alloc(s, size, align)
        }
        if size == 0 {
            return nil, nil
        }

        start := uintptr(raw_data(s.data))
        end := start + uintptr(len(s.data))
        curr_addr := uintptr(old_memory)
        if !(start <= curr_addr && curr_addr < end) {
            // panic("Out of bounds memory address passed to stack allocator (resize)")
            return nil, .Invalid_Pointer
        }

        if curr_addr >= start+uintptr(s.offset) {
            // NOTE(bill): Treat as a double free
            return nil, nil
        }

        if old_size == size {
            return byte_slice(old_memory, size), nil
        }

        data, err := raw_alloc(s, size, align)
        if err == nil {
            runtime.copy(data, byte_slice(old_memory, old_size))
        }
        return data, err

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory)
        if set != nil {
            set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
        }
        return nil, nil

    case .Query_Info:
        return nil, .Mode_Not_Implemented
    }

    return nil, nil
}
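
// Illustrative sketch (not part of the original file): Small_Stack stores
// only a one-byte padding header, so freeing any live pointer rewinds the
// offset to it, implicitly freeing everything allocated after it.
//
//     buf: [512]byte
//     ss: Small_Stack
//     init_small_stack(&ss, buf[:])
//     a := small_stack_allocator(&ss)
//
//     x, _ := alloc_bytes(64, 16, a)
//     y, _ := alloc_bytes(64, 16, a)
//     free_bytes(x, a) // rewinds past x, so y is implicitly freed too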

Dynamic_Pool :: struct {
    block_size: int,
    out_band_size: int,
    alignment: int,

    unused_blocks: [dynamic]rawptr,
    used_blocks: [dynamic]rawptr,
    out_band_allocations: [dynamic]rawptr,

    current_block: rawptr,
    current_pos: rawptr,
    bytes_left: int,

    block_allocator: Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: 65536
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554

dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
    pool := (^Dynamic_Pool)(allocator_data)

    switch mode {
    case .Alloc:
        return dynamic_pool_alloc_bytes(pool, size)

    case .Free:
        return nil, .Mode_Not_Implemented

    case .Free_All:
        dynamic_pool_free_all(pool)
        return nil, nil

    case .Resize:
        if old_size >= size {
            return byte_slice(old_memory, size), nil
        }
        data, err := dynamic_pool_alloc_bytes(pool, size)
        if err == nil {
            runtime.copy(data, byte_slice(old_memory, old_size))
        }
        return data, err

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory)
        if set != nil {
            set^ = {.Alloc, .Free_All, .Resize, .Query_Features, .Query_Info}
        }
        return nil, nil

    case .Query_Info:
        info := (^Allocator_Query_Info)(old_memory)
        if info != nil && info.pointer != nil {
            info.size = pool.block_size
            info.alignment = pool.alignment
            return byte_slice(info, size_of(info^)), nil
        }
        return nil, nil
    }

    return nil, nil
}

dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
    return Allocator{
        procedure = dynamic_pool_allocator_proc,
        data = pool,
    }
}

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
                          block_allocator := context.allocator,
                          array_allocator := context.allocator,
                          block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
                          out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
                          alignment := 8) {
    pool.block_size = block_size
    pool.out_band_size = out_band_size
    pool.alignment = alignment
    pool.block_allocator = block_allocator
    pool.out_band_allocations.allocator = array_allocator
    pool.unused_blocks.allocator = array_allocator
    pool.used_blocks.allocator = array_allocator
}

dynamic_pool_destroy :: proc(using pool: ^Dynamic_Pool) {
    dynamic_pool_free_all(pool)
    delete(unused_blocks)
    delete(used_blocks)
    delete(out_band_allocations)
    zero(pool, size_of(pool^))
}

dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> rawptr {
    data, err := dynamic_pool_alloc_bytes(pool, bytes)
    assert(err == nil)
    return raw_data(data)
}

dynamic_pool_alloc_bytes :: proc(using pool: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
    cycle_new_block :: proc(using pool: ^Dynamic_Pool) -> (err: Allocator_Error) {
        if block_allocator.procedure == nil {
            panic("You must call pool_init on a Pool before using it")
        }

        if current_block != nil {
            append(&used_blocks, current_block)
        }

        new_block: rawptr
        if len(unused_blocks) > 0 {
            new_block = pop(&unused_blocks)
        } else {
            data: []byte
            data, err = block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
                                                  block_size, alignment,
                                                  nil, 0)
            new_block = raw_data(data)
        }

        bytes_left = block_size
        current_pos = new_block
        current_block = new_block
        return
    }

    n := bytes
    extra := alignment - (n % alignment)
    n += extra

    if n >= out_band_size {
        assert(block_allocator.procedure != nil)
        // An out-of-band allocation must be at least `n` bytes; the original
        // requested only `block_size`, under-allocating whenever n > block_size
        memory, err := block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
                                                 n, alignment,
                                                 nil, 0)
        if memory != nil {
            append(&out_band_allocations, raw_data(memory))
        }
        return memory, err
    }

    if bytes_left < n {
        err := cycle_new_block(pool)
        if err != nil {
            return nil, err
        }
        if current_block == nil {
            return nil, .Out_Of_Memory
        }
    }

    memory := current_pos
    current_pos = ptr_offset((^byte)(current_pos), n)
    bytes_left -= n
    return byte_slice(memory, bytes), nil
}

dynamic_pool_reset :: proc(using pool: ^Dynamic_Pool) {
    if current_block != nil {
        append(&unused_blocks, current_block)
        current_block = nil
    }

    for block in used_blocks {
        append(&unused_blocks, block)
    }
    clear(&used_blocks)

    for a in out_band_allocations {
        free(a, block_allocator)
    }
    clear(&out_band_allocations)

    bytes_left = 0 // Make new allocations call `cycle_new_block` again.
}

dynamic_pool_free_all :: proc(using pool: ^Dynamic_Pool) {
    dynamic_pool_reset(pool)
    for block in unused_blocks {
        free(block, block_allocator)
    }
    clear(&unused_blocks)
}
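
// Illustrative sketch (not part of the original file): a Dynamic_Pool serves
// small allocations by bumping through fixed-size blocks and grows by whole
// blocks; individual frees are unsupported, so reclaim with reset/free_all.
//
//     pool: Dynamic_Pool
//     dynamic_pool_init(&pool)
//     defer dynamic_pool_destroy(&pool)
//
//     a := dynamic_pool_allocator(&pool)
//     data, _ := alloc_bytes(128, 8, a)
//     // ... use data ...
//     dynamic_pool_reset(&pool) // recycle the blocks for reuse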

panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
    switch mode {
    case .Alloc:
        if size > 0 {
            panic("mem: panic allocator, .Alloc called")
        }
    case .Resize:
        if size > 0 {
            panic("mem: panic allocator, .Resize called")
        }
    case .Free:
        if old_memory != nil {
            panic("mem: panic allocator, .Free called")
        }
    case .Free_All:
        panic("mem: panic allocator, .Free_All called")
    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory)
        if set != nil {
            set^ = {.Query_Features}
        }
        return nil, nil
    case .Query_Info:
        panic("mem: panic allocator, .Query_Info called")
    }

    return nil, nil
}

panic_allocator :: proc() -> Allocator {
    return Allocator{
        procedure = panic_allocator_proc,
        data = nil,
    }
}
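
// Illustrative sketch (not part of the original file): installing the panic
// allocator makes any hidden allocation fail loudly, which is useful in
// scopes that are supposed to be allocation-free. `process_frame` is a
// hypothetical stand-in for the caller's own code.
//
//     context.allocator = panic_allocator()
//     process_frame() // any allocation inside now panics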

Tracking_Allocator_Entry :: struct {
    memory: rawptr,
    size: int,
    alignment: int,
    err: Allocator_Error,
    location: runtime.Source_Code_Location,
}

Tracking_Allocator_Bad_Free_Entry :: struct {
    memory: rawptr,
    location: runtime.Source_Code_Location,
}

Tracking_Allocator :: struct {
    backing: Allocator,
    allocation_map: map[rawptr]Tracking_Allocator_Entry,
    bad_free_array: [dynamic]Tracking_Allocator_Bad_Free_Entry,
    clear_on_free_all: bool,
}

tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
    t.backing = backing_allocator
    t.allocation_map.allocator = internals_allocator
    t.bad_free_array.allocator = internals_allocator
}

tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
    delete(t.allocation_map)
    delete(t.bad_free_array)
}

tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
    return Allocator{
        data = data,
        procedure = tracking_allocator_proc,
    }
}

tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                size, alignment: int,
                                old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
    data := (^Tracking_Allocator)(allocator_data)

    if mode == .Query_Info {
        info := (^Allocator_Query_Info)(old_memory)
        if info != nil && info.pointer != nil {
            if entry, ok := data.allocation_map[info.pointer]; ok {
                info.size = entry.size
                info.alignment = entry.alignment
            }
            info.pointer = nil
        }
        return nil, nil
    }

    result: []byte
    err: Allocator_Error

    if mode == .Free && old_memory != nil && old_memory not_in data.allocation_map {
        // Record frees of pointers this allocator never handed out
        append(&data.bad_free_array, Tracking_Allocator_Bad_Free_Entry{
            memory = old_memory,
            location = loc,
        })
    } else {
        result, err = data.backing.procedure(data.backing.data, mode, size, alignment, old_memory, old_size, loc)
        if err != nil {
            return result, err
        }
    }

    result_ptr := raw_data(result)

    if data.allocation_map.allocator.procedure == nil {
        data.allocation_map.allocator = context.allocator
    }

    switch mode {
    case .Alloc:
        data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
            memory = result_ptr,
            size = size,
            alignment = alignment,
            err = err,
            location = loc,
        }
    case .Free:
        delete_key(&data.allocation_map, old_memory)
    case .Free_All:
        if data.clear_on_free_all {
            clear_map(&data.allocation_map)
        }
    case .Resize:
        if old_memory != result_ptr {
            delete_key(&data.allocation_map, old_memory)
        }
        data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
            memory = result_ptr,
            size = size,
            alignment = alignment,
            err = err,
            location = loc,
        }

    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory)
        if set != nil {
            set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features, .Query_Info}
        }
        return nil, nil

    case .Query_Info:
        unreachable()
    }

    return result, err
}
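
// Illustrative sketch (not part of the original file): wrap the context
// allocator in a Tracking_Allocator and report leaks and bad frees on the
// way out. Assumes `core:fmt` is imported by the calling code.
//
//     track: Tracking_Allocator
//     tracking_allocator_init(&track, context.allocator)
//     context.allocator = tracking_allocator(&track)
//     defer {
//         for _, entry in track.allocation_map {
//             fmt.printf("%v leaked %v bytes\n", entry.location, entry.size)
//         }
//         for bad_free in track.bad_free_array {
//             fmt.printf("%v bad free at %p\n", bad_free.location, bad_free.memory)
//         }
//         tracking_allocator_destroy(&track)
//     }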