package mem

import "core:intrinsics"
import "core:runtime"

nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                           size, alignment: int,
                           old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	return nil, nil
}

nil_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = nil_allocator_proc,
		data = nil,
	}
}

  15. // Custom allocators
  16. Arena :: struct {
  17. data: []byte,
  18. offset: int,
  19. peak_used: int,
  20. temp_count: int,
  21. }
  22. Arena_Temp_Memory :: struct {
  23. arena: ^Arena,
  24. prev_offset: int,
  25. }
  26. arena_init :: proc(a: ^Arena, data: []byte) {
  27. a.data = data
  28. a.offset = 0
  29. a.peak_used = 0
  30. a.temp_count = 0
  31. }
  32. @(deprecated="prefer 'mem.arena_init'")
  33. init_arena :: proc(a: ^Arena, data: []byte) {
  34. a.data = data
  35. a.offset = 0
  36. a.peak_used = 0
  37. a.temp_count = 0
  38. }
  39. arena_allocator :: proc(arena: ^Arena) -> Allocator {
  40. return Allocator{
  41. procedure = arena_allocator_proc,
  42. data = arena,
  43. }
  44. }
  45. arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
  46. size, alignment: int,
  47. old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
  48. arena := cast(^Arena)allocator_data
  49. switch mode {
  50. case .Alloc:
  51. #no_bounds_check end := &arena.data[arena.offset]
  52. ptr := align_forward(end, uintptr(alignment))
  53. total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))
  54. if arena.offset + total_size > len(arena.data) {
  55. return nil, .Out_Of_Memory
  56. }
  57. arena.offset += total_size
  58. arena.peak_used = max(arena.peak_used, arena.offset)
  59. zero(ptr, size)
  60. return byte_slice(ptr, size), nil
  61. case .Free:
  62. return nil, .Mode_Not_Implemented
  63. case .Free_All:
  64. arena.offset = 0
  65. case .Resize:
  66. return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))
  67. case .Query_Features:
  68. set := (^Allocator_Mode_Set)(old_memory)
  69. if set != nil {
  70. set^ = {.Alloc, .Free_All, .Resize, .Query_Features}
  71. }
  72. return nil, nil
  73. case .Query_Info:
  74. return nil, .Mode_Not_Implemented
  75. }
  76. return nil, nil
  77. }
  78. begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
  79. tmp: Arena_Temp_Memory
  80. tmp.arena = a
  81. tmp.prev_offset = a.offset
  82. a.temp_count += 1
  83. return tmp
  84. }
  85. end_arena_temp_memory :: proc(using tmp: Arena_Temp_Memory) {
  86. assert(arena.offset >= prev_offset)
  87. assert(arena.temp_count > 0)
  88. arena.offset = prev_offset
  89. arena.temp_count -= 1
  90. }
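/*
	Example: basic Arena usage with temporary scoping. An illustrative
	sketch added for documentation, not part of the original file; it
	assumes a caller-provided backing buffer.

	backing: [1024]byte
	arena: Arena
	arena_init(&arena, backing[:])

	tmp := begin_arena_temp_memory(&arena)
	buf, err := alloc_bytes(128, 16, arena_allocator(&arena)) // bump-allocated out of `backing`
	assert(err == nil)
	end_arena_temp_memory(tmp) // rewinds `arena.offset` to where it was at `begin_arena_temp_memory`
*/
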
Scratch_Allocator :: struct {
	data:               []byte,
	curr_offset:        int,
	prev_allocation:    rawptr,
	backup_allocator:   Allocator,
	leaked_allocations: [dynamic][]byte,
}

scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) -> Allocator_Error {
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
	s.curr_offset = 0
	s.prev_allocation = nil
	s.backup_allocator = backup_allocator
	s.leaked_allocations.allocator = backup_allocator
	return nil
}

scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
	if s == nil {
		return
	}
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator)
	}
	delete(s.leaked_allocations)
	delete(s.data, s.backup_allocator)
	s^ = {}
}

scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                               size, alignment: int,
                               old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	s := (^Scratch_Allocator)(allocator_data)

	if s.data == nil {
		DEFAULT_BACKING_SIZE :: 1<<22
		// Refuse to self-initialize: the backing buffer must not come from
		// this very scratch allocator
		if context.allocator.procedure == scratch_allocator_proc &&
		   context.allocator.data == allocator_data {
			panic("cyclic initialization of the scratch allocator with itself")
		}
		scratch_allocator_init(s, DEFAULT_BACKING_SIZE)
	}

	size := size

	switch mode {
	case .Alloc:
		size = align_forward_int(size, alignment)

		switch {
		case s.curr_offset+size <= len(s.data):
			start := uintptr(raw_data(s.data))
			ptr := start + uintptr(s.curr_offset)
			ptr = align_forward_uintptr(ptr, uintptr(alignment))
			zero(rawptr(ptr), size)

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil

		case size <= len(s.data):
			start := uintptr(raw_data(s.data))
			ptr := align_forward_uintptr(start, uintptr(alignment))
			zero(rawptr(ptr), size)

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil
		}

		a := s.backup_allocator
		if a.procedure == nil {
			a = context.allocator
			s.backup_allocator = a
		}

		ptr, err := alloc_bytes(size, alignment, a, loc)
		if err != nil {
			return ptr, err
		}
		if s.leaked_allocations == nil {
			s.leaked_allocations, err = make([dynamic][]byte, a)
		}
		append(&s.leaked_allocations, ptr)

		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc)
			}
		}

		return ptr, err

	case .Free:
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)

		if s.prev_allocation == old_memory {
			s.curr_offset = int(uintptr(s.prev_allocation) - start)
			s.prev_allocation = nil
			return nil, nil
		}

		if start <= old_ptr && old_ptr < end {
			// NOTE(bill): Cannot free this pointer but it is valid
			return nil, nil
		}

		if len(s.leaked_allocations) != 0 {
			for data, i in s.leaked_allocations {
				ptr := raw_data(data)
				if ptr == old_memory {
					free_bytes(data, s.backup_allocator)
					ordered_remove(&s.leaked_allocations, i)
					return nil, nil
				}
			}
		}
		return nil, .Invalid_Pointer
		// panic("invalid pointer passed to default_temp_allocator");

	case .Free_All:
		s.curr_offset = 0
		s.prev_allocation = nil
		for ptr in s.leaked_allocations {
			free_bytes(ptr, s.backup_allocator)
		}
		clear(&s.leaked_allocations)

	case .Resize:
		begin := uintptr(raw_data(s.data))
		end := begin + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)
		if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
			s.curr_offset = int(old_ptr-begin)+size
			return byte_slice(old_memory, size), nil
		}
		data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc)
		if err != nil {
			return data, err
		}
		runtime.copy(data, byte_slice(old_memory, old_size))
		_, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc)
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
	return Allocator{
		procedure = scratch_allocator_proc,
		data = allocator,
	}
}

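/*
	Example: Scratch_Allocator as a temporary allocator over a fixed
	backing buffer. An illustrative sketch, not part of the original file.

	scratch: Scratch_Allocator
	err := scratch_allocator_init(&scratch, 1<<20) // 1 MiB backing from context.allocator
	assert(err == nil)
	defer scratch_allocator_destroy(&scratch)

	buf, aerr := alloc_bytes(256, 16, scratch_allocator(&scratch))
	// Requests that do not fit the backing buffer fall back to
	// `backup_allocator` and are recorded in `leaked_allocations`, which
	// `.Free_All` and `scratch_allocator_destroy` release.
*/
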
Stack_Allocation_Header :: struct {
	prev_offset: int,
	padding:     int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
	data:        []byte,
	prev_offset: int,
	curr_offset: int,
	peak_used:   int,
}

stack_init :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'mem.stack_init'")
init_stack :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

stack_allocator :: proc(stack: ^Stack) -> Allocator {
	return Allocator{
		procedure = stack_allocator_proc,
		data = stack,
	}
}

stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	raw_alloc :: proc(s: ^Stack, size, alignment: int) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header))
		if s.curr_offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.prev_offset = s.curr_offset
		s.curr_offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
		header.padding = padding
		header.prev_offset = s.prev_offset

		s.curr_offset += size
		s.peak_used = max(s.peak_used, s.curr_offset)
		zero(rawptr(next_addr), size)
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc:
		return raw_alloc(s, size, alignment)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (free)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			// panic("Out of order stack allocator free");
			return nil, .Invalid_Pointer
		}

		s.curr_offset = old_offset
		s.prev_offset = header.prev_offset

	case .Free_All:
		s.prev_offset = 0
		s.curr_offset = 0

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, alignment)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (resize)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			data, err := raw_alloc(s, size, alignment)
			if err == nil {
				runtime.copy(data, byte_slice(old_memory, old_size))
			}
			return data, err
		}

		old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
		assert(old_memory_size == uintptr(old_size))

		diff := size - old_size
		s.curr_offset += diff // works for smaller sizes too
		if diff > 0 {
			// Zero only the newly extended region, which begins at the old end
			zero(rawptr(curr_addr + uintptr(old_size)), diff)
		}

		return byte_slice(old_memory, size), nil

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

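/*
	Example: strict LIFO usage of the Stack allocator. An illustrative
	sketch, not part of the original file.

	backing: [2048]byte
	stack: Stack
	stack_init(&stack, backing[:])
	a := stack_allocator(&stack)

	first, _  := alloc_bytes(64, 16, a)
	second, _ := alloc_bytes(64, 16, a)
	free_bytes(second, a) // most recent allocation: OK
	free_bytes(first, a)  // now the most recent again: OK
	// Freeing `first` before `second` would fail with .Invalid_Pointer.
*/
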
Small_Stack_Allocation_Header :: struct {
	padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order
Small_Stack :: struct {
	data:      []byte,
	offset:    int,
	peak_used: int,
}

small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'mem.small_stack_init'")
init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
	return Allocator{
		procedure = small_stack_allocator_proc,
		data = stack,
	}
}

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                   size, alignment: int,
                                   old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Small_Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)

	raw_alloc :: proc(s: ^Small_Stack, size, alignment: int) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
		if s.offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
		header.padding = auto_cast padding

		s.offset += size
		s.peak_used = max(s.peak_used, s.offset)
		zero(rawptr(next_addr), size)
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc:
		return raw_alloc(s, size, align)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (free)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
		s.offset = old_offset

	case .Free_All:
		s.offset = 0

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, align)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (resize)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Treat as a double free
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		data, err := raw_alloc(s, size, align)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

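/*
	Example: Small_Stack permits out-of-order frees because its one-byte
	header records only the alignment padding; freeing a pointer rewinds
	the stack to that pointer's offset. An illustrative sketch, not part
	of the original file.

	backing: [1024]byte
	stack: Small_Stack
	small_stack_init(&stack, backing[:])
	a := small_stack_allocator(&stack)

	first, _  := alloc_bytes(64, 8, a)
	second, _ := alloc_bytes(64, 8, a)
	free_bytes(first, a) // allowed, but also discards `second`, which sits above it
*/
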
Dynamic_Pool :: struct {
	block_size:    int,
	out_band_size: int,
	alignment:     int,

	unused_blocks:        [dynamic]rawptr,
	used_blocks:          [dynamic]rawptr,
	out_band_allocations: [dynamic]rawptr,

	current_block: rawptr,
	current_pos:   rawptr,
	bytes_left:    int,

	block_allocator: Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT       :: 65536
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554

dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	pool := (^Dynamic_Pool)(allocator_data)

	switch mode {
	case .Alloc:
		return dynamic_pool_alloc_bytes(pool, size)

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		dynamic_pool_free_all(pool)
		return nil, nil

	case .Resize:
		if old_size >= size {
			return byte_slice(old_memory, size), nil
		}
		data, err := dynamic_pool_alloc_bytes(pool, size)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free_All, .Resize, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			info.size = pool.block_size
			info.alignment = pool.alignment
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}

	return nil, nil
}

dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
	return Allocator{
		procedure = dynamic_pool_allocator_proc,
		data = pool,
	}
}

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
                          block_allocator := context.allocator,
                          array_allocator := context.allocator,
                          block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
                          out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
                          alignment := 8) {
	pool.block_size = block_size
	pool.out_band_size = out_band_size
	pool.alignment = alignment
	pool.block_allocator = block_allocator
	pool.out_band_allocations.allocator = array_allocator
	pool.unused_blocks.allocator = array_allocator
	pool.used_blocks.allocator = array_allocator
}

dynamic_pool_destroy :: proc(using pool: ^Dynamic_Pool) {
	dynamic_pool_free_all(pool)
	delete(unused_blocks)
	delete(used_blocks)
	delete(out_band_allocations)
	zero(pool, size_of(pool^))
}

dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> rawptr {
	data, err := dynamic_pool_alloc_bytes(pool, bytes)
	assert(err == nil)
	return raw_data(data)
}

dynamic_pool_alloc_bytes :: proc(using pool: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
	cycle_new_block :: proc(using pool: ^Dynamic_Pool) -> (err: Allocator_Error) {
		if block_allocator.procedure == nil {
			panic("You must call pool_init on a Pool before using it")
		}

		if current_block != nil {
			append(&used_blocks, current_block)
		}

		new_block: rawptr
		if len(unused_blocks) > 0 {
			new_block = pop(&unused_blocks)
		} else {
			data: []byte
			data, err = block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
			                                      block_size, alignment,
			                                      nil, 0)
			new_block = raw_data(data)
		}

		bytes_left = block_size
		current_pos = new_block
		current_block = new_block
		return
	}

	n := bytes
	extra := alignment - (n % alignment)
	n += extra
	if n >= out_band_size {
		assert(block_allocator.procedure != nil)
		// Out-of-band requests get a dedicated allocation sized to the full
		// aligned request, so they are never truncated to one block
		memory, err := block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
		                                         n, alignment,
		                                         nil, 0)
		if memory != nil {
			append(&out_band_allocations, raw_data(memory))
		}
		return memory, err
	}

	if bytes_left < n {
		err := cycle_new_block(pool)
		if err != nil {
			return nil, err
		}
		if current_block == nil {
			return nil, .Out_Of_Memory
		}
	}

	memory := current_pos
	current_pos = ptr_offset((^byte)(current_pos), n)
	bytes_left -= n
	return byte_slice(memory, bytes), nil
}

dynamic_pool_reset :: proc(using pool: ^Dynamic_Pool) {
	if current_block != nil {
		append(&unused_blocks, current_block)
		current_block = nil
	}

	for block in used_blocks {
		append(&unused_blocks, block)
	}
	clear(&used_blocks)

	for a in out_band_allocations {
		free(a, block_allocator)
	}
	clear(&out_band_allocations)

	bytes_left = 0 // Make new allocations call `cycle_new_block` again.
}

dynamic_pool_free_all :: proc(using pool: ^Dynamic_Pool) {
	dynamic_pool_reset(pool)

	for block in unused_blocks {
		free(block, block_allocator)
	}
	clear(&unused_blocks)
}

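/*
	Example: Dynamic_Pool grows in fixed-size blocks, serves allocations by
	bumping within the current block, and frees everything at once. An
	illustrative sketch, not part of the original file.

	pool: Dynamic_Pool
	dynamic_pool_init(&pool) // 64 KiB blocks from context.allocator by default
	defer dynamic_pool_destroy(&pool)

	ptr := dynamic_pool_alloc(&pool, 128)  // bump-allocated from the current block
	big := dynamic_pool_alloc(&pool, 8192) // >= out_band_size: dedicated allocation
	dynamic_pool_free_all(&pool)           // returns blocks and out-of-band allocations
*/
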
panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		if size > 0 {
			panic("mem: panic allocator, .Alloc called")
		}
	case .Resize:
		if size > 0 {
			panic("mem: panic allocator, .Resize called")
		}
	case .Free:
		if old_memory != nil {
			panic("mem: panic allocator, .Free called")
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called")

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features}
		}
		return nil, nil

	case .Query_Info:
		panic("mem: panic allocator, .Query_Info called")
	}

	return nil, nil
}

panic_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = panic_allocator_proc,
		data = nil,
	}
}

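/*
	Example: install the panic allocator to assert that a code path performs
	no allocations. An illustrative sketch, not part of the original file.

	context.allocator = panic_allocator()
	// Any .Alloc or .Resize with size > 0, and any .Free of a non-nil
	// pointer, now panics instead of silently touching the heap.
*/
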
Tracking_Allocator_Entry :: struct {
	memory:    rawptr,
	size:      int,
	alignment: int,
	err:       Allocator_Error,
	location:  runtime.Source_Code_Location,
}

Tracking_Allocator_Bad_Free_Entry :: struct {
	memory:   rawptr,
	location: runtime.Source_Code_Location,
}

Tracking_Allocator :: struct {
	backing:           Allocator,
	allocation_map:    map[rawptr]Tracking_Allocator_Entry,
	bad_free_array:    [dynamic]Tracking_Allocator_Bad_Free_Entry,
	clear_on_free_all: bool,
}

tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
	t.backing = backing_allocator
	t.allocation_map.allocator = internals_allocator
	t.bad_free_array.allocator = internals_allocator
}

tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
	delete(t.allocation_map)
	delete(t.bad_free_array)
}

tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
	return Allocator{
		data = data,
		procedure = tracking_allocator_proc,
	}
}

tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                size, alignment: int,
                                old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	data := (^Tracking_Allocator)(allocator_data)
	if mode == .Query_Info {
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			if entry, ok := data.allocation_map[info.pointer]; ok {
				info.size = entry.size
				info.alignment = entry.alignment
			}
			info.pointer = nil
		}
		return nil, nil
	}

	result: []byte
	err: Allocator_Error

	if mode == .Free && old_memory != nil && old_memory not_in data.allocation_map {
		append(&data.bad_free_array, Tracking_Allocator_Bad_Free_Entry{
			memory = old_memory,
			location = loc,
		})
	} else {
		result, err = data.backing.procedure(data.backing.data, mode, size, alignment, old_memory, old_size, loc)
		if err != nil {
			return result, err
		}
	}

	result_ptr := raw_data(result)

	if data.allocation_map.allocator.procedure == nil {
		data.allocation_map.allocator = context.allocator
	}

	switch mode {
	case .Alloc:
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			alignment = alignment,
			err = err,
			location = loc,
		}
	case .Free:
		delete_key(&data.allocation_map, old_memory)
	case .Free_All:
		if data.clear_on_free_all {
			clear_map(&data.allocation_map)
		}
	case .Resize:
		if old_memory != result_ptr {
			delete_key(&data.allocation_map, old_memory)
		}
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			alignment = alignment,
			err = err,
			location = loc,
		}

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		unreachable()
	}

	return result, err
}
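
/*
	Example: wrap an allocator with Tracking_Allocator to report leaks and
	bad frees on exit. An illustrative sketch, not part of the original
	file; assumes `core:fmt` is imported for printing.

	track: Tracking_Allocator
	tracking_allocator_init(&track, context.allocator)
	context.allocator = tracking_allocator(&track)
	defer {
		for _, entry in track.allocation_map {
			fmt.eprintf("%v leaked %v bytes\n", entry.location, entry.size)
		}
		for bad in track.bad_free_array {
			fmt.eprintf("%v freed a pointer it did not allocate: %p\n", bad.location, bad.memory)
		}
		tracking_allocator_destroy(&track)
	}
*/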