// allocators.odin
package mem

import "core:intrinsics"
import "core:runtime"

nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                           size, alignment: int,
                           old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	return nil, nil
}

nil_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = nil_allocator_proc,
		data = nil,
	}
}

// Custom allocators

Arena :: struct {
	data:       []byte,
	offset:     int,
	peak_used:  int,
	temp_count: int,
}

Arena_Temp_Memory :: struct {
	arena:       ^Arena,
	prev_offset: int,
}

arena_init :: proc(a: ^Arena, data: []byte) {
	a.data       = data
	a.offset     = 0
	a.peak_used  = 0
	a.temp_count = 0
}

@(deprecated="prefer 'mem.arena_init'")
init_arena :: proc(a: ^Arena, data: []byte) {
	a.data       = data
	a.offset     = 0
	a.peak_used  = 0
	a.temp_count = 0
}

arena_allocator :: proc(arena: ^Arena) -> Allocator {
	return Allocator{
		procedure = arena_allocator_proc,
		data = arena,
	}
}

arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	arena := cast(^Arena)allocator_data

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		#no_bounds_check end := &arena.data[arena.offset]

		ptr := align_forward(end, uintptr(alignment))

		total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))

		if arena.offset + total_size > len(arena.data) {
			return nil, .Out_Of_Memory
		}

		arena.offset += total_size
		arena.peak_used = max(arena.peak_used, arena.offset)
		if mode != .Alloc_Non_Zeroed {
			zero(ptr, size)
		}
		return byte_slice(ptr, size), nil

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		arena.offset = 0

	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	tmp: Arena_Temp_Memory
	tmp.arena = a
	tmp.prev_offset = a.offset
	a.temp_count += 1
	return tmp
}

end_arena_temp_memory :: proc(using tmp: Arena_Temp_Memory) {
	assert(arena.offset >= prev_offset)
	assert(arena.temp_count > 0)
	arena.offset = prev_offset
	arena.temp_count -= 1
}
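
// Usage sketch (illustrative, not part of the original file): back an `Arena`
// with a fixed buffer, install it as `context.allocator`, and use a
// temporary-memory scope to roll allocations back. Assumes a user package that
// imports this one as "core:mem".
//
//	import "core:mem"
//
//	arena_example :: proc() {
//		buffer: [1024]byte
//		arena: mem.Arena
//		mem.arena_init(&arena, buffer[:])
//		context.allocator = mem.arena_allocator(&arena)
//
//		tmp := mem.begin_arena_temp_memory(&arena)
//		scratch := make([]int, 64) // allocated from the arena via context.allocator
//		_ = scratch
//		mem.end_arena_temp_memory(tmp) // offset restored; no individual frees needed
//	}
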
Scratch_Allocator :: struct {
	data:               []byte,
	curr_offset:        int,
	prev_allocation:    rawptr,
	backup_allocator:   Allocator,
	leaked_allocations: [dynamic][]byte,
}

scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) -> Allocator_Error {
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
	s.curr_offset = 0
	s.prev_allocation = nil
	s.backup_allocator = backup_allocator
	s.leaked_allocations.allocator = backup_allocator
	return nil
}

scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
	if s == nil {
		return
	}
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator)
	}
	delete(s.leaked_allocations)
	delete(s.data, s.backup_allocator)
	s^ = {}
}

scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                               size, alignment: int,
                               old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	s := (^Scratch_Allocator)(allocator_data)

	if s.data == nil {
		DEFAULT_BACKING_SIZE :: 4 * Megabyte
		if !(context.allocator.procedure != scratch_allocator_proc &&
		     context.allocator.data != allocator_data) {
			panic("cyclic initialization of the scratch allocator with itself")
		}
		scratch_allocator_init(s, DEFAULT_BACKING_SIZE)
	}

	size := size

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		size = align_forward_int(size, alignment)

		switch {
		case s.curr_offset+size <= len(s.data):
			start := uintptr(raw_data(s.data))
			ptr := start + uintptr(s.curr_offset)
			ptr = align_forward_uintptr(ptr, uintptr(alignment))
			if mode != .Alloc_Non_Zeroed {
				zero(rawptr(ptr), size)
			}

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil

		case size <= len(s.data):
			start := uintptr(raw_data(s.data))
			ptr := align_forward_uintptr(start, uintptr(alignment))
			if mode != .Alloc_Non_Zeroed {
				zero(rawptr(ptr), size)
			}

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil
		}

		a := s.backup_allocator
		if a.procedure == nil {
			a = context.allocator
			s.backup_allocator = a
		}

		ptr, err := alloc_bytes(size, alignment, a, loc)
		if err != nil {
			return ptr, err
		}
		if s.leaked_allocations == nil {
			s.leaked_allocations, err = make([dynamic][]byte, a)
		}
		append(&s.leaked_allocations, ptr)

		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc)
			}
		}

		return ptr, err

	case .Free:
		if old_memory == nil {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)

		if s.prev_allocation == old_memory {
			s.curr_offset = int(uintptr(s.prev_allocation) - start)
			s.prev_allocation = nil
			return nil, nil
		}

		if start <= old_ptr && old_ptr < end {
			// NOTE(bill): Cannot free this pointer but it is valid
			return nil, nil
		}

		if len(s.leaked_allocations) != 0 {
			for data, i in s.leaked_allocations {
				ptr := raw_data(data)
				if ptr == old_memory {
					free_bytes(data, s.backup_allocator)
					ordered_remove(&s.leaked_allocations, i)
					return nil, nil
				}
			}
		}
		return nil, .Invalid_Pointer
		// panic("invalid pointer passed to default_temp_allocator");

	case .Free_All:
		s.curr_offset = 0
		s.prev_allocation = nil
		for ptr in s.leaked_allocations {
			free_bytes(ptr, s.backup_allocator)
		}
		clear(&s.leaked_allocations)

	case .Resize:
		begin := uintptr(raw_data(s.data))
		end := begin + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)
		if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
			s.curr_offset = int(old_ptr-begin)+size
			return byte_slice(old_memory, size), nil
		}
		data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc)
		if err != nil {
			return data, err
		}
		runtime.copy(data, byte_slice(old_memory, old_size))
		_, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc)
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
	return Allocator{
		procedure = scratch_allocator_proc,
		data = allocator,
	}
}
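
// Usage sketch (illustrative, not part of the original file): a scratch
// allocator over a fixed backing buffer; requests that do not fit fall back to
// the backup allocator and are remembered as leaked allocations until
// `.Free_All` or destroy. Names outside this file are assumptions.
//
//	import "core:mem"
//
//	scratch_example :: proc() -> mem.Allocator_Error {
//		scratch: mem.Scratch_Allocator
//		mem.scratch_allocator_init(&scratch, 4096) or_return
//		defer mem.scratch_allocator_destroy(&scratch)
//
//		context.temp_allocator = mem.scratch_allocator(&scratch)
//		line := make([]byte, 256, context.temp_allocator)
//		_ = line
//		mem.free_all(context.temp_allocator) // recycle everything at a frame boundary
//		return nil
//	}
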
Stack_Allocation_Header :: struct {
	prev_offset: int,
	padding:     int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
	data:        []byte,
	prev_offset: int,
	curr_offset: int,
	peak_used:   int,
}

stack_init :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'mem.stack_init'")
init_stack :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

stack_allocator :: proc(stack: ^Stack) -> Allocator {
	return Allocator{
		procedure = stack_allocator_proc,
		data = stack,
	}
}

stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	raw_alloc :: proc(s: ^Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header))
		if s.curr_offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.prev_offset = s.curr_offset
		s.curr_offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
		header.padding = padding
		header.prev_offset = s.prev_offset

		s.curr_offset += size
		s.peak_used = max(s.peak_used, s.curr_offset)

		if zero_memory {
			zero(rawptr(next_addr), size)
		}
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return raw_alloc(s, size, alignment, mode == .Alloc)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (free)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			// panic("Out of order stack allocator free");
			return nil, .Invalid_Pointer
		}

		s.curr_offset = old_offset
		s.prev_offset = header.prev_offset

	case .Free_All:
		s.prev_offset = 0
		s.curr_offset = 0

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, alignment, true)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (resize)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			data, err := raw_alloc(s, size, alignment, true)
			if err == nil {
				runtime.copy(data, byte_slice(old_memory, old_size))
			}
			return data, err
		}

		old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
		assert(old_memory_size == uintptr(old_size))

		diff := size - old_size
		s.curr_offset += diff // works for smaller sizes too
		if diff > 0 {
			// Zero only the newly grown region, which starts `old_size` bytes in
			zero(rawptr(curr_addr + uintptr(old_size)), diff)
		}

		return byte_slice(old_memory, size), nil

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}
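
// Usage sketch (illustrative, not part of the original file): the stack
// allocator enforces LIFO freeing, so frees must happen in reverse allocation
// order. Uses `alloc_bytes`/`free_bytes` from this package; other names are
// assumptions.
//
//	import "core:mem"
//
//	stack_example :: proc() {
//		buffer: [2048]byte
//		stack: mem.Stack
//		mem.stack_init(&stack, buffer[:])
//		allocator := mem.stack_allocator(&stack)
//
//		a, _ := mem.alloc_bytes(128, 16, allocator)
//		b, _ := mem.alloc_bytes(64, 16, allocator)
//		mem.free_bytes(b, allocator) // most recent allocation first
//		mem.free_bytes(a, allocator) // then the earlier one
//	}
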
Small_Stack_Allocation_Header :: struct {
	padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order
Small_Stack :: struct {
	data:      []byte,
	offset:    int,
	peak_used: int,
}

small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'small_stack_init'")
init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
	return Allocator{
		procedure = small_stack_allocator_proc,
		data = stack,
	}
}

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                   size, alignment: int,
                                   old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Small_Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)

	raw_alloc :: proc(s: ^Small_Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
		if s.offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
		header.padding = auto_cast padding

		s.offset += size
		s.peak_used = max(s.peak_used, s.offset)

		if zero_memory {
			zero(rawptr(next_addr), size)
		}
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return raw_alloc(s, size, align, mode == .Alloc)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (free)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		s.offset = old_offset

	case .Free_All:
		s.offset = 0

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, align, true)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (resize)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Treat as a double free
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		data, err := raw_alloc(s, size, align, true)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}
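
// Usage sketch (illustrative, not part of the original file): `Small_Stack`
// stores only a one-byte padding header per allocation, so it tolerates
// out-of-order frees at the cost of weaker checking; freeing an allocation
// simply rewinds the offset to it. Names outside this file are assumptions.
//
//	import "core:mem"
//
//	small_stack_example :: proc() {
//		buffer: [1024]byte
//		stack: mem.Small_Stack
//		mem.small_stack_init(&stack, buffer[:])
//		allocator := mem.small_stack_allocator(&stack)
//
//		a, _ := mem.alloc_bytes(100, 8, allocator)
//		_ = a
//		b, _ := mem.alloc_bytes(200, 8, allocator)
//		mem.free_bytes(b, allocator) // rewinds the offset to just before `b`
//	}
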
Dynamic_Pool :: struct {
	block_size:    int,
	out_band_size: int,
	alignment:     int,

	unused_blocks:        [dynamic]rawptr,
	used_blocks:          [dynamic]rawptr,
	out_band_allocations: [dynamic]rawptr,

	current_block: rawptr,
	current_pos:   rawptr,
	bytes_left:    int,

	block_allocator: Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT       :: 65536
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554

dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	pool := (^Dynamic_Pool)(allocator_data)

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return dynamic_pool_alloc_bytes(pool, size)

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		dynamic_pool_free_all(pool)
		return nil, nil

	case .Resize:
		if old_size >= size {
			return byte_slice(old_memory, size), nil
		}
		data, err := dynamic_pool_alloc_bytes(pool, size)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			info.size = pool.block_size
			info.alignment = pool.alignment
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}

	return nil, nil
}

dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
	return Allocator{
		procedure = dynamic_pool_allocator_proc,
		data = pool,
	}
}

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
                          block_allocator := context.allocator,
                          array_allocator := context.allocator,
                          block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
                          out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
                          alignment := 8) {
	pool.block_size      = block_size
	pool.out_band_size   = out_band_size
	pool.alignment       = alignment
	pool.block_allocator = block_allocator
	pool.out_band_allocations.allocator = array_allocator
	pool.unused_blocks.allocator        = array_allocator
	pool.used_blocks.allocator          = array_allocator
}

dynamic_pool_destroy :: proc(using pool: ^Dynamic_Pool) {
	dynamic_pool_free_all(pool)
	delete(unused_blocks)
	delete(used_blocks)
	delete(out_band_allocations)

	zero(pool, size_of(pool^))
}

dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> rawptr {
	data, err := dynamic_pool_alloc_bytes(pool, bytes)
	assert(err == nil)
	return raw_data(data)
}

dynamic_pool_alloc_bytes :: proc(using pool: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
	cycle_new_block :: proc(using pool: ^Dynamic_Pool) -> (err: Allocator_Error) {
		if block_allocator.procedure == nil {
			panic("You must call pool_init on a Pool before using it")
		}

		if current_block != nil {
			append(&used_blocks, current_block)
		}

		new_block: rawptr
		if len(unused_blocks) > 0 {
			new_block = pop(&unused_blocks)
		} else {
			data: []byte
			data, err = block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
			                                      block_size, alignment,
			                                      nil, 0)
			new_block = raw_data(data)
		}

		bytes_left    = block_size
		current_pos   = new_block
		current_block = new_block
		return
	}

	n := bytes
	extra := alignment - (n % alignment)
	n += extra
	if n >= out_band_size {
		assert(block_allocator.procedure != nil)
		memory, err := block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
		                                         block_size, alignment,
		                                         nil, 0)
		if memory != nil {
			append(&out_band_allocations, raw_data(memory))
		}
		return memory, err
	}

	if bytes_left < n {
		err := cycle_new_block(pool)
		if err != nil {
			return nil, err
		}
		if current_block == nil {
			return nil, .Out_Of_Memory
		}
	}

	memory := current_pos
	current_pos = ptr_offset((^byte)(current_pos), n)
	bytes_left -= n
	return byte_slice(memory, bytes), nil
}

dynamic_pool_reset :: proc(using pool: ^Dynamic_Pool) {
	if current_block != nil {
		append(&unused_blocks, current_block)
		current_block = nil
	}

	for block in used_blocks {
		append(&unused_blocks, block)
	}
	clear(&used_blocks)

	for a in out_band_allocations {
		free(a, block_allocator)
	}
	clear(&out_band_allocations)

	bytes_left = 0 // Make new allocations call `cycle_new_block` again.
}

dynamic_pool_free_all :: proc(using pool: ^Dynamic_Pool) {
	dynamic_pool_reset(pool)

	for block in unused_blocks {
		free(block, block_allocator)
	}
	clear(&unused_blocks)
}
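
// Usage sketch (illustrative, not part of the original file): the pool grows
// in `block_size` chunks taken from `block_allocator`, serves small requests
// by bumping within the current block, and sends requests of at least
// `out_band_size` straight to the block allocator. Names outside this file
// are assumptions.
//
//	import "core:mem"
//
//	pool_example :: proc() {
//		pool: mem.Dynamic_Pool
//		mem.dynamic_pool_init(&pool)
//		defer mem.dynamic_pool_destroy(&pool)
//
//		allocator := mem.dynamic_pool_allocator(&pool)
//		node := mem.dynamic_pool_alloc(&pool, 64)    // raw pointer helper
//		data, _ := mem.alloc_bytes(128, 8, allocator) // through the Allocator interface
//		_, _ = node, data
//
//		mem.dynamic_pool_reset(&pool) // keep the blocks around for reuse
//	}
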
panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		if size > 0 {
			panic("mem: panic allocator, .Alloc called")
		}
	case .Alloc_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Alloc_Non_Zeroed called")
		}
	case .Resize:
		if size > 0 {
			panic("mem: panic allocator, .Resize called")
		}
	case .Free:
		if old_memory != nil {
			panic("mem: panic allocator, .Free called")
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called")

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features}
		}
		return nil, nil

	case .Query_Info:
		panic("mem: panic allocator, .Query_Info called")
	}

	return nil, nil
}

panic_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = panic_allocator_proc,
		data = nil,
	}
}
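
// Usage sketch (illustrative, not part of the original file): the panic
// allocator can be installed to assert that a code path performs no
// allocations; any non-trivial request through it panics.
//
//	import "core:mem"
//
//	no_alloc_section :: proc() {
//		context.allocator = mem.panic_allocator()
//		// ... code that must not allocate through context.allocator ...
//	}
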
Tracking_Allocator_Entry :: struct {
	memory:    rawptr,
	size:      int,
	alignment: int,
	mode:      Allocator_Mode,
	err:       Allocator_Error,
	location:  runtime.Source_Code_Location,
}

Tracking_Allocator_Bad_Free_Entry :: struct {
	memory:   rawptr,
	location: runtime.Source_Code_Location,
}

Tracking_Allocator :: struct {
	backing:           Allocator,
	allocation_map:    map[rawptr]Tracking_Allocator_Entry,
	bad_free_array:    [dynamic]Tracking_Allocator_Bad_Free_Entry,
	clear_on_free_all: bool,
}

tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
	t.backing = backing_allocator
	t.allocation_map.allocator = internals_allocator
	t.bad_free_array.allocator = internals_allocator

	if .Free_All in query_features(t.backing) {
		t.clear_on_free_all = true
	}
}

tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
	delete(t.allocation_map)
	delete(t.bad_free_array)
}

tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
	clear(&t.allocation_map)
	clear(&t.bad_free_array)
}

tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
	return Allocator{
		data = data,
		procedure = tracking_allocator_proc,
	}
}

tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                size, alignment: int,
                                old_memory: rawptr, old_size: int, loc := #caller_location) -> (result: []byte, err: Allocator_Error) {
	data := (^Tracking_Allocator)(allocator_data)

	if mode == .Query_Info {
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			if entry, ok := data.allocation_map[info.pointer]; ok {
				info.size = entry.size
				info.alignment = entry.alignment
			}
			info.pointer = nil
		}
		return
	}

	if mode == .Free && old_memory != nil && old_memory not_in data.allocation_map {
		append(&data.bad_free_array, Tracking_Allocator_Bad_Free_Entry{
			memory = old_memory,
			location = loc,
		})
	} else {
		result = data.backing.procedure(data.backing.data, mode, size, alignment, old_memory, old_size, loc) or_return
	}
	result_ptr := raw_data(result)

	if data.allocation_map.allocator.procedure == nil {
		data.allocation_map.allocator = context.allocator
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			mode = mode,
			alignment = alignment,
			err = err,
			location = loc,
		}

	case .Free:
		delete_key(&data.allocation_map, old_memory)

	case .Free_All:
		if data.clear_on_free_all {
			clear_map(&data.allocation_map)
		}

	case .Resize:
		if old_memory != result_ptr {
			delete_key(&data.allocation_map, old_memory)
		}
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			mode = mode,
			alignment = alignment,
			err = err,
			location = loc,
		}

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		unreachable()
	}

	return
}
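
// Usage sketch (illustrative, not part of the original file): wrap an existing
// allocator with `Tracking_Allocator` to record live allocations and bad frees,
// then report anything still in `allocation_map` at shutdown as a leak.
// Assumes "core:fmt" for printing; `run_program` is a hypothetical entry point.
//
//	import "core:fmt"
//	import "core:mem"
//
//	main :: proc() {
//		track: mem.Tracking_Allocator
//		mem.tracking_allocator_init(&track, context.allocator)
//		defer mem.tracking_allocator_destroy(&track)
//		context.allocator = mem.tracking_allocator(&track)
//
//		run_program() // hypothetical application code
//
//		for _, entry in track.allocation_map {
//			fmt.printf("leaked %v bytes @ %v\n", entry.size, entry.location)
//		}
//		for bad in track.bad_free_array {
//			fmt.printf("bad free of %v @ %v\n", bad.memory, bad.location)
//		}
//	}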