package mem

import "core:intrinsics"
import "core:runtime"
import "core:sync"

nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                           size, alignment: int,
                           old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	return nil, nil
}

nil_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = nil_allocator_proc,
		data = nil,
	}
}
// Custom allocators

Arena :: struct {
	data: []byte,
	offset: int,
	peak_used: int,
	temp_count: int,
}

Arena_Temp_Memory :: struct {
	arena: ^Arena,
	prev_offset: int,
}

arena_init :: proc(a: ^Arena, data: []byte) {
	a.data = data
	a.offset = 0
	a.peak_used = 0
	a.temp_count = 0
}

@(deprecated="prefer 'mem.arena_init'")
init_arena :: proc(a: ^Arena, data: []byte) {
	a.data = data
	a.offset = 0
	a.peak_used = 0
	a.temp_count = 0
}

@(require_results)
arena_allocator :: proc(arena: ^Arena) -> Allocator {
	return Allocator{
		procedure = arena_allocator_proc,
		data = arena,
	}
}

arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	arena := cast(^Arena)allocator_data

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		#no_bounds_check end := &arena.data[arena.offset]

		ptr := align_forward(end, uintptr(alignment))
		total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))

		if arena.offset + total_size > len(arena.data) {
			return nil, .Out_Of_Memory
		}

		arena.offset += total_size
		arena.peak_used = max(arena.peak_used, arena.offset)
		if mode != .Alloc_Non_Zeroed {
			zero(ptr, size)
		}
		return byte_slice(ptr, size), nil

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		arena.offset = 0

	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

@(require_results)
begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	tmp: Arena_Temp_Memory
	tmp.arena = a
	tmp.prev_offset = a.offset
	a.temp_count += 1
	return tmp
}

end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
	assert(tmp.arena.offset >= tmp.prev_offset)
	assert(tmp.arena.temp_count > 0)
	tmp.arena.offset = tmp.prev_offset
	tmp.arena.temp_count -= 1
}
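
// Usage sketch (illustrative; `example_arena_usage` is a hypothetical name, not
// part of this package): back an Arena with a caller-provided buffer, install it
// as `context.allocator`, and use Arena_Temp_Memory to roll back everything
// allocated within a scope.
//
//	example_arena_usage :: proc() {
//		backing: [4096]byte
//		arena: Arena
//		arena_init(&arena, backing[:])
//
//		context.allocator = arena_allocator(&arena)
//
//		tmp := begin_arena_temp_memory(&arena)
//		xs := make([]int, 128) // served from the arena
//		_ = xs
//		end_arena_temp_memory(tmp) // arena.offset is restored; xs must no longer be used
//	}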
Scratch_Allocator :: struct {
	data: []byte,
	curr_offset: int,
	prev_allocation: rawptr,
	backup_allocator: Allocator,
	leaked_allocations: [dynamic][]byte,
}

scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) -> Allocator_Error {
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
	s.curr_offset = 0
	s.prev_allocation = nil
	s.backup_allocator = backup_allocator
	s.leaked_allocations.allocator = backup_allocator
	return nil
}

scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
	if s == nil {
		return
	}
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator)
	}
	delete(s.leaked_allocations)
	delete(s.data, s.backup_allocator)
	s^ = {}
}

scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                               size, alignment: int,
                               old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	s := (^Scratch_Allocator)(allocator_data)

	if s.data == nil {
		DEFAULT_BACKING_SIZE :: 4 * Megabyte
		if !(context.allocator.procedure != scratch_allocator_proc &&
		     context.allocator.data != allocator_data) {
			panic("cyclic initialization of the scratch allocator with itself")
		}
		scratch_allocator_init(s, DEFAULT_BACKING_SIZE)
	}

	size := size

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		size = align_forward_int(size, alignment)

		switch {
		case s.curr_offset+size <= len(s.data):
			start := uintptr(raw_data(s.data))
			ptr := start + uintptr(s.curr_offset)
			ptr = align_forward_uintptr(ptr, uintptr(alignment))
			if mode != .Alloc_Non_Zeroed {
				zero(rawptr(ptr), size)
			}

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil

		case size <= len(s.data):
			start := uintptr(raw_data(s.data))
			ptr := align_forward_uintptr(start, uintptr(alignment))
			if mode != .Alloc_Non_Zeroed {
				zero(rawptr(ptr), size)
			}

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil
		}

		a := s.backup_allocator
		if a.procedure == nil {
			a = context.allocator
			s.backup_allocator = a
		}

		ptr, err := alloc_bytes(size, alignment, a, loc)
		if err != nil {
			return ptr, err
		}
		if s.leaked_allocations == nil {
			s.leaked_allocations, err = make([dynamic][]byte, a)
		}
		append(&s.leaked_allocations, ptr)

		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc)
			}
		}

		return ptr, err

	case .Free:
		if old_memory == nil {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)

		if s.prev_allocation == old_memory {
			s.curr_offset = int(uintptr(s.prev_allocation) - start)
			s.prev_allocation = nil
			return nil, nil
		}

		if start <= old_ptr && old_ptr < end {
			// NOTE(bill): Cannot free this pointer but it is valid
			return nil, nil
		}

		if len(s.leaked_allocations) != 0 {
			for data, i in s.leaked_allocations {
				ptr := raw_data(data)
				if ptr == old_memory {
					free_bytes(data, s.backup_allocator)
					ordered_remove(&s.leaked_allocations, i)
					return nil, nil
				}
			}
		}
		return nil, .Invalid_Pointer
		// panic("invalid pointer passed to default_temp_allocator");

	case .Free_All:
		s.curr_offset = 0
		s.prev_allocation = nil
		for ptr in s.leaked_allocations {
			free_bytes(ptr, s.backup_allocator)
		}
		clear(&s.leaked_allocations)

	case .Resize:
		begin := uintptr(raw_data(s.data))
		end := begin + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)
		if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
			s.curr_offset = int(old_ptr-begin)+size
			return byte_slice(old_memory, size), nil
		}
		data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc)
		if err != nil {
			return data, err
		}
		runtime.copy(data, byte_slice(old_memory, old_size))
		_, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc)
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

@(require_results)
scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
	return Allocator{
		procedure = scratch_allocator_proc,
		data = allocator,
	}
}
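
// Usage sketch (illustrative; `example_scratch_usage` is a hypothetical name):
// initialise a Scratch_Allocator with a fixed backing size and release everything
// in one go with `free_all`. Allocations that do not fit in the backing buffer
// fall back to `backup_allocator` and are remembered in `leaked_allocations`.
//
//	example_scratch_usage :: proc() {
//		scratch: Scratch_Allocator
//		_ = scratch_allocator_init(&scratch, 64 * Kilobyte)
//		defer scratch_allocator_destroy(&scratch)
//
//		context.temp_allocator = scratch_allocator(&scratch)
//
//		buf := make([]byte, 256, context.temp_allocator)
//		_ = buf
//		free_all(context.temp_allocator)
//	}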
Stack_Allocation_Header :: struct {
	prev_offset: int,
	padding: int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
	data: []byte,
	prev_offset: int,
	curr_offset: int,
	peak_used: int,
}

stack_init :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'mem.stack_init'")
init_stack :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

@(require_results)
stack_allocator :: proc(stack: ^Stack) -> Allocator {
	return Allocator{
		procedure = stack_allocator_proc,
		data = stack,
	}
}

stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	raw_alloc :: proc(s: ^Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header))
		if s.curr_offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.prev_offset = s.curr_offset
		s.curr_offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
		header.padding = padding
		header.prev_offset = s.prev_offset

		s.curr_offset += size
		s.peak_used = max(s.peak_used, s.curr_offset)

		if zero_memory {
			zero(rawptr(next_addr), size)
		}
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return raw_alloc(s, size, alignment, mode == .Alloc)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (free)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			// panic("Out of order stack allocator free");
			return nil, .Invalid_Pointer
		}

		s.curr_offset = old_offset
		s.prev_offset = header.prev_offset

	case .Free_All:
		s.prev_offset = 0
		s.curr_offset = 0

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, alignment, true)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (resize)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			data, err := raw_alloc(s, size, alignment, true)
			if err == nil {
				runtime.copy(data, byte_slice(old_memory, old_size))
			}
			return data, err
		}

		old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
		assert(old_memory_size == uintptr(old_size))

		diff := size - old_size
		s.curr_offset += diff // works for smaller sizes too
		if diff > 0 {
			zero(rawptr(curr_addr + uintptr(old_size)), diff) // zero only the newly grown region
		}
		return byte_slice(old_memory, size), nil

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}
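
// Usage sketch (illustrative; `example_stack_usage` is a hypothetical name): a
// Stack enforces its strict freeing order, so frees should happen in reverse
// order of the allocations; out-of-order frees return .Invalid_Pointer.
//
//	example_stack_usage :: proc() {
//		backing: [1024]byte
//		stack: Stack
//		stack_init(&stack, backing[:])
//
//		context.allocator = stack_allocator(&stack)
//
//		a := new(int)
//		b := new(f64)
//		free(b) // most recent allocation is freed first
//		free(a)
//	}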
Small_Stack_Allocation_Header :: struct {
	padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order
Small_Stack :: struct {
	data: []byte,
	offset: int,
	peak_used: int,
}

small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'small_stack_init'")
init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

@(require_results)
small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
	return Allocator{
		procedure = small_stack_allocator_proc,
		data = stack,
	}
}

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                   size, alignment: int,
                                   old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Small_Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)

	raw_alloc :: proc(s: ^Small_Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
		if s.offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
		header.padding = auto_cast padding

		s.offset += size
		s.peak_used = max(s.peak_used, s.offset)

		if zero_memory {
			zero(rawptr(next_addr), size)
		}
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return raw_alloc(s, size, align, mode == .Alloc)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (free)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		s.offset = old_offset

	case .Free_All:
		s.offset = 0

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, align, true)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (resize)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Treat as a double free
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		data, err := raw_alloc(s, size, align, true)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}
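
// Usage sketch (illustrative; `example_small_stack_usage` is a hypothetical
// name): Small_Stack stores only a one-byte padding header, so freeing is not
// order-checked; freeing a block rewinds the offset to that block, implicitly
// discarding anything allocated after it.
//
//	example_small_stack_usage :: proc() {
//		backing: [1024]byte
//		stack: Small_Stack
//		small_stack_init(&stack, backing[:])
//
//		context.allocator = small_stack_allocator(&stack)
//
//		p := new(int)
//		free(p)
//	}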
Dynamic_Pool :: struct {
	block_size: int,
	out_band_size: int,
	alignment: int,

	unused_blocks: [dynamic]rawptr,
	used_blocks: [dynamic]rawptr,
	out_band_allocations: [dynamic]rawptr,

	current_block: rawptr,
	current_pos: rawptr,
	bytes_left: int,

	block_allocator: Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: 65536
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554

dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	pool := (^Dynamic_Pool)(allocator_data)

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return dynamic_pool_alloc_bytes(pool, size)

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		dynamic_pool_free_all(pool)
		return nil, nil

	case .Resize:
		if old_size >= size {
			return byte_slice(old_memory, size), nil
		}
		data, err := dynamic_pool_alloc_bytes(pool, size)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			info.size = pool.block_size
			info.alignment = pool.alignment
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}

	return nil, nil
}

@(require_results)
dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
	return Allocator{
		procedure = dynamic_pool_allocator_proc,
		data = pool,
	}
}

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
                          block_allocator := context.allocator,
                          array_allocator := context.allocator,
                          block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
                          out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
                          alignment := 8) {
	pool.block_size = block_size
	pool.out_band_size = out_band_size
	pool.alignment = alignment
	pool.block_allocator = block_allocator
	pool.out_band_allocations.allocator = array_allocator
	pool.unused_blocks.allocator = array_allocator
	pool.used_blocks.allocator = array_allocator
}

dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) {
	dynamic_pool_free_all(pool)
	delete(pool.unused_blocks)
	delete(pool.used_blocks)
	delete(pool.out_band_allocations)

	zero(pool, size_of(pool^))
}

@(require_results)
dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> (rawptr, Allocator_Error) {
	data, err := dynamic_pool_alloc_bytes(pool, bytes)
	return raw_data(data), err
}

@(require_results)
dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
	cycle_new_block :: proc(p: ^Dynamic_Pool) -> (err: Allocator_Error) {
		if p.block_allocator.procedure == nil {
			panic("You must call pool_init on a Pool before using it")
		}

		if p.current_block != nil {
			append(&p.used_blocks, p.current_block)
		}

		new_block: rawptr
		if len(p.unused_blocks) > 0 {
			new_block = pop(&p.unused_blocks)
		} else {
			data: []byte
			data, err = p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
				p.block_size, p.alignment,
				nil, 0)
			new_block = raw_data(data)
		}

		p.bytes_left = p.block_size
		p.current_pos = new_block
		p.current_block = new_block
		return
	}

	n := bytes
	extra := p.alignment - (n % p.alignment)
	n += extra
	if n > p.block_size {
		return nil, .Invalid_Argument
	}

	if n >= p.out_band_size {
		assert(p.block_allocator.procedure != nil)
		memory, err := p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
			p.block_size, p.alignment,
			nil, 0)
		if memory != nil {
			append(&p.out_band_allocations, raw_data(memory))
		}
		return memory, err
	}

	if p.bytes_left < n {
		err := cycle_new_block(p)
		if err != nil {
			return nil, err
		}
		if p.current_block == nil {
			return nil, .Out_Of_Memory
		}
	}

	memory := p.current_pos
	p.current_pos = ([^]byte)(p.current_pos)[n:]
	p.bytes_left -= n
	return ([^]byte)(memory)[:bytes], nil
}

dynamic_pool_reset :: proc(p: ^Dynamic_Pool) {
	if p.current_block != nil {
		append(&p.unused_blocks, p.current_block)
		p.current_block = nil
	}

	for block in p.used_blocks {
		append(&p.unused_blocks, block)
	}
	clear(&p.used_blocks)

	for a in p.out_band_allocations {
		free(a, p.block_allocator)
	}
	clear(&p.out_band_allocations)

	p.bytes_left = 0 // Make new allocations call `cycle_new_block` again.
}

dynamic_pool_free_all :: proc(p: ^Dynamic_Pool) {
	dynamic_pool_reset(p)

	for block in p.unused_blocks {
		free(block, p.block_allocator)
	}
	clear(&p.unused_blocks)
}
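
// Usage sketch (illustrative; `example_dynamic_pool_usage` is a hypothetical
// name): a Dynamic_Pool grows in `block_size` chunks taken from
// `block_allocator`; individual frees are not supported, only
// `dynamic_pool_free_all`/`dynamic_pool_reset`.
//
//	example_dynamic_pool_usage :: proc() {
//		pool: Dynamic_Pool
//		dynamic_pool_init(&pool)
//		defer dynamic_pool_destroy(&pool)
//
//		context.allocator = dynamic_pool_allocator(&pool)
//
//		node := new(int)
//		_ = node
//		dynamic_pool_free_all(&pool)
//	}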
panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		if size > 0 {
			panic("mem: panic allocator, .Alloc called", loc=loc)
		}
	case .Alloc_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc)
		}
	case .Resize:
		if size > 0 {
			panic("mem: panic allocator, .Resize called", loc=loc)
		}
	case .Free:
		if old_memory != nil {
			panic("mem: panic allocator, .Free called", loc=loc)
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called", loc=loc)

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features}
		}
		return nil, nil

	case .Query_Info:
		panic("mem: panic allocator, .Query_Info called", loc=loc)
	}

	return nil, nil
}

@(require_results)
panic_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = panic_allocator_proc,
		data = nil,
	}
}
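
// Usage sketch (illustrative; `no_alloc_scope` is a hypothetical name): install
// the panic allocator in scopes where any allocation would be a bug; non-trivial
// allocator calls then panic with the caller's source location.
//
//	no_alloc_scope :: proc() {
//		context.allocator = panic_allocator()
//		// anything that allocates through context.allocator now panics
//	}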
Tracking_Allocator_Entry :: struct {
	memory: rawptr,
	size: int,
	alignment: int,
	mode: Allocator_Mode,
	err: Allocator_Error,
	location: runtime.Source_Code_Location,
}

Tracking_Allocator_Bad_Free_Entry :: struct {
	memory: rawptr,
	location: runtime.Source_Code_Location,
}

Tracking_Allocator :: struct {
	backing: Allocator,
	allocation_map: map[rawptr]Tracking_Allocator_Entry,
	bad_free_array: [dynamic]Tracking_Allocator_Bad_Free_Entry,
	mutex: sync.Mutex,
	clear_on_free_all: bool,
}

tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
	t.backing = backing_allocator
	t.allocation_map.allocator = internals_allocator
	t.bad_free_array.allocator = internals_allocator

	if .Free_All in query_features(t.backing) {
		t.clear_on_free_all = true
	}
}

tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
	delete(t.allocation_map)
	delete(t.bad_free_array)
}

tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
	sync.mutex_lock(&t.mutex)
	clear(&t.allocation_map)
	clear(&t.bad_free_array)
	sync.mutex_unlock(&t.mutex)
}

@(require_results)
tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
	return Allocator{
		data = data,
		procedure = tracking_allocator_proc,
	}
}

tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                size, alignment: int,
                                old_memory: rawptr, old_size: int, loc := #caller_location) -> (result: []byte, err: Allocator_Error) {
	data := (^Tracking_Allocator)(allocator_data)
	sync.mutex_guard(&data.mutex)

	if mode == .Query_Info {
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			if entry, ok := data.allocation_map[info.pointer]; ok {
				info.size = entry.size
				info.alignment = entry.alignment
			}
			info.pointer = nil
		}
		return
	}

	if mode == .Free && old_memory != nil && old_memory not_in data.allocation_map {
		append(&data.bad_free_array, Tracking_Allocator_Bad_Free_Entry{
			memory = old_memory,
			location = loc,
		})
	} else {
		result = data.backing.procedure(data.backing.data, mode, size, alignment, old_memory, old_size, loc) or_return
	}
	result_ptr := raw_data(result)

	if data.allocation_map.allocator.procedure == nil {
		data.allocation_map.allocator = context.allocator
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			mode = mode,
			alignment = alignment,
			err = err,
			location = loc,
		}
	case .Free:
		delete_key(&data.allocation_map, old_memory)
	case .Free_All:
		if data.clear_on_free_all {
			clear_map(&data.allocation_map)
		}
	case .Resize:
		if old_memory != result_ptr {
			delete_key(&data.allocation_map, old_memory)
		}
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			mode = mode,
			alignment = alignment,
			err = err,
			location = loc,
		}

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		unreachable()
	}

	return
}
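
// Usage sketch (illustrative; `example_tracking_usage` is a hypothetical name):
// wrap an existing allocator in a Tracking_Allocator to record live allocations
// and bad frees, then inspect `allocation_map` for leaks and `bad_free_array`
// for frees of untracked pointers.
//
//	example_tracking_usage :: proc() {
//		track: Tracking_Allocator
//		tracking_allocator_init(&track, context.allocator)
//		defer tracking_allocator_destroy(&track)
//
//		context.allocator = tracking_allocator(&track)
//
//		// ... run code that allocates ...
//
//		for _, entry in track.allocation_map {
//			_ = entry // entry.location is the allocation site of a leaked block of entry.size bytes
//		}
//		for bad in track.bad_free_array {
//			_ = bad // bad.location is where untracked memory was freed
//		}
//	}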