allocators.odin

package mem

import "base:intrinsics"
import "base:runtime"
import "core:sync"

nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                           size, alignment: int,
                           old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	return nil, nil
}

nil_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = nil_allocator_proc,
		data = nil,
	}
}

// Custom allocators

Arena :: struct {
	data:       []byte,
	offset:     int,
	peak_used:  int,
	temp_count: int,
}

Arena_Temp_Memory :: struct {
	arena:       ^Arena,
	prev_offset: int,
}

arena_init :: proc(a: ^Arena, data: []byte) {
	a.data       = data
	a.offset     = 0
	a.peak_used  = 0
	a.temp_count = 0
}

@(deprecated="prefer 'mem.arena_init'")
init_arena :: proc(a: ^Arena, data: []byte) {
	a.data       = data
	a.offset     = 0
	a.peak_used  = 0
	a.temp_count = 0
}

@(require_results)
arena_allocator :: proc(arena: ^Arena) -> Allocator {
	return Allocator{
		procedure = arena_allocator_proc,
		data = arena,
	}
}

arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	arena := cast(^Arena)allocator_data

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		#no_bounds_check end := &arena.data[arena.offset]

		ptr := align_forward(end, uintptr(alignment))
		total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))

		if arena.offset + total_size > len(arena.data) {
			return nil, .Out_Of_Memory
		}

		arena.offset += total_size
		arena.peak_used = max(arena.peak_used, arena.offset)
		if mode != .Alloc_Non_Zeroed {
			zero(ptr, size)
		}
		return byte_slice(ptr, size), nil

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		arena.offset = 0

	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))

	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

@(require_results)
begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	tmp: Arena_Temp_Memory
	tmp.arena = a
	tmp.prev_offset = a.offset
	a.temp_count += 1
	return tmp
}

end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
	assert(tmp.arena.offset >= tmp.prev_offset)
	assert(tmp.arena.temp_count > 0)
	tmp.arena.offset = tmp.prev_offset
	tmp.arena.temp_count -= 1
}
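
/*
	Example (illustrative sketch added by the editor, not part of the upstream file):
	backing a procedure's temporary allocations with a fixed-size `Arena`.
	The buffer size and the use of `context.allocator` are assumptions made
	for the example only.

	arena_example :: proc() {
		backing: [1024]byte
		arena: Arena
		arena_init(&arena, backing[:])

		context.allocator = arena_allocator(&arena)

		// Allocations now come out of `backing` until it is exhausted.
		xs := make([]int, 16)
		_ = xs

		// Everything allocated between begin/end_arena_temp_memory is released
		// again when the temp region ends.
		tmp := begin_arena_temp_memory(&arena)
		ys := make([]f32, 32)
		_ = ys
		end_arena_temp_memory(tmp)

		free_all() // .Free_All resets `arena.offset` to 0
	}
*/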

Scratch_Allocator :: struct {
	data:               []byte,
	curr_offset:        int,
	prev_allocation:    rawptr,
	backup_allocator:   Allocator,
	leaked_allocations: [dynamic][]byte,
}

scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) -> Allocator_Error {
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
	s.curr_offset = 0
	s.prev_allocation = nil
	s.backup_allocator = backup_allocator
	s.leaked_allocations.allocator = backup_allocator
	return nil
}

scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
	if s == nil {
		return
	}
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator)
	}
	delete(s.leaked_allocations)
	delete(s.data, s.backup_allocator)
	s^ = {}
}

scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                               size, alignment: int,
                               old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	s := (^Scratch_Allocator)(allocator_data)

	if s.data == nil {
		DEFAULT_BACKING_SIZE :: 4 * Megabyte
		if !(context.allocator.procedure != scratch_allocator_proc &&
		     context.allocator.data != allocator_data) {
			panic("cyclic initialization of the scratch allocator with itself")
		}
		scratch_allocator_init(s, DEFAULT_BACKING_SIZE)
	}

	size := size

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		size = align_forward_int(size, alignment)

		switch {
		case s.curr_offset+size <= len(s.data):
			start := uintptr(raw_data(s.data))
			ptr := start + uintptr(s.curr_offset)
			ptr = align_forward_uintptr(ptr, uintptr(alignment))
			if mode != .Alloc_Non_Zeroed {
				zero(rawptr(ptr), size)
			}

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil

		case size <= len(s.data):
			start := uintptr(raw_data(s.data))
			ptr := align_forward_uintptr(start, uintptr(alignment))
			if mode != .Alloc_Non_Zeroed {
				zero(rawptr(ptr), size)
			}

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil
		}

		a := s.backup_allocator
		if a.procedure == nil {
			a = context.allocator
			s.backup_allocator = a
		}

		ptr, err := alloc_bytes(size, alignment, a, loc)
		if err != nil {
			return ptr, err
		}
		if s.leaked_allocations == nil {
			s.leaked_allocations, err = make([dynamic][]byte, a)
		}
		append(&s.leaked_allocations, ptr)

		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc)
			}
		}

		return ptr, err

	case .Free:
		if old_memory == nil {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)

		if s.prev_allocation == old_memory {
			s.curr_offset = int(uintptr(s.prev_allocation) - start)
			s.prev_allocation = nil
			return nil, nil
		}

		if start <= old_ptr && old_ptr < end {
			// NOTE(bill): Cannot free this pointer but it is valid
			return nil, nil
		}

		if len(s.leaked_allocations) != 0 {
			for data, i in s.leaked_allocations {
				ptr := raw_data(data)
				if ptr == old_memory {
					free_bytes(data, s.backup_allocator)
					ordered_remove(&s.leaked_allocations, i)
					return nil, nil
				}
			}
		}
		return nil, .Invalid_Pointer
		// panic("invalid pointer passed to default_temp_allocator");

	case .Free_All:
		s.curr_offset = 0
		s.prev_allocation = nil
		for ptr in s.leaked_allocations {
			free_bytes(ptr, s.backup_allocator)
		}
		clear(&s.leaked_allocations)

	case .Resize, .Resize_Non_Zeroed:
		begin := uintptr(raw_data(s.data))
		end := begin + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)
		if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
			s.curr_offset = int(old_ptr-begin)+size
			return byte_slice(old_memory, size), nil
		}
		data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc)
		if err != nil {
			return data, err
		}
		runtime.copy(data, byte_slice(old_memory, old_size))
		_, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc)
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

@(require_results)
scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
	return Allocator{
		procedure = scratch_allocator_proc,
		data = allocator,
	}
}
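
/*
	Example (illustrative sketch added by the editor, not part of the upstream file):
	a scratch allocator for short-lived, per-frame allocations. The 1 MiB size
	is an arbitrary assumption. Anything that does not fit in the buffer falls
	through to the backup allocator and is recorded in `leaked_allocations`.

	scratch_example :: proc() {
		scratch: Scratch_Allocator
		scratch_allocator_init(&scratch, 1*Megabyte)
		defer scratch_allocator_destroy(&scratch)

		context.temp_allocator = scratch_allocator(&scratch)

		buf := make([]byte, 256, context.temp_allocator)
		_ = buf

		free_all(context.temp_allocator) // discard everything at a frame boundary
	}
*/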

Stack_Allocation_Header :: struct {
	prev_offset: int,
	padding:     int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
	data:        []byte,
	prev_offset: int,
	curr_offset: int,
	peak_used:   int,
}

stack_init :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'mem.stack_init'")
init_stack :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

@(require_results)
stack_allocator :: proc(stack: ^Stack) -> Allocator {
	return Allocator{
		procedure = stack_allocator_proc,
		data = stack,
	}
}

stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	raw_alloc :: proc(s: ^Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header))
		if s.curr_offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.prev_offset = s.curr_offset
		s.curr_offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
		header.padding = padding
		header.prev_offset = s.prev_offset

		s.curr_offset += size
		s.peak_used = max(s.peak_used, s.curr_offset)

		if zero_memory {
			zero(rawptr(next_addr), size)
		}
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return raw_alloc(s, size, alignment, mode == .Alloc)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (free)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			// panic("Out of order stack allocator free");
			return nil, .Invalid_Pointer
		}

		s.curr_offset = old_offset
		s.prev_offset = header.prev_offset

	case .Free_All:
		s.prev_offset = 0
		s.curr_offset = 0

	case .Resize, .Resize_Non_Zeroed:
		if old_memory == nil {
			return raw_alloc(s, size, alignment, mode == .Resize)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (resize)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			data, err := raw_alloc(s, size, alignment, mode == .Resize)
			if err == nil {
				runtime.copy(data, byte_slice(old_memory, old_size))
			}
			return data, err
		}

		old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
		assert(old_memory_size == uintptr(old_size))

		diff := size - old_size
		s.curr_offset += diff // works for smaller sizes too
		if diff > 0 {
			// Zero only the newly grown tail, which starts `old_size` bytes into the block.
			zero(rawptr(curr_addr + uintptr(old_size)), diff)
		}

		return byte_slice(old_memory, size), nil

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}
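
/*
	Example (illustrative sketch added by the editor, not part of the upstream file):
	a `Stack` enforces LIFO freeing; freeing a block that is not the most
	recent allocation returns .Invalid_Pointer. Buffer and sizes are
	arbitrary assumptions for the example.

	stack_example :: proc() {
		backing: [512]byte
		stack: Stack
		stack_init(&stack, backing[:])

		allocator := stack_allocator(&stack)

		a, _ := alloc(64, allocator=allocator)
		b, _ := alloc(64, allocator=allocator)

		free(b, allocator) // ok: `b` is the top of the stack
		free(a, allocator) // ok: now `a` is the top
	}
*/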

Small_Stack_Allocation_Header :: struct {
	padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order
Small_Stack :: struct {
	data:      []byte,
	offset:    int,
	peak_used: int,
}

small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'small_stack_init'")
init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

@(require_results)
small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
	return Allocator{
		procedure = small_stack_allocator_proc,
		data = stack,
	}
}

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                   size, alignment: int,
                                   old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Small_Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)

	raw_alloc :: proc(s: ^Small_Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
		if s.offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
		header.padding = auto_cast padding

		s.offset += size
		s.peak_used = max(s.peak_used, s.offset)

		if zero_memory {
			zero(rawptr(next_addr), size)
		}
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return raw_alloc(s, size, align, mode == .Alloc)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (free)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		s.offset = old_offset

	case .Free_All:
		s.offset = 0

	case .Resize, .Resize_Non_Zeroed:
		if old_memory == nil {
			return raw_alloc(s, size, align, mode == .Resize)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (resize)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Treat as a double free
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		data, err := raw_alloc(s, size, align, mode == .Resize)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}
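
/*
	Example (illustrative sketch added by the editor, not part of the upstream file):
	`Small_Stack` trades the strict LIFO-free check of `Stack` for a one-byte
	header, so frees are accepted in any order (each free simply rewinds the
	offset to the freed block).

	small_stack_example :: proc() {
		backing: [256]byte
		stack: Small_Stack
		small_stack_init(&stack, backing[:])

		allocator := small_stack_allocator(&stack)
		p, _ := alloc(32, allocator=allocator)
		free(p, allocator)
	}
*/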

Dynamic_Pool :: struct {
	block_size:    int,
	out_band_size: int,
	alignment:     int,

	unused_blocks:        [dynamic]rawptr,
	used_blocks:          [dynamic]rawptr,
	out_band_allocations: [dynamic]rawptr,

	current_block: rawptr,
	current_pos:   rawptr,
	bytes_left:    int,

	block_allocator: Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT       :: 65536
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554

dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	pool := (^Dynamic_Pool)(allocator_data)

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return dynamic_pool_alloc_bytes(pool, size)

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		dynamic_pool_free_all(pool)
		return nil, nil

	case .Resize, .Resize_Non_Zeroed:
		if old_size >= size {
			return byte_slice(old_memory, size), nil
		}
		data, err := dynamic_pool_alloc_bytes(pool, size)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			info.size = pool.block_size
			info.alignment = pool.alignment
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}

	return nil, nil
}

@(require_results)
dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
	return Allocator{
		procedure = dynamic_pool_allocator_proc,
		data = pool,
	}
}

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
                          block_allocator := context.allocator,
                          array_allocator := context.allocator,
                          block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
                          out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
                          alignment := 8) {
	pool.block_size = block_size
	pool.out_band_size = out_band_size
	pool.alignment = alignment
	pool.block_allocator = block_allocator
	pool.out_band_allocations.allocator = array_allocator
	pool.unused_blocks.allocator = array_allocator
	pool.used_blocks.allocator = array_allocator
}

dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) {
	dynamic_pool_free_all(pool)
	delete(pool.unused_blocks)
	delete(pool.used_blocks)
	delete(pool.out_band_allocations)

	zero(pool, size_of(pool^))
}

@(require_results)
dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> (rawptr, Allocator_Error) {
	data, err := dynamic_pool_alloc_bytes(pool, bytes)
	return raw_data(data), err
}

@(require_results)
dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
	cycle_new_block :: proc(p: ^Dynamic_Pool) -> (err: Allocator_Error) {
		if p.block_allocator.procedure == nil {
			panic("You must call pool_init on a Pool before using it")
		}

		if p.current_block != nil {
			append(&p.used_blocks, p.current_block)
		}

		new_block: rawptr
		if len(p.unused_blocks) > 0 {
			new_block = pop(&p.unused_blocks)
		} else {
			data: []byte
			data, err = p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
			                                        p.block_size, p.alignment,
			                                        nil, 0)
			new_block = raw_data(data)
		}

		p.bytes_left = p.block_size
		p.current_pos = new_block
		p.current_block = new_block
		return
	}

	n := bytes
	extra := p.alignment - (n % p.alignment)
	n += extra
	if n > p.block_size {
		return nil, .Invalid_Argument
	}

	if n >= p.out_band_size {
		assert(p.block_allocator.procedure != nil)
		memory, err := p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
		                                           p.block_size, p.alignment,
		                                           nil, 0)
		if memory != nil {
			append(&p.out_band_allocations, raw_data(memory))
		}
		return memory, err
	}

	if p.bytes_left < n {
		err := cycle_new_block(p)
		if err != nil {
			return nil, err
		}
		if p.current_block == nil {
			return nil, .Out_Of_Memory
		}
	}

	memory := p.current_pos
	p.current_pos = ([^]byte)(p.current_pos)[n:]
	p.bytes_left -= n
	return ([^]byte)(memory)[:bytes], nil
}

dynamic_pool_reset :: proc(p: ^Dynamic_Pool) {
	if p.current_block != nil {
		append(&p.unused_blocks, p.current_block)
		p.current_block = nil
	}

	for block in p.used_blocks {
		append(&p.unused_blocks, block)
	}
	clear(&p.used_blocks)

	for a in p.out_band_allocations {
		free(a, p.block_allocator)
	}
	clear(&p.out_band_allocations)

	p.bytes_left = 0 // Make new allocations call `cycle_new_block` again.
}

dynamic_pool_free_all :: proc(p: ^Dynamic_Pool) {
	dynamic_pool_reset(p)

	for block in p.unused_blocks {
		free(block, p.block_allocator)
	}
	clear(&p.unused_blocks)
}
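
/*
	Example (illustrative sketch added by the editor, not part of the upstream file):
	a growing pool that carves small allocations out of 64 KiB blocks obtained
	from `context.allocator` (the defaults of `dynamic_pool_init`), and
	recycles those blocks on `free_all`.

	dynamic_pool_example :: proc() {
		pool: Dynamic_Pool
		dynamic_pool_init(&pool)
		defer dynamic_pool_destroy(&pool)

		allocator := dynamic_pool_allocator(&pool)
		node, _ := alloc(size_of(int)*128, allocator=allocator)
		_ = node

		free_all(allocator) // returns all blocks to the pool's free list
	}
*/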

panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		if size > 0 {
			panic("mem: panic allocator, .Alloc called", loc=loc)
		}
	case .Alloc_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc)
		}
	case .Resize:
		if size > 0 {
			panic("mem: panic allocator, .Resize called", loc=loc)
		}
	case .Resize_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Resize_Non_Zeroed called", loc=loc)
		}
	case .Free:
		if old_memory != nil {
			panic("mem: panic allocator, .Free called", loc=loc)
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called", loc=loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features}
		}
		return nil, nil
	case .Query_Info:
		panic("mem: panic allocator, .Query_Info called", loc=loc)
	}

	return nil, nil
}

@(require_results)
panic_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = panic_allocator_proc,
		data = nil,
	}
}
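
/*
	Example (illustrative sketch added by the editor, not part of the upstream file):
	installing the panic allocator to assert that a code path performs no
	allocations at all.

	no_alloc_section :: proc() {
		context.allocator = panic_allocator()
		// Any non-trivial allocation in here now panics, reporting the caller's location.
	}
*/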

Tracking_Allocator_Entry :: struct {
	memory:    rawptr,
	size:      int,
	alignment: int,
	mode:      Allocator_Mode,
	err:       Allocator_Error,
	location:  runtime.Source_Code_Location,
}

Tracking_Allocator_Bad_Free_Entry :: struct {
	memory:   rawptr,
	location: runtime.Source_Code_Location,
}

Tracking_Allocator :: struct {
	backing:           Allocator,
	allocation_map:    map[rawptr]Tracking_Allocator_Entry,
	bad_free_array:    [dynamic]Tracking_Allocator_Bad_Free_Entry,
	mutex:             sync.Mutex,
	clear_on_free_all: bool,
}

tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
	t.backing = backing_allocator
	t.allocation_map.allocator = internals_allocator
	t.bad_free_array.allocator = internals_allocator

	if .Free_All in query_features(t.backing) {
		t.clear_on_free_all = true
	}
}

tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
	delete(t.allocation_map)
	delete(t.bad_free_array)
}

tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
	sync.mutex_lock(&t.mutex)
	clear(&t.allocation_map)
	clear(&t.bad_free_array)
	sync.mutex_unlock(&t.mutex)
}

@(require_results)
tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
	return Allocator{
		data = data,
		procedure = tracking_allocator_proc,
	}
}

tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                size, alignment: int,
                                old_memory: rawptr, old_size: int, loc := #caller_location) -> (result: []byte, err: Allocator_Error) {
	data := (^Tracking_Allocator)(allocator_data)

	sync.mutex_guard(&data.mutex)

	if mode == .Query_Info {
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			if entry, ok := data.allocation_map[info.pointer]; ok {
				info.size = entry.size
				info.alignment = entry.alignment
			}
			info.pointer = nil
		}
		return
	}

	if mode == .Free && old_memory != nil && old_memory not_in data.allocation_map {
		append(&data.bad_free_array, Tracking_Allocator_Bad_Free_Entry{
			memory = old_memory,
			location = loc,
		})
	} else {
		result = data.backing.procedure(data.backing.data, mode, size, alignment, old_memory, old_size, loc) or_return
	}
	result_ptr := raw_data(result)

	if data.allocation_map.allocator.procedure == nil {
		data.allocation_map.allocator = context.allocator
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			mode = mode,
			alignment = alignment,
			err = err,
			location = loc,
		}
	case .Free:
		delete_key(&data.allocation_map, old_memory)
	case .Free_All:
		if data.clear_on_free_all {
			clear_map(&data.allocation_map)
		}
	case .Resize, .Resize_Non_Zeroed:
		if old_memory != result_ptr {
			delete_key(&data.allocation_map, old_memory)
		}
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			mode = mode,
			alignment = alignment,
			err = err,
			location = loc,
		}

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		unreachable()
	}

	return
}
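
/*
	Example (illustrative sketch added by the editor, not part of the upstream file):
	wrapping `context.allocator` with a tracking allocator to report leaks and
	bad frees at the end of a program.

	track_example :: proc() {
		track: Tracking_Allocator
		tracking_allocator_init(&track, context.allocator)
		defer tracking_allocator_destroy(&track)

		context.allocator = tracking_allocator(&track)

		// ... run the program ...

		for _, entry in track.allocation_map {
			_ = entry // `entry.location` is where the leaked allocation was made
		}
		for bad in track.bad_free_array {
			_ = bad // pointers freed without a matching allocation through `track`
		}
	}
*/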