package mem

import "core:intrinsics"
import "core:runtime"
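
// The nil allocator holds no state and never provides memory: .Alloc and a
// growing .Resize return .Out_Of_Memory, .Free is a no-op, and the query
// modes report .Mode_Not_Implemented. It is useful wherever an Allocator
// value is required but allocation must not succeed.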
nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                           size, alignment: int,
                           old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		return nil, .Out_Of_Memory
	case .Free:
		return nil, .None
	case .Free_All:
		return nil, .Mode_Not_Implemented
	case .Resize:
		if size == 0 {
			return nil, .None
		}
		return nil, .Out_Of_Memory
	case .Query_Features:
		return nil, .Mode_Not_Implemented
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return nil, .None
}

nil_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = nil_allocator_proc,
		data = nil,
	}
}

// Custom allocators

Arena :: struct {
	data:       []byte,
	offset:     int,
	peak_used:  int,
	temp_count: int,
}

Arena_Temp_Memory :: struct {
	arena:       ^Arena,
	prev_offset: int,
}

init_arena :: proc(a: ^Arena, data: []byte) {
	a.data = data
	a.offset = 0
	a.peak_used = 0
	a.temp_count = 0
}

arena_allocator :: proc(arena: ^Arena) -> Allocator {
	return Allocator{
		procedure = arena_allocator_proc,
		data = arena,
	}
}
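
/*
Example: fixed-buffer Arena usage (illustrative sketch; `buf` and `x` are
hypothetical names, not part of this package):

	buf: [1024]byte
	arena: Arena
	init_arena(&arena, buf[:])
	context.allocator = arena_allocator(&arena)
	x := new(int) // carved from `buf`
	free_all()    // .Free_All resets arena.offset to 0
*/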

arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	arena := cast(^Arena)allocator_data

	switch mode {
	case .Alloc:
		total_size := size + alignment
		if arena.offset + total_size > len(arena.data) {
			return nil, .Out_Of_Memory
		}

		#no_bounds_check end := &arena.data[arena.offset]
		ptr := align_forward(end, uintptr(alignment))
		arena.offset += total_size
		arena.peak_used = max(arena.peak_used, arena.offset)
		zero(ptr, size)
		return byte_slice(ptr, size), nil

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		arena.offset = 0

	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	tmp: Arena_Temp_Memory
	tmp.arena = a
	tmp.prev_offset = a.offset
	a.temp_count += 1
	return tmp
}

end_arena_temp_memory :: proc(using tmp: Arena_Temp_Memory) {
	assert(arena.offset >= prev_offset)
	assert(arena.temp_count > 0)
	arena.offset = prev_offset
	arena.temp_count -= 1
}
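
/*
Example: transient allocations via temp-memory marks (illustrative sketch):

	tmp := begin_arena_temp_memory(&arena)
	// ... allocations from `arena` made here are transient ...
	end_arena_temp_memory(tmp) // rolls arena.offset back to the saved mark
*/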

Scratch_Allocator :: struct {
	data:               []byte,
	curr_offset:        int,
	prev_allocation:    rawptr,
	backup_allocator:   Allocator,
	leaked_allocations: [dynamic][]byte,
}

scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) -> Allocator_Error {
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
	s.curr_offset = 0
	s.prev_allocation = nil
	s.backup_allocator = backup_allocator
	s.leaked_allocations.allocator = backup_allocator
	return nil
}

scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
	if s == nil {
		return
	}
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator)
	}
	delete(s.leaked_allocations)
	delete(s.data, s.backup_allocator)
	s^ = {}
}

scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                               size, alignment: int,
                               old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	s := (^Scratch_Allocator)(allocator_data)

	if s.data == nil {
		DEFAULT_BACKING_SIZE :: 1<<22
		if !(context.allocator.procedure != scratch_allocator_proc &&
		     context.allocator.data != allocator_data) {
			panic("cyclic initialization of the scratch allocator with itself")
		}
		scratch_allocator_init(s, DEFAULT_BACKING_SIZE)
	}

	size := size

	switch mode {
	case .Alloc:
		size = align_forward_int(size, alignment)

		switch {
		case s.curr_offset+size <= len(s.data):
			// fits in the remaining space of the backing buffer
			start := uintptr(raw_data(s.data))
			ptr := start + uintptr(s.curr_offset)
			ptr = align_forward_uintptr(ptr, uintptr(alignment))
			zero(rawptr(ptr), size)

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil

		case size <= len(s.data):
			// wrap around to the start of the backing buffer,
			// overwriting older scratch allocations
			start := uintptr(raw_data(s.data))
			ptr := align_forward_uintptr(start, uintptr(alignment))
			zero(rawptr(ptr), size)

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil
		}
		// request is larger than the backing buffer: fall back to the backup allocator
		a := s.backup_allocator
		if a.procedure == nil {
			a = context.allocator
			s.backup_allocator = a
		}

		ptr, err := alloc_bytes(size, alignment, a, loc)
		if err != nil {
			return ptr, err
		}
		if s.leaked_allocations == nil {
			s.leaked_allocations, err = make([dynamic][]byte, a)
		}
		append(&s.leaked_allocations, ptr)

		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc)
			}
		}

		return ptr, err

	case .Free:
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)

		if s.prev_allocation == old_memory {
			s.curr_offset = int(uintptr(s.prev_allocation) - start)
			s.prev_allocation = nil
			return nil, nil
		}

		if start <= old_ptr && old_ptr < end {
			// NOTE(bill): Cannot free this pointer but it is valid
			return nil, nil
		}

		if len(s.leaked_allocations) != 0 {
			for data, i in s.leaked_allocations {
				ptr := raw_data(data)
				if ptr == old_memory {
					free_bytes(data, s.backup_allocator)
					ordered_remove(&s.leaked_allocations, i)
					return nil, nil
				}
			}
		}
		return nil, .Invalid_Pointer
		// panic("invalid pointer passed to default_temp_allocator");

	case .Free_All:
		s.curr_offset = 0
		s.prev_allocation = nil
		for ptr in s.leaked_allocations {
			free_bytes(ptr, s.backup_allocator)
		}
		clear(&s.leaked_allocations)

	case .Resize:
		begin := uintptr(raw_data(s.data))
		end := begin + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)
		if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
			s.curr_offset = int(old_ptr-begin)+size
			return byte_slice(old_memory, size), nil
		}
		data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc)
		if err != nil {
			return data, err
		}
		runtime.copy(data, byte_slice(old_memory, old_size))
		_, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc)
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
	return Allocator{
		procedure = scratch_allocator_proc,
		data = allocator,
	}
}
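
/*
Example: scratch allocator backed by a 4 MiB buffer (illustrative sketch;
the size and its use as the temp allocator are arbitrary choices):

	scratch: Scratch_Allocator
	scratch_allocator_init(&scratch, 4<<20) // 4 MiB
	defer scratch_allocator_destroy(&scratch)
	context.temp_allocator = scratch_allocator(&scratch)
*/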

Stack_Allocation_Header :: struct {
	prev_offset: int,
	padding:     int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
	data:        []byte,
	prev_offset: int,
	curr_offset: int,
	peak_used:   int,
}

init_stack :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

stack_allocator :: proc(stack: ^Stack) -> Allocator {
	return Allocator{
		procedure = stack_allocator_proc,
		data = stack,
	}
}

stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	raw_alloc :: proc(s: ^Stack, size, alignment: int) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header))
		if s.curr_offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.prev_offset = s.curr_offset
		s.curr_offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
		header.padding = padding
		header.prev_offset = s.prev_offset

		s.curr_offset += size
		s.peak_used = max(s.peak_used, s.curr_offset)
		zero(rawptr(next_addr), size)
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc:
		return raw_alloc(s, size, alignment)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (free)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			// panic("Out of order stack allocator free");
			return nil, .Invalid_Pointer
		}

		s.curr_offset = old_offset
		s.prev_offset = header.prev_offset

	case .Free_All:
		s.prev_offset = 0
		s.curr_offset = 0

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, alignment)
		}
		if size == 0 {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (resize)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			data, err := raw_alloc(s, size, alignment)
			if err == nil {
				runtime.copy(data, byte_slice(old_memory, old_size))
			}
			return data, err
		}

		old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
		assert(old_memory_size == uintptr(old_size))

		diff := size - old_size
		s.curr_offset += diff // works for smaller sizes too
		if diff > 0 {
			// zero only the newly grown region, which starts at old_size
			zero(rawptr(curr_addr + uintptr(old_size)), diff)
		}
		return byte_slice(old_memory, size), nil

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}
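
/*
Example: strict LIFO discipline of Stack (illustrative sketch; `buf`, `a`,
and `b` are hypothetical names):

	buf: [4096]byte
	stack: Stack
	init_stack(&stack, buf[:])
	allocator := stack_allocator(&stack)
	a := alloc(64, 16, allocator)
	b := alloc(128, 16, allocator)
	free(b, allocator) // frees must come in reverse allocation order,
	free(a, allocator) // otherwise .Free returns .Invalid_Pointer
*/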

Small_Stack_Allocation_Header :: struct {
	padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible
// header, at the cost of a non-strict memory freeing order
Small_Stack :: struct {
	data:      []byte,
	offset:    int,
	peak_used: int,
}

init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
	return Allocator{
		procedure = small_stack_allocator_proc,
		data = stack,
	}
}

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                   size, alignment: int,
                                   old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Small_Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)

	raw_alloc :: proc(s: ^Small_Stack, size, alignment: int) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
		if s.offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
		header.padding = auto_cast padding

		s.offset += size
		s.peak_used = max(s.peak_used, s.offset)
		zero(rawptr(next_addr), size)
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc:
		return raw_alloc(s, size, align)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (free)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
		s.offset = old_offset

	case .Free_All:
		s.offset = 0

	case .Resize:
		if old_memory == nil {
			return raw_alloc(s, size, align)
		}
		if size == 0 {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (resize)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Treat as a double free
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		data, err := raw_alloc(s, size, align)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}
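
// Unlike Stack, Small_Stack's .Free accepts out-of-order frees: it simply
// rewinds the offset to the freed allocation, implicitly discarding anything
// allocated after it, and its .Resize always allocates anew and copies.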

Dynamic_Pool :: struct {
	block_size:    int,
	out_band_size: int,
	alignment:     int,

	unused_blocks:        [dynamic]rawptr,
	used_blocks:          [dynamic]rawptr,
	out_band_allocations: [dynamic]rawptr,

	current_block: rawptr,
	current_pos:   rawptr,
	bytes_left:    int,

	block_allocator: Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT       :: 65536
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554
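
// A Dynamic_Pool linearly carves allocations out of fixed-size blocks obtained
// from `block_allocator`, cycling in a new block when the current one runs
// out. Requests of `out_band_size` bytes or more bypass the blocks entirely
// and are tracked individually as out-of-band allocations.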
dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	pool := (^Dynamic_Pool)(allocator_data)

	switch mode {
	case .Alloc:
		return dynamic_pool_alloc_bytes(pool, size)

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		dynamic_pool_free_all(pool)
		return nil, nil

	case .Resize:
		if old_size >= size {
			return byte_slice(old_memory, size), nil
		}
		data, err := dynamic_pool_alloc_bytes(pool, size)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free_All, .Resize, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			info.size = pool.block_size
			info.alignment = pool.alignment
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}

	return nil, nil
}

dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
	return Allocator{
		procedure = dynamic_pool_allocator_proc,
		data = pool,
	}
}
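
/*
Example: pooled allocations released in one call (illustrative sketch; `node`
is a hypothetical name):

	pool: Dynamic_Pool
	dynamic_pool_init(&pool)
	defer dynamic_pool_destroy(&pool)
	context.allocator = dynamic_pool_allocator(&pool)
	node := new(int) // carved from the current block
	free_all()       // releases every block back to block_allocator
*/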

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
                          block_allocator := context.allocator,
                          array_allocator := context.allocator,
                          block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
                          out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
                          alignment := 8) {
	pool.block_size = block_size
	pool.out_band_size = out_band_size
	pool.alignment = alignment
	pool.block_allocator = block_allocator
	pool.out_band_allocations.allocator = array_allocator
	pool.unused_blocks.allocator = array_allocator
	pool.used_blocks.allocator = array_allocator
}

dynamic_pool_destroy :: proc(using pool: ^Dynamic_Pool) {
	dynamic_pool_free_all(pool)
	delete(unused_blocks)
	delete(used_blocks)
	zero(pool, size_of(pool^))
}

dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> rawptr {
	data, err := dynamic_pool_alloc_bytes(pool, bytes)
	assert(err == nil)
	return raw_data(data)
}

dynamic_pool_alloc_bytes :: proc(using pool: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
	cycle_new_block :: proc(using pool: ^Dynamic_Pool) -> (err: Allocator_Error) {
		if block_allocator.procedure == nil {
			panic("You must call dynamic_pool_init on a Dynamic_Pool before using it")
		}

		if current_block != nil {
			append(&used_blocks, current_block)
		}

		new_block: rawptr
		if len(unused_blocks) > 0 {
			new_block = pop(&unused_blocks)
		} else {
			data: []byte
			data, err = block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
			                                      block_size, alignment,
			                                      nil, 0)
			new_block = raw_data(data)
		}

		bytes_left = block_size
		current_pos = new_block
		current_block = new_block
		return
	}

	n := bytes
	extra := alignment - (n % alignment)
	n += extra

	if n >= out_band_size {
		assert(block_allocator.procedure != nil)
		// Out-of-band: allocate the aligned request size `n` (not block_size)
		// so requests larger than a block are not truncated
		memory, err := block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
		                                         n, alignment,
		                                         nil, 0)
		if memory != nil {
			append(&out_band_allocations, raw_data(memory))
		}
		return memory, err
	}

	if bytes_left < n {
		err := cycle_new_block(pool)
		if err != nil {
			return nil, err
		}
		if current_block == nil {
			return nil, .Out_Of_Memory
		}
	}

	memory := current_pos
	current_pos = ptr_offset((^byte)(current_pos), n)
	bytes_left -= n
	return byte_slice(memory, bytes), nil
}

dynamic_pool_reset :: proc(using pool: ^Dynamic_Pool) {
	if current_block != nil {
		append(&unused_blocks, current_block)
		current_block = nil
	}

	for block in used_blocks {
		append(&unused_blocks, block)
	}
	clear(&used_blocks)

	for a in out_band_allocations {
		free(a, block_allocator)
	}
	clear(&out_band_allocations)
}

dynamic_pool_free_all :: proc(using pool: ^Dynamic_Pool) {
	dynamic_pool_reset(pool)

	for block in unused_blocks {
		free(block, block_allocator)
	}
	clear(&unused_blocks)
}
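
// The panic allocator turns any attempt to allocate, resize, free, or query
// into a panic. Installing it (e.g. as context.allocator) asserts that a
// code path performs no dynamic allocation.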
panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		if size > 0 {
			panic("mem: panic allocator, .Alloc called")
		}
	case .Resize:
		if size > 0 {
			panic("mem: panic allocator, .Resize called")
		}
	case .Free:
		if old_memory != nil {
			panic("mem: panic allocator, .Free called")
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called")

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features}
		}
		return nil, nil

	case .Query_Info:
		panic("mem: panic allocator, .Query_Info called")
	}

	return nil, nil
}

panic_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = panic_allocator_proc,
		data = nil,
	}
}
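
/*
Example: leak and bad-free detection with Tracking_Allocator (illustrative
sketch; assumes the caller imports "core:fmt"):

	track: Tracking_Allocator
	tracking_allocator_init(&track, context.allocator)
	defer tracking_allocator_destroy(&track)
	context.allocator = tracking_allocator(&track)

	// ... run the program ...

	for _, entry in track.allocation_map {
		fmt.printf("%v leaked %v bytes\n", entry.location, entry.size)
	}
	for bad in track.bad_free_array {
		fmt.printf("%v freed a bad pointer %v\n", bad.location, bad.memory)
	}
*/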

Tracking_Allocator_Entry :: struct {
	memory:    rawptr,
	size:      int,
	alignment: int,
	err:       Allocator_Error,
	location:  runtime.Source_Code_Location,
}

Tracking_Allocator_Bad_Free_Entry :: struct {
	memory:   rawptr,
	location: runtime.Source_Code_Location,
}

Tracking_Allocator :: struct {
	backing:           Allocator,
	allocation_map:    map[rawptr]Tracking_Allocator_Entry,
	bad_free_array:    [dynamic]Tracking_Allocator_Bad_Free_Entry,
	clear_on_free_all: bool,
}

tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
	t.backing = backing_allocator
	t.allocation_map.allocator = internals_allocator
	t.bad_free_array.allocator = internals_allocator
}

tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
	delete(t.allocation_map)
	delete(t.bad_free_array)
}

tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
	return Allocator{
		data = data,
		procedure = tracking_allocator_proc,
	}
}

tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                size, alignment: int,
                                old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	data := (^Tracking_Allocator)(allocator_data)
	if mode == .Query_Info {
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			if entry, ok := data.allocation_map[info.pointer]; ok {
				info.size = entry.size
				info.alignment = entry.alignment
			}
			info.pointer = nil
		}
		return nil, nil
	}

	result: []byte
	err: Allocator_Error
	if mode == .Free && old_memory not_in data.allocation_map {
		// Freeing a pointer this allocator never handed out: record it
		// instead of forwarding the bogus free to the backing allocator
		append(&data.bad_free_array, Tracking_Allocator_Bad_Free_Entry{
			memory = old_memory,
			location = loc,
		})
	} else {
		result, err = data.backing.procedure(data.backing.data, mode, size, alignment, old_memory, old_size, loc)
		if err != nil {
			return result, err
		}
	}

	result_ptr := raw_data(result)

	if data.allocation_map.allocator.procedure == nil {
		data.allocation_map.allocator = context.allocator
	}

	switch mode {
	case .Alloc:
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			alignment = alignment,
			err = err,
			location = loc,
		}
	case .Free:
		delete_key(&data.allocation_map, old_memory)
	case .Resize:
		if old_memory != result_ptr {
			delete_key(&data.allocation_map, old_memory)
		}
		data.allocation_map[result_ptr] = Tracking_Allocator_Entry{
			memory = result_ptr,
			size = size,
			alignment = alignment,
			err = err,
			location = loc,
		}

	case .Free_All:
		if data.clear_on_free_all {
			clear_map(&data.allocation_map)
		}

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		unreachable()
	}

	return result, err
}