allocators.odin

package mem

import "base:intrinsics"
import "base:runtime"

nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	return nil, nil
}

nil_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = nil_allocator_proc,
		data = nil,
	}
}

// Custom allocators

Arena :: struct {
	data:       []byte,
	offset:     int,
	peak_used:  int,
	temp_count: int,
}

Arena_Temp_Memory :: struct {
	arena:       ^Arena,
	prev_offset: int,
}

arena_init :: proc(a: ^Arena, data: []byte) {
	a.data       = data
	a.offset     = 0
	a.peak_used  = 0
	a.temp_count = 0
}

@(deprecated="prefer 'mem.arena_init'")
init_arena :: proc(a: ^Arena, data: []byte) {
	a.data       = data
	a.offset     = 0
	a.peak_used  = 0
	a.temp_count = 0
}

@(require_results)
arena_allocator :: proc(arena: ^Arena) -> Allocator {
	return Allocator{
		procedure = arena_allocator_proc,
		data = arena,
	}
}

arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	arena := cast(^Arena)allocator_data

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		#no_bounds_check end := &arena.data[arena.offset]

		ptr := align_forward(end, uintptr(alignment))

		total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))

		if arena.offset + total_size > len(arena.data) {
			return nil, .Out_Of_Memory
		}

		arena.offset += total_size
		arena.peak_used = max(arena.peak_used, arena.offset)
		if mode != .Alloc_Non_Zeroed {
			zero(ptr, size)
		}
		return byte_slice(ptr, size), nil

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		arena.offset = 0

	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))

	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

@(require_results)
begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	tmp: Arena_Temp_Memory
	tmp.arena = a
	tmp.prev_offset = a.offset
	a.temp_count += 1
	return tmp
}

end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
	assert(tmp.arena.offset >= tmp.prev_offset)
	assert(tmp.arena.temp_count > 0)
	tmp.arena.offset = tmp.prev_offset
	tmp.arena.temp_count -= 1
}

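// Illustrative sketch of driving the arena: allocations are grouped and released
// together, since `.Free` is not implemented. Assumes the package-level `alloc`
// and `free_all` helpers defined elsewhere in this package.
_example_arena_usage :: proc() {
	backing: [1024]byte
	arena: Arena
	arena_init(&arena, backing[:])

	a := arena_allocator(&arena)

	ptr, err := alloc(128, 16, a) // bump-allocate 128 bytes, 16-byte aligned
	assert(err == nil && ptr != nil)

	// Temporary region: everything allocated after `begin` is rolled back by `end`.
	tmp := begin_arena_temp_memory(&arena)
	_, _ = alloc(64, 8, a)
	end_arena_temp_memory(tmp)

	free_all(a) // resets the arena offset to 0
}
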
Scratch_Allocator :: struct {
	data:               []byte,
	curr_offset:        int,
	prev_allocation:    rawptr,
	backup_allocator:   Allocator,
	leaked_allocations: [dynamic][]byte,
}

scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) -> Allocator_Error {
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
	s.curr_offset = 0
	s.prev_allocation = nil
	s.backup_allocator = backup_allocator
	s.leaked_allocations.allocator = backup_allocator
	return nil
}

scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
	if s == nil {
		return
	}
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator)
	}
	delete(s.leaked_allocations)
	delete(s.data, s.backup_allocator)
	s^ = {}
}

scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	s := (^Scratch_Allocator)(allocator_data)

	if s.data == nil {
		DEFAULT_BACKING_SIZE :: 4 * Megabyte
		if !(context.allocator.procedure != scratch_allocator_proc &&
		     context.allocator.data != allocator_data) {
			panic("cyclic initialization of the scratch allocator with itself")
		}
		scratch_allocator_init(s, DEFAULT_BACKING_SIZE)
	}

	size := size

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		size = align_forward_int(size, alignment)

		switch {
		case s.curr_offset+size <= len(s.data):
			// The allocation fits after the current offset
			start := uintptr(raw_data(s.data))
			ptr := start + uintptr(s.curr_offset)
			ptr = align_forward_uintptr(ptr, uintptr(alignment))
			if mode != .Alloc_Non_Zeroed {
				zero(rawptr(ptr), size)
			}

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil

		case size <= len(s.data):
			// The allocation does not fit after the current offset, but does fit
			// in the buffer, so wrap back around to the start
			start := uintptr(raw_data(s.data))
			ptr := align_forward_uintptr(start, uintptr(alignment))
			if mode != .Alloc_Non_Zeroed {
				zero(rawptr(ptr), size)
			}

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil
		}

		// The allocation is larger than the backing buffer: fall back to the backup allocator
		a := s.backup_allocator
		if a.procedure == nil {
			a = context.allocator
			s.backup_allocator = a
		}

		ptr, err := alloc_bytes(size, alignment, a, loc)
		if err != nil {
			return ptr, err
		}
		if s.leaked_allocations == nil {
			s.leaked_allocations, err = make([dynamic][]byte, a)
		}
		append(&s.leaked_allocations, ptr)

		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc)
			}
		}

		return ptr, err

	case .Free:
		if old_memory == nil {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)

		if s.prev_allocation == old_memory {
			s.curr_offset = int(uintptr(s.prev_allocation) - start)
			s.prev_allocation = nil
			return nil, nil
		}

		if start <= old_ptr && old_ptr < end {
			// NOTE(bill): Cannot free this pointer but it is valid
			return nil, nil
		}

		if len(s.leaked_allocations) != 0 {
			for data, i in s.leaked_allocations {
				ptr := raw_data(data)
				if ptr == old_memory {
					free_bytes(data, s.backup_allocator)
					ordered_remove(&s.leaked_allocations, i)
					return nil, nil
				}
			}
		}
		return nil, .Invalid_Pointer
		// panic("invalid pointer passed to default_temp_allocator");

	case .Free_All:
		s.curr_offset = 0
		s.prev_allocation = nil
		for ptr in s.leaked_allocations {
			free_bytes(ptr, s.backup_allocator)
		}
		clear(&s.leaked_allocations)

	case .Resize, .Resize_Non_Zeroed:
		begin := uintptr(raw_data(s.data))
		end := begin + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)
		if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
			s.curr_offset = int(old_ptr-begin)+size
			return byte_slice(old_memory, size), nil
		}
		data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc)
		if err != nil {
			return data, err
		}
		runtime.copy(data, byte_slice(old_memory, old_size))
		_, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc)
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

@(require_results)
scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
	return Allocator{
		procedure = scratch_allocator_proc,
		data = allocator,
	}
}

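// Illustrative sketch of the scratch allocator: a fixed buffer used in a
// wrap-around fashion, with a backup allocator for requests that do not fit.
// Assumes the package-level `alloc` and `free_all` helpers and the `Kilobyte`
// constant defined elsewhere in this package.
_example_scratch_usage :: proc() {
	scratch: Scratch_Allocator
	_ = scratch_allocator_init(&scratch, 4*Kilobyte)
	defer scratch_allocator_destroy(&scratch)

	a := scratch_allocator(&scratch)

	ptr, err := alloc(256, 16, a)
	assert(err == nil && ptr != nil)

	// A request larger than the backing buffer falls through to the backup
	// allocator and is tracked in `leaked_allocations`.
	_, _ = alloc(16*Kilobyte, 16, a)

	free_all(a)
}
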
Stack_Allocation_Header :: struct {
	prev_offset: int,
	padding:     int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
	data:        []byte,
	prev_offset: int,
	curr_offset: int,
	peak_used:   int,
}

stack_init :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'mem.stack_init'")
init_stack :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

@(require_results)
stack_allocator :: proc(stack: ^Stack) -> Allocator {
	return Allocator{
		procedure = stack_allocator_proc,
		data = stack,
	}
}

stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	raw_alloc :: proc(s: ^Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header))
		if s.curr_offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.prev_offset = s.curr_offset
		s.curr_offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
		header.padding = padding
		header.prev_offset = s.prev_offset

		s.curr_offset += size
		s.peak_used = max(s.peak_used, s.curr_offset)

		if zero_memory {
			zero(rawptr(next_addr), size)
		}
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return raw_alloc(s, size, alignment, mode == .Alloc)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (free)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			// panic("Out of order stack allocator free");
			return nil, .Invalid_Pointer
		}

		s.curr_offset = old_offset
		s.prev_offset = header.prev_offset

	case .Free_All:
		s.prev_offset = 0
		s.curr_offset = 0

	case .Resize, .Resize_Non_Zeroed:
		if old_memory == nil {
			return raw_alloc(s, size, alignment, mode == .Resize)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (resize)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			// Not the most recent allocation: allocate fresh space and copy the old data over
			data, err := raw_alloc(s, size, alignment, mode == .Resize)
			if err == nil {
				runtime.copy(data, byte_slice(old_memory, old_size))
			}
			return data, err
		}

		old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
		assert(old_memory_size == uintptr(old_size))

		diff := size - old_size
		s.curr_offset += diff // works for smaller sizes too
		if diff > 0 {
			// Zero the newly grown region, which starts at the end of the old allocation
			zero(rawptr(curr_addr + uintptr(old_size)), diff)
		}

		return byte_slice(old_memory, size), nil

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

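// Illustrative sketch of the stack allocator: frees must occur in reverse
// allocation order, otherwise `.Invalid_Pointer` is returned. Assumes the
// package-level `alloc`, `free`, and `free_all` helpers.
_example_stack_usage :: proc() {
	backing: [4096]byte
	stack: Stack
	stack_init(&stack, backing[:])

	a := stack_allocator(&stack)

	first, _ := alloc(128, 16, a)
	second, _ := alloc(256, 16, a)

	// LIFO order: free the most recent allocation first.
	free(second, a)
	free(first, a)

	free_all(a)
}
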
Small_Stack_Allocation_Header :: struct {
	padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order
Small_Stack :: struct {
	data:      []byte,
	offset:    int,
	peak_used: int,
}

small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'small_stack_init'")
init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

@(require_results)
small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
	return Allocator{
		procedure = small_stack_allocator_proc,
		data = stack,
	}
}

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Small_Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)

	raw_alloc :: proc(s: ^Small_Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
		if s.offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
		header.padding = auto_cast padding

		s.offset += size
		s.peak_used = max(s.peak_used, s.offset)

		if zero_memory {
			zero(rawptr(next_addr), size)
		}
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return raw_alloc(s, size, align, mode == .Alloc)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (free)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
		s.offset = old_offset

	case .Free_All:
		s.offset = 0

	case .Resize, .Resize_Non_Zeroed:
		if old_memory == nil {
			return raw_alloc(s, size, align, mode == .Resize)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (resize)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Treat as a double free
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		data, err := raw_alloc(s, size, align, mode == .Resize)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

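// Illustrative sketch of the small stack: same pattern as `Stack`, but with a
// single-byte header, so out-of-order frees simply rewind the offset rather than
// failing. Assumes the package-level `alloc` and `free_all` helpers.
_example_small_stack_usage :: proc() {
	backing: [1024]byte
	ss: Small_Stack
	small_stack_init(&ss, backing[:])

	a := small_stack_allocator(&ss)

	ptr, err := alloc(100, 8, a)
	assert(err == nil && ptr != nil)

	free_all(a)
}
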
Dynamic_Pool :: struct {
	block_size:    int,
	out_band_size: int,
	alignment:     int,

	unused_blocks:        [dynamic]rawptr,
	used_blocks:          [dynamic]rawptr,
	out_band_allocations: [dynamic]rawptr,

	current_block: rawptr,
	current_pos:   rawptr,
	bytes_left:    int,

	block_allocator: Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT       :: 65536
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554

dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	pool := (^Dynamic_Pool)(allocator_data)

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return dynamic_pool_alloc_bytes(pool, size)

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		dynamic_pool_free_all(pool)
		return nil, nil

	case .Resize, .Resize_Non_Zeroed:
		if old_size >= size {
			return byte_slice(old_memory, size), nil
		}
		data, err := dynamic_pool_alloc_bytes(pool, size)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			info.size = pool.block_size
			info.alignment = pool.alignment
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}

	return nil, nil
}

@(require_results)
dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
	return Allocator{
		procedure = dynamic_pool_allocator_proc,
		data = pool,
	}
}

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
	block_allocator := context.allocator,
	array_allocator := context.allocator,
	block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
	out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
	alignment := 8) {
	pool.block_size = block_size
	pool.out_band_size = out_band_size
	pool.alignment = alignment
	pool.block_allocator = block_allocator
	pool.out_band_allocations.allocator = array_allocator
	pool.unused_blocks.allocator = array_allocator
	pool.used_blocks.allocator = array_allocator
}

dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) {
	dynamic_pool_free_all(pool)
	delete(pool.unused_blocks)
	delete(pool.used_blocks)
	delete(pool.out_band_allocations)

	zero(pool, size_of(pool^))
}

@(require_results)
dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> (rawptr, Allocator_Error) {
	data, err := dynamic_pool_alloc_bytes(pool, bytes)
	return raw_data(data), err
}

@(require_results)
dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
	cycle_new_block :: proc(p: ^Dynamic_Pool) -> (err: Allocator_Error) {
		if p.block_allocator.procedure == nil {
			panic("You must call pool_init on a Pool before using it")
		}

		if p.current_block != nil {
			append(&p.used_blocks, p.current_block)
		}

		new_block: rawptr
		if len(p.unused_blocks) > 0 {
			new_block = pop(&p.unused_blocks)
		} else {
			data: []byte
			data, err = p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
				p.block_size, p.alignment,
				nil, 0)
			new_block = raw_data(data)
		}

		p.bytes_left = p.block_size
		p.current_pos = new_block
		p.current_block = new_block
		return
	}

	n := bytes
	extra := p.alignment - (n % p.alignment)
	n += extra
	if n > p.block_size {
		return nil, .Invalid_Argument
	}

	if n >= p.out_band_size {
		assert(p.block_allocator.procedure != nil)
		memory, err := p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
			p.block_size, p.alignment,
			nil, 0)
		if memory != nil {
			append(&p.out_band_allocations, raw_data(memory))
		}
		return memory, err
	}

	if p.bytes_left < n {
		err := cycle_new_block(p)
		if err != nil {
			return nil, err
		}
		if p.current_block == nil {
			return nil, .Out_Of_Memory
		}
	}

	memory := p.current_pos
	p.current_pos = ([^]byte)(p.current_pos)[n:]
	p.bytes_left -= n
	return ([^]byte)(memory)[:bytes], nil
}

dynamic_pool_reset :: proc(p: ^Dynamic_Pool) {
	if p.current_block != nil {
		append(&p.unused_blocks, p.current_block)
		p.current_block = nil
	}

	for block in p.used_blocks {
		append(&p.unused_blocks, block)
	}
	clear(&p.used_blocks)

	for a in p.out_band_allocations {
		free(a, p.block_allocator)
	}
	clear(&p.out_band_allocations)

	p.bytes_left = 0 // Make new allocations call `cycle_new_block` again.
}

dynamic_pool_free_all :: proc(p: ^Dynamic_Pool) {
	dynamic_pool_reset(p)

	for block in p.unused_blocks {
		free(block, p.block_allocator)
	}
	clear(&p.unused_blocks)
}

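// Illustrative sketch of the dynamic pool: memory is carved out of fixed-size
// blocks fetched from `block_allocator` on demand; individual frees are not
// supported, so everything is released via reset/free_all/destroy. Assumes the
// package-level `alloc` helper.
_example_dynamic_pool_usage :: proc() {
	pool: Dynamic_Pool
	dynamic_pool_init(&pool)
	defer dynamic_pool_destroy(&pool)

	a := dynamic_pool_allocator(&pool)

	ptr, err := alloc(512, 8, a)
	assert(err == nil && ptr != nil)

	// Reset keeps the blocks around for reuse; `dynamic_pool_free_all` returns
	// them to the block allocator.
	dynamic_pool_reset(&pool)
}
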
panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		if size > 0 {
			panic("mem: panic allocator, .Alloc called", loc=loc)
		}
	case .Alloc_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc)
		}
	case .Resize:
		if size > 0 {
			panic("mem: panic allocator, .Resize called", loc=loc)
		}
	case .Resize_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Resize_Non_Zeroed called", loc=loc)
		}
	case .Free:
		if old_memory != nil {
			panic("mem: panic allocator, .Free called", loc=loc)
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called", loc=loc)

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features}
		}
		return nil, nil

	case .Query_Info:
		panic("mem: panic allocator, .Query_Info called", loc=loc)
	}

	return nil, nil
}

@(require_results)
panic_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = panic_allocator_proc,
		data = nil,
	}
}

Buddy_Block :: struct #align(align_of(uint)) {
	size:    uint,
	is_free: bool,
}

@(require_results)
buddy_block_next :: proc(block: ^Buddy_Block) -> ^Buddy_Block {
	return (^Buddy_Block)(([^]byte)(block)[block.size:])
}

@(require_results)
buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block {
	block := block
	if block != nil && size != 0 {
		// Recursive split
		for size < block.size {
			sz := block.size >> 1
			block.size = sz
			block = buddy_block_next(block)
			block.size = sz
			block.is_free = true
		}
		if size <= block.size {
			return block
		}
	}
	// Block cannot fit the requested allocation size
	return nil
}

buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) {
	for {
		// Keep looping until there are no more buddies to coalesce
		block := head
		buddy := buddy_block_next(block)

		no_coalescence := true
		for block < tail && buddy < tail { // make sure the buddies are within the range
			if block.is_free && buddy.is_free && block.size == buddy.size {
				// Coalesce buddies into one
				block.size <<= 1
				block = buddy_block_next(block)
				if block < tail {
					buddy = buddy_block_next(block)
					no_coalescence = false
				}
			} else if block.size < buddy.size {
				// The buddy block is split into smaller blocks
				block = buddy
				buddy = buddy_block_next(buddy)
			} else {
				block = buddy_block_next(buddy)
				if block < tail {
					// Leave the buddy block for the next iteration
					buddy = buddy_block_next(block)
				}
			}
		}

		if no_coalescence {
			return
		}
	}
}

@(require_results)
buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block {
	assert(size != 0)

	best_block: ^Buddy_Block
	block := head                    // left
	buddy := buddy_block_next(block) // right

	// The entire memory section between head and tail is free,
	// just call 'buddy_block_split' to get the allocation
	if buddy == tail && block.is_free {
		return buddy_block_split(block, size)
	}

	// Find the block which best fits the requested allocation size
	for block < tail && buddy < tail { // make sure the buddies are within the range
		// If both buddies are free, coalesce them together
		// NOTE: this is an optimization to reduce fragmentation
		//       this could be completely ignored
		if block.is_free && buddy.is_free && block.size == buddy.size {
			block.size <<= 1
			if size <= block.size && (best_block == nil || block.size <= best_block.size) {
				best_block = block
			}

			block = buddy_block_next(buddy)
			if block < tail {
				// Delay the buddy block for the next iteration
				buddy = buddy_block_next(block)
			}
			continue
		}

		if block.is_free && size <= block.size &&
		   (best_block == nil || block.size <= best_block.size) {
			best_block = block
		}

		if buddy.is_free && size <= buddy.size &&
		   (best_block == nil || buddy.size < best_block.size) {
			// If both buddies are the same size, it makes more sense
			// to pick the buddy as it "bounces around" less
			best_block = buddy
		}

		if block.size <= buddy.size {
			block = buddy_block_next(buddy)
			if block < tail {
				// Delay the buddy block for the next iteration
				buddy = buddy_block_next(block)
			}
		} else {
			// Buddy was split into smaller blocks
			block = buddy
			buddy = buddy_block_next(buddy)
		}
	}

	if best_block != nil {
		// This will handle the case if the 'best_block' is also the perfect fit
		return buddy_block_split(best_block, size)
	}

	// Maybe out of memory
	return nil
}

Buddy_Allocator :: struct {
	head:      ^Buddy_Block,
	tail:      ^Buddy_Block,
	alignment: uint,
}

@(require_results)
buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator {
	return Allocator{
		procedure = buddy_allocator_proc,
		data = b,
	}
}

buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint) {
	assert(data != nil)
	assert(is_power_of_two(uintptr(len(data))))
	assert(is_power_of_two(uintptr(alignment)))

	alignment := alignment
	if alignment < size_of(Buddy_Block) {
		alignment = size_of(Buddy_Block)
	}

	ptr := raw_data(data)
	assert(uintptr(ptr) % uintptr(alignment) == 0, "data is not aligned to minimum alignment")

	b.head = (^Buddy_Block)(ptr)
	b.head.size = uint(len(data))
	b.head.is_free = true

	b.tail = buddy_block_next(b.head)

	b.alignment = alignment
}

@(require_results)
buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint {
	size := size
	actual_size := b.alignment

	size += size_of(Buddy_Block)
	size = align_forward_uint(size, b.alignment)

	for size > actual_size {
		actual_size <<= 1
	}

	return actual_size
}

@(require_results)
buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint, zeroed: bool) -> ([]byte, Allocator_Error) {
	if size != 0 {
		actual_size := buddy_block_size_required(b, size)

		found := buddy_block_find_best(b.head, b.tail, actual_size)
		if found == nil {
			// Try to coalesce all the free buddy blocks and then search again
			buddy_block_coalescence(b.head, b.tail)
			found = buddy_block_find_best(b.head, b.tail, actual_size)
		}

		if found == nil {
			return nil, .Out_Of_Memory
		}

		found.is_free = false
		data := ([^]byte)(found)[b.alignment:][:size]
		if zeroed {
			zero_slice(data)
		}
		return data, nil
	}
	return nil, nil
}

buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Error {
	if ptr != nil {
		if !(b.head <= ptr && ptr <= b.tail) {
			return .Invalid_Pointer
		}

		block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
		block.is_free = true

		buddy_block_coalescence(b.head, b.tail)
	}
	return nil
}

buddy_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	b := (^Buddy_Allocator)(allocator_data)

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return buddy_allocator_alloc(b, uint(size), mode == .Alloc)

	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b))

	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b))

	case .Free:
		return nil, buddy_allocator_free(b, old_memory)

	case .Free_All:
		alignment := b.alignment
		head := ([^]byte)(b.head)
		tail := ([^]byte)(b.tail)
		data := head[:ptr_sub(tail, head)]
		buddy_allocator_init(b, data, alignment)

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features, .Alloc, .Alloc_Non_Zeroed, .Resize, .Resize_Non_Zeroed, .Free, .Free_All, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			ptr := old_memory
			if !(b.head <= ptr && ptr <= b.tail) {
				return nil, .Invalid_Pointer
			}
			block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
			info.size = int(block.size)
			info.alignment = int(b.alignment)
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}

	return nil, nil
}

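// Illustrative sketch of the buddy allocator: the backing buffer must have a
// power-of-two length and be aligned to the (power-of-two) alignment passed to
// `buddy_allocator_init`. The aligned wrapper struct below is only for this
// example; the sketch also assumes the package-level `alloc`, `free`, and
// `free_all` helpers.
_example_buddy_usage :: proc() {
	Backing :: struct #align(64) {
		buf: [1024]byte, // power-of-two size
	}
	backing: Backing

	b: Buddy_Allocator
	buddy_allocator_init(&b, backing.buf[:], 64)

	a := buddy_allocator(&b)

	ptr, err := alloc(200, 16, a) // rounded up internally to a 256-byte block
	assert(err == nil && ptr != nil)

	free(ptr, a)  // marks the block free and coalesces buddies
	free_all(a)   // re-initialises the allocator over the same buffer
}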