// allocators.odin

package mem

import "base:intrinsics"
import "base:runtime"

nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                           size, alignment: int,
                           old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	return nil, nil
}

nil_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = nil_allocator_proc,
		data = nil,
	}
}

// Custom allocators

Arena :: struct {
	data:       []byte,
	offset:     int,
	peak_used:  int,
	temp_count: int,
}

Arena_Temp_Memory :: struct {
	arena:       ^Arena,
	prev_offset: int,
}

arena_init :: proc(a: ^Arena, data: []byte) {
	a.data       = data
	a.offset     = 0
	a.peak_used  = 0
	a.temp_count = 0
}

@(deprecated="prefer 'mem.arena_init'")
init_arena :: proc(a: ^Arena, data: []byte) {
	a.data       = data
	a.offset     = 0
	a.peak_used  = 0
	a.temp_count = 0
}

@(require_results)
arena_allocator :: proc(arena: ^Arena) -> Allocator {
	return Allocator{
		procedure = arena_allocator_proc,
		data = arena,
	}
}

arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	arena := cast(^Arena)allocator_data

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		#no_bounds_check end := &arena.data[arena.offset]

		ptr := align_forward(end, uintptr(alignment))

		total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))

		if arena.offset + total_size > len(arena.data) {
			return nil, .Out_Of_Memory
		}

		arena.offset += total_size
		arena.peak_used = max(arena.peak_used, arena.offset)
		if mode != .Alloc_Non_Zeroed {
			zero(ptr, size)
		}
		return byte_slice(ptr, size), nil

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		arena.offset = 0

	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))

	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

@(require_results)
begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	tmp: Arena_Temp_Memory
	tmp.arena = a
	tmp.prev_offset = a.offset
	a.temp_count += 1
	return tmp
}

end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
	assert(tmp.arena.offset >= tmp.prev_offset)
	assert(tmp.arena.temp_count > 0)
	tmp.arena.offset = tmp.prev_offset
	tmp.arena.temp_count -= 1
}

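/*
	Illustrative sketch (not part of the original file): typical Arena usage with a
	fixed backing buffer and a temporary-memory scope. The procedure name
	`example_arena_usage` is hypothetical.

	example_arena_usage :: proc() {
		backing: [1024]byte
		arena: Arena
		arena_init(&arena, backing[:])

		a := arena_allocator(&arena)
		data, err := alloc_bytes(128, 16, a) // bump-allocates 128 bytes, 16-byte aligned
		assert(err == nil)
		_ = data

		// Temporary scope: everything allocated after `begin_arena_temp_memory`
		// is discarded by `end_arena_temp_memory`, which restores the old offset.
		tmp := begin_arena_temp_memory(&arena)
		_, _ = alloc_bytes(256, 16, a)
		end_arena_temp_memory(tmp)

		free_all(a) // resets arena.offset to 0
	}
*/
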
Scratch_Allocator :: struct {
	data:               []byte,
	curr_offset:        int,
	prev_allocation:    rawptr,
	backup_allocator:   Allocator,
	leaked_allocations: [dynamic][]byte,
}

scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) -> Allocator_Error {
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
	s.curr_offset = 0
	s.prev_allocation = nil
	s.backup_allocator = backup_allocator
	s.leaked_allocations.allocator = backup_allocator
	return nil
}

scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
	if s == nil {
		return
	}
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator)
	}
	delete(s.leaked_allocations)
	delete(s.data, s.backup_allocator)
	s^ = {}
}

scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                               size, alignment: int,
                               old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	s := (^Scratch_Allocator)(allocator_data)

	if s.data == nil {
		DEFAULT_BACKING_SIZE :: 4 * Megabyte
		if !(context.allocator.procedure != scratch_allocator_proc &&
		     context.allocator.data != allocator_data) {
			panic("cyclic initialization of the scratch allocator with itself")
		}
		scratch_allocator_init(s, DEFAULT_BACKING_SIZE)
	}

	size := size

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		size = align_forward_int(size, alignment)

		switch {
		case s.curr_offset+size <= len(s.data):
			start := uintptr(raw_data(s.data))
			ptr := start + uintptr(s.curr_offset)
			ptr = align_forward_uintptr(ptr, uintptr(alignment))
			if mode != .Alloc_Non_Zeroed {
				zero(rawptr(ptr), size)
			}

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil

		case size <= len(s.data):
			start := uintptr(raw_data(s.data))
			ptr := align_forward_uintptr(start, uintptr(alignment))
			if mode != .Alloc_Non_Zeroed {
				zero(rawptr(ptr), size)
			}

			s.prev_allocation = rawptr(ptr)
			offset := int(ptr - start)
			s.curr_offset = offset + size
			return byte_slice(rawptr(ptr), size), nil
		}

		a := s.backup_allocator
		if a.procedure == nil {
			a = context.allocator
			s.backup_allocator = a
		}

		ptr, err := alloc_bytes(size, alignment, a, loc)
		if err != nil {
			return ptr, err
		}
		if s.leaked_allocations == nil {
			s.leaked_allocations, err = make([dynamic][]byte, a)
		}
		append(&s.leaked_allocations, ptr)

		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc)
			}
		}

		return ptr, err

	case .Free:
		if old_memory == nil {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)

		if s.prev_allocation == old_memory {
			s.curr_offset = int(uintptr(s.prev_allocation) - start)
			s.prev_allocation = nil
			return nil, nil
		}

		if start <= old_ptr && old_ptr < end {
			// NOTE(bill): Cannot free this pointer but it is valid
			return nil, nil
		}

		if len(s.leaked_allocations) != 0 {
			for data, i in s.leaked_allocations {
				ptr := raw_data(data)
				if ptr == old_memory {
					free_bytes(data, s.backup_allocator)
					ordered_remove(&s.leaked_allocations, i)
					return nil, nil
				}
			}
		}
		return nil, .Invalid_Pointer
		// panic("invalid pointer passed to default_temp_allocator");

	case .Free_All:
		s.curr_offset = 0
		s.prev_allocation = nil
		for ptr in s.leaked_allocations {
			free_bytes(ptr, s.backup_allocator)
		}
		clear(&s.leaked_allocations)

	case .Resize, .Resize_Non_Zeroed:
		begin := uintptr(raw_data(s.data))
		end := begin + uintptr(len(s.data))
		old_ptr := uintptr(old_memory)
		if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
			s.curr_offset = int(old_ptr-begin)+size
			return byte_slice(old_memory, size), nil
		}
		data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc)
		if err != nil {
			return data, err
		}
		runtime.copy(data, byte_slice(old_memory, old_size))
		_, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc)
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

@(require_results)
scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
	return Allocator{
		procedure = scratch_allocator_proc,
		data = allocator,
	}
}

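/*
	Illustrative sketch (not part of the original file): a Scratch_Allocator used as a
	temporary allocator. Allocations that do not fit in the backing buffer fall back to
	`backup_allocator` and are tracked in `leaked_allocations`. The procedure name
	`example_scratch_usage` is hypothetical.

	example_scratch_usage :: proc() {
		scratch: Scratch_Allocator
		_ = scratch_allocator_init(&scratch, 4*Kilobyte)
		defer scratch_allocator_destroy(&scratch)

		a := scratch_allocator(&scratch)
		buf, err := alloc_bytes(512, 16, a)
		assert(err == nil)
		_ = buf

		free_all(a) // rewinds the buffer and frees any leaked backup allocations
	}
*/
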
Stack_Allocation_Header :: struct {
	prev_offset: int,
	padding:     int,
}

// Stack is a stack-like allocator which has a strict memory freeing order
Stack :: struct {
	data:        []byte,
	prev_offset: int,
	curr_offset: int,
	peak_used:   int,
}

stack_init :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'mem.stack_init'")
init_stack :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
}

@(require_results)
stack_allocator :: proc(stack: ^Stack) -> Allocator {
	return Allocator{
		procedure = stack_allocator_proc,
		data = stack,
	}
}

stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	raw_alloc :: proc(s: ^Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header))
		if s.curr_offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.prev_offset = s.curr_offset
		s.curr_offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
		header.padding = padding
		header.prev_offset = s.prev_offset

		s.curr_offset += size

		s.peak_used = max(s.peak_used, s.curr_offset)

		if zero_memory {
			zero(rawptr(next_addr), size)
		}
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return raw_alloc(s, size, alignment, mode == .Alloc)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (free)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			// panic("Out of order stack allocator free");
			return nil, .Invalid_Pointer
		}

		s.curr_offset = old_offset
		s.prev_offset = header.prev_offset

	case .Free_All:
		s.prev_offset = 0
		s.curr_offset = 0

	case .Resize, .Resize_Non_Zeroed:
		if old_memory == nil {
			return raw_alloc(s, size, alignment, mode == .Resize)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			panic("Out of bounds memory address passed to stack allocator (resize)")
		}

		if curr_addr >= start+uintptr(s.curr_offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		if old_offset != header.prev_offset {
			data, err := raw_alloc(s, size, alignment, mode == .Resize)
			if err == nil {
				runtime.copy(data, byte_slice(old_memory, old_size))
			}
			return data, err
		}

		old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
		assert(old_memory_size == uintptr(old_size))

		diff := size - old_size
		s.curr_offset += diff // works for smaller sizes too
		if diff > 0 {
			zero(rawptr(curr_addr + uintptr(old_size)), diff) // the newly exposed bytes start at old_size
		}
		return byte_slice(old_memory, size), nil

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

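/*
	Illustrative sketch (not part of the original file): the Stack allocator enforces a
	LIFO-style discipline, so the most recent allocation is the one that can be freed or
	resized in place. The procedure name `example_stack_usage` is hypothetical.

	example_stack_usage :: proc() {
		backing: [1024]byte
		stack: Stack
		stack_init(&stack, backing[:])

		a := stack_allocator(&stack)
		first, _ := alloc_bytes(64, 16, a)
		second, _ := alloc_bytes(64, 16, a)

		free_bytes(second, a) // OK: most recent allocation
		free_bytes(first, a)  // OK once `second` has been freed
	}
*/
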
Small_Stack_Allocation_Header :: struct {
	padding: u8,
}

// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order
Small_Stack :: struct {
	data:      []byte,
	offset:    int,
	peak_used: int,
}

small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

@(deprecated="prefer 'small_stack_init'")
init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
}

@(require_results)
small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
	return Allocator{
		procedure = small_stack_allocator_proc,
		data = stack,
	}
}

small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                   size, alignment: int,
                                   old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
	s := cast(^Small_Stack)allocator_data

	if s.data == nil {
		return nil, .Invalid_Argument
	}

	align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)

	raw_alloc :: proc(s: ^Small_Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
		curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
		padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
		if s.offset + padding + size > len(s.data) {
			return nil, .Out_Of_Memory
		}
		s.offset += padding

		next_addr := curr_addr + uintptr(padding)
		header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
		header.padding = auto_cast padding

		s.offset += size

		s.peak_used = max(s.peak_used, s.offset)

		if zero_memory {
			zero(rawptr(next_addr), size)
		}
		return byte_slice(rawptr(next_addr), size), nil
	}

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return raw_alloc(s, size, align, mode == .Alloc)

	case .Free:
		if old_memory == nil {
			return nil, nil
		}
		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)

		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (free)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Allow double frees
			return nil, nil
		}

		header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
		old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))

		s.offset = old_offset

	case .Free_All:
		s.offset = 0

	case .Resize, .Resize_Non_Zeroed:
		if old_memory == nil {
			return raw_alloc(s, size, align, mode == .Resize)
		}
		if size == 0 {
			return nil, nil
		}

		start := uintptr(raw_data(s.data))
		end := start + uintptr(len(s.data))
		curr_addr := uintptr(old_memory)
		if !(start <= curr_addr && curr_addr < end) {
			// panic("Out of bounds memory address passed to stack allocator (resize)");
			return nil, .Invalid_Pointer
		}

		if curr_addr >= start+uintptr(s.offset) {
			// NOTE(bill): Treat as a double free
			return nil, nil
		}

		if old_size == size {
			return byte_slice(old_memory, size), nil
		}

		data, err := raw_alloc(s, size, align, mode == .Resize)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return nil, nil
}

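/*
	Illustrative sketch (not part of the original file): Small_Stack trades the strict
	ordering check for a one-byte header, so frees are accepted in any order and simply
	rewind the offset to the freed allocation. The procedure name
	`example_small_stack_usage` is hypothetical.

	example_small_stack_usage :: proc() {
		backing: [512]byte
		stack: Small_Stack
		small_stack_init(&stack, backing[:])

		a := small_stack_allocator(&stack)
		first, _ := alloc_bytes(32, 8, a)
		_, _ = alloc_bytes(32, 8, a)

		// Freeing `first` rewinds the stack past the second allocation as well.
		free_bytes(first, a)
	}
*/
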
Dynamic_Pool :: struct {
	block_size:    int,
	out_band_size: int,
	alignment:     int,

	unused_blocks:        [dynamic]rawptr,
	used_blocks:          [dynamic]rawptr,
	out_band_allocations: [dynamic]rawptr,

	current_block: rawptr,
	current_pos:   rawptr,
	bytes_left:    int,

	block_allocator: Allocator,
}

DYNAMIC_POOL_BLOCK_SIZE_DEFAULT       :: 65536
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554

dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	pool := (^Dynamic_Pool)(allocator_data)

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return dynamic_pool_alloc_bytes(pool, size)

	case .Free:
		return nil, .Mode_Not_Implemented

	case .Free_All:
		dynamic_pool_free_all(pool)
		return nil, nil

	case .Resize, .Resize_Non_Zeroed:
		if old_size >= size {
			return byte_slice(old_memory, size), nil
		}
		data, err := dynamic_pool_alloc_bytes(pool, size)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
		}
		return data, err

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			info.size = pool.block_size
			info.alignment = pool.alignment
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}

	return nil, nil
}

@(require_results)
dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
	return Allocator{
		procedure = dynamic_pool_allocator_proc,
		data = pool,
	}
}

dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
                          block_allocator := context.allocator,
                          array_allocator := context.allocator,
                          block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
                          out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
                          alignment := 8) {
	pool.block_size = block_size
	pool.out_band_size = out_band_size
	pool.alignment = alignment
	pool.block_allocator = block_allocator
	pool.out_band_allocations.allocator = array_allocator
	pool.unused_blocks.allocator = array_allocator
	pool.used_blocks.allocator = array_allocator
}

dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) {
	dynamic_pool_free_all(pool)
	delete(pool.unused_blocks)
	delete(pool.used_blocks)
	delete(pool.out_band_allocations)

	zero(pool, size_of(pool^))
}

@(require_results)
dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> (rawptr, Allocator_Error) {
	data, err := dynamic_pool_alloc_bytes(pool, bytes)
	return raw_data(data), err
}

@(require_results)
dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
	cycle_new_block :: proc(p: ^Dynamic_Pool) -> (err: Allocator_Error) {
		if p.block_allocator.procedure == nil {
			panic("You must call pool_init on a Pool before using it")
		}

		if p.current_block != nil {
			append(&p.used_blocks, p.current_block)
		}

		new_block: rawptr
		if len(p.unused_blocks) > 0 {
			new_block = pop(&p.unused_blocks)
		} else {
			data: []byte
			data, err = p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
			                                        p.block_size, p.alignment,
			                                        nil, 0)
			new_block = raw_data(data)
		}

		p.bytes_left    = p.block_size
		p.current_pos   = new_block
		p.current_block = new_block
		return
	}

	n := align_formula(bytes, p.alignment)
	if n > p.block_size {
		return nil, .Invalid_Argument
	}

	if n >= p.out_band_size {
		assert(p.block_allocator.procedure != nil)
		memory, err := p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
		                                           p.block_size, p.alignment,
		                                           nil, 0)
		if memory != nil {
			append(&p.out_band_allocations, raw_data(memory))
		}
		return memory, err
	}

	if p.bytes_left < n {
		err := cycle_new_block(p)
		if err != nil {
			return nil, err
		}
		if p.current_block == nil {
			return nil, .Out_Of_Memory
		}
	}

	memory := p.current_pos
	p.current_pos = ([^]byte)(p.current_pos)[n:]
	p.bytes_left -= n
	return ([^]byte)(memory)[:bytes], nil
}

dynamic_pool_reset :: proc(p: ^Dynamic_Pool) {
	if p.current_block != nil {
		append(&p.unused_blocks, p.current_block)
		p.current_block = nil
	}

	for block in p.used_blocks {
		append(&p.unused_blocks, block)
	}
	clear(&p.used_blocks)

	for a in p.out_band_allocations {
		free(a, p.block_allocator)
	}
	clear(&p.out_band_allocations)

	p.bytes_left = 0 // Make new allocations call `cycle_new_block` again.
}

dynamic_pool_free_all :: proc(p: ^Dynamic_Pool) {
	dynamic_pool_reset(p)

	for block in p.unused_blocks {
		free(block, p.block_allocator)
	}
	clear(&p.unused_blocks)
}

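/*
	Illustrative sketch (not part of the original file): Dynamic_Pool grows in
	fixed-size blocks obtained from `block_allocator`; requests at or above
	`out_band_size` get their own out-of-band allocation. The procedure name
	`example_dynamic_pool_usage` is hypothetical.

	example_dynamic_pool_usage :: proc() {
		pool: Dynamic_Pool
		dynamic_pool_init(&pool) // 64 KiB blocks, 8-byte alignment by default
		defer dynamic_pool_destroy(&pool)

		a := dynamic_pool_allocator(&pool)
		small, err := alloc_bytes(100, 8, a) // bump-allocated inside the current block
		assert(err == nil)
		_ = small

		dynamic_pool_reset(&pool) // keeps the blocks around for reuse
	}
*/
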
panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		if size > 0 {
			panic("mem: panic allocator, .Alloc called", loc=loc)
		}
	case .Alloc_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc)
		}
	case .Resize:
		if size > 0 {
			panic("mem: panic allocator, .Resize called", loc=loc)
		}
	case .Resize_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Resize_Non_Zeroed called", loc=loc)
		}
	case .Free:
		if old_memory != nil {
			panic("mem: panic allocator, .Free called", loc=loc)
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called", loc=loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features}
		}
		return nil, nil
	case .Query_Info:
		panic("mem: panic allocator, .Query_Info called", loc=loc)
	}

	return nil, nil
}

@(require_results)
panic_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = panic_allocator_proc,
		data = nil,
	}
}

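/*
	Illustrative sketch (not part of the original file): installing the panic allocator
	as the context allocator to catch accidental heap allocations in a region of code.
	The procedure name `example_panic_allocator_usage` is hypothetical.

	example_panic_allocator_usage :: proc() {
		context.allocator = panic_allocator()

		// Any non-trivial allocation through the context allocator now panics,
		// e.g. the following line would abort with ".Alloc called":
		// _ = make([]int, 16)
	}
*/
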
Buddy_Block :: struct #align(align_of(uint)) {
	size:    uint,
	is_free: bool,
}

@(require_results)
buddy_block_next :: proc(block: ^Buddy_Block) -> ^Buddy_Block {
	return (^Buddy_Block)(([^]byte)(block)[block.size:])
}

@(require_results)
buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block {
	block := block
	if block != nil && size != 0 {
		// Recursive Split
		for size < block.size {
			sz := block.size >> 1
			block.size = sz
			block = buddy_block_next(block)
			block.size = sz
			block.is_free = true
		}
		if size <= block.size {
			return block
		}
	}
	// Block cannot fit the requested allocation size
	return nil
}

buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) {
	for {
		// Keep looping until there are no more buddies to coalesce
		block := head
		buddy := buddy_block_next(block)

		no_coalescence := true
		for block < tail && buddy < tail { // make sure the buddies are within the range
			if block.is_free && buddy.is_free && block.size == buddy.size {
				// Coalesce buddies into one
				block.size <<= 1
				block = buddy_block_next(block)
				if block < tail {
					buddy = buddy_block_next(block)
					no_coalescence = false
				}
			} else if block.size < buddy.size {
				// The buddy block is split into smaller blocks
				block = buddy
				buddy = buddy_block_next(buddy)
			} else {
				block = buddy_block_next(buddy)
				if block < tail {
					// Leave the buddy block for the next iteration
					buddy = buddy_block_next(block)
				}
			}
		}

		if no_coalescence {
			return
		}
	}
}

@(require_results)
buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block {
	assert(size != 0)

	best_block: ^Buddy_Block
	block := head                    // left
	buddy := buddy_block_next(block) // right

	// The entire memory section between head and tail is free,
	// just call 'buddy_block_split' to get the allocation
	if buddy == tail && block.is_free {
		return buddy_block_split(block, size)
	}

	// Find the block which is the 'best_block' to requested allocation sized
	for block < tail && buddy < tail { // make sure the buddies are within the range
		// If both buddies are free, coalesce them together
		// NOTE: this is an optimization to reduce fragmentation
		//       this could be completely ignored
		if block.is_free && buddy.is_free && block.size == buddy.size {
			block.size <<= 1
			if size <= block.size && (best_block == nil || block.size <= best_block.size) {
				best_block = block
			}

			block = buddy_block_next(buddy)
			if block < tail {
				// Delay the buddy block for the next iteration
				buddy = buddy_block_next(block)
			}
			continue
		}

		if block.is_free && size <= block.size &&
		   (best_block == nil || block.size <= best_block.size) {
			best_block = block
		}

		if buddy.is_free && size <= buddy.size &&
		   (best_block == nil || buddy.size < best_block.size) {
			// If each buddy are the same size, then it makes more sense
			// to pick the buddy as it "bounces around" less
			best_block = buddy
		}

		if (block.size <= buddy.size) {
			block = buddy_block_next(buddy)
			if (block < tail) {
				// Delay the buddy block for the next iteration
				buddy = buddy_block_next(block)
			}
		} else {
			// Buddy was split into smaller blocks
			block = buddy
			buddy = buddy_block_next(buddy)
		}
	}

	if best_block != nil {
		// This will handle the case if the 'best_block' is also the perfect fit
		return buddy_block_split(best_block, size)
	}

	// Maybe out of memory
	return nil
}

Buddy_Allocator :: struct {
	head:      ^Buddy_Block,
	tail:      ^Buddy_Block,
	alignment: uint,
}

@(require_results)
buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator {
	return Allocator{
		procedure = buddy_allocator_proc,
		data = b,
	}
}

buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint) {
	assert(data != nil)
	assert(is_power_of_two(uintptr(len(data))))
	assert(is_power_of_two(uintptr(alignment)))

	alignment := alignment
	if alignment < size_of(Buddy_Block) {
		alignment = size_of(Buddy_Block)
	}

	ptr := raw_data(data)
	assert(uintptr(ptr) % uintptr(alignment) == 0, "data is not aligned to minimum alignment")

	b.head = (^Buddy_Block)(ptr)
	b.head.size = uint(len(data))
	b.head.is_free = true

	b.tail = buddy_block_next(b.head)

	b.alignment = alignment
}

@(require_results)
buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint {
	size := size
	actual_size := b.alignment

	size += size_of(Buddy_Block)
	size = align_forward_uint(size, b.alignment)

	for size > actual_size {
		actual_size <<= 1
	}

	return actual_size
}

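/*
	Worked example (illustrative, assuming a 64-bit target where
	size_of(Buddy_Block) == 16): with b.alignment == 16, a request of size == 1
	becomes 1 + 16 = 17, is aligned forward to 32, and the power-of-two search then
	doubles actual_size from 16 to 32, so a 32-byte block is carved from the tree.
*/
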
@(require_results)
buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint, zeroed: bool) -> ([]byte, Allocator_Error) {
	if size != 0 {
		actual_size := buddy_block_size_required(b, size)

		found := buddy_block_find_best(b.head, b.tail, actual_size)
		if found == nil { // first search failed; coalesce free buddies and retry
			// Try to coalesce all the free buddy blocks and then search again
			buddy_block_coalescence(b.head, b.tail)
			found = buddy_block_find_best(b.head, b.tail, actual_size)
		}

		if found == nil {
			return nil, .Out_Of_Memory
		}
		found.is_free = false
		data := ([^]byte)(found)[b.alignment:][:size]
		if zeroed {
			zero_slice(data)
		}
		return data, nil
	}
	return nil, nil
}

buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Error {
	if ptr != nil {
		if !(b.head <= ptr && ptr <= b.tail) {
			return .Invalid_Pointer
		}
		block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
		block.is_free = true
		buddy_block_coalescence(b.head, b.tail)
	}
	return nil
}

buddy_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	b := (^Buddy_Allocator)(allocator_data)

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		return buddy_allocator_alloc(b, uint(size), mode == .Alloc)

	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b))

	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b))

	case .Free:
		return nil, buddy_allocator_free(b, old_memory)

	case .Free_All:
		alignment := b.alignment
		head := ([^]byte)(b.head)
		tail := ([^]byte)(b.tail)
		data := head[:ptr_sub(tail, head)]
		buddy_allocator_init(b, data, alignment)

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features, .Alloc, .Alloc_Non_Zeroed, .Resize, .Resize_Non_Zeroed, .Free, .Free_All, .Query_Info}
		}
		return nil, nil

	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			ptr := info.pointer
			if !(b.head <= ptr && ptr <= b.tail) {
				return nil, .Invalid_Pointer
			}
			block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
			info.size = int(block.size)
			info.alignment = int(b.alignment)
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}

	return nil, nil
}

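/*
	Illustrative sketch (not part of the original file): setting up a Buddy_Allocator
	over a power-of-two sized, suitably aligned backing buffer. The procedure name
	`example_buddy_usage` is hypothetical.

	example_buddy_usage :: proc() {
		// Backing memory must be power-of-two sized and aligned to `alignment`.
		backing, _ := make_aligned([]byte, 4096, 16)
		defer delete(backing)

		buddy: Buddy_Allocator
		buddy_allocator_init(&buddy, backing, 16)

		a := buddy_allocator(&buddy)
		data, err := alloc_bytes(100, 16, a) // rounded up internally to a power-of-two block
		assert(err == nil)

		free_bytes(data, a) // marks the block free and coalesces buddies
	}
*/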