// allocators.odin
package mem

import "base:intrinsics"
import "base:runtime"

// NOTE(Feoramund): Sanitizer usage in this package has been temporarily
// disabled pending a thorough review per allocator, as ASan is particular
// about the addresses and ranges it receives.
//
// In short, it keeps track only of 8-byte blocks. This can cause issues if an
// allocator poisons an entire range but an allocation for less than 8 bytes is
// desired, or if the next allocation address would not be 8-byte aligned.
//
// This must be handled carefully on a per-allocator basis, and some allocators
// may not be able to participate.
//
// Please see the following link for more information:
//
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm#mapping
//
// import "base:sanitizer"

/*
This procedure checks if a byte slice `range` is poisoned and makes sure the
root address of the poison range is the base pointer of `range`.
This can help guard against buggy allocators returning memory that they already returned.
This has no effect if `-sanitize:address` is not enabled.
*/
// @(disabled=.Address not_in ODIN_SANITIZER_FLAGS, private)
// ensure_poisoned :: proc(range: []u8, loc := #caller_location) {
// 	cond := sanitizer.address_region_is_poisoned(range) == raw_data(range)
// 	// If this fails, we've overlapped an allocation and it's our fault.
// 	ensure(cond, `This allocator has sliced a block of memory of which some part is not poisoned before returning.
// This is a bug in the core library and should be reported to the Odin developers with a stack trace and minimal example code if possible.`, loc)
// }

/*
This procedure checks if a byte slice `range` is not poisoned.
This can help guard against buggy allocators resizing memory that they should not.
This has no effect if `-sanitize:address` is not enabled.
*/
// @(disabled=.Address not_in ODIN_SANITIZER_FLAGS, private)
// ensure_not_poisoned :: proc(range: []u8, loc := #caller_location) {
// 	cond := sanitizer.address_region_is_poisoned(range) == nil
// 	// If this fails, we've tried to resize memory that is poisoned, which
// 	// could be user error caused by an incorrect `old_memory` pointer.
// 	ensure(cond, `This allocator has sliced a block of memory of which some part is poisoned before returning.
// This may be a bug in the core library, or it could be user error due to an invalid pointer passed to a resize operation.
// If after ensuring your own code is not responsible, report the problem to the Odin developers with a stack trace and minimal example code if possible.`, loc)
// }
/*
Nil allocator.

The `nil` allocator returns `nil` on every allocation attempt. This type of
allocator can be used in scenarios where memory doesn't need to be allocated,
but an attempt to allocate memory is not an error.
*/
@(require_results)
nil_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = nil_allocator_proc,
		data = nil,
	}
}

nil_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	return nil, nil
}

/*
Panic allocator.

The panic allocator is a type of allocator that panics on any allocation
attempt. This type of allocator can be used in scenarios where memory should
not be allocated, and an attempt to allocate memory is an error.
*/
@(require_results)
panic_allocator :: proc() -> Allocator {
	return Allocator{
		procedure = panic_allocator_proc,
		data = nil,
	}
}

panic_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		if size > 0 {
			panic("mem: panic allocator, .Alloc called", loc=loc)
		}
	case .Alloc_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc)
		}
	case .Resize:
		if size > 0 {
			panic("mem: panic allocator, .Resize called", loc=loc)
		}
	case .Resize_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Resize_Non_Zeroed called", loc=loc)
		}
	case .Free:
		if old_memory != nil {
			panic("mem: panic allocator, .Free called", loc=loc)
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called", loc=loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features}
		}
		return nil, nil
	case .Query_Info:
		panic("mem: panic allocator, .Query_Info called", loc=loc)
	}
	return nil, nil
}
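
/*
A minimal usage sketch (assumes this package is imported as `core:mem`): both
of the above allocators are useful for asserting that a code path performs no
allocation.

	import "core:mem"

	no_alloc_zone :: proc() {
		context.allocator = mem.panic_allocator() // any allocation now panics
		// ... call code that must not allocate ...
	}
*/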
/*
Arena allocator data.
*/
Arena :: struct {
	data:       []byte,
	offset:     int,
	peak_used:  int,
	temp_count: int,
}

/*
Arena allocator.

The arena allocator (also known as a linear allocator, bump allocator, or
region allocator) is an allocator that uses a single backing buffer for
allocations.

The buffer is used contiguously, from start to end. Each subsequent allocation
occupies the next adjacent region of memory in the buffer. Since the arena
allocator does not keep track of any metadata associated with the allocations
and their locations, it is impossible to free individual allocations.

The arena allocator can be used for temporary allocations in frame-based memory
management. Games are one example of such applications. A global arena can be
used for any temporary memory allocations, and at the end of each frame all
temporary allocations are freed. Since no temporary object is going to live
longer than a frame, no lifetimes are violated.
*/
@(require_results)
arena_allocator :: proc(arena: ^Arena) -> Allocator {
	return Allocator{
		procedure = arena_allocator_proc,
		data = arena,
	}
}
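
/*
A minimal usage sketch (assumes this package is imported as `core:mem`; the
buffer size is arbitrary):

	import "core:mem"

	example :: proc() {
		buf: [4096]byte
		arena: mem.Arena
		mem.arena_init(&arena, buf[:])
		a := mem.arena_allocator(&arena)
		x, _ := mem.arena_alloc(&arena, 128) // direct allocation
		y := make([]int, 8, a)               // or through the Allocator interface
		_, _ = x, y
		mem.arena_free_all(&arena)           // frees everything at once
	}
*/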
/*
Initialize an arena.

This procedure initializes the arena `a` with the memory region `data` as its
backing buffer.
*/
arena_init :: proc(a: ^Arena, data: []byte) {
	a.data = data
	a.offset = 0
	a.peak_used = 0
	a.temp_count = 0
	// sanitizer.address_poison(a.data)
}

/*
Allocate memory from an arena.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment` from an arena `a`. The allocated memory is zero-initialized.
This procedure returns a pointer to the newly allocated memory region.
*/
@(require_results)
arena_alloc :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := arena_alloc_bytes(a, size, alignment, loc)
	return raw_data(bytes), err
}

/*
Allocate memory from an arena.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment` from an arena `a`. The allocated memory is zero-initialized.
This procedure returns a slice of the newly allocated memory region.
*/
@(require_results)
arena_alloc_bytes :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	bytes, err := arena_alloc_bytes_non_zeroed(a, size, alignment, loc)
	if bytes != nil {
		zero_slice(bytes)
	}
	return bytes, err
}

/*
Allocate non-initialized memory from an arena.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment` from an arena `a`. The allocated memory is not explicitly
zero-initialized. This procedure returns a pointer to the newly allocated
memory region.
*/
@(require_results)
arena_alloc_non_zeroed :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := arena_alloc_bytes_non_zeroed(a, size, alignment, loc)
	return raw_data(bytes), err
}

/*
Allocate non-initialized memory from an arena.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment` from an arena `a`. The allocated memory is not explicitly
zero-initialized. This procedure returns a slice of the newly allocated
memory region.
*/
@(require_results)
arena_alloc_bytes_non_zeroed :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if a.data == nil {
		panic("Allocation on an uninitialized Arena allocator.", loc)
	}
	#no_bounds_check end := &a.data[a.offset]
	ptr := align_forward(end, uintptr(alignment))
	total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))
	if a.offset + total_size > len(a.data) {
		return nil, .Out_Of_Memory
	}
	a.offset += total_size
	a.peak_used = max(a.peak_used, a.offset)
	result := byte_slice(ptr, size)
	// ensure_poisoned(result)
	// sanitizer.address_unpoison(result)
	return result, nil
}
/*
Free all memory back to the arena allocator.
*/
arena_free_all :: proc(a: ^Arena) {
	a.offset = 0
	// sanitizer.address_poison(a.data)
}

arena_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size: int,
	alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	arena := cast(^Arena)allocator_data
	switch mode {
	case .Alloc:
		return arena_alloc_bytes(arena, size, alignment, loc)
	case .Alloc_Non_Zeroed:
		return arena_alloc_bytes_non_zeroed(arena, size, alignment, loc)
	case .Free:
		return nil, .Mode_Not_Implemented
	case .Free_All:
		arena_free_all(arena)
	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena), loc)
	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena), loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return nil, nil
}
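
/*
A sketch of querying an allocator's supported modes through its allocator
procedure, as the `.Query_Features` case above supports (assumes `core:mem`
is imported as `mem`; the `query` name is illustrative):

	query :: proc(a: mem.Allocator) -> mem.Allocator_Mode_Set {
		set: mem.Allocator_Mode_Set
		if a.procedure != nil {
			// The mode set is written through the `old_memory` parameter.
			_, _ = a.procedure(a.data, .Query_Features, 0, 0, &set, 0)
		}
		return set
	}
*/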
/*
Temporary memory region of an `Arena` allocator.

Temporary memory regions of an arena act as "save-points" for the allocator.
When one is created, subsequent allocations are done inside the temporary
memory region. When `end_arena_temp_memory` is called, the arena is rolled
back, and all of the memory that was allocated from the arena after the
save-point is freed.
Multiple temporary memory regions can exist at the same time for an arena.
*/
Arena_Temp_Memory :: struct {
	arena:       ^Arena,
	prev_offset: int,
}

/*
Start a temporary memory region.

This procedure creates a temporary memory region. After a temporary memory
region is created, all allocations are said to be *inside* the temporary memory
region, until `end_arena_temp_memory` is called.
*/
@(require_results)
begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	tmp: Arena_Temp_Memory
	tmp.arena = a
	tmp.prev_offset = a.offset
	a.temp_count += 1
	return tmp
}

/*
End a temporary memory region.

This procedure ends the temporary memory region for an arena. All of the
allocations *inside* the temporary memory region will be freed back to the
arena.
*/
end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
	assert(tmp.arena.offset >= tmp.prev_offset)
	assert(tmp.arena.temp_count > 0)
	// sanitizer.address_poison(tmp.arena.data[tmp.prev_offset:tmp.arena.offset])
	tmp.arena.offset = tmp.prev_offset
	tmp.arena.temp_count -= 1
}
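
/*
A minimal usage sketch of temporary memory regions (assumes `core:mem` is
imported as `mem`):

	scratch_work :: proc(arena: ^mem.Arena) {
		tmp := mem.begin_arena_temp_memory(arena)
		defer mem.end_arena_temp_memory(tmp) // rolls the arena back on scope exit

		a := mem.arena_allocator(arena)
		nums := make([]int, 100, a) // freed when `tmp` ends
		_ = nums
	}
*/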
/* Preserved for compatibility */
Scratch_Allocator :: Scratch
scratch_allocator_init :: scratch_init
scratch_allocator_destroy :: scratch_destroy

/*
Scratch allocator data.
*/
Scratch :: struct {
	data:                 []byte,
	curr_offset:          int,
	prev_allocation:      rawptr,
	prev_allocation_root: rawptr,
	backup_allocator:     Allocator,
	leaked_allocations:   [dynamic][]byte,
}

/*
Scratch allocator.

The scratch allocator works in a similar way to the `Arena` allocator. The
scratch allocator has a backing buffer that is allocated in contiguous regions,
from start to end.

Each subsequent allocation will be the next adjacent region of memory in the
backing buffer. If an allocation doesn't fit into the remaining space of the
backing buffer, it is placed at the start of the buffer, and all previous
allocations become invalidated.

If an allocation doesn't fit into the backing buffer as a whole, it is
allocated using the backup allocator, and the pointer to the allocated memory
region is put into the `leaked_allocations` array. A `Warning`-level log
message is sent as well.

Allocations which are resized are resized in-place if they were the last
allocation. Otherwise, they are re-allocated to avoid overwriting previous
allocations.

The `leaked_allocations` array is managed by the `context` allocator if no
`backup_allocator` is specified in `scratch_init`.
*/
@(require_results)
scratch_allocator :: proc(allocator: ^Scratch) -> Allocator {
	return Allocator{
		procedure = scratch_allocator_proc,
		data = allocator,
	}
}

/*
Initialize a scratch allocator.
*/
scratch_init :: proc(s: ^Scratch, size: int, backup_allocator := context.allocator) -> Allocator_Error {
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
	s.curr_offset = 0
	s.prev_allocation = nil
	s.prev_allocation_root = nil
	s.backup_allocator = backup_allocator
	s.leaked_allocations.allocator = backup_allocator
	// sanitizer.address_poison(s.data)
	return nil
}

/*
Free all data associated with a scratch allocator.

This is distinct from `scratch_free_all` in that it deallocates all memory used
to set up the allocator, as opposed to all allocations made from that space.
*/
scratch_destroy :: proc(s: ^Scratch) {
	if s == nil {
		return
	}
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator)
	}
	delete(s.leaked_allocations)
	// sanitizer.address_unpoison(s.data)
	delete(s.data, s.backup_allocator)
	s^ = {}
}
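
/*
A minimal usage sketch (assumes `core:mem` is imported as `mem`; the 1 MiB
size is arbitrary):

	example :: proc() -> mem.Allocator_Error {
		s: mem.Scratch
		mem.scratch_init(&s, 1*mem.Megabyte) or_return
		defer mem.scratch_destroy(&s)

		a := mem.scratch_allocator(&s)
		buf := make([]byte, 256, a)
		_ = buf
		mem.scratch_free_all(&s) // recycle the whole buffer
		return nil
	}
*/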
/*
Allocate memory from a scratch allocator.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment`. The allocated memory region is zero-initialized. This procedure
returns a pointer to the allocated memory region.
*/
@(require_results)
scratch_alloc :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := scratch_alloc_bytes(s, size, alignment, loc)
	return raw_data(bytes), err
}

/*
Allocate memory from a scratch allocator.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment`. The allocated memory region is zero-initialized. This procedure
returns a slice of the allocated memory region.
*/
@(require_results)
scratch_alloc_bytes :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	bytes, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
	if bytes != nil {
		zero_slice(bytes)
	}
	return bytes, err
}

/*
Allocate non-initialized memory from a scratch allocator.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment`. The allocated memory region is not explicitly zero-initialized.
This procedure returns a pointer to the allocated memory region.
*/
@(require_results)
scratch_alloc_non_zeroed :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
	return raw_data(bytes), err
}

/*
Allocate non-initialized memory from a scratch allocator.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment`. The allocated memory region is not explicitly zero-initialized.
This procedure returns a slice of the allocated memory region.
*/
@(require_results)
scratch_alloc_bytes_non_zeroed :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if s.data == nil {
		DEFAULT_BACKING_SIZE :: 4 * Megabyte
		// Using a scratch allocator (or anything referring to `s`) as the
		// backup would make this initialization cyclic.
		if context.allocator.procedure == scratch_allocator_proc || context.allocator.data == s {
			panic("Cyclic initialization of the scratch allocator with itself.", loc)
		}
		scratch_init(s, DEFAULT_BACKING_SIZE)
	}
	aligned_size := size
	if alignment > 1 {
		// It is possible to do this with fewer bytes, but this is the
		// mathematically simpler solution, and this being a Scratch allocator,
		// we don't need to be so strict about every byte.
		aligned_size += alignment - 1
	}
	if aligned_size <= len(s.data) {
		offset := uintptr(0)
		if s.curr_offset+aligned_size <= len(s.data) {
			offset = uintptr(s.curr_offset)
		} else {
			// The allocation will cause an overflow past the boundary of the
			// space available, so reset to the starting offset.
			offset = 0
		}
		start := uintptr(raw_data(s.data))
		ptr := rawptr(offset+start)
		// We keep track of the original base pointer without extra alignment
		// in order to later allow the free operation to work from that point.
		s.prev_allocation_root = ptr
		if !is_aligned(ptr, alignment) {
			ptr = align_forward(ptr, uintptr(alignment))
		}
		s.prev_allocation = ptr
		s.curr_offset = int(offset) + aligned_size
		result := byte_slice(ptr, size)
		// ensure_poisoned(result)
		// sanitizer.address_unpoison(result)
		return result, nil
	} else {
		// NOTE: No need to use `aligned_size` here, as the backup allocator will handle alignment for us.
		a := s.backup_allocator
		ptr, err := alloc_bytes_non_zeroed(size, alignment, a, loc)
		if err != nil {
			return ptr, err
		}
		append(&s.leaked_allocations, ptr)
		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch resorted to backup_allocator", logger.options, loc)
			}
		}
		return ptr, err
	}
}
/*
Free memory back to the scratch allocator.

This procedure frees the memory region allocated at pointer `ptr`.
If `ptr` is not the latest allocation and is not a leaked allocation, this
operation is a no-op.
*/
scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Allocator_Error {
	if s.data == nil {
		panic("Free on an uninitialized Scratch allocator.", loc)
	}
	if ptr == nil {
		return nil
	}
	start := uintptr(raw_data(s.data))
	end := start + uintptr(len(s.data))
	old_ptr := uintptr(ptr)
	if s.prev_allocation == ptr {
		s.curr_offset = int(uintptr(s.prev_allocation_root) - start)
		// sanitizer.address_poison(s.data[s.curr_offset:])
		s.prev_allocation = nil
		s.prev_allocation_root = nil
		return nil
	}
	if start <= old_ptr && old_ptr < end {
		// NOTE(bill): Cannot free this pointer but it is valid
		return nil
	}
	if len(s.leaked_allocations) != 0 {
		for data, i in s.leaked_allocations {
			// This allocation was leaked to the backup allocator.
			if raw_data(data) == ptr {
				free_bytes(data, s.backup_allocator, loc)
				ordered_remove(&s.leaked_allocations, i, loc)
				return nil
			}
		}
	}
	return .Invalid_Pointer
}
/*
Free all memory back to the scratch allocator.
*/
scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) {
	s.curr_offset = 0
	s.prev_allocation = nil
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator, loc)
	}
	clear(&s.leaked_allocations)
	// sanitizer.address_poison(s.data)
}
/*
Resize an allocation owned by a scratch allocator.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size` and alignment `alignment`. The
newly allocated memory, if any, is zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
memory region located at an address specified by `old_memory`.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
scratch_resize :: proc(
	s: ^Scratch,
	old_memory: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := scratch_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
	return raw_data(bytes), err
}

/*
Resize an allocation owned by a scratch allocator.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is
zero-initialized.

If `old_data` is `nil`, this procedure acts just like `scratch_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
memory region specified by `old_data`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
scratch_resize_bytes :: proc(
	s: ^Scratch,
	old_data: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	bytes, err := scratch_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
	if bytes != nil && size > len(old_data) {
		// Zero only the newly grown region past the old length.
		zero_slice(bytes[len(old_data):])
	}
	return bytes, err
}
/*
Resize an allocation owned by a scratch allocator, without zero-initialization.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size` and alignment `alignment`. The
newly allocated memory, if any, is not explicitly zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
memory region located at an address specified by `old_memory`.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
scratch_resize_non_zeroed :: proc(
	s: ^Scratch,
	old_memory: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := scratch_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
	return raw_data(bytes), err
}

/*
Resize an allocation owned by a scratch allocator, without zero-initialization.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.

If `old_data` is `nil`, this procedure acts just like `scratch_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
memory region specified by `old_data`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
scratch_resize_bytes_non_zeroed :: proc(
	s: ^Scratch,
	old_data: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	old_memory := raw_data(old_data)
	old_size := len(old_data)
	if s.data == nil {
		DEFAULT_BACKING_SIZE :: 4 * Megabyte
		// Using a scratch allocator (or anything referring to `s`) as the
		// backup would make this initialization cyclic.
		if context.allocator.procedure == scratch_allocator_proc || context.allocator.data == s {
			panic("Cyclic initialization of the scratch allocator with itself.", loc)
		}
		scratch_init(s, DEFAULT_BACKING_SIZE)
	}
	begin := uintptr(raw_data(s.data))
	end := begin + uintptr(len(s.data))
	old_ptr := uintptr(old_memory)
	// We can only sanely resize the last allocation; to do otherwise may
	// overwrite memory that could very well just have been allocated.
	//
	// Also, the alignments must match, otherwise we must re-allocate to
	// guarantee the user's request.
	if s.prev_allocation == old_memory && is_aligned(old_memory, alignment) && old_ptr+uintptr(size) < end {
		// ensure_not_poisoned(old_data)
		// sanitizer.address_poison(old_memory)
		s.curr_offset = int(old_ptr-begin)+size
		result := byte_slice(old_memory, size)
		// sanitizer.address_unpoison(result)
		return result, nil
	}
	data, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
	if err != nil {
		return data, err
	}
	runtime.copy(data, byte_slice(old_memory, old_size))
	err = scratch_free(s, old_memory, loc)
	return data, err
}
scratch_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	s := (^Scratch)(allocator_data)
	size := size
	switch mode {
	case .Alloc:
		return scratch_alloc_bytes(s, size, alignment, loc)
	case .Alloc_Non_Zeroed:
		return scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
	case .Free:
		return nil, scratch_free(s, old_memory, loc)
	case .Free_All:
		scratch_free_all(s, loc)
	case .Resize:
		return scratch_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
	case .Resize_Non_Zeroed:
		return scratch_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return nil, nil
}
/*
Stack allocator data.
*/
Stack :: struct {
	data:        []byte,
	prev_offset: int,
	curr_offset: int,
	peak_used:   int,
}

/*
Header of a stack allocation.
*/
Stack_Allocation_Header :: struct {
	prev_offset: int,
	padding:     int,
}
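
// A rough sketch of the layout of a single allocation inside the backing
// buffer (the header immediately precedes the pointer handed to the caller):
//
//	...previous allocation | padding | Stack_Allocation_Header | allocation...
//	                                                           ^ returned pointer
//
// `padding` counts the bytes (header included) back to the end of the
// previous allocation, and `prev_offset` is used to enforce LIFO freeing.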
/*
Stack allocator.

The stack allocator is an allocator that allocates data in the backing buffer
linearly, from start to end. Each subsequent allocation will get the next
adjacent memory region.

Unlike the arena allocator, the stack allocator saves allocation metadata and
has a strict freeing order. Only the last allocated element can be freed. After
the last allocated element is freed, the previously allocated element becomes
available for freeing.

The metadata is stored in allocation headers, which are located before the
start of each allocated memory region. Each header records the offset of the
previous allocation, which is what enforces the LIFO freeing order.
*/
@(require_results)
stack_allocator :: proc(stack: ^Stack) -> Allocator {
	return Allocator{
		procedure = stack_allocator_proc,
		data = stack,
	}
}
/*
Initialize a stack allocator.

This procedure initializes the stack allocator with a backing buffer specified
by the `data` parameter.
*/
stack_init :: proc(s: ^Stack, data: []byte) {
	s.data = data
	s.prev_offset = 0
	s.curr_offset = 0
	s.peak_used = 0
	// sanitizer.address_poison(data)
}

/*
Allocate memory from a stack allocator.

This procedure allocates `size` bytes of memory, aligned to the boundary
specified by `alignment`. The allocated memory is zero-initialized. This
procedure returns the pointer to the allocated memory.
*/
@(require_results)
stack_alloc :: proc(
	s: ^Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := stack_alloc_bytes(s, size, alignment, loc)
	return raw_data(bytes), err
}

/*
Allocate memory from a stack allocator.

This procedure allocates `size` bytes of memory, aligned to the boundary
specified by `alignment`. The allocated memory is zero-initialized. This
procedure returns the slice of the allocated memory.
*/
@(require_results)
stack_alloc_bytes :: proc(
	s: ^Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	bytes, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
	if bytes != nil {
		zero_slice(bytes)
	}
	return bytes, err
}

/*
Allocate memory from a stack allocator.

This procedure allocates `size` bytes of memory, aligned to the boundary
specified by `alignment`. The allocated memory is not explicitly
zero-initialized. This procedure returns the pointer to the allocated memory.
*/
@(require_results)
stack_alloc_non_zeroed :: proc(
	s: ^Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
	return raw_data(bytes), err
}

/*
Allocate memory from a stack allocator.

This procedure allocates `size` bytes of memory, aligned to the boundary
specified by `alignment`. The allocated memory is not explicitly
zero-initialized. This procedure returns the slice of the allocated memory.
*/
@(require_results, no_sanitize_address)
stack_alloc_bytes_non_zeroed :: proc(
	s: ^Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if s.data == nil {
		panic("Allocation on an uninitialized Stack allocator.", loc)
	}
	curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
	padding := calc_padding_with_header(
		curr_addr,
		uintptr(alignment),
		size_of(Stack_Allocation_Header),
	)
	if s.curr_offset + padding + size > len(s.data) {
		return nil, .Out_Of_Memory
	}
	old_offset := s.prev_offset
	s.prev_offset = s.curr_offset
	s.curr_offset += padding
	next_addr := curr_addr + uintptr(padding)
	header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
	header.padding = padding
	header.prev_offset = old_offset
	s.curr_offset += size
	s.peak_used = max(s.peak_used, s.curr_offset)
	result := byte_slice(rawptr(next_addr), size)
	// ensure_poisoned(result)
	// sanitizer.address_unpoison(result)
	return result, nil
}
/*
Free memory back to the stack allocator.

This procedure frees the memory region starting at `old_memory` to the stack.
If the freeing is an out-of-order freeing, the `.Invalid_Pointer` error
is returned.
*/
stack_free :: proc(
	s: ^Stack,
	old_memory: rawptr,
	loc := #caller_location,
) -> (Allocator_Error) {
	if s.data == nil {
		panic("Free on an uninitialized Stack allocator.", loc)
	}
	if old_memory == nil {
		return nil
	}
	start := uintptr(raw_data(s.data))
	end := start + uintptr(len(s.data))
	curr_addr := uintptr(old_memory)
	if !(start <= curr_addr && curr_addr < end) {
		panic("Out of bounds memory address passed to Stack allocator. (free)", loc)
	}
	if curr_addr >= start+uintptr(s.curr_offset) {
		// NOTE(bill): Allow double frees
		return nil
	}
	header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
	old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
	if old_offset != s.prev_offset {
		return .Invalid_Pointer
	}
	s.prev_offset = header.prev_offset
	// sanitizer.address_poison(s.data[old_offset:s.curr_offset])
	s.curr_offset = old_offset
	return nil
}

/*
Free all memory back to the stack allocator.
*/
stack_free_all :: proc(s: ^Stack, loc := #caller_location) {
	s.prev_offset = 0
	s.curr_offset = 0
	// sanitizer.address_poison(s.data)
}
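
/*
A minimal sketch of the strict LIFO discipline (assumes `core:mem` is
imported as `mem`):

	example :: proc() {
		buf: [1024]byte
		s: mem.Stack
		mem.stack_init(&s, buf[:])
		a, _ := mem.stack_alloc(&s, 64)
		b, _ := mem.stack_alloc(&s, 64)
		_ = mem.stack_free(&s, a) // out of order: returns .Invalid_Pointer
		_ = mem.stack_free(&s, b) // in order: succeeds
		_ = mem.stack_free(&s, a) // now `a` is the top and can be freed
	}
*/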
/*
Resize an allocation owned by a stack allocator.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size` and alignment `alignment`. The
newly allocated memory, if any, is zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `stack_free()`, freeing the
memory region located at an address specified by `old_memory`.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
stack_resize :: proc(
	s: ^Stack,
	old_memory: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
	return raw_data(bytes), err
}

/*
Resize an allocation owned by a stack allocator.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is
zero-initialized.

If `old_data` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `stack_free()`, freeing the
memory region specified by `old_data`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
stack_resize_bytes :: proc(
	s: ^Stack,
	old_data: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	bytes, err := stack_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
	if err == nil {
		if old_data == nil {
			zero_slice(bytes)
		} else if size > len(old_data) {
			zero_slice(bytes[len(old_data):])
		}
	}
	return bytes, err
}

/*
Resize an allocation owned by a stack allocator, without zero-initialization.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size` and alignment `alignment`. The
newly allocated memory, if any, is not explicitly zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `stack_free()`, freeing the
memory region located at an address specified by `old_memory`.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
stack_resize_non_zeroed :: proc(
	s: ^Stack,
	old_memory: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
	return raw_data(bytes), err
}

/*
Resize an allocation owned by a stack allocator, without zero-initialization.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.

If `old_data` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `stack_free()`, freeing the
memory region specified by `old_data`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
stack_resize_bytes_non_zeroed :: proc(
	s: ^Stack,
	old_data: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	old_memory := raw_data(old_data)
	old_size := len(old_data)
	if s.data == nil {
		panic("Resize on an uninitialized Stack allocator.", loc)
	}
	if old_memory == nil {
		return stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
	}
	if size == 0 {
		return nil, stack_free(s, old_memory, loc)
	}
	start := uintptr(raw_data(s.data))
	end := start + uintptr(len(s.data))
	curr_addr := uintptr(old_memory)
	if !(start <= curr_addr && curr_addr < end) {
		panic("Out of bounds memory address passed to Stack allocator. (resize)", loc)
	}
	if curr_addr >= start+uintptr(s.curr_offset) {
		// NOTE(bill): Allow double frees
		return nil, nil
	}
	if uintptr(old_memory) & uintptr(alignment-1) != 0 {
		// A different alignment has been requested and the current address
		// does not satisfy it.
		data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
			// sanitizer.address_poison(old_memory)
		}
		return data, err
	}
	if old_size == size {
		return byte_slice(old_memory, size), nil
	}
	header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
	old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
	if old_offset != header.prev_offset {
		data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
			// sanitizer.address_poison(old_memory)
		}
		return data, err
	}
	old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
	assert(old_memory_size == uintptr(old_size))
	diff := size - old_size
	s.curr_offset += diff // works for smaller sizes too
	if diff > 0 {
		// Zero the newly exposed bytes, which begin at the end of the old
		// allocation.
		zero(rawptr(curr_addr + uintptr(old_size)), diff)
	} else {
		// sanitizer.address_poison(old_data[size:])
	}
	result := byte_slice(old_memory, size)
	// ensure_poisoned(result)
	// sanitizer.address_unpoison(result)
	return result, nil
}
stack_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size: int,
	alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	s := cast(^Stack)allocator_data
	if s.data == nil {
		return nil, .Invalid_Argument
	}
	switch mode {
	case .Alloc:
		return stack_alloc_bytes(s, size, alignment, loc)
	case .Alloc_Non_Zeroed:
		return stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
	case .Free:
		return nil, stack_free(s, old_memory, loc)
	case .Free_All:
		stack_free_all(s, loc)
	case .Resize:
		return stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
	case .Resize_Non_Zeroed:
		return stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return nil, nil
}
/*
Allocation header of the small stack allocator.
*/
Small_Stack_Allocation_Header :: struct {
	padding: u8,
}

/*
Small stack allocator data.
*/
Small_Stack :: struct {
	data:      []byte,
	offset:    int,
	peak_used: int,
}

/*
Initialize a small stack allocator.

This procedure initializes the small stack allocator with `data` as its backing
buffer.
*/
small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
	s.data = data
	s.offset = 0
	s.peak_used = 0
	// sanitizer.address_poison(data)
}

/*
Small stack allocator.

The small stack allocator is just like a `Stack` allocator, the only
difference being an extremely small header size. Unlike the stack allocator,
the small stack allows out-of-order freeing of memory, with the stipulation
that all allocations made after the freed allocation become invalid as
subsequent allocations begin to overwrite the memory they formerly occupied.

The memory is allocated in the backing buffer linearly, from start to end.
Each subsequent allocation will get the next adjacent memory region.

The metadata is stored in allocation headers, which are located before the
start of each allocated memory region. Each header contains the number of
padding bytes between the end of the previous allocation and the start of the
allocated memory region.
*/
@(require_results)
small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
	return Allocator{
		procedure = small_stack_allocator_proc,
		data = stack,
	}
}
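
/*
A minimal usage sketch (assumes `core:mem` is imported as `mem`):

	example :: proc() {
		buf: [512]byte
		s: mem.Small_Stack
		mem.small_stack_init(&s, buf[:])
		a, _ := mem.small_stack_alloc(&s, 32)
		b, _ := mem.small_stack_alloc(&s, 32)
		// Out-of-order free is allowed, but `b` becomes invalid once new
		// allocations overwrite that space.
		_ = mem.small_stack_free(&s, a)
		_ = b
	}
*/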

/*
Allocate memory from a small stack allocator.

This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is zero-initialized. This procedure
returns a pointer to the allocated memory region.
*/
@(require_results)
small_stack_alloc :: proc(
	s: ^Small_Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := small_stack_alloc_bytes(s, size, alignment, loc)
	return raw_data(bytes), err
}

/*
Allocate memory from a small stack allocator.

This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is zero-initialized. This procedure
returns a slice of the allocated memory region.
*/
@(require_results)
small_stack_alloc_bytes :: proc(
	s: ^Small_Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	bytes, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
	if bytes != nil {
		zero_slice(bytes)
	}
	return bytes, err
}

/*
Allocate memory from a small stack allocator.

This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is not explicitly zero-initialized. This
procedure returns a pointer to the allocated memory region.
*/
@(require_results)
small_stack_alloc_non_zeroed :: proc(
	s: ^Small_Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
	return raw_data(bytes), err
}

/*
Allocate memory from a small stack allocator.

This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is not explicitly zero-initialized. This
procedure returns a slice of the allocated memory region.
*/
@(require_results, no_sanitize_address)
small_stack_alloc_bytes_non_zeroed :: proc(
	s: ^Small_Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if s.data == nil {
		panic("Allocation on an uninitialized Small Stack allocator.", loc)
	}
	alignment := alignment
	// Clamp the alignment so the computed padding always fits into the
	// header's single `padding` byte.
	alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)
	curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
	padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
	if s.offset + padding + size > len(s.data) {
		return nil, .Out_Of_Memory
	}
	s.offset += padding
	next_addr := curr_addr + uintptr(padding)
	header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
	header.padding = cast(u8)padding
	// We must poison the header, no matter what its state is, because there
	// may have been an out-of-order free before this point.
	// sanitizer.address_poison(header)
	s.offset += size
	s.peak_used = max(s.peak_used, s.offset)
	result := byte_slice(rawptr(next_addr), size)
	// NOTE: We cannot ensure the poison state of this allocation, because this
	// allocator allows out-of-order frees with overwriting.
	// sanitizer.address_unpoison(result)
	return result, nil
}
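
/*
A worked example of the header layout (illustrative numbers only): with
`curr_addr = 0x1001` and `alignment = 8`, the smallest padding that both
aligns the user pointer and leaves room for the one-byte header is 7, so the
header sits at 0x1007 and the allocation starts at 0x1008. `header.padding`
records 7, which `small_stack_free` later subtracts to recover the offset at
the end of the previous allocation.
*/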

/*
Free memory to a small stack allocator.

This procedure frees the memory region starting at the address `old_memory`.
Freeing may happen out of order; see the note on `Small_Stack` about how this
invalidates any allocations made after `old_memory`.
*/
small_stack_free :: proc(
	s: ^Small_Stack,
	old_memory: rawptr,
	loc := #caller_location,
) -> Allocator_Error {
	if s.data == nil {
		panic("Free on an uninitialized Small Stack allocator.", loc)
	}
	if old_memory == nil {
		return nil
	}
	start := uintptr(raw_data(s.data))
	end := start + uintptr(len(s.data))
	curr_addr := uintptr(old_memory)
	if !(start <= curr_addr && curr_addr < end) {
		panic("Out of bounds memory address passed to Small Stack allocator. (free)", loc)
	}
	if curr_addr >= start+uintptr(s.offset) {
		// NOTE(bill): Allow double frees
		return nil
	}
	header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
	old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
	// sanitizer.address_poison(s.data[old_offset:s.offset])
	s.offset = old_offset
	return nil
}
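
/*
An out-of-order free sketch (hypothetical caller code, continuing the sketch
above): freeing `a` rewinds the offset to where `a` began, so `b` stays
readable only until the next allocation overwrites that space.

	a, _ := small_stack_alloc(&stack, 64)
	b, _ := small_stack_alloc(&stack, 64)
	small_stack_free(&stack, a)           // offset rewinds past `b` as well
	c, _ := small_stack_alloc(&stack, 64) // may overwrite the memory of `b`
*/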

/*
Free all memory back to the small stack allocator.
*/
small_stack_free_all :: proc(s: ^Small_Stack) {
	s.offset = 0
	// sanitizer.address_poison(s.data)
}

/*
Resize an allocation owned by a small stack allocator.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size` and alignment `alignment`. The
newly allocated memory, if any, is zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the
memory region located at an address specified by `old_memory`.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
small_stack_resize :: proc(
	s: ^Small_Stack,
	old_memory: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := small_stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
	return raw_data(bytes), err
}

/*
Resize an allocation owned by a small stack allocator.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is
zero-initialized.

If `old_data` is `nil`, this procedure acts just like `small_stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the
memory region specified by `old_data`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
small_stack_resize_bytes :: proc(
	s: ^Small_Stack,
	old_data: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	bytes, err := small_stack_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
	if bytes != nil {
		if old_data == nil {
			zero_slice(bytes)
		} else if size > len(old_data) {
			zero_slice(bytes[len(old_data):])
		}
	}
	return bytes, err
}

/*
Resize an allocation owned by a small stack allocator, without zero-initialization.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size` and alignment `alignment`. The
newly allocated memory, if any, is not explicitly zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the
memory region located at an address specified by `old_memory`.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
small_stack_resize_non_zeroed :: proc(
	s: ^Small_Stack,
	old_memory: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := small_stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
	return raw_data(bytes), err
}

/*
Resize an allocation owned by a small stack allocator, without zero-initialization.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.

If `old_data` is `nil`, this procedure acts just like `small_stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the
memory region specified by `old_data`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
small_stack_resize_bytes_non_zeroed :: proc(
	s: ^Small_Stack,
	old_data: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if s.data == nil {
		panic("Resize on an uninitialized Small Stack allocator.", loc)
	}
	old_memory := raw_data(old_data)
	old_size := len(old_data)
	alignment := alignment
	alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)
	if old_memory == nil {
		return small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
	}
	if size == 0 {
		return nil, small_stack_free(s, old_memory, loc)
	}
	start := uintptr(raw_data(s.data))
	end := start + uintptr(len(s.data))
	curr_addr := uintptr(old_memory)
	if !(start <= curr_addr && curr_addr < end) {
		panic("Out of bounds memory address passed to Small Stack allocator. (resize)", loc)
	}
	if curr_addr >= start+uintptr(s.offset) {
		// NOTE(bill): Treat as a double free
		return nil, nil
	}
	if uintptr(old_memory) & uintptr(alignment-1) != 0 {
		// A different alignment has been requested and the current address
		// does not satisfy it.
		data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
		if err == nil {
			runtime.copy(data, byte_slice(old_memory, old_size))
			// sanitizer.address_poison(old_memory)
		}
		return data, err
	}
	if old_size == size {
		result := byte_slice(old_memory, size)
		// sanitizer.address_unpoison(result)
		return result, nil
	}
	data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
	if err == nil {
		runtime.copy(data, byte_slice(old_memory, old_size))
	}
	return data, err
}

small_stack_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	s := cast(^Small_Stack)allocator_data
	if s.data == nil {
		return nil, .Invalid_Argument
	}
	switch mode {
	case .Alloc:
		return small_stack_alloc_bytes(s, size, alignment, loc)
	case .Alloc_Non_Zeroed:
		return small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
	case .Free:
		return nil, small_stack_free(s, old_memory, loc)
	case .Free_All:
		small_stack_free_all(s)
	case .Resize:
		return small_stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
	case .Resize_Non_Zeroed:
		return small_stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return nil, nil
}

/* Preserved for compatibility */
Dynamic_Pool :: Dynamic_Arena
DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT
dynamic_pool_allocator_proc :: dynamic_arena_allocator_proc
dynamic_pool_free_all :: dynamic_arena_free_all
dynamic_pool_reset :: dynamic_arena_reset
dynamic_pool_alloc_bytes :: dynamic_arena_alloc_bytes
dynamic_pool_alloc :: dynamic_arena_alloc
dynamic_pool_init :: dynamic_arena_init
dynamic_pool_allocator :: dynamic_arena_allocator
dynamic_pool_destroy :: dynamic_arena_destroy

/*
Default block size for the dynamic arena.
*/
DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT :: 65536

/*
Default out-of-band threshold of the dynamic arena.
*/
DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT :: 6554
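// NOTE: This is roughly one tenth of the default block size (65536 / 10 ≈ 6554).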

/*
Dynamic arena allocator data.
*/
Dynamic_Arena :: struct {
	block_size:           int,
	out_band_size:        int,
	alignment:            int,
	unused_blocks:        [dynamic]rawptr,
	used_blocks:          [dynamic]rawptr,
	out_band_allocations: [dynamic]rawptr,
	current_block:        rawptr,
	current_pos:          rawptr,
	bytes_left:           int,
	block_allocator:      Allocator,
}

/*
Initialize a dynamic arena.

This procedure initializes a dynamic arena. The specified `block_allocator`
will be used to allocate arena blocks, and `array_allocator` will be used to
allocate the arrays that track blocks and out-of-band allocations. Blocks have
the default size `block_size`, and the out-of-band threshold is
`out_band_size`. All allocations will be aligned to a boundary specified by
`alignment`.
*/
dynamic_arena_init :: proc(
	pool: ^Dynamic_Arena,
	block_allocator := context.allocator,
	array_allocator := context.allocator,
	block_size := DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT,
	out_band_size := DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT,
	alignment := DEFAULT_ALIGNMENT,
) {
	pool.block_size = block_size
	pool.out_band_size = out_band_size
	pool.alignment = alignment
	pool.block_allocator = block_allocator
	pool.out_band_allocations.allocator = array_allocator
	pool.unused_blocks.allocator = array_allocator
	pool.used_blocks.allocator = array_allocator
}

/*
Dynamic arena allocator.

The dynamic arena allocator uses blocks of a specific size, allocated on-demand
using the block allocator. This allocator acts similarly to `Arena`. All
allocations in a block happen contiguously, from start to end. If an allocation
does not fit into the remaining space of the block and its size is smaller
than the specified out-of-band size, a new block is allocated using the
`block_allocator` and the allocation is served from that new block.

If an allocation is at least as large as the specified out-of-band size, a
dedicated block is allocated for it. This is referred to as an *out-of-band
allocation*. The out-of-band blocks are kept separately from normal blocks.

Just like `Arena`, the dynamic arena does not support freeing of individual
objects.
*/
@(require_results)
dynamic_arena_allocator :: proc(a: ^Dynamic_Arena) -> Allocator {
	return Allocator{
		procedure = dynamic_arena_allocator_proc,
		data = a,
	}
}
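
/*
A minimal usage sketch (hypothetical caller code, not part of this file,
assuming this package is imported as `mem`):

	arena: mem.Dynamic_Arena
	mem.dynamic_arena_init(&arena)
	defer mem.dynamic_arena_destroy(&arena)
	context.allocator = mem.dynamic_arena_allocator(&arena)
	xs := make([]int, 128) // bump-allocated from the arena's current block
*/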

/*
Destroy a dynamic arena.

This procedure frees all allocations made on a dynamic arena, including the
unused blocks, as well as the arrays for storing blocks.
*/
dynamic_arena_destroy :: proc(a: ^Dynamic_Arena) {
	dynamic_arena_free_all(a)
	delete(a.unused_blocks)
	delete(a.used_blocks)
	delete(a.out_band_allocations)
	zero(a, size_of(a^))
}

@(private="file")
_dynamic_arena_cycle_new_block :: proc(a: ^Dynamic_Arena, loc := #caller_location) -> (err: Allocator_Error) {
	if a.block_allocator.procedure == nil {
		panic("You must call `dynamic_arena_init` on a Dynamic Arena before using it.", loc)
	}
	if a.current_block != nil {
		append(&a.used_blocks, a.current_block, loc=loc)
	}
	new_block: rawptr
	if len(a.unused_blocks) > 0 {
		new_block = pop(&a.unused_blocks)
	} else {
		data: []byte
		data, err = a.block_allocator.procedure(
			a.block_allocator.data,
			Allocator_Mode.Alloc,
			a.block_size,
			a.alignment,
			nil,
			0,
		)
		// sanitizer.address_poison(data)
		new_block = raw_data(data)
	}
	a.bytes_left = a.block_size
	a.current_pos = new_block
	a.current_block = new_block
	return
}

/*
Allocate memory from a dynamic arena.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by the arena's `alignment` from a dynamic arena `a`. The allocated memory is
zero-initialized. This procedure returns a pointer to the newly allocated
memory region.
*/
@(require_results)
dynamic_arena_alloc :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) {
	data, err := dynamic_arena_alloc_bytes(a, size, loc)
	return raw_data(data), err
}

/*
Allocate memory from a dynamic arena.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by the arena's `alignment` from a dynamic arena `a`. The allocated memory is
zero-initialized. This procedure returns a slice of the newly allocated memory
region.
*/
@(require_results)
dynamic_arena_alloc_bytes :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	bytes, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
	if bytes != nil {
		zero_slice(bytes)
	}
	return bytes, err
}

/*
Allocate non-initialized memory from a dynamic arena.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by the arena's `alignment` from a dynamic arena `a`. The allocated memory is
not explicitly zero-initialized. This procedure returns a pointer to the newly
allocated memory region.
*/
@(require_results)
dynamic_arena_alloc_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) {
	data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
	return raw_data(data), err
}

/*
Allocate non-initialized memory from a dynamic arena.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by the arena's `alignment` from a dynamic arena `a`. The allocated memory is
not explicitly zero-initialized. This procedure returns a slice of the newly
allocated memory region.
*/
@(require_results)
dynamic_arena_alloc_bytes_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	n := align_formula(size, a.alignment)
	if n > a.block_size {
		return nil, .Invalid_Argument
	}
	if n >= a.out_band_size {
		assert(a.block_allocator.procedure != nil, "Backing block allocator must be initialized", loc=loc)
		memory, err := alloc_bytes_non_zeroed(a.block_size, a.alignment, a.block_allocator, loc)
		if memory != nil {
			append(&a.out_band_allocations, raw_data(memory), loc = loc)
		}
		return memory, err
	}
	if a.bytes_left < n {
		err := _dynamic_arena_cycle_new_block(a, loc)
		if err != nil {
			return nil, err
		}
		if a.current_block == nil {
			return nil, .Out_Of_Memory
		}
	}
	memory := a.current_pos
	a.current_pos = ([^]byte)(a.current_pos)[n:]
	a.bytes_left -= n
	result := ([^]byte)(memory)[:size]
	// ensure_poisoned(result)
	// sanitizer.address_unpoison(result)
	return result, nil
}
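
/*
With the default sizes, an aligned request below 6554 bytes is bump-allocated
out of the current 64 KiB block (cycling in a new block when it does not fit),
while a request of 6554 bytes or more takes the out-of-band path above and
receives a dedicated block from `block_allocator`.
*/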

/*
Reset a dynamic arena allocator.

This procedure frees all the allocations owned by the dynamic arena, excluding
the unused blocks.
*/
dynamic_arena_reset :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
	if a.current_block != nil {
		// sanitizer.address_poison(a.current_block, a.block_size)
		append(&a.unused_blocks, a.current_block, loc=loc)
		a.current_block = nil
	}
	for block in a.used_blocks {
		// sanitizer.address_poison(block, a.block_size)
		append(&a.unused_blocks, block, loc=loc)
	}
	clear(&a.used_blocks)
	for allocation in a.out_band_allocations {
		free(allocation, a.block_allocator, loc=loc)
	}
	clear(&a.out_band_allocations)
	a.bytes_left = 0 // Make new allocations call `_dynamic_arena_cycle_new_block` again.
}

/*
Free all memory back to the dynamic arena allocator.

This procedure frees all the allocations owned by the dynamic arena, including
the unused blocks.
*/
dynamic_arena_free_all :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
	dynamic_arena_reset(a)
	for block in a.unused_blocks {
		// sanitizer.address_unpoison(block, a.block_size)
		free(block, a.block_allocator, loc)
	}
	clear(&a.unused_blocks)
}

/*
Resize an allocation owned by a dynamic arena allocator.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size`, aligned on a boundary specified
by the arena's `alignment`. The newly allocated memory, if any, is
zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
allocating a memory region `size` bytes in size.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
dynamic_arena_resize :: proc(
	a: ^Dynamic_Arena,
	old_memory: rawptr,
	old_size: int,
	size: int,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := dynamic_arena_resize_bytes(a, byte_slice(old_memory, old_size), size, loc)
	return raw_data(bytes), err
}

/*
Resize an allocation owned by a dynamic arena allocator.

This procedure resizes a memory region specified by `old_data` to have a size
`size`, aligned on a boundary specified by the arena's `alignment`. The newly
allocated memory, if any, is zero-initialized.

If `old_data` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
allocating a memory region `size` bytes in size.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
dynamic_arena_resize_bytes :: proc(
	a: ^Dynamic_Arena,
	old_data: []byte,
	size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if size == 0 {
		// NOTE: This allocator has no Free mode.
		return nil, nil
	}
	bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, old_data, size, loc)
	if bytes != nil {
		if old_data == nil {
			zero_slice(bytes)
		} else if size > len(old_data) {
			zero_slice(bytes[len(old_data):])
		}
	}
	return bytes, err
}

/*
Resize an allocation owned by a dynamic arena allocator, without zero-initialization.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size`, aligned on a boundary specified
by the arena's `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
allocating a memory region `size` bytes in size.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
dynamic_arena_resize_non_zeroed :: proc(
	a: ^Dynamic_Arena,
	old_memory: rawptr,
	old_size: int,
	size: int,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, byte_slice(old_memory, old_size), size, loc)
	return raw_data(bytes), err
}

/*
Resize an allocation owned by a dynamic arena allocator, without zero-initialization.

This procedure resizes a memory region specified by `old_data` to have a size
`size`, aligned on a boundary specified by the arena's `alignment`. The newly
allocated memory, if any, is not explicitly zero-initialized.

If `old_data` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
allocating a memory region `size` bytes in size.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
dynamic_arena_resize_bytes_non_zeroed :: proc(
	a: ^Dynamic_Arena,
	old_data: []byte,
	size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if size == 0 {
		// NOTE: This allocator has no Free mode.
		return nil, nil
	}
	old_memory := raw_data(old_data)
	old_size := len(old_data)
	if old_size >= size {
		// Shrinking: the existing region is reused in place.
		// sanitizer.address_poison(old_data[size:])
		return byte_slice(old_memory, size), nil
	}
	// No information is kept about allocations in this allocator, thus we
	// cannot truly resize anything and must reallocate.
	data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
	if err == nil {
		runtime.copy(data, byte_slice(old_memory, old_size))
	}
	return data, err
}

dynamic_arena_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size: int,
	alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	arena := (^Dynamic_Arena)(allocator_data)
	switch mode {
	case .Alloc:
		return dynamic_arena_alloc_bytes(arena, size, loc)
	case .Alloc_Non_Zeroed:
		return dynamic_arena_alloc_bytes_non_zeroed(arena, size, loc)
	case .Free:
		return nil, .Mode_Not_Implemented
	case .Free_All:
		dynamic_arena_free_all(arena, loc)
	case .Resize:
		return dynamic_arena_resize_bytes(arena, byte_slice(old_memory, old_size), size, loc)
	case .Resize_Non_Zeroed:
		return dynamic_arena_resize_bytes_non_zeroed(arena, byte_slice(old_memory, old_size), size, loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info}
		}
		return nil, nil
	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			info.size = arena.block_size
			info.alignment = arena.alignment
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}
	return nil, nil
}

/*
Header of the buddy block.
*/
Buddy_Block :: struct #align(align_of(uint)) {
	size:    uint,
	is_free: bool,
}

/*
Obtain the next buddy block.
*/
@(require_results, no_sanitize_address)
buddy_block_next :: proc(block: ^Buddy_Block) -> ^Buddy_Block {
	return (^Buddy_Block)(([^]byte)(block)[block.size:])
}

/*
Split a block in two, truncating the given block to the given size.
*/
@(require_results, no_sanitize_address)
buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block {
	block := block
	if block != nil && size != 0 {
		// Split the block in half repeatedly until the requested size just fits
		for size < block.size {
			sz := block.size >> 1
			block.size = sz
			block = buddy_block_next(block)
			block.size = sz
			block.is_free = true
		}
		if size <= block.size {
			return block
		}
	}
	// Block cannot fit the requested allocation size
	return nil
}
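
/*
Worked example: splitting a free 512-byte block for a 128-byte request (sizes
here already include the header and alignment padding, as computed by
`buddy_block_size_required`) halves twice, leaving blocks of 256, 128, and 128
bytes; the returned block is the final 128-byte piece.
*/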

/*
Coalesce contiguous blocks in a range of blocks into one.
*/
@(no_sanitize_address)
buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) {
	for {
		// Keep looping until there are no more buddies to coalesce
		block := head
		buddy := buddy_block_next(block)
		no_coalescence := true
		for block < tail && buddy < tail { // make sure the buddies are within the range
			if block.is_free && buddy.is_free && block.size == buddy.size {
				// Coalesce buddies into one
				block.size <<= 1
				block = buddy_block_next(block)
				if block < tail {
					buddy = buddy_block_next(block)
					no_coalescence = false
				}
			} else if block.size < buddy.size {
				// The buddy block is split into smaller blocks
				block = buddy
				buddy = buddy_block_next(buddy)
			} else {
				block = buddy_block_next(buddy)
				if block < tail {
					// Leave the buddy block for the next iteration
					buddy = buddy_block_next(block)
				}
			}
		}
		if no_coalescence {
			return
		}
	}
}

/*
Find the best block for storing a given size in a range of blocks.
*/
@(require_results, no_sanitize_address)
buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block {
	assert(size != 0)
	best_block: ^Buddy_Block
	block := head                    // left
	buddy := buddy_block_next(block) // right
	// The entire memory section between head and tail is free,
	// just call 'buddy_block_split' to get the allocation
	if buddy == tail && block.is_free {
		return buddy_block_split(block, size)
	}
	// Find the 'best_block' for the requested allocation size
	for block < tail && buddy < tail { // make sure the buddies are within the range
		// If both buddies are free, coalesce them together
		// NOTE: this is an optimization to reduce fragmentation
		//       this could be completely ignored
		if block.is_free && buddy.is_free && block.size == buddy.size {
			block.size <<= 1
			if size <= block.size && (best_block == nil || block.size <= best_block.size) {
				best_block = block
			}
			block = buddy_block_next(buddy)
			if block < tail {
				// Delay the buddy block for the next iteration
				buddy = buddy_block_next(block)
			}
			continue
		}
		if block.is_free && size <= block.size &&
		   (best_block == nil || block.size <= best_block.size) {
			best_block = block
		}
		if buddy.is_free && size <= buddy.size &&
		   (best_block == nil || buddy.size < best_block.size) {
			// If both buddies are the same size, it makes more sense to pick
			// the buddy, as it "bounces around" less
			best_block = buddy
		}
		if block.size <= buddy.size {
			block = buddy_block_next(buddy)
			if block < tail {
				// Delay the buddy block for the next iteration
				buddy = buddy_block_next(block)
			}
		} else {
			// The buddy was split into smaller blocks
			block = buddy
			buddy = buddy_block_next(buddy)
		}
	}
	if best_block != nil {
		// This also handles the case where 'best_block' is a perfect fit
		return buddy_block_split(best_block, size)
	}
	// Maybe out of memory
	return nil
}

/*
The buddy allocator data.
*/
Buddy_Allocator :: struct {
	head:      ^Buddy_Block,
	tail:      ^Buddy_Block `fmt:"-"`,
	alignment: uint,
}

/*
Buddy allocator.

The buddy allocator is a type of allocator that splits the backing buffer into
multiple regions called buddy blocks. Initially, the allocator only has one
block with the size of the backing buffer. Upon each allocation, the allocator
finds the smallest block that can fit the size of the requested memory region,
and splits the block according to the allocation size. If no block can be
found, the contiguous free blocks are coalesced and the search is performed
again.
*/
@(require_results)
buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator {
	return Allocator{
		procedure = buddy_allocator_proc,
		data = b,
	}
}

/*
Initialize a buddy allocator.

This procedure initializes the buddy allocator `b` with a backing buffer `data`
and block alignment specified by `alignment`.

`alignment` may be any power of two, but the backing buffer must be aligned to
at least `size_of(Buddy_Block)`.
*/
buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint, loc := #caller_location) {
	assert(data != nil)
	assert(is_power_of_two(uintptr(len(data))), "Size of the backing buffer must be a power of two", loc)
	assert(is_power_of_two(uintptr(alignment)), "Alignment must be a power of two", loc)
	alignment := alignment
	if alignment < size_of(Buddy_Block) {
		alignment = size_of(Buddy_Block)
	}
	ptr := raw_data(data)
	assert(uintptr(ptr) % uintptr(alignment) == 0, "The data is not aligned to the minimum alignment, which must be at least `size_of(Buddy_Block)`.", loc)
	b.head = (^Buddy_Block)(ptr)
	b.head.size = uint(len(data))
	b.head.is_free = true
	b.tail = buddy_block_next(b.head)
	b.alignment = alignment
	assert(uint(len(data)) >= 2 * buddy_block_size_required(b, 1), "The size of the backing buffer must be large enough to hold at least two 1-byte allocations given the alignment requirements, otherwise it cannot split.", loc)
	// sanitizer.address_poison(data)
}
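
/*
A minimal initialization sketch (hypothetical caller code, not part of this
file, assuming this package is imported as `mem`): the backing buffer must
have a power-of-two length and be suitably aligned, so it is allocated here
with an explicit alignment rather than taken off the stack.

	buf, _ := mem.alloc_bytes(1024, 64)
	b: mem.Buddy_Allocator
	mem.buddy_allocator_init(&b, buf, 16)
	ptr, err := mem.buddy_allocator_alloc(&b, 100)
*/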

/*
Get the block size required to fit the allocation as well as the alignment
padding.
*/
@(require_results)
buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint {
	assert(size > 0)
	// NOTE: `size_of(Buddy_Block)` will be accounted for in `b.alignment`.
	// This calculation is also previously guarded against being given a `size`
	// of 0 by `buddy_allocator_alloc_bytes_non_zeroed` checking for that.
	actual_size := b.alignment + size
	if intrinsics.count_ones(actual_size) != 1 {
		// We're not a power of two. Round up to the next one.
		actual_size = 1 << (size_of(uint) * 8 - intrinsics.count_leading_zeros(actual_size))
	}
	return actual_size
}
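
/*
Worked example: with `b.alignment = 16`, a 100-byte request needs
16 + 100 = 116 bytes, which is not a power of two, so it is rounded up to 128.
A 48-byte request needs exactly 64 bytes and is returned unchanged.
*/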

/*
Allocate memory from a buddy allocator.

This procedure allocates `size` bytes of memory. The allocation's alignment is
fixed to the `alignment` specified at initialization. The allocated memory
region is zero-initialized. This procedure returns a pointer to the allocated
memory region.
*/
@(require_results, no_sanitize_address)
buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) {
	bytes, err := buddy_allocator_alloc_bytes(b, size)
	return raw_data(bytes), err
}

/*
Allocate memory from a buddy allocator.

This procedure allocates `size` bytes of memory. The allocation's alignment is
fixed to the `alignment` specified at initialization. The allocated memory
region is zero-initialized. This procedure returns a slice of the allocated
memory region.
*/
@(require_results, no_sanitize_address)
buddy_allocator_alloc_bytes :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) {
	bytes, err := buddy_allocator_alloc_bytes_non_zeroed(b, size)
	if bytes != nil {
		zero_slice(bytes)
	}
	return bytes, err
}

/*
Allocate non-initialized memory from a buddy allocator.

This procedure allocates `size` bytes of memory. The allocation's alignment is
fixed to the `alignment` specified at initialization. The allocated memory
region is not explicitly zero-initialized. This procedure returns a pointer to
the allocated memory region.
*/
@(require_results, no_sanitize_address)
buddy_allocator_alloc_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) {
	bytes, err := buddy_allocator_alloc_bytes_non_zeroed(b, size)
	return raw_data(bytes), err
}

/*
Allocate non-initialized memory from a buddy allocator.

This procedure allocates `size` bytes of memory. The allocation's alignment is
fixed to the `alignment` specified at initialization. The allocated memory
region is not explicitly zero-initialized. This procedure returns a slice of
the allocated memory region.
*/
@(require_results, no_sanitize_address)
buddy_allocator_alloc_bytes_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) {
	if size != 0 {
		actual_size := buddy_block_size_required(b, size)
		found := buddy_block_find_best(b.head, b.tail, actual_size)
		if found == nil {
			// Try to coalesce all the free buddy blocks and then search again
			buddy_block_coalescence(b.head, b.tail)
			found = buddy_block_find_best(b.head, b.tail, actual_size)
		}
		if found == nil {
			return nil, .Out_Of_Memory
		}
		found.is_free = false
		data := ([^]byte)(found)[b.alignment:][:size]
		// NOTE: `<=` because an exact-fit allocation may end precisely at the
		// start of the next block's header without overlapping it.
		assert(cast(uintptr)raw_data(data)+cast(uintptr)size <= cast(uintptr)buddy_block_next(found), "Buddy_Allocator has made an allocation which overlaps a block header.")
		// ensure_poisoned(data)
		// sanitizer.address_unpoison(data)
		return data, nil
	}
	return nil, nil
}

/*
Free memory back to the buddy allocator.

This procedure frees the memory region allocated at the pointer `ptr`, marking
its block free and coalescing adjacent free blocks. If `ptr` is `nil`, this
operation is a no-op.
*/
@(no_sanitize_address)
buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Error {
	if ptr != nil {
		if !(b.head <= ptr && ptr <= b.tail) {
			return .Invalid_Pointer
		}
		block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
		// sanitizer.address_poison(ptr, block.size)
		block.is_free = true
		buddy_block_coalescence(b.head, b.tail)
	}
	return nil
}

/*
Free all memory back to the buddy allocator.
*/
@(no_sanitize_address)
buddy_allocator_free_all :: proc(b: ^Buddy_Allocator) {
	alignment := b.alignment
	head := ([^]byte)(b.head)
	tail := ([^]byte)(b.tail)
	data := head[:ptr_sub(tail, head)]
	buddy_allocator_init(b, data, alignment)
}

@(no_sanitize_address)
buddy_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	b := (^Buddy_Allocator)(allocator_data)
	switch mode {
	case .Alloc:
		return buddy_allocator_alloc_bytes(b, uint(size))
	case .Alloc_Non_Zeroed:
		return buddy_allocator_alloc_bytes_non_zeroed(b, uint(size))
	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b), loc)
	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b), loc)
	case .Free:
		return nil, buddy_allocator_free(b, old_memory)
	case .Free_All:
		buddy_allocator_free_all(b)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features, .Alloc, .Alloc_Non_Zeroed, .Resize, .Resize_Non_Zeroed, .Free, .Free_All, .Query_Info}
		}
		return nil, nil
	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			ptr := info.pointer
			if !(b.head <= ptr && ptr <= b.tail) {
				return nil, .Invalid_Pointer
			}
			block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
			info.size = int(block.size)
			info.alignment = int(b.alignment)
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}
	return nil, nil
}

// An allocator that keeps track of allocation sizes and passes them along to resizes.
// This is useful if you are using a library that needs an equivalent of `realloc` but you want
// to use the Odin allocator interface.
//
// You want to wrap your allocator in this one if you are trying to use any allocator that relies
// on the old size to work.
//
// The overhead of this allocator is an extra max(alignment, size_of(Header)) bytes for each
// allocation; these bytes are used to store the size and alignment.
Compat_Allocator :: struct {
	parent: Allocator,
}

compat_allocator_init :: proc(rra: ^Compat_Allocator, allocator := context.allocator) {
	rra.parent = allocator
}

compat_allocator :: proc(rra: ^Compat_Allocator) -> Allocator {
	return Allocator{
		data = rra,
		procedure = compat_allocator_proc,
	}
}
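
/*
A minimal wrapping sketch (hypothetical caller code, not part of this file,
assuming this package is imported as `mem`):

	compat: mem.Compat_Allocator
	mem.compat_allocator_init(&compat, context.allocator)
	context.allocator = mem.compat_allocator(&compat)
	// Resizes through `context.allocator` now reach the parent with the
	// correct old size, even when the caller does not track it.
*/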

compat_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                              size, alignment: int,
                              old_memory: rawptr, old_size: int,
                              location := #caller_location) -> (data: []byte, err: Allocator_Error) {
	Header :: struct {
		size:      int,
		alignment: int,
	}
	@(no_sanitize_address)
	get_unpoisoned_header :: #force_inline proc(ptr: rawptr) -> Header {
		header := ([^]Header)(ptr)[-1]
		// a := max(header.alignment, size_of(Header))
		// sanitizer.address_unpoison(rawptr(uintptr(ptr)-uintptr(a)), a)
		return header
	}
	rra := (^Compat_Allocator)(allocator_data)
	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		a := max(alignment, size_of(Header))
		req_size := size + a
		assert(req_size >= 0, "overflow")
		allocation := rra.parent.procedure(rra.parent.data, mode, req_size, alignment, old_memory, old_size, location) or_return
		#no_bounds_check data = allocation[a:]
		([^]Header)(raw_data(data))[-1] = {
			size = size,
			alignment = alignment,
		}
		// sanitizer.address_poison(raw_data(allocation), a)
		return
	case .Free:
		header := get_unpoisoned_header(old_memory)
		a := max(header.alignment, size_of(Header))
		orig_ptr := rawptr(uintptr(old_memory)-uintptr(a))
		orig_size := header.size + a
		return rra.parent.procedure(rra.parent.data, mode, orig_size, header.alignment, orig_ptr, orig_size, location)
	case .Resize, .Resize_Non_Zeroed:
		header := get_unpoisoned_header(old_memory)
		orig_a := max(header.alignment, size_of(Header))
		orig_ptr := rawptr(uintptr(old_memory)-uintptr(orig_a))
		orig_size := header.size + orig_a
		new_alignment := max(header.alignment, alignment)
		a := max(new_alignment, size_of(Header))
		req_size := size + a
		assert(req_size >= 0, "overflow")
		allocation := rra.parent.procedure(rra.parent.data, mode, req_size, new_alignment, orig_ptr, orig_size, location) or_return
		#no_bounds_check data = allocation[a:]
		([^]Header)(raw_data(data))[-1] = {
			size = size,
			alignment = new_alignment,
		}
		// sanitizer.address_poison(raw_data(allocation), a)
		return
	case .Free_All:
		return rra.parent.procedure(rra.parent.data, mode, size, alignment, old_memory, old_size, location)
	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			header := get_unpoisoned_header(info.pointer)
			info.size = header.size
			info.alignment = header.alignment
		}
		return
	case .Query_Features:
		data, err = rra.parent.procedure(rra.parent.data, mode, size, alignment, old_memory, old_size, location)
		if err == nil {
			// This wrapper implements `.Query_Info` on top of the parent.
			if set := (^Allocator_Mode_Set)(old_memory); set != nil {
				set^ += {.Query_Info}
			}
		}
		return
	case: unreachable()
	}
}