12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534 |
- package mem
- import "base:intrinsics"
- import "base:runtime"
- // NOTE(Feoramund): Sanitizer usage in this package has been temporarily
- // disabled pending a thorough review per allocator, as ASan is particular
- // about the addresses and ranges it receives.
- //
- // In short, it keeps track only of 8-byte blocks. This can cause issues if an
- // allocator poisons an entire range but an allocation for less than 8 bytes is
- // desired or if the next allocation address would not be 8-byte aligned.
- //
- // This must be handled carefully on a per-allocator basis and some allocators
- // may not be able to participate.
- //
- // Please see the following link for more information:
- //
- // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm#mapping
- //
- // import "base:sanitizer"
- /*
- This procedure checks if a byte slice `range` is poisoned and makes sure the
- root address of the poison range is the base pointer of `range`.
- This can help guard against buggy allocators returning memory that they already returned.
- This has no effect if `-sanitize:address` is not enabled.
- */
- // @(disabled=.Address not_in ODIN_SANITIZER_FLAGS, private)
- // ensure_poisoned :: proc(range: []u8, loc := #caller_location) {
- // cond := sanitizer.address_region_is_poisoned(range) == raw_data(range)
- // // If this fails, we've overlapped an allocation and it's our fault.
- // ensure(cond, `This allocator has sliced a block of memory of which some part is not poisoned before returning.
- // This is a bug in the core library and should be reported to the Odin developers with a stack trace and minimal example code if possible.`, loc)
- // }
- /*
- This procedure checks if a byte slice `range` is not poisoned.
- This can help guard against buggy allocators resizing memory that they should not.
- This has no effect if `-sanitize:address` is not enabled.
- */
- // @(disabled=.Address not_in ODIN_SANITIZER_FLAGS, private)
- // ensure_not_poisoned :: proc(range: []u8, loc := #caller_location) {
- // cond := sanitizer.address_region_is_poisoned(range) == nil
- // // If this fails, we've tried to resize memory that is poisoned, which
- // // could be user error caused by an incorrect `old_memory` pointer.
- // ensure(cond, `This allocator has sliced a block of memory of which some part is poisoned before returning.
- // This may be a bug in the core library, or it could be user error due to an invalid pointer passed to a resize operation.
- // If after ensuring your own code is not responsible, report the problem to the Odin developers with a stack trace and minimal example code if possible.`, loc)
- // }
- /*
- Nil allocator.
- The `nil` allocator returns `nil` on every allocation attempt. This type of
- allocator can be used in scenarios where memory doesn't need to be allocated,
- but an attempt to allocate memory is not an error.
- */
@(require_results)
nil_allocator :: proc() -> Allocator {
	// Bind the no-op procedure; this allocator carries no state.
	a: Allocator
	a.procedure = nil_allocator_proc
	a.data = nil
	return a
}
// Allocator procedure backing `nil_allocator`.
//
// Every mode is a no-op: no memory is returned and no error is reported,
// so any allocation attempt "succeeds" with `nil`.
nil_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	return nil, nil
}
- /*
- Panic allocator.
- The panic allocator is a type of allocator that panics on any allocation
- attempt. This type of allocator can be used in scenarios where memory should
- not be allocated, and an attempt to allocate memory is an error.
- */
@(require_results)
panic_allocator :: proc() -> Allocator {
	// Stateless allocator; all behavior lives in `panic_allocator_proc`.
	a: Allocator
	a.procedure = panic_allocator_proc
	a.data = nil
	return a
}
// Allocator procedure backing `panic_allocator`.
//
// Panics on any operation that would actually allocate, resize, or free
// memory. Zero-sized allocations/resizes and `nil` frees are tolerated as
// no-ops, so code that merely "touches" the allocator without requesting
// memory does not trap.
panic_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		if size > 0 {
			panic("mem: panic allocator, .Alloc called", loc=loc)
		}
	case .Alloc_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc)
		}
	case .Resize:
		if size > 0 {
			panic("mem: panic allocator, .Resize called", loc=loc)
		}
	case .Resize_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Resize_Non_Zeroed called", loc=loc)
		}
	case .Free:
		// Freeing `nil` is always legal, even here.
		if old_memory != nil {
			panic("mem: panic allocator, .Free called", loc=loc)
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called", loc=loc)
	case .Query_Features:
		// Report that only feature-querying itself is supported.
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features}
		}
		return nil, nil
	case .Query_Info:
		panic("mem: panic allocator, .Query_Info called", loc=loc)
	}
	return nil, nil
}
- /*
- Arena allocator data.
- */
Arena :: struct {
	data:       []byte, // Backing buffer all allocations are carved from.
	offset:     int,    // Byte offset of the first free position in `data`.
	peak_used:  int,    // High-water mark of `offset`.
	temp_count: int,    // Number of outstanding `Arena_Temp_Memory` regions.
}
- /*
- Arena allocator.
- The arena allocator (also known as a linear allocator, bump allocator,
- region allocator) is an allocator that uses a single backing buffer for
- allocations.
- The buffer is used contiguously, from start to end. Each subsequent allocation
- occupies the next adjacent region of memory in the buffer. Since the arena
- allocator does not keep track of any metadata associated with the allocations
- and their locations, it is impossible to free individual allocations.
- The arena allocator can be used for temporary allocations in frame-based memory
- management. Games are one example of such applications. A global arena can be
- used for any temporary memory allocations, and at the end of each frame all
- temporary allocations are freed. Since no temporary object is going to live
- longer than a frame, no lifetimes are violated.
- */
@(require_results)
arena_allocator :: proc(arena: ^Arena) -> Allocator {
	// Wrap the arena state in the generic allocator interface.
	return {procedure = arena_allocator_proc, data = arena}
}
- /*
- Initialize an arena.
- This procedure initializes the arena `a` with memory region `data` as its
- backing buffer.
- */
arena_init :: proc(a: ^Arena, data: []byte) {
	// Adopt `data` as the backing buffer and zero all bookkeeping in one go.
	a^ = Arena{data = data}
	// sanitizer.address_poison(a.data)
}
- /*
- Allocate memory from an arena.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment` from an arena `a`. The allocated memory is zero-initialized.
- This procedure returns a pointer to the newly allocated memory region.
- */
@(require_results)
arena_alloc :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (ptr: rawptr, err: Allocator_Error) {
	// Pointer-returning wrapper over the slice-based variant.
	data: []byte
	data, err = arena_alloc_bytes(a, size, alignment, loc)
	ptr = raw_data(data)
	return
}
- /*
- Allocate memory from an arena.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment` from an arena `a`. The allocated memory is zero-initialized.
- This procedure returns a slice of the newly allocated memory region.
- */
@(require_results)
arena_alloc_bytes :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (data: []byte, err: Allocator_Error) {
	// Delegate to the non-zeroed variant, then clear the region so the
	// caller always receives zero-initialized memory.
	data, err = arena_alloc_bytes_non_zeroed(a, size, alignment, loc)
	if data != nil {
		zero_slice(data)
	}
	return
}
- /*
- Allocate non-initialized memory from an arena.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment` from an arena `a`. The allocated memory is not explicitly
- zero-initialized. This procedure returns a pointer to the newly allocated
- memory region.
- */
@(require_results)
arena_alloc_non_zeroed :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (ptr: rawptr, err: Allocator_Error) {
	// Pointer-returning wrapper over the slice-based variant.
	data: []byte
	data, err = arena_alloc_bytes_non_zeroed(a, size, alignment, loc)
	ptr = raw_data(data)
	return
}
- /*
- Allocate non-initialized memory from an arena.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment` from an arena `a`. The allocated memory is not explicitly
- zero-initialized. This procedure returns a slice of the newly allocated
- memory region.
- */
@(require_results)
arena_alloc_bytes_non_zeroed :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location
) -> ([]byte, Allocator_Error) {
	if a.data == nil {
		panic("Allocation on uninitialized Arena allocator.", loc)
	}
	// First free byte. `#no_bounds_check` is required because when the arena
	// is exactly full, `a.offset == len(a.data)` and the index would trap.
	#no_bounds_check end := &a.data[a.offset]
	// Bump the cursor forward to the requested alignment boundary.
	ptr := align_forward(end, uintptr(alignment))
	// The caller is charged for any alignment padding as well as `size`.
	total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))
	if a.offset + total_size > len(a.data) {
		return nil, .Out_Of_Memory
	}
	a.offset += total_size
	a.peak_used = max(a.peak_used, a.offset)
	result := byte_slice(ptr, size)
	// ensure_poisoned(result)
	// sanitizer.address_unpoison(result)
	return result, nil
}
- /*
- Free all memory back to the arena allocator.
- */
arena_free_all :: proc(a: ^Arena) {
	// Resetting the offset invalidates every prior allocation at once;
	// `peak_used` is deliberately retained for diagnostics.
	a.offset = 0
	// sanitizer.address_poison(a.data)
}
// Allocator procedure backing `arena_allocator`.
//
// Dispatches each `Allocator_Mode` onto the arena_* helpers. Individual
// frees are not supported because the arena keeps no per-allocation
// metadata; resizes are emulated via the default alloc+copy helpers.
arena_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size: int,
	alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	arena := cast(^Arena)allocator_data
	switch mode {
	case .Alloc:
		return arena_alloc_bytes(arena, size, alignment, loc)
	case .Alloc_Non_Zeroed:
		return arena_alloc_bytes_non_zeroed(arena, size, alignment, loc)
	case .Free:
		// No per-allocation bookkeeping exists to free a single block.
		return nil, .Mode_Not_Implemented
	case .Free_All:
		arena_free_all(arena)
	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena), loc)
	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena), loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return nil, nil
}
- /*
- Temporary memory region of an `Arena` allocator.
- Temporary memory regions of an arena act as "save-points" for the allocator.
- When one is created, the subsequent allocations are done inside the temporary
- memory region. When `end_arena_temp_memory` is called, the arena is rolled
- back, and all of the memory that was allocated from the arena will be freed.
- Multiple temporary memory regions can exist at the same time for an arena.
- */
Arena_Temp_Memory :: struct {
	arena:       ^Arena, // Arena this save-point belongs to.
	prev_offset: int,    // `arena.offset` at the time the region began.
}
- /*
- Start a temporary memory region.
- This procedure creates a temporary memory region. After a temporary memory
- region is created, all allocations are said to be *inside* the temporary memory
- region, until `end_arena_temp_memory` is called.
- */
@(require_results)
begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	// Record the current offset so `end_arena_temp_memory` can roll back,
	// and count the outstanding region for sanity checking.
	a.temp_count += 1
	return Arena_Temp_Memory{arena = a, prev_offset = a.offset}
}
- /*
- End a temporary memory region.
- This procedure ends the temporary memory region for an arena. All of the
- allocations *inside* the temporary memory region will be freed to the arena.
- */
end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
	// The offset can only have grown since the save-point was taken; a
	// smaller offset means `free_all` or a mismatched end call happened.
	assert(tmp.arena.offset >= tmp.prev_offset)
	assert(tmp.arena.temp_count > 0)
	// sanitizer.address_poison(tmp.arena.data[tmp.prev_offset:tmp.arena.offset])
	tmp.arena.offset = tmp.prev_offset
	tmp.arena.temp_count -= 1
}
/* Preserved for compatibility: old names kept as aliases so existing code
using the `Scratch_Allocator` API keeps compiling. */
Scratch_Allocator         :: Scratch
scratch_allocator_init    :: scratch_init
scratch_allocator_destroy :: scratch_destroy
- /*
- Scratch allocator data.
- */
Scratch :: struct {
	data:                 []byte,         // Backing buffer, used cyclically.
	curr_offset:          int,            // Next free position in `data`.
	prev_allocation:      rawptr,         // Most recent (possibly aligned) allocation; only it can be freed/resized in place.
	prev_allocation_root: rawptr,         // Same allocation before alignment padding; where `curr_offset` rewinds to on free.
	backup_allocator:     Allocator,      // Used when a request does not fit in `data`.
	leaked_allocations:   [dynamic][]byte, // Allocations that overflowed to `backup_allocator`.
}
- /*
- Scratch allocator.
- The scratch allocator works in a similar way to the `Arena` allocator. The
- scratch allocator has a backing buffer that is allocated in contiguous regions,
- from start to end.
- Each subsequent allocation will be the next adjacent region of memory in the
- backing buffer. If the allocation doesn't fit into the remaining space of the
- backing buffer, this allocation is put at the start of the buffer, and all
- previous allocations will become invalidated.
- If the allocation doesn't fit into the backing buffer as a whole, it will be
- allocated using a backing allocator, and the pointer to the allocated memory
- region will be put into the `leaked_allocations` array. A `Warning`-level log
- message will be sent as well.
- Allocations which are resized will be resized in-place if they were the last
- allocation. Otherwise, they are re-allocated to avoid overwriting previous
- allocations.
- The `leaked_allocations` array is managed by the `context` allocator if no
- `backup_allocator` is specified in `scratch_init`.
- */
@(require_results)
scratch_allocator :: proc(allocator: ^Scratch) -> Allocator {
	// Wrap the scratch state in the generic allocator interface.
	return {procedure = scratch_allocator_proc, data = allocator}
}
- /*
- Initialize a scratch allocator.
- */
scratch_init :: proc(s: ^Scratch, size: int, backup_allocator := context.allocator) -> Allocator_Error {
	// Over-align the backing buffer to two pointers; on failure we return
	// before touching any other field.
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
	s.curr_offset = 0
	s.prev_allocation, s.prev_allocation_root = nil, nil
	s.backup_allocator = backup_allocator
	s.leaked_allocations.allocator = backup_allocator
	// sanitizer.address_poison(s.data)
	return nil
}
- /*
- Free all data associated with a scratch allocator.
- This is distinct from `scratch_free_all` in that it deallocates all memory used
- to setup the allocator, as opposed to all allocations made from that space.
- */
scratch_destroy :: proc(s: ^Scratch) {
	if s != nil {
		// Release overflow allocations first, then the backing buffer,
		// and finally wipe the struct so reuse requires re-init.
		for leaked in s.leaked_allocations {
			free_bytes(leaked, s.backup_allocator)
		}
		delete(s.leaked_allocations)
		// sanitizer.address_unpoison(s.data)
		delete(s.data, s.backup_allocator)
		s^ = {}
	}
}
- /*
- Allocate memory from a scratch allocator.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment`. The allocated memory region is zero-initialized. This procedure
- returns a pointer to the allocated memory region.
- */
@(require_results)
scratch_alloc :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (ptr: rawptr, err: Allocator_Error) {
	// Pointer-returning wrapper over the slice-based variant.
	data: []byte
	data, err = scratch_alloc_bytes(s, size, alignment, loc)
	ptr = raw_data(data)
	return
}
- /*
- Allocate memory from a scratch allocator.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment`. The allocated memory region is zero-initialized. This procedure
- returns a slice of the allocated memory region.
- */
@(require_results)
scratch_alloc_bytes :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (data: []byte, err: Allocator_Error) {
	// Delegate to the non-zeroed variant, then clear the region so the
	// caller always receives zero-initialized memory.
	data, err = scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
	if data != nil {
		zero_slice(data)
	}
	return
}
- /*
- Allocate non-initialized memory from a scratch allocator.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment`. The allocated memory region is not explicitly zero-initialized.
- This procedure returns a pointer to the allocated memory region.
- */
@(require_results)
scratch_alloc_non_zeroed :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (ptr: rawptr, err: Allocator_Error) {
	// Pointer-returning wrapper over the slice-based variant.
	data: []byte
	data, err = scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
	ptr = raw_data(data)
	return
}
- /*
- Allocate non-initialized memory from a scratch allocator.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment`. The allocated memory region is not explicitly zero-initialized.
- This procedure returns a slice of the allocated memory region.
- */
@(require_results)
scratch_alloc_bytes_non_zeroed :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if s.data == nil {
		// Lazily initialize from the context allocator, guarding against
		// the scratch allocator being asked to initialize itself.
		DEFAULT_BACKING_SIZE :: 4 * Megabyte
		if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) {
			panic("Cyclic initialization of the scratch allocator with itself.", loc)
		}
		scratch_init(s, DEFAULT_BACKING_SIZE)
	}
	aligned_size := size
	if alignment > 1 {
		// It is possible to do this with less bytes, but this is the
		// mathematically simpler solution, and this being a Scratch allocator,
		// we don't need to be so strict about every byte.
		aligned_size += alignment - 1
	}
	if aligned_size <= len(s.data) {
		// The request fits the backing buffer somewhere.
		offset := uintptr(0)
		if s.curr_offset+aligned_size <= len(s.data) {
			offset = uintptr(s.curr_offset)
		} else {
			// The allocation will cause an overflow past the boundary of the
			// space available, so reset to the starting offset.
			// NOTE: this invalidates all previous in-buffer allocations.
			offset = 0
		}
		start := uintptr(raw_data(s.data))
		ptr := rawptr(offset+start)
		// We keep track of the original base pointer without extra alignment
		// in order to later allow the free operation to work from that point.
		s.prev_allocation_root = ptr
		if !is_aligned(ptr, alignment) {
			ptr = align_forward(ptr, uintptr(alignment))
		}
		s.prev_allocation = ptr
		s.curr_offset = int(offset) + aligned_size
		result := byte_slice(ptr, size)
		// ensure_poisoned(result)
		// sanitizer.address_unpoison(result)
		return result, nil
	} else {
		// NOTE: No need to use `aligned_size` here, as the backup allocator will handle alignment for us.
		a := s.backup_allocator
		ptr, err := alloc_bytes_non_zeroed(size, alignment, a, loc)
		if err != nil {
			return ptr, err
		}
		// Track the overflow allocation so destroy/free_all can release it.
		append(&s.leaked_allocations, ptr)
		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch resorted to backup_allocator" , logger.options, loc)
			}
		}
		return ptr, err
	}
}
- /*
- Free memory back to the scratch allocator.
- This procedure frees the memory region allocated at pointer `ptr`.
- If `ptr` is not the latest allocation and is not a leaked allocation, this
- operation is a no-op.
- */
scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Allocator_Error {
	if s.data == nil {
		panic("Free on an uninitialized Scratch allocator.", loc)
	}
	if ptr == nil {
		return nil
	}
	start := uintptr(raw_data(s.data))
	end := start + uintptr(len(s.data))
	old_ptr := uintptr(ptr)
	if s.prev_allocation == ptr {
		// Rolling back the most recent allocation: rewind to its unaligned
		// root so any alignment padding is reclaimed as well.
		s.curr_offset = int(uintptr(s.prev_allocation_root) - start)
		// sanitizer.address_poison(s.data[s.curr_offset:])
		s.prev_allocation = nil
		s.prev_allocation_root = nil
		return nil
	}
	if start <= old_ptr && old_ptr < end {
		// NOTE(bill): Cannot free this pointer but it is valid
		return nil
	}
	// The pointer lies outside the backing buffer; it may be one of the
	// allocations that overflowed to the backup allocator.
	if len(s.leaked_allocations) != 0 {
		for data, i in s.leaked_allocations {
			// BUG FIX: this previously declared a shadowing local
			// `ptr := raw_data(data)` and compared `ptr == ptr`, which is
			// always true — so the FIRST leaked allocation was freed no
			// matter which pointer the caller passed. Compare against the
			// parameter instead.
			if ptr == raw_data(data) {
				free_bytes(data, s.backup_allocator, loc)
				ordered_remove(&s.leaked_allocations, i, loc)
				return nil
			}
		}
	}
	return .Invalid_Pointer
}
- /*
- Free all memory back to the scratch allocator.
- */
scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) {
	// Rewind the buffer cursor, invalidating all in-buffer allocations...
	s.curr_offset = 0
	s.prev_allocation = nil
	// ...and actually release every allocation that overflowed to the
	// backup allocator.
	for ptr in s.leaked_allocations {
		free_bytes(ptr, s.backup_allocator, loc)
	}
	clear(&s.leaked_allocations)
	// sanitizer.address_poison(s.data)
}
- /*
- Resize an allocation owned by a scratch allocator.
- This procedure resizes a memory region defined by its location `old_memory`
- and its size `old_size` to have a size `size` and alignment `alignment`. The
- newly allocated memory, if any, is zero-initialized.
- If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
- allocating a memory region `size` bytes in size, aligned on a boundary specified
- by `alignment`.
- If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
- memory region located at an address specified by `old_memory`.
- This procedure returns the pointer to the resized memory region.
- */
@(require_results)
scratch_resize :: proc(
	s: ^Scratch,
	old_memory: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location
) -> (ptr: rawptr, err: Allocator_Error) {
	// Pointer-returning wrapper over the slice-based variant.
	data: []byte
	data, err = scratch_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
	ptr = raw_data(data)
	return
}
- /*
- Resize an allocation owned by a scratch allocator.
- This procedure resizes a memory region specified by `old_data` to have a size
- `size` and alignment `alignment`. The newly allocated memory, if any, is
- zero-initialized.
- If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
- allocating a memory region `size` bytes in size, aligned on a boundary specified
- by `alignment`.
- If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
- memory region located at an address specified by `old_memory`.
- This procedure returns the slice of the resized memory region.
- */
@(require_results)
scratch_resize_bytes :: proc(
	s: ^Scratch,
	old_data: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location
) -> ([]byte, Allocator_Error) {
	bytes, err := scratch_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
	if bytes != nil && size > len(old_data) {
		// BUG FIX: this previously zeroed `bytes[size:]`, which is always
		// the empty tail of the `size`-length result — so the newly grown
		// region was handed back uninitialized. Zero the grown portion,
		// i.e. everything past the old length.
		zero_slice(bytes[len(old_data):])
	}
	return bytes, err
}
- /*
- Resize an allocation owned by a scratch allocator, without zero-initialization.
- This procedure resizes a memory region defined by its location `old_memory`
- and its size `old_size` to have a size `size` and alignment `alignment`. The
- newly allocated memory, if any, is not explicitly zero-initialized.
- If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
- allocating a memory region `size` bytes in size, aligned on a boundary specified
- by `alignment`.
- If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
- memory region located at an address specified by `old_memory`.
- This procedure returns the pointer to the resized memory region.
- */
@(require_results)
scratch_resize_non_zeroed :: proc(
	s: ^Scratch,
	old_memory: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location
) -> (ptr: rawptr, err: Allocator_Error) {
	// Pointer-returning wrapper over the slice-based variant.
	data: []byte
	data, err = scratch_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
	ptr = raw_data(data)
	return
}
/*
Resize an allocation owned by a scratch allocator, without zero-initialization.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.

If `old_data` is `nil`, this procedure acts just like `scratch_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
memory region specified by `old_data`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
scratch_resize_bytes_non_zeroed :: proc(
    s: ^Scratch,
    old_data: []byte,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location
) -> ([]byte, Allocator_Error) {
    old_memory := raw_data(old_data)
    old_size := len(old_data)
    if s.data == nil {
        // Lazily initialize the scratch allocator with a default backing
        // buffer on first use, guarding against the pathological case where
        // `context.allocator` is this very scratch allocator.
        DEFAULT_BACKING_SIZE :: 4 * Megabyte
        if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) {
            panic("Cyclic initialization of the scratch allocator with itself.", loc)
        }
        scratch_init(s, DEFAULT_BACKING_SIZE)
    }
    begin := uintptr(raw_data(s.data))
    end := begin + uintptr(len(s.data))
    old_ptr := uintptr(old_memory)
    // We can only sanely resize the last allocation; to do otherwise may
    // overwrite memory that could very well just have been allocated.
    //
    // Also, the alignments must match, otherwise we must re-allocate to
    // guarantee the user's request.
    if s.prev_allocation == old_memory && is_aligned(old_memory, alignment) && old_ptr+uintptr(size) < end {
        // In-place resize: simply move the bump offset; the data stays where
        // it is.
        // ensure_not_poisoned(old_data)
        // sanitizer.address_poison(old_memory)
        s.curr_offset = int(old_ptr-begin)+size
        result := byte_slice(old_memory, size)
        // sanitizer.address_unpoison(result)
        return result, nil
    }
    // Fallback path: allocate a fresh region, copy the old contents over,
    // then release the old region.
    data, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
    if err != nil {
        return data, err
    }
    runtime.copy(data, byte_slice(old_memory, old_size))
    err = scratch_free(s, old_memory, loc)
    return data, err
}
/*
Allocator procedure for the scratch allocator: dispatches each allocator mode
to the corresponding `scratch_*` procedure. `allocator_data` must point at a
`Scratch`.
*/
scratch_allocator_proc :: proc(
    allocator_data: rawptr,
    mode: Allocator_Mode,
    size, alignment: int,
    old_memory: rawptr,
    old_size: int,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    scratch := cast(^Scratch)allocator_data
    switch mode {
    case .Alloc:
        return scratch_alloc_bytes(scratch, size, alignment, loc)
    case .Alloc_Non_Zeroed:
        return scratch_alloc_bytes_non_zeroed(scratch, size, alignment, loc)
    case .Free:
        return nil, scratch_free(scratch, old_memory, loc)
    case .Free_All:
        scratch_free_all(scratch, loc)
    case .Resize:
        return scratch_resize_bytes(scratch, byte_slice(old_memory, old_size), size, alignment, loc)
    case .Resize_Non_Zeroed:
        return scratch_resize_bytes_non_zeroed(scratch, byte_slice(old_memory, old_size), size, alignment, loc)
    case .Query_Features:
        // `old_memory` doubles as a pointer to the feature set to fill in.
        if feature_set := (^Allocator_Mode_Set)(old_memory); feature_set != nil {
            feature_set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
        }
        return nil, nil
    case .Query_Info:
        return nil, .Mode_Not_Implemented
    }
    return nil, nil
}
/*
Stack allocator data.
*/
Stack :: struct {
    data:        []byte, // Backing buffer from which allocations are served.
    prev_offset: int,    // Offset of the most recent (top) allocation; used to validate frees.
    curr_offset: int,    // Offset of the first free byte in `data`.
    peak_used:   int,    // High-water mark of `curr_offset` over the allocator's lifetime.
}
/*
Header of a stack allocation.

One header is stored immediately before each allocated region.
*/
Stack_Allocation_Header :: struct {
    prev_offset: int, // Offset of the allocation made before this one; restored on free.
    padding:     int, // Bytes between the end of the previous allocation and this region (includes the header).
}
/*
Stack allocator.

The stack allocator hands out regions of its backing buffer linearly, from
start to end, with each allocation placed directly after the previous one.

Unlike the arena allocator, the stack allocator records per-allocation
metadata and enforces a strict freeing order: only the most recent allocation
may be freed, after which the one before it becomes the new top. The metadata
lives in a `Stack_Allocation_Header` placed immediately before each allocated
region, linking back to the previous allocation.
*/
@(require_results)
stack_allocator :: proc(stack: ^Stack) -> Allocator {
    return Allocator{data = stack, procedure = stack_allocator_proc}
}
/*
Initialize a stack allocator.

Sets `data` as the backing buffer of the stack allocator `s` and resets all
offsets and statistics to zero.
*/
stack_init :: proc(s: ^Stack, data: []byte) {
    // Zero-value struct literal resets prev_offset, curr_offset and
    // peak_used in one go.
    s^ = Stack{data = data}
    // sanitizer.address_poison(data)
}
/*
Allocate memory from a stack allocator.

Allocates `size` bytes aligned on an `alignment` boundary. The memory is
zero-initialized. Returns a pointer to the allocated region.
*/
@(require_results)
stack_alloc :: proc(
    s: ^Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location
) -> (rawptr, Allocator_Error) {
    data, err := stack_alloc_bytes(s, size, alignment, loc)
    return raw_data(data), err
}
/*
Allocate memory from a stack allocator.

Allocates `size` bytes aligned on an `alignment` boundary. The memory is
zero-initialized. Returns a slice of the allocated region.
*/
@(require_results)
stack_alloc_bytes :: proc(
    s: ^Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location
) -> ([]byte, Allocator_Error) {
    data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    if data != nil {
        // The non-zeroed variant does the bookkeeping; we only clear the
        // returned bytes.
        zero_slice(data)
    }
    return data, err
}
/*
Allocate memory from a stack allocator.

Allocates `size` bytes aligned on an `alignment` boundary. The memory is not
explicitly zero-initialized. Returns a pointer to the allocated region.
*/
@(require_results)
stack_alloc_non_zeroed :: proc(
    s: ^Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location
) -> (rawptr, Allocator_Error) {
    data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    return raw_data(data), err
}
/*
Allocate memory from a stack allocator.

This procedure allocates `size` bytes of memory, aligned to the boundary
specified by `alignment`. The allocated memory is not explicitly
zero-initialized. This procedure returns the slice of the allocated memory.
*/
@(require_results, no_sanitize_address)
stack_alloc_bytes_non_zeroed :: proc(
    s: ^Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location
) -> ([]byte, Allocator_Error) {
    if s.data == nil {
        panic("Allocation on an uninitialized Stack allocator.", loc)
    }
    curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
    // `padding` covers both the alignment adjustment and the space for the
    // allocation header that precedes the user region.
    padding := calc_padding_with_header(
        curr_addr,
        uintptr(alignment),
        size_of(Stack_Allocation_Header),
    )
    if s.curr_offset + padding + size > len(s.data) {
        return nil, .Out_Of_Memory
    }
    // Record the previous top-of-stack so it can be restored on free.
    old_offset := s.prev_offset
    s.prev_offset = s.curr_offset
    s.curr_offset += padding
    next_addr := curr_addr + uintptr(padding)
    // The header sits immediately below the user region.
    header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
    header.padding = padding
    header.prev_offset = old_offset
    s.curr_offset += size
    s.peak_used = max(s.peak_used, s.curr_offset)
    result := byte_slice(rawptr(next_addr), size)
    // ensure_poisoned(result)
    // sanitizer.address_unpoison(result)
    return result, nil
}
/*
Free memory back to the stack allocator.

This procedure frees the memory region starting at `old_memory` to the stack.
If the freeing is an out of order freeing, the `.Invalid_Pointer` error
is returned.
*/
stack_free :: proc(
    s: ^Stack,
    old_memory: rawptr,
    loc := #caller_location,
) -> (Allocator_Error) {
    if s.data == nil {
        panic("Free on an uninitialized Stack allocator.", loc)
    }
    if old_memory == nil {
        // Freeing nil is a no-op, mirroring conventional allocator behavior.
        return nil
    }
    start := uintptr(raw_data(s.data))
    end := start + uintptr(len(s.data))
    curr_addr := uintptr(old_memory)
    if !(start <= curr_addr && curr_addr < end) {
        panic("Out of bounds memory address passed to Stack allocator. (free)", loc)
    }
    if curr_addr >= start+uintptr(s.curr_offset) {
        // NOTE(bill): Allow double frees
        return nil
    }
    // Walk back over the padding recorded in the header to find where this
    // allocation's block began.
    header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
    old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
    if old_offset != s.prev_offset {
        // Only the most recent allocation may be freed.
        return .Invalid_Pointer
    }
    // Pop the allocation: restore the previous top and rewind the offset.
    s.prev_offset = header.prev_offset
    // sanitizer.address_poison(s.data[old_offset:s.curr_offset])
    s.curr_offset = old_offset
    return nil
}
/*
Free all memory back to the stack allocator.

Rewinds both stack offsets to zero, invalidating every outstanding
allocation at once. The backing buffer is retained.
*/
stack_free_all :: proc(s: ^Stack, loc := #caller_location) {
    s.curr_offset = 0
    s.prev_offset = 0
    // sanitizer.address_poison(s.data)
}
/*
Resize an allocation owned by a stack allocator.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size` and alignment `alignment`. The
newly allocated memory, if any, is zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `stack_free()`, freeing the
memory region located at an address specified by `old_memory`.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
stack_resize :: proc(
    s: ^Stack,
    old_memory: rawptr,
    old_size: int,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    // Forward `loc` so that any panic or diagnostic inside the resize path
    // reports the original caller; it was previously dropped here, unlike in
    // every sibling wrapper.
    bytes, err := stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
    return raw_data(bytes), err
}
/*
Resize an allocation owned by a stack allocator.

Resizes the memory region `old_data` to `size` bytes aligned on an
`alignment` boundary. Newly allocated memory, if any, is zero-initialized.
A `nil` `old_data` behaves like `stack_alloc()`; a `size` of 0 behaves like
`stack_free()`. Returns a slice of the resized region.
*/
@(require_results)
stack_resize_bytes :: proc(
    s: ^Stack,
    old_data: []byte,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    result, err := stack_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
    if err == nil {
        switch {
        case old_data == nil:
            // Fresh allocation: clear everything.
            zero_slice(result)
        case size > len(old_data):
            // Grown region: clear only the new tail.
            zero_slice(result[len(old_data):])
        }
    }
    return result, err
}
/*
Resize an allocation owned by a stack allocator, without zero-initialization.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size` and alignment `alignment`. The
newly allocated memory, if any, is not explicitly zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `stack_free()`, freeing the
memory region located at an address specified by `old_memory`.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
stack_resize_non_zeroed :: proc(
    s: ^Stack,
    old_memory: rawptr,
    old_size: int,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    // Forward `loc` so panics in the resize path report the original caller;
    // it was previously dropped here, unlike in every sibling wrapper.
    bytes, err := stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
    return raw_data(bytes), err
}
/*
Resize an allocation owned by a stack allocator, without zero-initialization.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized (except when growing in place, where the new tail
is cleared).

If `old_data` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `stack_free()`, freeing the
memory region specified by `old_data`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
stack_resize_bytes_non_zeroed :: proc(
    s: ^Stack,
    old_data: []byte,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    old_memory := raw_data(old_data)
    old_size := len(old_data)
    if s.data == nil {
        panic("Resize on an uninitialized Stack allocator.", loc)
    }
    if old_memory == nil {
        // No previous region: plain allocation.
        return stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    }
    if size == 0 {
        // Zero target size: plain free.
        return nil, stack_free(s, old_memory, loc)
    }
    start := uintptr(raw_data(s.data))
    end := start + uintptr(len(s.data))
    curr_addr := uintptr(old_memory)
    if !(start <= curr_addr && curr_addr < end) {
        // Pass `loc` so the panic reports the caller, consistent with every
        // other panic in this allocator (it was previously omitted here).
        panic("Out of bounds memory address passed to Stack allocator. (resize)", loc)
    }
    if curr_addr >= start+uintptr(s.curr_offset) {
        // NOTE(bill): Allow double frees
        return nil, nil
    }
    if uintptr(old_memory) & uintptr(alignment-1) != 0 {
        // A different alignment has been requested and the current address
        // does not satisfy it: reallocate and copy.
        data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
        if err == nil {
            runtime.copy(data, byte_slice(old_memory, old_size))
            // sanitizer.address_poison(old_memory)
        }
        return data, err
    }
    if old_size == size {
        // Nothing to do.
        return byte_slice(old_memory, size), nil
    }
    header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
    old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
    if old_offset != header.prev_offset {
        // NOTE(review): this compares against the header's saved previous
        // offset, whereas `stack_free` compares against `s.prev_offset` —
        // confirm which is intended; behavior preserved here.
        data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
        if err == nil {
            runtime.copy(data, byte_slice(old_memory, old_size))
            // sanitizer.address_poison(old_memory)
        }
        return data, err
    }
    old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
    assert(old_memory_size == uintptr(old_size))
    diff := size - old_size
    s.curr_offset += diff // works for smaller sizes too
    if diff > 0 {
        // Zero only the newly exposed tail [old_size, size). The previous
        // code zeroed starting at `curr_addr + diff`, which both missed the
        // head of the new region and overwrote `diff` bytes of live data
        // whenever diff != old_size.
        zero(rawptr(curr_addr + uintptr(old_size)), diff)
    } else {
        // sanitizer.address_poison(old_data[size:])
    }
    result := byte_slice(old_memory, size)
    // ensure_poisoned(result)
    // sanitizer.address_unpoison(result)
    return result, nil
}
/*
Allocator procedure for the stack allocator: dispatches each allocator mode
to the corresponding `stack_*` procedure. `allocator_data` must point at a
`Stack`; an uninitialized stack yields `.Invalid_Argument`.
*/
stack_allocator_proc :: proc(
    allocator_data: rawptr,
    mode: Allocator_Mode,
    size: int,
    alignment: int,
    old_memory: rawptr,
    old_size: int,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    stack := (^Stack)(allocator_data)
    if stack.data == nil {
        return nil, .Invalid_Argument
    }
    switch mode {
    case .Alloc:
        return stack_alloc_bytes(stack, size, alignment, loc)
    case .Alloc_Non_Zeroed:
        return stack_alloc_bytes_non_zeroed(stack, size, alignment, loc)
    case .Free:
        return nil, stack_free(stack, old_memory, loc)
    case .Free_All:
        stack_free_all(stack, loc)
    case .Resize:
        return stack_resize_bytes(stack, byte_slice(old_memory, old_size), size, alignment, loc)
    case .Resize_Non_Zeroed:
        return stack_resize_bytes_non_zeroed(stack, byte_slice(old_memory, old_size), size, alignment, loc)
    case .Query_Features:
        // `old_memory` doubles as a pointer to the feature set to fill in.
        if feature_set := (^Allocator_Mode_Set)(old_memory); feature_set != nil {
            feature_set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
        }
        return nil, nil
    case .Query_Info:
        return nil, .Mode_Not_Implemented
    }
    return nil, nil
}
/*
Allocation header of the small stack allocator.

Stored immediately before each allocated region; only one byte wide.
*/
Small_Stack_Allocation_Header :: struct {
    padding: u8, // Bytes between the end of the previous allocation and this region (includes the header).
}
/*
Small stack allocator data.
*/
Small_Stack :: struct {
    data:      []byte, // Backing buffer from which allocations are served.
    offset:    int,    // Offset of the first free byte in `data`.
    peak_used: int,    // High-water mark of `offset` over the allocator's lifetime.
}
/*
Initialize a small stack allocator.

Sets `data` as the backing buffer of the small stack allocator `s` and
resets the offset and statistics to zero.
*/
small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
    // Zero-value struct literal resets offset and peak_used in one go.
    s^ = Small_Stack{data = data}
    // sanitizer.address_poison(data)
}
/*
Small stack allocator.

Behaves like the `Stack` allocator but with an extremely small (one-byte)
allocation header. Unlike `Stack`, out-of-order frees are permitted, with
the caveat that freeing rewinds the stack: every allocation made after the
freed one is invalidated as soon as subsequent allocations start reusing
that memory.

Memory is handed out linearly from the backing buffer, each allocation
directly after the previous one. The header before each region records the
padding between it and the end of the preceding allocation.
*/
@(require_results)
small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
    return Allocator{data = stack, procedure = small_stack_allocator_proc}
}
/*
Allocate memory from a small stack allocator.

Allocates `size` bytes aligned on an `alignment` boundary. The memory is
zero-initialized. Returns a pointer to the allocated region.
*/
@(require_results)
small_stack_alloc :: proc(
    s: ^Small_Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    data, err := small_stack_alloc_bytes(s, size, alignment, loc)
    return raw_data(data), err
}
/*
Allocate memory from a small stack allocator.

Allocates `size` bytes aligned on an `alignment` boundary. The memory is
zero-initialized. Returns a slice of the allocated region.
*/
@(require_results)
small_stack_alloc_bytes :: proc(
    s: ^Small_Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    if data != nil {
        // The non-zeroed variant does the bookkeeping; we only clear the
        // returned bytes.
        zero_slice(data)
    }
    return data, err
}
/*
Allocate memory from a small stack allocator.

Allocates `size` bytes aligned on an `alignment` boundary. The memory is not
explicitly zero-initialized. Returns a pointer to the allocated region.
*/
@(require_results)
small_stack_alloc_non_zeroed :: proc(
    s: ^Small_Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    return raw_data(data), err
}
/*
Allocate memory from a small stack allocator.

This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is not explicitly zero-initialized. This
procedure returns a slice of the allocated memory region.
*/
@(require_results, no_sanitize_address)
small_stack_alloc_bytes_non_zeroed :: proc(
    s: ^Small_Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    if s.data == nil {
        panic("Allocation on an uninitialized Small Stack allocator.", loc)
    }
    alignment := alignment
    // Clamp the alignment so the resulting padding fits in the u8 header
    // field. NOTE(review): this reads `Stack_Allocation_Header{}.padding`
    // (the big stack's int field), not `Small_Stack_Allocation_Header` —
    // confirm whether that is intentional; behavior preserved here.
    alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)
    curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
    // `padding` covers both the alignment adjustment and the one-byte header.
    padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
    if s.offset + padding + size > len(s.data) {
        return nil, .Out_Of_Memory
    }
    s.offset += padding
    next_addr := curr_addr + uintptr(padding)
    header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
    header.padding = cast(u8)padding
    // We must poison the header, no matter what its state is, because there
    // may have been an out-of-order free before this point.
    // sanitizer.address_poison(header)
    s.offset += size
    s.peak_used = max(s.peak_used, s.offset)
    result := byte_slice(rawptr(next_addr), size)
    // NOTE: We cannot ensure the poison state of this allocation, because this
    // allocator allows out-of-order frees with overwriting.
    // sanitizer.address_unpoison(result)
    return result, nil
}
/*
Free memory back to the small stack allocator.

This procedure frees the memory region starting at `old_memory`. Unlike the
`Stack` allocator, out-of-order frees are permitted: freeing rewinds the
stack offset to the start of the given allocation, which implicitly
invalidates every allocation made after it once new allocations reuse that
memory.
*/
small_stack_free :: proc(
    s: ^Small_Stack,
    old_memory: rawptr,
    loc := #caller_location,
) -> Allocator_Error {
    if s.data == nil {
        panic("Free on an uninitialized Small Stack allocator.", loc)
    }
    if old_memory == nil {
        // Freeing nil is a no-op, mirroring conventional allocator behavior.
        return nil
    }
    start := uintptr(raw_data(s.data))
    end := start + uintptr(len(s.data))
    curr_addr := uintptr(old_memory)
    if !(start <= curr_addr && curr_addr < end) {
        panic("Out of bounds memory address passed to Small Stack allocator. (free)", loc)
    }
    if curr_addr >= start+uintptr(s.offset) {
        // NOTE(bill): Allow double frees
        return nil
    }
    // Walk back over the padding recorded in the header to find where this
    // allocation's block began, then rewind the stack to that point.
    header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
    old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
    // sanitizer.address_poison(s.data[old_offset:s.offset])
    s.offset = old_offset
    return nil
}
/*
Free all memory back to the small stack allocator.

Rewinds the offset to zero, invalidating every outstanding allocation at
once. The backing buffer is retained.
*/
small_stack_free_all :: proc(s: ^Small_Stack) {
    s.offset = 0
    // sanitizer.address_poison(s.data)
}
/*
Resize an allocation owned by a small stack allocator.

Resizes the memory region starting at `old_memory` with size `old_size` to
`size` bytes aligned on an `alignment` boundary. Newly allocated memory, if
any, is zero-initialized. A `nil` `old_memory` behaves like
`small_stack_alloc()`; a `size` of 0 behaves like `small_stack_free()`.
Returns a pointer to the resized region.
*/
@(require_results)
small_stack_resize :: proc(
    s: ^Small_Stack,
    old_memory: rawptr,
    old_size: int,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    old_slice := byte_slice(old_memory, old_size)
    new_slice, err := small_stack_resize_bytes(s, old_slice, size, alignment, loc)
    return raw_data(new_slice), err
}
/*
Resize an allocation owned by a small stack allocator.

Resizes the memory region `old_data` to `size` bytes aligned on an
`alignment` boundary. Newly allocated memory, if any, is zero-initialized.
A `nil` `old_data` behaves like `small_stack_alloc()`; a `size` of 0 behaves
like `small_stack_free()`. Returns a slice of the resized region.
*/
@(require_results)
small_stack_resize_bytes :: proc(
    s: ^Small_Stack,
    old_data: []byte,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    result, err := small_stack_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
    if result != nil {
        switch {
        case old_data == nil:
            // Fresh allocation: clear everything.
            zero_slice(result)
        case size > len(old_data):
            // Grown region: clear only the new tail.
            zero_slice(result[len(old_data):])
        }
    }
    return result, err
}
/*
Resize an allocation owned by a small stack allocator, without zero-initialization.

Resizes the memory region starting at `old_memory` with size `old_size` to
`size` bytes aligned on an `alignment` boundary. Newly allocated memory, if
any, is not explicitly zero-initialized. A `nil` `old_memory` behaves like
`small_stack_alloc()`; a `size` of 0 behaves like `small_stack_free()`.
Returns a pointer to the resized region.
*/
@(require_results)
small_stack_resize_non_zeroed :: proc(
    s: ^Small_Stack,
    old_memory: rawptr,
    old_size: int,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    old_slice := byte_slice(old_memory, old_size)
    new_slice, err := small_stack_resize_bytes_non_zeroed(s, old_slice, size, alignment, loc)
    return raw_data(new_slice), err
}
/*
Resize an allocation owned by a small stack allocator, without zero-initialization.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.

If `old_data` is `nil`, this procedure acts just like `small_stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.

If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the
memory region specified by `old_data`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
small_stack_resize_bytes_non_zeroed :: proc(
    s: ^Small_Stack,
    old_data: []byte,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    if s.data == nil {
        panic("Resize on an uninitialized Small Stack allocator.", loc)
    }
    old_memory := raw_data(old_data)
    old_size := len(old_data)
    alignment := alignment
    // Clamp the alignment so the resulting padding fits in the header field.
    // NOTE(review): reads `Stack_Allocation_Header{}.padding` (the big
    // stack's int field), mirroring `small_stack_alloc_bytes_non_zeroed` —
    // confirm whether `Small_Stack_Allocation_Header` was intended.
    alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)
    if old_memory == nil {
        // No previous region: plain allocation.
        return small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    }
    if size == 0 {
        // Zero target size: plain free.
        return nil, small_stack_free(s, old_memory, loc)
    }
    start := uintptr(raw_data(s.data))
    end := start + uintptr(len(s.data))
    curr_addr := uintptr(old_memory)
    if !(start <= curr_addr && curr_addr < end) {
        panic("Out of bounds memory address passed to Small Stack allocator. (resize)", loc)
    }
    if curr_addr >= start+uintptr(s.offset) {
        // NOTE(bill): Treat as a double free
        return nil, nil
    }
    if uintptr(old_memory) & uintptr(alignment-1) != 0 {
        // A different alignment has been requested and the current address
        // does not satisfy it.
        data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
        if err == nil {
            runtime.copy(data, byte_slice(old_memory, old_size))
            // sanitizer.address_poison(old_memory)
        }
        return data, err
    }
    if old_size == size {
        // Same size and satisfactory alignment: return the region unchanged.
        result := byte_slice(old_memory, size)
        // sanitizer.address_unpoison(result)
        return result, nil
    }
    // Any other size change is served by a fresh allocation plus copy; the
    // old region is left in place to be reclaimed by a later free/rewind.
    data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    if err == nil {
        runtime.copy(data, byte_slice(old_memory, old_size))
    }
    return data, err
}
/*
Allocator procedure for the small stack allocator: dispatches each allocator
mode to the corresponding `small_stack_*` procedure. `allocator_data` must
point at a `Small_Stack`; an uninitialized stack yields `.Invalid_Argument`.
*/
small_stack_allocator_proc :: proc(
    allocator_data: rawptr,
    mode: Allocator_Mode,
    size, alignment: int,
    old_memory: rawptr,
    old_size: int,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    stack := (^Small_Stack)(allocator_data)
    if stack.data == nil {
        return nil, .Invalid_Argument
    }
    switch mode {
    case .Alloc:
        return small_stack_alloc_bytes(stack, size, alignment, loc)
    case .Alloc_Non_Zeroed:
        return small_stack_alloc_bytes_non_zeroed(stack, size, alignment, loc)
    case .Free:
        return nil, small_stack_free(stack, old_memory, loc)
    case .Free_All:
        small_stack_free_all(stack)
    case .Resize:
        return small_stack_resize_bytes(stack, byte_slice(old_memory, old_size), size, alignment, loc)
    case .Resize_Non_Zeroed:
        return small_stack_resize_bytes_non_zeroed(stack, byte_slice(old_memory, old_size), size, alignment, loc)
    case .Query_Features:
        // `old_memory` doubles as a pointer to the feature set to fill in.
        if feature_set := (^Allocator_Mode_Set)(old_memory); feature_set != nil {
            feature_set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
        }
        return nil, nil
    case .Query_Info:
        return nil, .Mode_Not_Implemented
    }
    return nil, nil
}
/*
Preserved for compatibility: `Dynamic_Pool` was renamed to `Dynamic_Arena`.
These aliases keep older code compiling against the new names.
*/
Dynamic_Pool :: Dynamic_Arena
DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT
dynamic_pool_allocator_proc :: dynamic_arena_allocator_proc
dynamic_pool_free_all :: dynamic_arena_free_all
dynamic_pool_reset :: dynamic_arena_reset
dynamic_pool_alloc_bytes :: dynamic_arena_alloc_bytes
dynamic_pool_alloc :: dynamic_arena_alloc
dynamic_pool_init :: dynamic_arena_init
dynamic_pool_allocator :: dynamic_arena_allocator
dynamic_pool_destroy :: dynamic_arena_destroy
/*
Default size, in bytes, of each block allocated by the dynamic arena.
*/
DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT :: 65536
/*
Default out-of-band threshold, in bytes, of the dynamic arena: allocations
larger than this are served from their own dedicated block.
*/
DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT :: 6554
/*
Dynamic arena allocator data.
*/
Dynamic_Arena :: struct {
    block_size:           int,            // Size of each normal block, in bytes.
    out_band_size:        int,            // Threshold above which an allocation gets its own out-of-band block.
    alignment:            int,            // Alignment applied to every allocation.
    unused_blocks:        [dynamic]rawptr, // Blocks kept for reuse after a reset.
    used_blocks:          [dynamic]rawptr, // Blocks that have been filled and retired.
    out_band_allocations: [dynamic]rawptr, // Dedicated blocks for oversized allocations.
    current_block:        rawptr,         // Block currently being bump-allocated from.
    current_pos:          rawptr,         // Next free position inside `current_block`.
    bytes_left:           int,            // Free bytes remaining in `current_block`.
    block_allocator:      Allocator,      // Allocator used to obtain new blocks.
}
/*
Initialize a dynamic arena.

Configures `pool` so that arena blocks are obtained from `block_allocator`
and the internal bookkeeping arrays (used/unused blocks and out-of-band
allocations) from `array_allocator`. Blocks default to `block_size` bytes,
allocations larger than `out_band_size` go out-of-band, and every allocation
is aligned on an `alignment` boundary.
*/
dynamic_arena_init :: proc(
    pool: ^Dynamic_Arena,
    block_allocator := context.allocator,
    array_allocator := context.allocator,
    block_size := DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT,
    out_band_size := DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT,
    alignment := DEFAULT_ALIGNMENT,
) {
    pool.alignment = alignment
    pool.block_size = block_size
    pool.out_band_size = out_band_size
    pool.block_allocator = block_allocator
    // All three bookkeeping arrays share the array allocator.
    pool.unused_blocks.allocator = array_allocator
    pool.used_blocks.allocator = array_allocator
    pool.out_band_allocations.allocator = array_allocator
}
/*
Dynamic arena allocator.

The dynamic arena serves allocations contiguously out of fixed-size blocks
that are obtained on demand from the block allocator, much like `Arena`.
When an allocation does not fit in the current block and is no larger than
the out-of-band threshold, a new block is started and the allocation is made
there. Allocations above the threshold each get a dedicated block (an
*out-of-band allocation*), tracked separately from the normal blocks.

As with `Arena`, individual objects cannot be freed.
*/
@(require_results)
dynamic_arena_allocator :: proc(a: ^Dynamic_Arena) -> Allocator {
    return Allocator{data = a, procedure = dynamic_arena_allocator_proc}
}
/*
Destroy a dynamic arena.

Frees every allocation made through the arena, including unused cached
blocks, releases the bookkeeping arrays, and zeroes the arena struct so it
cannot be used accidentally afterwards.
*/
dynamic_arena_destroy :: proc(a: ^Dynamic_Arena) {
    dynamic_arena_free_all(a)
    delete(a.out_band_allocations)
    delete(a.used_blocks)
    delete(a.unused_blocks)
    zero(a, size_of(a^))
}
// Retire the current block (if any) and make a fresh block current.
// The new block is popped from `unused_blocks` when available, otherwise
// allocated from `block_allocator`. On allocation failure `err` is returned
// and `current_block` is left `nil`, which the caller must check.
@(private="file")
_dynamic_arena_cycle_new_block :: proc(a: ^Dynamic_Arena, loc := #caller_location) -> (err: Allocator_Error) {
	if a.block_allocator.procedure == nil {
		panic("You must call `dynamic_arena_init` on a Dynamic Arena before using it.", loc)
	}
	// The exhausted block is kept so reset/free_all can recycle or free it.
	if a.current_block != nil {
		append(&a.used_blocks, a.current_block, loc=loc)
	}
	new_block: rawptr
	if len(a.unused_blocks) > 0 {
		// Prefer recycling a block returned by a previous reset.
		new_block = pop(&a.unused_blocks)
	} else {
		data: []byte
		data, err = a.block_allocator.procedure(
			a.block_allocator.data,
			Allocator_Mode.Alloc,
			a.block_size,
			a.alignment,
			nil,
			0,
		)
		// sanitizer.address_poison(data)
		// On failure `data` is nil, so `new_block` stays nil below.
		new_block = raw_data(data)
	}
	a.bytes_left = a.block_size
	a.current_pos = new_block
	a.current_block = new_block
	return
}
- /*
- Allocate memory from a dynamic arena.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment` from a dynamic arena `a`. The allocated memory is
- zero-initialized. This procedure returns a pointer to the newly allocated memory
- region.
- */
@(require_results)
dynamic_arena_alloc :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) {
	// Pointer-returning convenience wrapper over the slice-based variant.
	bytes, err := dynamic_arena_alloc_bytes(a, size, loc)
	return raw_data(bytes), err
}
- /*
- Allocate memory from a dynamic arena.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment` from a dynamic arena `a`. The allocated memory is
- zero-initialized. This procedure returns a slice of the newly allocated memory
- region.
- */
@(require_results)
dynamic_arena_alloc_bytes :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	// Grab uninitialized memory first, then clear it before handing it out.
	data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
	if data != nil {
		zero_slice(data)
	}
	return data, err
}
- /*
- Allocate non-initialized memory from a dynamic arena.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment` from a dynamic arena `a`. The allocated memory is not explicitly
- zero-initialized. This procedure returns a pointer to the newly allocated
- memory region.
- */
@(require_results)
dynamic_arena_alloc_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) {
	// Pointer-returning convenience wrapper over the slice-based variant.
	bytes, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
	return raw_data(bytes), err
}
- /*
- Allocate non-initialized memory from a dynamic arena.
- This procedure allocates `size` bytes of memory aligned on a boundary specified
- by `alignment` from a dynamic arena `a`. The allocated memory is not explicitly
- zero-initialized. This procedure returns a slice of the newly allocated
- memory region.
- */
@(require_results)
dynamic_arena_alloc_bytes_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	n := align_formula(size, a.alignment)
	// No allocation may exceed `block_size`: out-band blocks are also
	// exactly `block_size` bytes, so anything larger cannot be served.
	if n > a.block_size {
		return nil, .Invalid_Argument
	}
	if n >= a.out_band_size {
		// Out-band path: the allocation gets a dedicated block, tracked
		// individually so reset/free_all can return it to the allocator.
		assert(a.block_allocator.procedure != nil, "Backing block allocator must be initialized", loc=loc)
		memory, err := alloc_bytes_non_zeroed(a.block_size, a.alignment, a.block_allocator, loc)
		if memory == nil {
			return nil, err
		}
		append(&a.out_band_allocations, raw_data(memory), loc = loc)
		// Return exactly `size` bytes so the result is consistent with the
		// in-block path below (previously the whole block was returned).
		return memory[:size], err
	}
	// In-block path: bump-allocate; cycle in a fresh block when the current
	// one cannot fit the aligned request.
	if a.bytes_left < n {
		err := _dynamic_arena_cycle_new_block(a, loc)
		if err != nil {
			return nil, err
		}
		if a.current_block == nil {
			return nil, .Out_Of_Memory
		}
	}
	memory := a.current_pos
	a.current_pos = ([^]byte)(a.current_pos)[n:]
	a.bytes_left -= n
	result := ([^]byte)(memory)[:size]
	// ensure_poisoned(result)
	// sanitizer.address_unpoison(result)
	return result, nil
}
- /*
- Reset a dynamic arena allocator.
- This procedure frees all the allocations owned by the dynamic arena, excluding
- the unused blocks.
- */
dynamic_arena_reset :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
	// Recycle the active block onto the free list for later reuse.
	if a.current_block != nil {
		// sanitizer.address_poison(a.current_block, a.block_size)
		append(&a.unused_blocks, a.current_block, loc=loc)
		a.current_block = nil
	}
	// Every fully-used block also becomes available again.
	for block in a.used_blocks {
		// sanitizer.address_poison(block, a.block_size)
		append(&a.unused_blocks, block, loc=loc)
	}
	clear(&a.used_blocks)
	// Out-band allocations are tracked individually and are not recycled;
	// they go straight back to the block allocator.
	for allocation in a.out_band_allocations {
		free(allocation, a.block_allocator, loc=loc)
	}
	clear(&a.out_band_allocations)
	a.bytes_left = 0 // Make new allocations call `_dynamic_arena_cycle_new_block` again.
}
- /*
- Free all memory back to the dynamic arena allocator.
- This procedure frees all the allocations owned by the dynamic arena, including
- the unused blocks.
- */
dynamic_arena_free_all :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
	// First recycle/free everything via reset, then release the recycled
	// blocks themselves. Forward `loc` so allocator diagnostics point at
	// the original caller (it was previously dropped here).
	dynamic_arena_reset(a, loc)
	for block in a.unused_blocks {
		// sanitizer.address_unpoison(block, a.block_size)
		free(block, a.block_allocator, loc)
	}
	clear(&a.unused_blocks)
}
- /*
- Resize an allocation owned by a dynamic arena allocator.
- This procedure resizes a memory region defined by its location `old_memory`
- and its size `old_size` to have a size `size` and alignment `alignment`. The
- newly allocated memory, if any, is zero-initialized.
- If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
- allocating a memory region `size` bytes in size, aligned on a boundary specified
- by `alignment`.
- This procedure returns the pointer to the resized memory region.
- */
@(require_results)
dynamic_arena_resize :: proc(
	a: ^Dynamic_Arena,
	old_memory: rawptr,
	old_size: int,
	size: int,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	// Pointer-based wrapper: view the old region as a slice and delegate.
	old := byte_slice(old_memory, old_size)
	data, err := dynamic_arena_resize_bytes(a, old, size, loc)
	return raw_data(data), err
}
- /*
- Resize an allocation owned by a dynamic arena allocator.
- This procedure resizes a memory region specified by `old_data` to have a size
- `size` and alignment `alignment`. The newly allocated memory, if any, is
- zero-initialized.
- If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
- allocating a memory region `size` bytes in size, aligned on a boundary specified
- by `alignment`.
- This procedure returns the slice of the resized memory region.
- */
@(require_results)
dynamic_arena_resize_bytes :: proc(
	a: ^Dynamic_Arena,
	old_data: []byte,
	size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	// A zero-size resize is a free request, which this allocator ignores.
	if size == 0 {
		// NOTE: This allocator has no Free mode.
		return nil, nil
	}
	data, err := dynamic_arena_resize_bytes_non_zeroed(a, old_data, size, loc)
	if data == nil {
		return data, err
	}
	// Zero only the bytes that were not copied over from the old region.
	switch {
	case old_data == nil:
		zero_slice(data)
	case size > len(old_data):
		zero_slice(data[len(old_data):])
	}
	return data, err
}
- /*
- Resize an allocation owned by a dynamic arena allocator, without zero-initialization.
- This procedure resizes a memory region defined by its location `old_memory`
- and its size `old_size` to have a size `size` and alignment `alignment`. The
- newly allocated memory, if any, is not explicitly zero-initialized.
- If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
- allocating a memory region `size` bytes in size, aligned on a boundary specified
- by `alignment`.
- This procedure returns the pointer to the resized memory region.
- */
@(require_results)
dynamic_arena_resize_non_zeroed :: proc(
	a: ^Dynamic_Arena,
	old_memory: rawptr,
	old_size: int,
	size: int,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	// Pointer-based wrapper: view the old region as a slice and delegate.
	old := byte_slice(old_memory, old_size)
	data, err := dynamic_arena_resize_bytes_non_zeroed(a, old, size, loc)
	return raw_data(data), err
}
- /*
- Resize an allocation owned by a dynamic arena allocator, without zero-initialization.
- This procedure resizes a memory region specified by `old_data` to have a size
- `size` and alignment `alignment`. The newly allocated memory, if any, is not
- explicitly zero-initialized.
- If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
- allocating a memory region `size` bytes in size, aligned on a boundary specified
- by `alignment`.
- This procedure returns the slice of the resized memory region.
- */
@(require_results)
dynamic_arena_resize_bytes_non_zeroed :: proc(
	a: ^Dynamic_Arena,
	old_data: []byte,
	size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	// A zero-size resize is a free request, which this allocator ignores.
	if size == 0 {
		// NOTE: This allocator has no Free mode.
		return nil, nil
	}
	// Shrinking (or keeping the size) is satisfied in place by re-slicing.
	if len(old_data) >= size {
		// sanitizer.address_poison(old_data[size:])
		return byte_slice(raw_data(old_data), size), nil
	}
	// No information is kept about allocations in this allocator, thus we
	// cannot truly resize anything and must reallocate, then copy.
	new_data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
	if err == nil {
		runtime.copy(new_data, old_data)
	}
	return new_data, err
}
// Allocator-interface dispatch for `Dynamic_Arena`.
// NOTE: the `alignment` parameter is ignored; the arena's configured
// alignment (set at init) is used for all allocations.
dynamic_arena_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size: int,
	alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	arena := (^Dynamic_Arena)(allocator_data)
	switch mode {
	case .Alloc:
		return dynamic_arena_alloc_bytes(arena, size, loc)
	case .Alloc_Non_Zeroed:
		return dynamic_arena_alloc_bytes_non_zeroed(arena, size, loc)
	case .Free:
		// Individual frees are not supported by this allocator.
		return nil, .Mode_Not_Implemented
	case .Free_All:
		dynamic_arena_free_all(arena, loc)
	case .Resize:
		return dynamic_arena_resize_bytes(arena, byte_slice(old_memory, old_size), size, loc)
	case .Resize_Non_Zeroed:
		return dynamic_arena_resize_bytes_non_zeroed(arena, byte_slice(old_memory, old_size), size, loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info}
		}
		return nil, nil
	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			// No per-allocation metadata is kept, so the configured block
			// size and alignment are reported, not the allocation's own size.
			info.size = arena.block_size
			info.alignment = arena.alignment
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}
	return nil, nil
}
- /*
- Header of the buddy block.
- */
Buddy_Block :: struct #align(align_of(uint)) {
	size: uint, // Total span of this block including the header; `buddy_block_next` advances by this amount.
	is_free: bool, // True while the block is available for allocation or coalescing.
}
- /*
- Obtain the next buddy block.
- */
@(require_results, no_sanitize_address)
buddy_block_next :: proc(block: ^Buddy_Block) -> ^Buddy_Block {
	// The next header sits exactly `block.size` bytes past this header.
	return (^Buddy_Block)(([^]byte)(block)[block.size:])
}
- /*
- Split the block into two, by truncating the given block to a given size.
- */
@(require_results, no_sanitize_address)
buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block {
	block := block
	if block != nil && size != 0 {
		// Recursive Split: halve the block while it is still larger than
		// `size`, descending into the upper buddy each time. The upper
		// half is marked free; it is the block eventually returned.
		for size < block.size {
			sz := block.size >> 1
			block.size = sz
			block = buddy_block_next(block)
			block.size = sz
			block.is_free = true
		}
		// At this point `block.size` is the smallest power-of-two span
		// that still holds `size` (loop exits when size >= block.size).
		if size <= block.size {
			return block
		}
	}
	// Block cannot fit the requested allocation size
	return nil
}
- /*
- Coalesce contiguous blocks in a range of blocks into one.
- */
@(no_sanitize_address)
buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) {
	for {
		// Keep looping until there are no more buddies to coalesce
		block := head
		buddy := buddy_block_next(block)
		no_coalescence := true
		for block < tail && buddy < tail { // make sure the buddies are within the range
			if block.is_free && buddy.is_free && block.size == buddy.size {
				// Coalesce buddies into one
				block.size <<= 1
				block = buddy_block_next(block)
				if block < tail {
					buddy = buddy_block_next(block)
					// A merge happened; another full pass may reveal new pairs.
					no_coalescence = false
				}
			} else if block.size < buddy.size {
				// The buddy block is split into smaller blocks
				block = buddy
				buddy = buddy_block_next(buddy)
			} else {
				// Advance past this pair without merging.
				block = buddy_block_next(buddy)
				if block < tail {
					// Leave the buddy block for the next iteration
					buddy = buddy_block_next(block)
				}
			}
		}
		if no_coalescence {
			return
		}
	}
}
- /*
- Find the best block for storing a given size in a range of blocks.
- */
@(require_results, no_sanitize_address)
buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block {
	assert(size != 0)
	best_block: ^Buddy_Block
	block := head // left
	buddy := buddy_block_next(block) // right
	// The entire memory section between head and tail is free,
	// just call 'buddy_block_split' to get the allocation
	if buddy == tail && block.is_free {
		return buddy_block_split(block, size)
	}
	// Find the block which is the 'best_block' to requested allocation sized
	for block < tail && buddy < tail { // make sure the buddies are within the range
		// If both buddies are free, coalesce them together
		// NOTE: this is an optimization to reduce fragmentation
		// this could be completely ignored
		if block.is_free && buddy.is_free && block.size == buddy.size {
			block.size <<= 1
			// The merged block may itself be the tightest fit so far.
			if size <= block.size && (best_block == nil || block.size <= best_block.size) {
				best_block = block
			}
			block = buddy_block_next(buddy)
			if block < tail {
				// Delay the buddy block for the next iteration
				buddy = buddy_block_next(block)
			}
			continue
		}
		// Prefer the smallest free block that still fits (best-fit search).
		if block.is_free && size <= block.size &&
			(best_block == nil || block.size <= best_block.size) {
			best_block = block
		}
		if buddy.is_free && size <= buddy.size &&
			(best_block == nil || buddy.size < best_block.size) {
			// If each buddy are the same size, then it makes more sense
			// to pick the buddy as it "bounces around" less
			best_block = buddy
		}
		if block.size <= buddy.size {
			block = buddy_block_next(buddy)
			if (block < tail) {
				// Delay the buddy block for the next iteration
				buddy = buddy_block_next(block)
			}
		} else {
			// Buddy was split into smaller blocks
			block = buddy
			buddy = buddy_block_next(buddy)
		}
	}
	if best_block != nil {
		// This will handle the case if the 'best_block' is also the perfect fit
		return buddy_block_split(best_block, size)
	}
	// Maybe out of memory
	return nil
}
- /*
- The buddy allocator data.
- */
Buddy_Allocator :: struct {
	head: ^Buddy_Block, // First block header; start of the backing buffer.
	tail: ^Buddy_Block `fmt:"-"`, // One past the end of the backing buffer; never dereferenced as a block.
	alignment: uint, // Per-allocation padding; at least `size_of(Buddy_Block)` (enforced at init).
}
- /*
- Buddy allocator.
- The buddy allocator is a type of allocator that splits the backing buffer into
- multiple regions called buddy blocks. Initially, the allocator only has one
- block with the size of the backing buffer. Upon each allocation, the allocator
- finds the smallest block that can fit the size of requested memory region, and
- splits the block according to the allocation size. If no block can be found,
- the contiguous free blocks are coalesced and the search is performed again.
- */
@(require_results)
buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator {
	// Package the buddy allocator state with its dispatch procedure.
	result: Allocator
	result.procedure = buddy_allocator_proc
	result.data = b
	return result
}
- /*
- Initialize a buddy allocator.
- This procedure initializes the buddy allocator `b` with a backing buffer `data`
- and block alignment specified by `alignment`.
- `alignment` may be any power of two, but the backing buffer must be aligned to
- at least `size_of(Buddy_Block)`.
- */
buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint, loc := #caller_location) {
	assert(data != nil)
	assert(is_power_of_two(uintptr(len(data))), "Size of the backing buffer must be power of two", loc)
	assert(is_power_of_two(uintptr(alignment)), "Alignment must be a power of two", loc)
	alignment := alignment
	// The per-allocation padding must at least cover the block header.
	if alignment < size_of(Buddy_Block) {
		alignment = size_of(Buddy_Block)
	}
	ptr := raw_data(data)
	assert(uintptr(ptr) % uintptr(alignment) == 0, "The data is not aligned to the minimum alignment, which must be at least `size_of(Buddy_Block)`.", loc)
	// The whole buffer starts out as a single free block spanning `data`.
	b.head = (^Buddy_Block)(ptr)
	b.head.size = uint(len(data)) // explicit conversion: `len` yields `int`, the field is `uint`
	b.head.is_free = true
	b.tail = buddy_block_next(b.head)
	b.alignment = alignment
	assert(uint(len(data)) >= 2 * buddy_block_size_required(b, 1), "The size of the backing buffer must be large enough to hold at least two 1-byte allocations given the alignment requirements, otherwise it cannot split.", loc)
	// sanitizer.address_poison(data)
}
- /*
- Get required block size to fit in the allocation as well as the alignment padding.
- */
@(require_results)
buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint {
	assert(size > 0)
	// NOTE: `size_of(Buddy_Block)` will be accounted for in `b.alignment`.
	// This calculation is also previously guarded against being given a `size`
	// 0 by `buddy_allocator_alloc_bytes_non_zeroed` checking for that.
	actual_size := b.alignment + size
	if intrinsics.count_ones(actual_size) != 1 {
		// We're not a power of two. Let's fix that.
		// Round up to the next power of two: shift 1 past the highest set bit.
		actual_size = 1 << (size_of(uint) * 8 - intrinsics.count_leading_zeros(actual_size))
	}
	return actual_size
}
- /*
- Allocate memory from a buddy allocator.
- This procedure allocates `size` bytes of memory. The allocation's alignment is
- fixed to the `alignment` specified at initialization. The allocated memory
- region is zero-initialized. This procedure returns a pointer to the allocated
- memory region.
- */
@(require_results, no_sanitize_address)
buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) {
	// Pointer-returning convenience wrapper over the zeroing slice variant.
	data, err := buddy_allocator_alloc_bytes(b, size)
	return raw_data(data), err
}
- /*
- Allocate memory from a buddy allocator.
- This procedure allocates `size` bytes of memory. The allocation's alignment is
- fixed to the `alignment` specified at initialization. The allocated memory
- region is zero-initialized. This procedure returns a slice of the allocated
- memory region.
- */
@(require_results, no_sanitize_address)
buddy_allocator_alloc_bytes :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) {
	// Obtain uninitialized memory first, then clear it for the caller.
	data, err := buddy_allocator_alloc_bytes_non_zeroed(b, size)
	if data != nil {
		zero_slice(data)
	}
	return data, err
}
- /*
- Allocate non-initialized memory from a buddy allocator.
- This procedure allocates `size` bytes of memory. The allocation's alignment is
- fixed to the `alignment` specified at initialization. The allocated memory
- region is not explicitly zero-initialized. This procedure returns a pointer to
- the allocated memory region.
- */
@(require_results, no_sanitize_address)
buddy_allocator_alloc_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) {
	// Pointer-returning convenience wrapper over the slice-based variant.
	data, err := buddy_allocator_alloc_bytes_non_zeroed(b, size)
	return raw_data(data), err
}
- /*
- Allocate non-initialized memory from a buddy allocator.
- This procedure allocates `size` bytes of memory. The allocation's alignment is
- fixed to the `alignment` specified at initialization. The allocated memory
- region is not explicitly zero-initialized. This procedure returns a slice of
- the allocated memory region.
- */
@(require_results, no_sanitize_address)
buddy_allocator_alloc_bytes_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) {
	// A zero-size request is a successful no-op.
	if size == 0 {
		return nil, nil
	}
	actual_size := buddy_block_size_required(b, size)
	found := buddy_block_find_best(b.head, b.tail, actual_size)
	if found == nil {
		// Try to coalesce all the free buddy blocks and then search again
		buddy_block_coalescence(b.head, b.tail)
		found = buddy_block_find_best(b.head, b.tail, actual_size)
	}
	if found == nil {
		return nil, .Out_Of_Memory
	}
	found.is_free = false
	// User data starts one alignment step past the block header.
	data := ([^]byte)(found)[b.alignment:][:size]
	// The allocation may end exactly at the next block's header without
	// overlapping it (when `alignment + size` is already a power of two),
	// hence `<=` rather than the previous over-strict `<`.
	assert(cast(uintptr)raw_data(data)+cast(uintptr)size <= cast(uintptr)buddy_block_next(found), "Buddy_Allocator has made an allocation which overlaps a block header.")
	// ensure_poisoned(data)
	// sanitizer.address_unpoison(data)
	return data, nil
}
- /*
- Free memory back to the buddy allocator.
- This procedure frees the memory region allocated at pointer `ptr`.
If `ptr` is `nil`, this operation is a no-op. If `ptr` lies outside the
allocator's backing buffer, `.Invalid_Pointer` is returned. Otherwise the
block is marked free and neighboring free buddies are coalesced.
- */
@(no_sanitize_address)
buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Error {
	// Freeing nil is a no-op.
	if ptr != nil {
		// Reject pointers outside the backing buffer.
		if !(b.head <= ptr && ptr <= b.tail) {
			return .Invalid_Pointer
		}
		// The block header lives one alignment step before the user pointer.
		// NOTE(review): this assumes `ptr` is the exact start of an
		// allocation; interior pointers are not detected here.
		block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
		// sanitizer.address_poison(ptr, block.size)
		block.is_free = true
		// Eagerly merge neighboring free buddies.
		buddy_block_coalescence(b.head, b.tail)
	}
	return nil
}
- /*
- Free all memory back to the buddy allocator.
- */
@(no_sanitize_address)
buddy_allocator_free_all :: proc(b: ^Buddy_Allocator) {
	// Reconstruct the original backing slice from head/tail and re-initialize,
	// resetting the allocator to a single free block spanning the buffer.
	saved_alignment := b.alignment
	start := ([^]byte)(b.head)
	length := ptr_sub(([^]byte)(b.tail), start)
	buddy_allocator_init(b, start[:length], saved_alignment)
}
// Allocator-interface dispatch for `Buddy_Allocator`.
// NOTE: the `alignment` parameter is not used for Alloc; the allocator's
// fixed alignment (set at init) applies to every allocation.
@(no_sanitize_address)
buddy_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	b := (^Buddy_Allocator)(allocator_data)
	switch mode {
	case .Alloc:
		return buddy_allocator_alloc_bytes(b, uint(size))
	case .Alloc_Non_Zeroed:
		return buddy_allocator_alloc_bytes_non_zeroed(b, uint(size))
	case .Resize:
		// Resize is delegated to the generic helper, which allocates a new
		// region through this allocator and copies the old contents.
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b), loc)
	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b), loc)
	case .Free:
		return nil, buddy_allocator_free(b, old_memory)
	case .Free_All:
		buddy_allocator_free_all(b)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features, .Alloc, .Alloc_Non_Zeroed, .Resize, .Resize_Non_Zeroed, .Free, .Free_All, .Query_Info}
		}
		return nil, nil
	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			ptr := info.pointer
			if !(b.head <= ptr && ptr <= b.tail) {
				return nil, .Invalid_Pointer
			}
			// Recover the block header one alignment step before the pointer.
			// NOTE(review): assumes `info.pointer` is an allocation start.
			block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
			info.size = int(block.size)
			info.alignment = int(b.alignment)
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}
	return nil, nil
}
- // An allocator that keeps track of allocation sizes and passes it along to resizes.
- // This is useful if you are using a library that needs an equivalent of `realloc` but want to use
- // the Odin allocator interface.
- //
- // You want to wrap your allocator into this one if you are trying to use any allocator that relies
- // on the old size to work.
- //
- // The overhead of this allocator is an extra max(alignment, size_of(Header)) bytes allocated for each allocation, these bytes are
- // used to store the size and alignment.
Compat_Allocator :: struct {
	parent: Allocator, // The wrapped allocator that actually services all requests.
}
compat_allocator_init :: proc(rra: ^Compat_Allocator, allocator := context.allocator) {
	// Remember which allocator will service the wrapped requests.
	rra.parent = allocator
}
compat_allocator :: proc(rra: ^Compat_Allocator) -> Allocator {
	// Expose the wrapper through the standard `Allocator` interface.
	result: Allocator
	result.procedure = compat_allocator_proc
	result.data = rra
	return result
}
// Allocator-interface dispatch for `Compat_Allocator`: stores each
// allocation's size and alignment in a header placed just before the user
// pointer, so Free/Resize can reconstruct the old size for the parent.
compat_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int,
	location := #caller_location) -> (data: []byte, err: Allocator_Error) {
	// Per-allocation metadata kept in the padding before the user pointer.
	Header :: struct {
		size: int,
		alignment: int,
	}
	// Read the header that sits immediately before `ptr`.
	@(no_sanitize_address)
	get_unpoisoned_header :: #force_inline proc(ptr: rawptr) -> Header {
		header := ([^]Header)(ptr)[-1]
		// a := max(header.alignment, size_of(Header))
		// sanitizer.address_unpoison(rawptr(uintptr(ptr)-uintptr(a)), a)
		return header
	}
	rra := (^Compat_Allocator)(allocator_data)
	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		// Reserve max(alignment, header size) extra bytes so the header fits
		// and the user pointer keeps the requested alignment.
		a := max(alignment, size_of(Header))
		req_size := size + a
		assert(req_size >= 0, "overflow")
		allocation := rra.parent.procedure(rra.parent.data, mode, req_size, alignment, old_memory, old_size, location) or_return
		#no_bounds_check data = allocation[a:]
		([^]Header)(raw_data(data))[-1] = {
			size = size,
			alignment = alignment,
		}
		// sanitizer.address_poison(raw_data(allocation), a)
		return
	case .Free:
		// Recover the parent's base pointer and full size from the header.
		header := get_unpoisoned_header(old_memory)
		a := max(header.alignment, size_of(Header))
		orig_ptr := rawptr(uintptr(old_memory)-uintptr(a))
		orig_size := header.size + a
		return rra.parent.procedure(rra.parent.data, mode, orig_size, header.alignment, orig_ptr, orig_size, location)
	case .Resize, .Resize_Non_Zeroed:
		header := get_unpoisoned_header(old_memory)
		orig_a := max(header.alignment, size_of(Header))
		orig_ptr := rawptr(uintptr(old_memory)-uintptr(orig_a))
		orig_size := header.size + orig_a
		new_alignment := max(header.alignment, alignment)
		a := max(new_alignment, size_of(Header)) // BUGFIX: was `size_of(header)` (a value, not the `Header` type)
		req_size := size + a
		assert(req_size >= 0, "overflow") // BUGFIX: previously asserted on `size`, missing the padded total
		allocation := rra.parent.procedure(rra.parent.data, mode, req_size, new_alignment, orig_ptr, orig_size, location) or_return
		#no_bounds_check data = allocation[a:]
		([^]Header)(raw_data(data))[-1] = {
			size = size,
			alignment = new_alignment,
		}
		// sanitizer.address_poison(raw_data(allocation), a)
		return
	case .Free_All:
		return rra.parent.procedure(rra.parent.data, mode, size, alignment, old_memory, old_size, location)
	case .Query_Info:
		// Answered locally from the stored header; the parent is not consulted.
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			header := get_unpoisoned_header(info.pointer)
			info.size = header.size
			info.alignment = header.alignment
		}
		return
	case .Query_Features:
		data, err = rra.parent.procedure(rra.parent.data, mode, size, alignment, old_memory, old_size, location)
		// This wrapper always supports `.Query_Info` itself, regardless of
		// the parent (BUGFIX: the set was previously only amended when the
		// parent returned an error, and was dereferenced without a nil check).
		if set := (^Allocator_Mode_Set)(old_memory); set != nil {
			set^ += {.Query_Info}
		}
		return
	case: unreachable()
	}
}
|