12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535 |
- package mem
- import "base:intrinsics"
- import "base:runtime"
- // NOTE(Feoramund): Sanitizer usage in this package has been temporarily
- // disabled pending a thorough review per allocator, as ASan is particular
- // about the addresses and ranges it receives.
- //
- // In short, it keeps track only of 8-byte blocks. This can cause issues if an
- // allocator poisons an entire range but an allocation for less than 8 bytes is
- // desired or if the next allocation address would not be 8-byte aligned.
- //
- // This must be handled carefully on a per-allocator basis and some allocators
- // may not be able to participate.
- //
- // Please see the following link for more information:
- //
- // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm#mapping
- //
- // import "base:sanitizer"
- /*
- This procedure checks if a byte slice `range` is poisoned and makes sure the
- root address of the poison range is the base pointer of `range`.
- This can help guard against buggy allocators returning memory that they already returned.
- This has no effect if `-sanitize:address` is not enabled.
- */
- // @(disabled=.Address not_in ODIN_SANITIZER_FLAGS, private)
- // ensure_poisoned :: proc(range: []u8, loc := #caller_location) {
- // cond := sanitizer.address_region_is_poisoned(range) == raw_data(range)
- // // If this fails, we've overlapped an allocation and it's our fault.
- // ensure(cond, `This allocator has sliced a block of memory of which some part is not poisoned before returning.
- // This is a bug in the core library and should be reported to the Odin developers with a stack trace and minimal example code if possible.`, loc)
- // }
- /*
- This procedure checks if a byte slice `range` is not poisoned.
- This can help guard against buggy allocators resizing memory that they should not.
- This has no effect if `-sanitize:address` is not enabled.
- */
- // @(disabled=.Address not_in ODIN_SANITIZER_FLAGS, private)
- // ensure_not_poisoned :: proc(range: []u8, loc := #caller_location) {
- // cond := sanitizer.address_region_is_poisoned(range) == nil
- // // If this fails, we've tried to resize memory that is poisoned, which
- // // could be user error caused by an incorrect `old_memory` pointer.
- // ensure(cond, `This allocator has sliced a block of memory of which some part is poisoned before returning.
- // This may be a bug in the core library, or it could be user error due to an invalid pointer passed to a resize operation.
- // If after ensuring your own code is not responsible, report the problem to the Odin developers with a stack trace and minimal example code if possible.`, loc)
- // }
/*
Nil allocator.

The `nil` allocator reports success on every request while never actually
allocating: every allocation attempt yields `nil`. Useful where memory does
not need to be allocated, but an attempt to allocate is not an error.
*/
@(require_results)
nil_allocator :: proc() -> Allocator {
	result: Allocator
	result.procedure = nil_allocator_proc
	result.data = nil
	return result
}
// Allocator procedure for the `nil` allocator.
//
// Every mode is a no-op: all arguments are ignored and the procedure reports
// success with a `nil` slice and a `nil` error, so any allocation, resize,
// free, or query request "succeeds" without touching memory.
nil_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	return nil, nil
}
/*
Panic allocator.

An allocator that panics on any real allocation attempt. Use it where memory
must not be allocated, so that any attempt to do so is treated as an error.
*/
@(require_results)
panic_allocator :: proc() -> Allocator {
	a := Allocator {
		procedure = panic_allocator_proc,
		data      = nil,
	}
	return a
}
// Allocator procedure for the panic allocator.
//
// Any request that would actually allocate, resize, or free memory panics at
// the caller's location. Trivial no-op requests (zero-sized allocations and
// resizes, freeing `nil`) are tolerated and return `nil, nil`, as does
// `.Query_Features`, which reports that only feature querying is supported.
panic_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	switch mode {
	case .Alloc:
		// A zero-sized allocation allocates nothing, so it is allowed.
		if size > 0 {
			panic("mem: panic allocator, .Alloc called", loc=loc)
		}
	case .Alloc_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc)
		}
	case .Resize:
		// A resize to zero bytes allocates nothing, so it is allowed.
		if size > 0 {
			panic("mem: panic allocator, .Resize called", loc=loc)
		}
	case .Resize_Non_Zeroed:
		if size > 0 {
			panic("mem: panic allocator, .Resize_Non_Zeroed called", loc=loc)
		}
	case .Free:
		// Freeing `nil` is a universal no-op.
		if old_memory != nil {
			panic("mem: panic allocator, .Free called", loc=loc)
		}
	case .Free_All:
		panic("mem: panic allocator, .Free_All called", loc=loc)
	case .Query_Features:
		// For feature queries, `old_memory` points at the mode set to fill in.
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features}
		}
		return nil, nil
	case .Query_Info:
		panic("mem: panic allocator, .Query_Info called", loc=loc)
	}
	return nil, nil
}
/*
Arena allocator data.
*/
Arena :: struct {
	data:       []byte, // Backing buffer all allocations are carved from.
	offset:     int,    // Byte offset into `data` where the next allocation begins.
	peak_used:  int,    // High-water mark of `offset` over the arena's lifetime.
	temp_count: int,    // Number of outstanding `Arena_Temp_Memory` regions.
}
/*
Arena allocator.

The arena allocator (also called a linear, bump, or region allocator) serves
all allocations out of a single backing buffer, used contiguously from start
to end: each allocation occupies the next adjacent region. No per-allocation
metadata is kept, so individual allocations cannot be freed — only the whole
arena at once.

This makes arenas a good fit for frame-based temporary memory (e.g. games):
allocate freely during a frame, then free everything in one step at the end
of the frame. Since no temporary object outlives the frame, no lifetimes are
violated.
*/
@(require_results)
arena_allocator :: proc(arena: ^Arena) -> Allocator {
	result := Allocator {
		procedure = arena_allocator_proc,
		data      = arena,
	}
	return result
}
/*
Initialize an arena.

Sets `data` as the backing buffer of arena `a` and resets all bookkeeping
(offset, peak usage, temporary-region count) to zero.
*/
arena_init :: proc(a: ^Arena, data: []byte) {
	a^ = Arena{
		data = data,
	}
	// sanitizer.address_poison(a.data)
}
/*
Allocate memory from an arena.

Allocates `size` bytes from arena `a`, aligned on an `alignment`-byte
boundary. The memory is zero-initialized. Returns a pointer to the newly
allocated region.
*/
@(require_results)
arena_alloc :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (ptr: rawptr, err: Allocator_Error) {
	data: []byte
	data, err = arena_alloc_bytes(a, size, alignment, loc)
	ptr = raw_data(data)
	return
}
/*
Allocate memory from an arena.

Allocates `size` bytes from arena `a`, aligned on an `alignment`-byte
boundary. The memory is zero-initialized. Returns a slice of the newly
allocated region.
*/
@(require_results)
arena_alloc_bytes :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	result, err := arena_alloc_bytes_non_zeroed(a, size, alignment, loc)
	if result != nil {
		// Clear the fresh region so callers observe zeroed memory.
		zero_slice(result)
	}
	return result, err
}
/*
Allocate non-initialized memory from an arena.

Allocates `size` bytes from arena `a`, aligned on an `alignment`-byte
boundary. The memory is not explicitly zero-initialized. Returns a pointer
to the newly allocated region.
*/
@(require_results)
arena_alloc_non_zeroed :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (ptr: rawptr, err: Allocator_Error) {
	data: []byte
	data, err = arena_alloc_bytes_non_zeroed(a, size, alignment, loc)
	ptr = raw_data(data)
	return
}
/*
Allocate non-initialized memory from an arena.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment` from an arena `a`. The allocated memory is not explicitly
zero-initialized. This procedure returns a slice of the newly allocated
memory region, or `.Out_Of_Memory` if the backing buffer cannot fit it.
*/
@(require_results)
arena_alloc_bytes_non_zeroed :: proc(
	a: ^Arena,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location
) -> ([]byte, Allocator_Error) {
	if a.data == nil {
		panic("Allocation on uninitialized Arena allocator.", loc)
	}
	// `end` is the address of the first unused byte; align it forward to the
	// requested boundary and charge the alignment padding to the total size.
	#no_bounds_check end := &a.data[a.offset]
	ptr := align_forward(end, uintptr(alignment))
	total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))
	if a.offset + total_size > len(a.data) {
		return nil, .Out_Of_Memory
	}
	a.offset += total_size
	// Track the high-water mark for diagnostics.
	a.peak_used = max(a.peak_used, a.offset)
	result := byte_slice(ptr, size)
	// ensure_poisoned(result)
	// sanitizer.address_unpoison(result)
	return result, nil
}
/*
Free all memory back to the arena allocator.

Resets the arena's offset to zero so the whole backing buffer is reusable.
The buffer contents are not cleared; previously returned pointers become
invalid for further use.
*/
arena_free_all :: proc(a: ^Arena) {
	a.offset = 0
	// sanitizer.address_poison(a.data)
}
// Allocator procedure for the `Arena` allocator.
//
// Dispatches each `Allocator_Mode` to the corresponding `arena_*` procedure.
// Individual `.Free` and `.Query_Info` are not supported (no per-allocation
// metadata is kept) and report `.Mode_Not_Implemented`. Resizes are handled
// by the default alloc-copy helpers, which allocate anew from the arena.
arena_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size: int,
	alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	arena := cast(^Arena)allocator_data
	switch mode {
	case .Alloc:
		return arena_alloc_bytes(arena, size, alignment, loc)
	case .Alloc_Non_Zeroed:
		return arena_alloc_bytes_non_zeroed(arena, size, alignment, loc)
	case .Free:
		return nil, .Mode_Not_Implemented
	case .Free_All:
		arena_free_all(arena)
	case .Resize:
		// Default resize: allocate a new region and copy the old contents.
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena), loc)
	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena), loc)
	case .Query_Features:
		// `old_memory` points at the feature set to fill in, if provided.
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return nil, nil
}
/*
Temporary memory region of an `Arena` allocator.

Temporary memory regions of an arena act as "save-points" for the allocator.
When one is created, the subsequent allocations are done inside the temporary
memory region. When `end_arena_temp_memory` is called, the arena is rolled
back, and all of the memory that was allocated from the arena will be freed.
Multiple temporary memory regions can exist at the same time for an arena.
*/
Arena_Temp_Memory :: struct {
	arena:       ^Arena, // The arena this save-point belongs to.
	prev_offset: int,    // Arena offset at the time the region was begun.
}
/*
Start a temporary memory region.

Records the arena's current offset as a save-point. All allocations made
until the matching `end_arena_temp_memory` call are considered *inside* the
temporary region and will be rolled back by it.
*/
@(require_results)
begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
	a.temp_count += 1
	return Arena_Temp_Memory{
		arena       = a,
		prev_offset = a.offset,
	}
}
/*
End a temporary memory region.

Rolls the arena back to the offset recorded by `begin_arena_temp_memory`,
freeing every allocation made inside the temporary region.
*/
end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
	a := tmp.arena
	assert(a.offset >= tmp.prev_offset)
	assert(a.temp_count > 0)
	// sanitizer.address_poison(a.data[tmp.prev_offset:a.offset])
	a.offset = tmp.prev_offset
	a.temp_count -= 1
}
/* Preserved for compatibility: old names for the `Scratch` allocator API. */
Scratch_Allocator         :: Scratch
scratch_allocator_init    :: scratch_init
scratch_allocator_destroy :: scratch_destroy
/*
Scratch allocator data.
*/
Scratch :: struct {
	data:                 []byte,          // Backing buffer, used as a circular bump region.
	curr_offset:          int,             // Offset into `data` where the next allocation begins.
	prev_allocation:      rawptr,          // Aligned pointer of the most recent allocation (for in-place resize/free).
	prev_allocation_root: rawptr,          // Unaligned base of the most recent allocation (restored on free).
	backup_allocator:     Allocator,       // Used when a request does not fit in `data`.
	leaked_allocations:   [dynamic][]byte, // Allocations that fell through to `backup_allocator`.
}
/*
Scratch allocator.

Works much like an `Arena`: allocations are carved contiguously from a single
backing buffer. When an allocation does not fit in the remaining space, the
allocator wraps around to the start of the buffer, invalidating all previous
allocations. When an allocation does not fit in the buffer at all, it is
serviced by the backup allocator instead; such allocations are recorded in
`leaked_allocations` and a `Warning`-level log message is emitted.

A resize is performed in place only for the most recent allocation; anything
else is re-allocated so earlier allocations are not overwritten.

If no `backup_allocator` is given to `scratch_init`, the `context` allocator
manages `leaked_allocations`.
*/
@(require_results)
scratch_allocator :: proc(allocator: ^Scratch) -> Allocator {
	result := Allocator {
		procedure = scratch_allocator_proc,
		data      = allocator,
	}
	return result
}
/*
Initialize a scratch allocator.

Allocates a `size`-byte backing buffer from `backup_allocator` and resets all
bookkeeping. Returns any error from the backing allocation.
*/
scratch_init :: proc(s: ^Scratch, size: int, backup_allocator := context.allocator) -> Allocator_Error {
	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
	s.curr_offset          = 0
	s.prev_allocation      = nil
	s.prev_allocation_root = nil
	s.backup_allocator     = backup_allocator
	// Overflow allocations are tracked with the same backup allocator.
	s.leaked_allocations.allocator = backup_allocator
	// sanitizer.address_poison(s.data)
	return nil
}
/*
Free all data associated with a scratch allocator.

Unlike `scratch_free_all`, which only recycles the allocation space, this
releases everything the allocator owns: all leaked (backup) allocations, the
tracking array, and the backing buffer itself. The struct is zeroed afterward.
*/
scratch_destroy :: proc(s: ^Scratch) {
	if s == nil {
		return
	}
	for allocation in s.leaked_allocations {
		free_bytes(allocation, s.backup_allocator)
	}
	delete(s.leaked_allocations)
	// sanitizer.address_unpoison(s.data)
	delete(s.data, s.backup_allocator)
	s^ = {}
}
/*
Allocate memory from a scratch allocator.

Allocates `size` bytes aligned on an `alignment`-byte boundary. The memory is
zero-initialized. Returns a pointer to the allocated region.
*/
@(require_results)
scratch_alloc :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (ptr: rawptr, err: Allocator_Error) {
	data: []byte
	data, err = scratch_alloc_bytes(s, size, alignment, loc)
	ptr = raw_data(data)
	return
}
/*
Allocate memory from a scratch allocator.

Allocates `size` bytes aligned on an `alignment`-byte boundary. The memory is
zero-initialized. Returns a slice of the allocated region.
*/
@(require_results)
scratch_alloc_bytes :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	result, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
	if result != nil {
		// Clear the fresh region so callers observe zeroed memory.
		zero_slice(result)
	}
	return result, err
}
/*
Allocate non-initialized memory from a scratch allocator.

Allocates `size` bytes aligned on an `alignment`-byte boundary. The memory is
not explicitly zero-initialized. Returns a pointer to the allocated region.
*/
@(require_results)
scratch_alloc_non_zeroed :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (ptr: rawptr, err: Allocator_Error) {
	data: []byte
	data, err = scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
	ptr = raw_data(data)
	return
}
/*
Allocate non-initialized memory from a scratch allocator.

This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment`. The allocated memory region is not explicitly zero-initialized.
This procedure returns a slice of the allocated memory region.

If the request fits in the backing buffer it is bump-allocated (wrapping to the
start of the buffer when the remaining space is insufficient, which invalidates
earlier allocations); otherwise it falls through to the backup allocator and is
recorded in `leaked_allocations`.
*/
@(require_results)
scratch_alloc_bytes_non_zeroed :: proc(
	s: ^Scratch,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if s.data == nil {
		// Lazily initialize from the context allocator on first use.
		DEFAULT_BACKING_SIZE :: 4 * Megabyte
		// NOTE(review): this guard also panics when the context allocator is a
		// *different* scratch allocator (procedure matches but data differs) —
		// presumably a deliberately conservative check; confirm before changing.
		if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) {
			panic("Cyclic initialization of the scratch allocator with itself.", loc)
		}
		scratch_init(s, DEFAULT_BACKING_SIZE)
	}
	aligned_size := size
	if alignment > 1 {
		// It is possible to do this with less bytes, but this is the
		// mathematically simpler solution, and this being a Scratch allocator,
		// we don't need to be so strict about every byte.
		aligned_size += alignment - 1
	}
	if aligned_size <= len(s.data) {
		offset := uintptr(0)
		if s.curr_offset+aligned_size <= len(s.data) {
			offset = uintptr(s.curr_offset)
		} else {
			// The allocation will cause an overflow past the boundary of the
			// space available, so reset to the starting offset.
			offset = 0
		}
		start := uintptr(raw_data(s.data))
		ptr := rawptr(offset+start)
		// We keep track of the original base pointer without extra alignment
		// in order to later allow the free operation to work from that point.
		s.prev_allocation_root = ptr
		if !is_aligned(ptr, alignment) {
			ptr = align_forward(ptr, uintptr(alignment))
		}
		s.prev_allocation = ptr
		s.curr_offset = int(offset) + aligned_size
		result := byte_slice(ptr, size)
		// ensure_poisoned(result)
		// sanitizer.address_unpoison(result)
		return result, nil
	} else {
		// NOTE: No need to use `aligned_size` here, as the backup allocator will handle alignment for us.
		a := s.backup_allocator
		ptr, err := alloc_bytes_non_zeroed(size, alignment, a, loc)
		if err != nil {
			return ptr, err
		}
		// Track the overflow allocation so free/destroy can release it later.
		append(&s.leaked_allocations, ptr)
		if logger := context.logger; logger.lowest_level <= .Warning {
			if logger.procedure != nil {
				logger.procedure(logger.data, .Warning, "mem.Scratch resorted to backup_allocator" , logger.options, loc)
			}
		}
		return ptr, err
	}
}
/*
Free memory back to the scratch allocator.

This procedure frees the memory region allocated at pointer `ptr`.

If `ptr` is the most recent allocation, the allocator's offset is rolled back
to that allocation's unaligned base so the space is immediately reusable. If
`ptr` is a leaked (backup-allocated) region, it is freed via the backup
allocator. Any other pointer inside the backing buffer is a no-op; a pointer
the allocator does not own yields `.Invalid_Pointer`.
*/
scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Allocator_Error {
	if s.data == nil {
		panic("Free on an uninitialized Scratch allocator.", loc)
	}
	if ptr == nil {
		return nil
	}
	start := uintptr(raw_data(s.data))
	end := start + uintptr(len(s.data))
	old_ptr := uintptr(ptr)
	if s.prev_allocation == ptr {
		// Roll back to the unaligned base so alignment padding is reclaimed too.
		s.curr_offset = int(uintptr(s.prev_allocation_root) - start)
		// sanitizer.address_poison(s.data[s.curr_offset:])
		s.prev_allocation = nil
		s.prev_allocation_root = nil
		return nil
	}
	if start <= old_ptr && old_ptr < end {
		// NOTE(bill): Cannot free this pointer but it is valid
		return nil
	}
	if len(s.leaked_allocations) != 0 {
		for data, i in s.leaked_allocations {
			// BUG FIX: the previous code shadowed `ptr` with `raw_data(data)`
			// and compared it to itself (`ptr == ptr`, always true), freeing
			// the first leaked allocation no matter which pointer was passed.
			// Compare the leaked allocation's base against the freed pointer.
			if raw_data(data) == ptr {
				free_bytes(data, s.backup_allocator, loc)
				ordered_remove(&s.leaked_allocations, i, loc)
				return nil
			}
		}
	}
	return .Invalid_Pointer
}
/*
Free all memory back to the scratch allocator.

Resets the allocation offset and releases every leaked (backup-allocated)
region. The backing buffer itself is kept for reuse.
*/
scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) {
	s.curr_offset = 0
	s.prev_allocation = nil
	for allocation in s.leaked_allocations {
		free_bytes(allocation, s.backup_allocator, loc)
	}
	clear(&s.leaked_allocations)
	// sanitizer.address_poison(s.data)
}
/*
Resize an allocation owned by a scratch allocator.

Resizes the memory region at `old_memory` of size `old_size` to `size` bytes
with the given `alignment`. Newly allocated memory, if any, is
zero-initialized.

If `old_memory` is `nil`, this acts like `scratch_alloc()`. If `size` is 0,
this acts like `scratch_free()`. Returns a pointer to the resized region.
*/
@(require_results)
scratch_resize :: proc(
	s: ^Scratch,
	old_memory: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location
) -> (ptr: rawptr, err: Allocator_Error) {
	data: []byte
	data, err = scratch_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
	ptr = raw_data(data)
	return
}
/*
Resize an allocation owned by a scratch allocator.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is
zero-initialized.

If `old_data` is `nil`, this procedure acts just like `scratch_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary
specified by `alignment`. If `size` is 0, this procedure acts just like
`scratch_free()`, freeing the memory region. This procedure returns the slice
of the resized memory region.
*/
@(require_results)
scratch_resize_bytes :: proc(
	s: ^Scratch,
	old_data: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location
) -> ([]byte, Allocator_Error) {
	bytes, err := scratch_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
	if bytes != nil && size > len(old_data) {
		// BUG FIX: previously this zeroed `bytes[size:]`, which is always the
		// empty tail of a `size`-length slice and therefore a no-op, leaving
		// the grown portion uninitialized. Zero the newly added bytes instead,
		// i.e. everything past the old length.
		zero_slice(bytes[len(old_data):])
	}
	return bytes, err
}
/*
Resize an allocation owned by a scratch allocator, without zero-initialization.

Resizes the memory region at `old_memory` of size `old_size` to `size` bytes
with the given `alignment`. Newly allocated memory, if any, is not explicitly
zero-initialized.

If `old_memory` is `nil`, this acts like `scratch_alloc()`. If `size` is 0,
this acts like `scratch_free()`. Returns a pointer to the resized region.
*/
@(require_results)
scratch_resize_non_zeroed :: proc(
	s: ^Scratch,
	old_memory: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location
) -> (ptr: rawptr, err: Allocator_Error) {
	data: []byte
	data, err = scratch_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
	ptr = raw_data(data)
	return
}
/*
Resize an allocation owned by a scratch allocator.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.

If `old_data` is `nil`, this procedure acts just like `scratch_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary
specified by `alignment`. If `size` is 0, this procedure acts just like
`scratch_free()`, freeing the memory region. This procedure returns the slice
of the resized memory region.
*/
@(require_results)
scratch_resize_bytes_non_zeroed :: proc(
	s: ^Scratch,
	old_data: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location
) -> ([]byte, Allocator_Error) {
	old_memory := raw_data(old_data)
	old_size := len(old_data)
	if s.data == nil {
		// Lazily initialize from the context allocator on first use.
		DEFAULT_BACKING_SIZE :: 4 * Megabyte
		// NOTE(review): this guard also panics when the context allocator is a
		// *different* scratch allocator — presumably deliberately conservative.
		if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) {
			panic("Cyclic initialization of the scratch allocator with itself.", loc)
		}
		scratch_init(s, DEFAULT_BACKING_SIZE)
	}
	begin := uintptr(raw_data(s.data))
	end := begin + uintptr(len(s.data))
	old_ptr := uintptr(old_memory)
	// We can only sanely resize the last allocation; to do otherwise may
	// overwrite memory that could very well just have been allocated.
	//
	// Also, the alignments must match, otherwise we must re-allocate to
	// guarantee the user's request.
	if s.prev_allocation == old_memory && is_aligned(old_memory, alignment) && old_ptr+uintptr(size) < end {
		// In-place fast path: just move the bump offset to the new end.
		// ensure_not_poisoned(old_data)
		// sanitizer.address_poison(old_memory)
		s.curr_offset = int(old_ptr-begin)+size
		result := byte_slice(old_memory, size)
		// sanitizer.address_unpoison(result)
		return result, nil
	}
	// Slow path: allocate a fresh region, copy the old contents, then free
	// the old region (a no-op unless it was the last or a leaked allocation).
	data, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
	if err != nil {
		return data, err
	}
	runtime.copy(data, byte_slice(old_memory, old_size))
	err = scratch_free(s, old_memory, loc)
	return data, err
}
/*
Allocator procedure backing `scratch_allocator()`.

Dispatches each `Allocator_Mode` to the corresponding `scratch_*` procedure.
`Query_Info` is not implemented.
*/
scratch_allocator_proc :: proc(
    allocator_data: rawptr,
    mode: Allocator_Mode,
    size, alignment: int,
    old_memory: rawptr,
    old_size: int,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    s := (^Scratch)(allocator_data)
    // NOTE: the previous `size := size` shadow was never mutated; removed.
    switch mode {
    case .Alloc:
        return scratch_alloc_bytes(s, size, alignment, loc)
    case .Alloc_Non_Zeroed:
        return scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
    case .Free:
        return nil, scratch_free(s, old_memory, loc)
    case .Free_All:
        scratch_free_all(s, loc)
    case .Resize:
        return scratch_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
    case .Resize_Non_Zeroed:
        return scratch_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory)
        if set != nil {
            set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
        }
        return nil, nil
    case .Query_Info:
        return nil, .Mode_Not_Implemented
    }
    return nil, nil
}
/*
Stack allocator data.
*/
Stack :: struct {
    data:        []byte, // Backing buffer that all allocations are carved from.
    prev_offset: int,    // Offset at which the most recent allocation's padded region begins.
    curr_offset: int,    // Offset of the first free byte (top of the stack).
    peak_used:   int,    // High-water mark of `curr_offset`.
}
/*
Header of a stack allocation, stored immediately before each user region.
*/
Stack_Allocation_Header :: struct {
    prev_offset: int, // `Stack.prev_offset` as it was before this allocation (for LIFO unwind).
    padding:     int, // Bytes between the previous top of stack and the user region.
}
/*
Stack allocator.

The stack allocator hands out memory linearly from a backing buffer, start to
end, placing each allocation immediately after the previous one.

Unlike the arena allocator, it keeps per-allocation metadata and enforces a
strict LIFO freeing discipline: only the most recent allocation may be freed,
after which the one before it becomes freeable, and so on.

The metadata lives in a header placed right before each allocated region; each
header records the offset of the allocation that preceded it.
*/
@(require_results)
stack_allocator :: proc(stack: ^Stack) -> Allocator {
    return Allocator{data = stack, procedure = stack_allocator_proc}
}
/*
Initialize a stack allocator.

Sets `data` as the backing buffer of `s` and resets all bookkeeping offsets.
*/
stack_init :: proc(s: ^Stack, data: []byte) {
    // Struct literal zeroes prev_offset, curr_offset and peak_used.
    s^ = Stack{data = data}
    // sanitizer.address_poison(data)
}
/*
Allocate memory from a stack allocator.

Allocates `size` bytes aligned to `alignment`. The memory is
zero-initialized. Returns a pointer to the allocated region.
*/
@(require_results)
stack_alloc :: proc(
    s: ^Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location
) -> (rawptr, Allocator_Error) {
    block, err := stack_alloc_bytes(s, size, alignment, loc)
    return raw_data(block), err
}
/*
Allocate memory from a stack allocator.

Allocates `size` bytes aligned to `alignment`. The memory is
zero-initialized. Returns a slice of the allocated region.
*/
@(require_results)
stack_alloc_bytes :: proc(
    s: ^Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location
) -> ([]byte, Allocator_Error) {
    block, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    if block != nil {
        // The non-zeroed variant did the placement work; clear it here.
        zero_slice(block)
    }
    return block, err
}
/*
Allocate memory from a stack allocator.

Allocates `size` bytes aligned to `alignment`, without explicit
zero-initialization. Returns a pointer to the allocated region.
*/
@(require_results)
stack_alloc_non_zeroed :: proc(
    s: ^Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location
) -> (rawptr, Allocator_Error) {
    block, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    return raw_data(block), err
}
/*
Allocate memory from a stack allocator.

This procedure allocates `size` bytes of memory, aligned to the boundary
specified by `alignment`. The allocated memory is not explicitly
zero-initialized. This procedure returns the slice of the allocated memory.
*/
@(require_results, no_sanitize_address)
stack_alloc_bytes_non_zeroed :: proc(
    s: ^Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location
) -> ([]byte, Allocator_Error) {
    if s.data == nil {
        panic("Allocation on an uninitialized Stack allocator.", loc)
    }
    // Address one past the current top of the stack.
    curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
    // Padding covers both the alignment adjustment and room for the
    // allocation header that precedes the user region.
    padding := calc_padding_with_header(
        curr_addr,
        uintptr(alignment),
        size_of(Stack_Allocation_Header),
    )
    if s.curr_offset + padding + size > len(s.data) {
        return nil, .Out_Of_Memory
    }
    // Chain the previous allocation's start offset through the header so
    // `stack_free` can unwind allocations in LIFO order.
    old_offset := s.prev_offset
    s.prev_offset = s.curr_offset
    s.curr_offset += padding
    next_addr := curr_addr + uintptr(padding)
    header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
    header.padding = padding
    header.prev_offset = old_offset
    s.curr_offset += size
    s.peak_used = max(s.peak_used, s.curr_offset)
    result := byte_slice(rawptr(next_addr), size)
    // ensure_poisoned(result)
    // sanitizer.address_unpoison(result)
    return result, nil
}
/*
Free memory back to the stack allocator.

This procedure frees the memory region starting at `old_memory` to the stack.
Only the most recent allocation may be freed; an out-of-order free returns the
`.Invalid_Pointer` error. Freeing `nil` is a no-op.
*/
stack_free :: proc(
    s: ^Stack,
    old_memory: rawptr,
    loc := #caller_location,
) -> (Allocator_Error) {
    if s.data == nil {
        panic("Free on an uninitialized Stack allocator.", loc)
    }
    if old_memory == nil {
        return nil
    }
    start := uintptr(raw_data(s.data))
    end := start + uintptr(len(s.data))
    curr_addr := uintptr(old_memory)
    if !(start <= curr_addr && curr_addr < end) {
        panic("Out of bounds memory address passed to Stack allocator. (free)", loc)
    }
    if curr_addr >= start+uintptr(s.curr_offset) {
        // NOTE(bill): Allow double frees
        return nil
    }
    header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
    // Offset at which this allocation's padded region began.
    old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
    if old_offset != s.prev_offset {
        // Not the most recent allocation: strict LIFO order is required.
        return .Invalid_Pointer
    }
    // Pop the allocation: restore the prior top recorded in its header.
    s.prev_offset = header.prev_offset
    // sanitizer.address_poison(s.data[old_offset:s.curr_offset])
    s.curr_offset = old_offset
    return nil
}
/*
Free all memory back to the stack allocator.

Resets both offsets, reclaiming the entire backing buffer at once.
*/
stack_free_all :: proc(s: ^Stack, loc := #caller_location) {
    s.curr_offset = 0
    s.prev_offset = 0
    // sanitizer.address_poison(s.data)
}
/*
Resize an allocation owned by a stack allocator.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size` and alignment `alignment`. The
newly allocated memory, if any, is zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`.
If `size` is 0, this procedure acts just like `stack_free()`.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
stack_resize :: proc(
    s: ^Stack,
    old_memory: rawptr,
    old_size: int,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    // Forward `loc` so panics and errors report the caller's location rather
    // than this wrapper's (it was previously dropped).
    bytes, err := stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
    return raw_data(bytes), err
}
/*
Resize an allocation owned by a stack allocator.

Resizes the memory region `old_data` to `size` bytes aligned to `alignment`.
Newly exposed memory, if any, is zero-initialized.

A `nil` `old_data` behaves like `stack_alloc()`; a `size` of 0 behaves like
`stack_free()`. Returns a slice of the resized region.
*/
@(require_results)
stack_resize_bytes :: proc(
    s: ^Stack,
    old_data: []byte,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    result, err := stack_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
    if err == nil {
        switch {
        case old_data == nil:
            // Fresh allocation: clear the whole region.
            zero_slice(result)
        case size > len(old_data):
            // Grown: clear only the new tail; the head keeps the old data.
            zero_slice(result[len(old_data):])
        }
    }
    return result, err
}
/*
Resize an allocation owned by a stack allocator, without zero-initialization.

This procedure resizes a memory region defined by its location `old_memory`
and its size `old_size` to have a size `size` and alignment `alignment`. The
newly allocated memory, if any, is not explicitly zero-initialized.

If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`.
If `size` is 0, this procedure acts just like `stack_free()`.

This procedure returns the pointer to the resized memory region.
*/
@(require_results)
stack_resize_non_zeroed :: proc(
    s: ^Stack,
    old_memory: rawptr,
    old_size: int,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    // Forward `loc` so panics and errors report the caller's location rather
    // than this wrapper's (it was previously dropped).
    bytes, err := stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
    return raw_data(bytes), err
}
/*
Resize an allocation owned by a stack allocator, without zero-initialization.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.

If `old_data` is `nil`, this procedure acts just like `stack_alloc()`.
If `size` is 0, this procedure acts just like `stack_free()`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
stack_resize_bytes_non_zeroed :: proc(
    s: ^Stack,
    old_data: []byte,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    old_memory := raw_data(old_data)
    old_size := len(old_data)
    if s.data == nil {
        panic("Resize on an uninitialized Stack allocator.", loc)
    }
    if old_memory == nil {
        return stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    }
    if size == 0 {
        return nil, stack_free(s, old_memory, loc)
    }
    start := uintptr(raw_data(s.data))
    end := start + uintptr(len(s.data))
    curr_addr := uintptr(old_memory)
    if !(start <= curr_addr && curr_addr < end) {
        // Pass `loc` so the panic reports the caller (was missing).
        panic("Out of bounds memory address passed to Stack allocator. (resize)", loc)
    }
    if curr_addr >= start+uintptr(s.curr_offset) {
        // NOTE(bill): Allow double frees
        return nil, nil
    }
    if uintptr(old_memory) & uintptr(alignment-1) != 0 {
        // A different alignment has been requested and the current address
        // does not satisfy it: relocate.
        data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
        if err == nil {
            runtime.copy(data, byte_slice(old_memory, old_size))
            // sanitizer.address_poison(old_memory)
        }
        return data, err
    }
    if old_size == size {
        return byte_slice(old_memory, size), nil
    }
    header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
    old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
    // In-place resizing is only valid for the most recent allocation, which is
    // the one whose padded region starts at `s.prev_offset` — the same check
    // `stack_free` uses. (The header's `prev_offset` holds the *previous*
    // allocation's offset, so comparing against it misidentified the top.)
    if old_offset != s.prev_offset {
        data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
        if err == nil {
            runtime.copy(data, byte_slice(old_memory, old_size))
            // sanitizer.address_poison(old_memory)
        }
        return data, err
    }
    // In-place resize of the top allocation.
    old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
    assert(old_memory_size == uintptr(old_size))
    diff := size - old_size
    // Growing in place must not run past the backing buffer.
    if diff > 0 && s.curr_offset + diff > len(s.data) {
        return nil, .Out_Of_Memory
    }
    s.curr_offset += diff // works for smaller sizes too
    if diff > 0 {
        // Zero the newly exposed tail, which begins at `old_size` (zeroing at
        // `diff` cleared the wrong bytes).
        zero(rawptr(curr_addr + uintptr(old_size)), diff)
    }
    result := byte_slice(old_memory, size)
    // ensure_poisoned(result)
    // sanitizer.address_unpoison(result)
    return result, nil
}
/*
Allocator procedure backing `stack_allocator()`.

Dispatches each `Allocator_Mode` to the corresponding `stack_*` procedure.
Returns `.Invalid_Argument` when the stack was never initialized.
*/
stack_allocator_proc :: proc(
    allocator_data: rawptr,
    mode: Allocator_Mode,
    size: int,
    alignment: int,
    old_memory: rawptr,
    old_size: int,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    stack := (^Stack)(allocator_data)
    if stack.data == nil {
        return nil, .Invalid_Argument
    }
    old := byte_slice(old_memory, old_size)
    switch mode {
    case .Alloc:
        return stack_alloc_bytes(stack, size, alignment, loc)
    case .Alloc_Non_Zeroed:
        return stack_alloc_bytes_non_zeroed(stack, size, alignment, loc)
    case .Free:
        return nil, stack_free(stack, old_memory, loc)
    case .Free_All:
        stack_free_all(stack, loc)
    case .Resize:
        return stack_resize_bytes(stack, old, size, alignment, loc)
    case .Resize_Non_Zeroed:
        return stack_resize_bytes_non_zeroed(stack, old, size, alignment, loc)
    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory)
        if set != nil {
            set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
        }
        return nil, nil
    case .Query_Info:
        return nil, .Mode_Not_Implemented
    }
    return nil, nil
}
/*
Allocation header of the small stack allocator: a single byte stored
immediately before each user region.
*/
Small_Stack_Allocation_Header :: struct {
    padding: u8, // Bytes between the previous top of stack and the user region.
}
/*
Small stack allocator data.
*/
Small_Stack :: struct {
    data:      []byte, // Backing buffer that all allocations are carved from.
    offset:    int,    // Offset of the first free byte (top of the stack).
    peak_used: int,    // High-water mark of `offset`.
}
/*
Initialize a small stack allocator.

Sets `data` as the backing buffer of `s` and resets all bookkeeping.
*/
small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
    // Struct literal zeroes offset and peak_used.
    s^ = Small_Stack{data = data}
    // sanitizer.address_poison(data)
}
/*
Small stack allocator.

Behaves like the `Stack` allocator but with an extremely small (one-byte)
allocation header. Unlike the stack allocator it permits out-of-order frees,
with the caveat that every allocation made *after* the freed one is
invalidated: subsequent allocations will start overwriting the memory that
followed the freed region.

Memory is handed out linearly in the backing buffer, start to end, each
allocation immediately after the previous one. Each allocation is preceded by
a header recording the number of padding bytes between it and the end of the
previous allocation.
*/
@(require_results)
small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
    return Allocator{data = stack, procedure = small_stack_allocator_proc}
}
/*
Allocate memory from a small stack allocator.

Allocates `size` bytes aligned to `alignment`. The memory is
zero-initialized. Returns a pointer to the allocated region.
*/
@(require_results)
small_stack_alloc :: proc(
    s: ^Small_Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    region, err := small_stack_alloc_bytes(s, size, alignment, loc)
    return raw_data(region), err
}
/*
Allocate memory from a small stack allocator.

Allocates `size` bytes aligned to `alignment`. The memory is
zero-initialized. Returns a slice of the allocated region.
*/
@(require_results)
small_stack_alloc_bytes :: proc(
    s: ^Small_Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    region, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    if region != nil {
        // The non-zeroed variant did the placement work; clear it here.
        zero_slice(region)
    }
    return region, err
}
/*
Allocate memory from a small stack allocator.

Allocates `size` bytes aligned to `alignment`, without explicit
zero-initialization. Returns a pointer to the allocated region.
*/
@(require_results)
small_stack_alloc_non_zeroed :: proc(
    s: ^Small_Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    region, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    return raw_data(region), err
}
/*
Allocate memory from a small stack allocator.

This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is not explicitly zero-initialized. This
procedure returns a slice of the allocated memory region.
*/
@(require_results, no_sanitize_address)
small_stack_alloc_bytes_non_zeroed :: proc(
    s: ^Small_Stack,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    if s.data == nil {
        panic("Allocation on an uninitialized Small Stack allocator.", loc)
    }
    alignment := alignment
    // Clamp the alignment so the resulting padding always fits in the
    // header's u8 `padding` field.
    // NOTE(review): this reads the padding width from Stack_Allocation_Header
    // (int → cap of 32) rather than Small_Stack_Allocation_Header — confirm
    // whether the shared cap is intentional.
    alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)
    curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
    padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
    if s.offset + padding + size > len(s.data) {
        return nil, .Out_Of_Memory
    }
    s.offset += padding
    next_addr := curr_addr + uintptr(padding)
    // The one-byte header immediately precedes the user region and records how
    // much padding to skip back over when freeing.
    header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
    header.padding = cast(u8)padding
    // We must poison the header, no matter what its state is, because there
    // may have been an out-of-order free before this point.
    // sanitizer.address_poison(header)
    s.offset += size
    s.peak_used = max(s.peak_used, s.offset)
    result := byte_slice(rawptr(next_addr), size)
    // NOTE: We cannot ensure the poison state of this allocation, because this
    // allocator allows out-of-order frees with overwriting.
    // sanitizer.address_unpoison(result)
    return result, nil
}
/*
Free memory back to the small stack allocator.

This procedure frees the memory region starting at `old_memory`, rolling the
stack offset back to where that allocation's padding began. Out-of-order frees
are permitted, but they invalidate every allocation made after the freed one
(subsequent allocations will overwrite that memory). Freeing `nil` is a no-op.
(The previous comment here described allocation; it was copy-pasted from the
alloc procedures.)
*/
small_stack_free :: proc(
    s: ^Small_Stack,
    old_memory: rawptr,
    loc := #caller_location,
) -> Allocator_Error {
    if s.data == nil {
        panic("Free on an uninitialized Small Stack allocator.", loc)
    }
    if old_memory == nil {
        return nil
    }
    start := uintptr(raw_data(s.data))
    end := start + uintptr(len(s.data))
    curr_addr := uintptr(old_memory)
    if !(start <= curr_addr && curr_addr < end) {
        panic("Out of bounds memory address passed to Small Stack allocator. (free)", loc)
    }
    if curr_addr >= start+uintptr(s.offset) {
        // NOTE(bill): Allow double frees
        return nil
    }
    header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
    // Roll back to the offset where this allocation's padding began.
    old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
    // sanitizer.address_poison(s.data[old_offset:s.offset])
    s.offset = old_offset
    return nil
}
/*
Free all memory back to the small stack allocator.

Resets the offset, reclaiming the entire backing buffer at once.
*/
small_stack_free_all :: proc(s: ^Small_Stack) {
    s.offset = 0
    // sanitizer.address_poison(s.data)
}
/*
Resize an allocation owned by a small stack allocator.

Resizes the region described by `old_memory` and `old_size` to `size` bytes
aligned to `alignment`. Newly exposed memory, if any, is zero-initialized.

A `nil` `old_memory` behaves like `small_stack_alloc()`; a `size` of 0 behaves
like `small_stack_free()`. Returns a pointer to the resized region.
*/
@(require_results)
small_stack_resize :: proc(
    s: ^Small_Stack,
    old_memory: rawptr,
    old_size: int,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    old_region := byte_slice(old_memory, old_size)
    resized, err := small_stack_resize_bytes(s, old_region, size, alignment, loc)
    return raw_data(resized), err
}
/*
Resize an allocation owned by a small stack allocator.

Resizes the memory region `old_data` to `size` bytes aligned to `alignment`.
Newly exposed memory, if any, is zero-initialized.

A `nil` `old_data` behaves like `small_stack_alloc()`; a `size` of 0 behaves
like `small_stack_free()`. Returns a slice of the resized region.
*/
@(require_results)
small_stack_resize_bytes :: proc(
    s: ^Small_Stack,
    old_data: []byte,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    result, err := small_stack_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
    if result != nil {
        switch {
        case old_data == nil:
            // Fresh allocation: clear the whole region.
            zero_slice(result)
        case size > len(old_data):
            // Grown: clear only the new tail; the head keeps the old data.
            zero_slice(result[len(old_data):])
        }
    }
    return result, err
}
/*
Resize an allocation owned by a small stack allocator, without
zero-initialization.

Resizes the region described by `old_memory` and `old_size` to `size` bytes
aligned to `alignment`. Newly allocated memory, if any, is left uninitialized.

A `nil` `old_memory` behaves like `small_stack_alloc()`; a `size` of 0 behaves
like `small_stack_free()`. Returns a pointer to the resized region.
*/
@(require_results)
small_stack_resize_non_zeroed :: proc(
    s: ^Small_Stack,
    old_memory: rawptr,
    old_size: int,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> (rawptr, Allocator_Error) {
    old_region := byte_slice(old_memory, old_size)
    resized, err := small_stack_resize_bytes_non_zeroed(s, old_region, size, alignment, loc)
    return raw_data(resized), err
}
/*
Resize an allocation owned by a small stack allocator, without
zero-initialization.

This procedure resizes a memory region specified by `old_data` to have a size
`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.

If `old_data` is `nil`, this procedure acts just like `small_stack_alloc()`.
If `size` is 0, this procedure acts just like `small_stack_free()`.

This procedure returns the slice of the resized memory region.
*/
@(require_results)
small_stack_resize_bytes_non_zeroed :: proc(
    s: ^Small_Stack,
    old_data: []byte,
    size: int,
    alignment := DEFAULT_ALIGNMENT,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    if s.data == nil {
        panic("Resize on an uninitialized Small Stack allocator.", loc)
    }
    old_memory := raw_data(old_data)
    old_size := len(old_data)
    alignment := alignment
    // Same alignment clamp as the alloc path, so padding fits the u8 header.
    alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)
    if old_memory == nil {
        return small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    }
    if size == 0 {
        return nil, small_stack_free(s, old_memory, loc)
    }
    start := uintptr(raw_data(s.data))
    end := start + uintptr(len(s.data))
    curr_addr := uintptr(old_memory)
    if !(start <= curr_addr && curr_addr < end) {
        panic("Out of bounds memory address passed to Small Stack allocator. (resize)", loc)
    }
    if curr_addr >= start+uintptr(s.offset) {
        // NOTE(bill): Treat as a double free
        return nil, nil
    }
    if uintptr(old_memory) & uintptr(alignment-1) != 0 {
        // A different alignment has been requested and the current address
        // does not satisfy it: relocate and copy.
        data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
        if err == nil {
            runtime.copy(data, byte_slice(old_memory, old_size))
            // sanitizer.address_poison(old_memory)
        }
        return data, err
    }
    if old_size == size {
        // Same size: nothing to move.
        result := byte_slice(old_memory, size)
        // sanitizer.address_unpoison(result)
        return result, nil
    }
    // Any size change relocates: allocate a new region and copy over as many
    // bytes as fit (runtime.copy copies min(len(dst), len(src))).
    data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
    if err == nil {
        runtime.copy(data, byte_slice(old_memory, old_size))
    }
    return data, err
}
/*
Allocator procedure backing `small_stack_allocator()`.

Dispatches each `Allocator_Mode` to the corresponding `small_stack_*`
procedure. Returns `.Invalid_Argument` when the stack was never initialized.
*/
small_stack_allocator_proc :: proc(
    allocator_data: rawptr,
    mode: Allocator_Mode,
    size, alignment: int,
    old_memory: rawptr,
    old_size: int,
    loc := #caller_location,
) -> ([]byte, Allocator_Error) {
    stack := (^Small_Stack)(allocator_data)
    if stack.data == nil {
        return nil, .Invalid_Argument
    }
    old := byte_slice(old_memory, old_size)
    switch mode {
    case .Alloc:
        return small_stack_alloc_bytes(stack, size, alignment, loc)
    case .Alloc_Non_Zeroed:
        return small_stack_alloc_bytes_non_zeroed(stack, size, alignment, loc)
    case .Free:
        return nil, small_stack_free(stack, old_memory, loc)
    case .Free_All:
        small_stack_free_all(stack)
    case .Resize:
        return small_stack_resize_bytes(stack, old, size, alignment, loc)
    case .Resize_Non_Zeroed:
        return small_stack_resize_bytes_non_zeroed(stack, old, size, alignment, loc)
    case .Query_Features:
        set := (^Allocator_Mode_Set)(old_memory)
        if set != nil {
            set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
        }
        return nil, nil
    case .Query_Info:
        return nil, .Mode_Not_Implemented
    }
    return nil, nil
}
/*
Preserved for compatibility: `Dynamic_Pool` was renamed to `Dynamic_Arena`.
These aliases keep the old `dynamic_pool_*` API working.
*/
Dynamic_Pool :: Dynamic_Arena
DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT
DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT
dynamic_pool_allocator_proc :: dynamic_arena_allocator_proc
dynamic_pool_free_all :: dynamic_arena_free_all
dynamic_pool_reset :: dynamic_arena_reset
dynamic_pool_alloc_bytes :: dynamic_arena_alloc_bytes
dynamic_pool_alloc :: dynamic_arena_alloc
dynamic_pool_init :: dynamic_arena_init
dynamic_pool_allocator :: dynamic_arena_allocator
dynamic_pool_destroy :: dynamic_arena_destroy
/*
Default block size for the dynamic arena, in bytes.
*/
DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT :: 65536
/*
Default out-of-band threshold for the dynamic arena, in bytes.
*/
DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT :: 6554
/*
Dynamic arena allocator data.
*/
Dynamic_Arena :: struct {
    block_size:           int,            // Size of each normal block, in bytes.
    out_band_size:        int,            // Threshold above which an allocation gets its own block.
    alignment:            int,            // Alignment applied to allocations.
    unused_blocks:        [dynamic]rawptr, // Blocks retained for reuse.
    used_blocks:          [dynamic]rawptr, // Blocks that have been filled.
    out_band_allocations: [dynamic]rawptr, // Dedicated out-of-band blocks.
    current_block:        rawptr,          // Block currently being filled.
    current_pos:          rawptr,          // Next free position in the current block.
    bytes_left:           int,             // Free bytes remaining in the current block.
    block_allocator:      Allocator,       // Allocator used to obtain blocks.
}
/*
Initialize a dynamic arena.

Configures `pool` so that arena blocks come from `block_allocator`, while the
internal bookkeeping arrays (used/unused blocks and out-of-band allocations)
come from `array_allocator`. Blocks default to `block_size` bytes, allocations
of `out_band_size` or more are handled out-of-band, and all allocations are
aligned to `alignment`.
*/
dynamic_arena_init :: proc(
    pool: ^Dynamic_Arena,
    block_allocator := context.allocator,
    array_allocator := context.allocator,
    block_size := DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT,
    out_band_size := DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT,
    alignment := DEFAULT_ALIGNMENT,
) {
    // Sizing and alignment configuration.
    pool.alignment = alignment
    pool.block_size = block_size
    pool.out_band_size = out_band_size
    // Blocks come from `block_allocator`; bookkeeping from `array_allocator`.
    pool.block_allocator = block_allocator
    pool.unused_blocks.allocator = array_allocator
    pool.used_blocks.allocator = array_allocator
    pool.out_band_allocations.allocator = array_allocator
}
/*
Dynamic arena allocator.

The dynamic arena serves allocations out of fixed-size blocks obtained
on demand from its block allocator, filling each block contiguously from start
to end, much like `Arena`. When a request does not fit in the space left in
the current block and is smaller than the configured out-band size, a fresh
block is started and the request is served from it. Requests larger than the
out-band size instead get a dedicated block sized to fit — an *out-band
allocation* — kept separately from the normal blocks.

As with `Arena`, individual objects cannot be freed.
*/
@(require_results)
dynamic_arena_allocator :: proc(a: ^Dynamic_Arena) -> Allocator {
    return Allocator{data = a, procedure = dynamic_arena_allocator_proc}
}
/*
Destroy a dynamic arena.

Frees every allocation the arena made — including retained unused blocks —
then releases the bookkeeping arrays and zeroes the arena struct itself,
leaving it in an uninitialized state.
*/
dynamic_arena_destroy :: proc(a: ^Dynamic_Arena) {
    dynamic_arena_free_all(a)
    delete(a.out_band_allocations)
    delete(a.used_blocks)
    delete(a.unused_blocks)
    zero(a, size_of(a^))
}
/*
Retire the current block (if any) and make a fresh block current.
A block is taken from the unused-block list when available; otherwise one is
allocated from `block_allocator`. On allocation failure the arena state is
left untouched and the error is returned.
*/
@(private="file")
_dynamic_arena_cycle_new_block :: proc(a: ^Dynamic_Arena, loc := #caller_location) -> (err: Allocator_Error) {
	if a.block_allocator.procedure == nil {
		panic("You must call `dynamic_arena_init` on a Dynamic Arena before using it.", loc)
	}
	// Acquire the new block FIRST. The original updated arena state before
	// checking for allocation failure, which left `bytes_left` at
	// `block_size` with a nil `current_pos` — a subsequent allocation would
	// then hand out a slice built from a nil pointer.
	new_block: rawptr
	if len(a.unused_blocks) > 0 {
		new_block = pop(&a.unused_blocks)
	} else {
		data: []byte
		data, err = a.block_allocator.procedure(
			a.block_allocator.data,
			Allocator_Mode.Alloc,
			a.block_size,
			a.alignment,
			nil,
			0,
		)
		if err != nil {
			// Nothing has been mutated yet; the arena remains usable.
			return
		}
		// sanitizer.address_poison(data)
		new_block = raw_data(data)
	}
	// Only now retire the old current block.
	if a.current_block != nil {
		append(&a.used_blocks, a.current_block, loc=loc)
	}
	a.bytes_left    = a.block_size
	a.current_pos   = new_block
	a.current_block = new_block
	return
}
/*
Allocate memory from a dynamic arena.
Allocates `size` bytes, aligned to the arena's configured alignment, and
zero-initialized. Returns a pointer to the new region.
*/
@(require_results)
dynamic_arena_alloc :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) {
	bytes, err := dynamic_arena_alloc_bytes(a, size, loc)
	return raw_data(bytes), err
}
/*
Allocate memory from a dynamic arena.
Allocates `size` bytes, aligned to the arena's configured alignment, and
zero-initialized. Returns a slice over the new region.
*/
@(require_results)
dynamic_arena_alloc_bytes :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
	if data == nil {
		return data, err
	}
	zero_slice(data)
	return data, err
}
/*
Allocate non-initialized memory from a dynamic arena.
Allocates `size` bytes, aligned to the arena's configured alignment; the
memory is not explicitly zeroed. Returns a pointer to the new region.
*/
@(require_results)
dynamic_arena_alloc_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) {
	bytes, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
	return raw_data(bytes), err
}
/*
Allocate non-initialized memory from a dynamic arena.
Allocates `size` bytes aligned to the arena's configured alignment, without
zero-initialization. Returns a slice over the new region.
*/
@(require_results)
dynamic_arena_alloc_bytes_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
	// Requests at or above the threshold bypass the block machinery and are
	// tracked individually as out-band allocations.
	if size >= a.out_band_size {
		assert(a.out_band_allocations.allocator.procedure != nil, "Backing array allocator must be initialized", loc=loc)
		memory, err := alloc_bytes_non_zeroed(size, a.alignment, a.out_band_allocations.allocator, loc)
		if memory != nil {
			append(&a.out_band_allocations, raw_data(memory), loc = loc)
		}
		return memory, err
	}
	needed := align_formula(size, a.alignment)
	if needed > a.block_size {
		// Too big for a block, yet below the out-band threshold.
		return nil, .Invalid_Argument
	}
	if needed > a.bytes_left {
		// Current block is exhausted; cycle in a fresh one.
		cycle_err := _dynamic_arena_cycle_new_block(a, loc)
		if cycle_err != nil {
			return nil, cycle_err
		}
		if a.current_block == nil {
			return nil, .Out_Of_Memory
		}
	}
	ptr := a.current_pos
	// Bump the cursor by the aligned size so subsequent allocations stay aligned.
	a.current_pos = ([^]byte)(ptr)[needed:]
	a.bytes_left -= needed
	result := ([^]byte)(ptr)[:size]
	// ensure_poisoned(result)
	// sanitizer.address_unpoison(result)
	return result, nil
}
/*
Reset a dynamic arena allocator.
Returns the current and used blocks to the unused-block list for reuse, frees
every out-band allocation, and forces the next allocation to cycle in a block.
The unused blocks themselves are NOT freed.
*/
dynamic_arena_reset :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
	// Retire the partially-used current block.
	if a.current_block != nil {
		// sanitizer.address_poison(a.current_block, a.block_size)
		append(&a.unused_blocks, a.current_block, loc=loc)
		a.current_block = nil
	}
	// Recycle every fully-used block.
	for used_block in a.used_blocks {
		// sanitizer.address_poison(used_block, a.block_size)
		append(&a.unused_blocks, used_block, loc=loc)
	}
	clear(&a.used_blocks)
	// Out-band allocations cannot be reused; release them outright.
	for oob in a.out_band_allocations {
		free(oob, a.out_band_allocations.allocator, loc=loc)
	}
	clear(&a.out_band_allocations)
	// Make new allocations call `_dynamic_arena_cycle_new_block` again.
	a.bytes_left = 0
}
/*
Free all memory back to the dynamic arena allocator.
This procedure frees all the allocations owned by the dynamic arena, including
the unused blocks.
*/
dynamic_arena_free_all :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
	// Forward `loc` (the original dropped it, so allocator diagnostics from
	// the reset pointed here instead of at the caller).
	dynamic_arena_reset(a, loc)
	for block in a.unused_blocks {
		// sanitizer.address_unpoison(block, a.block_size)
		free(block, a.block_allocator, loc)
	}
	clear(&a.unused_blocks)
}
/*
Resize an allocation owned by a dynamic arena allocator.
Resizes the region at `old_memory` of `old_size` bytes to `size` bytes; any
newly allocated memory is zero-initialized. When `old_memory` is `nil`, this
behaves like `dynamic_arena_alloc()`. Returns a pointer to the resized region.
*/
@(require_results)
dynamic_arena_resize :: proc(
	a: ^Dynamic_Arena,
	old_memory: rawptr,
	old_size: int,
	size: int,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	data, err := dynamic_arena_resize_bytes(a, byte_slice(old_memory, old_size), size, loc)
	return raw_data(data), err
}
/*
Resize an allocation owned by a dynamic arena allocator.
Resizes the region `old_data` to `size` bytes; any newly allocated memory is
zero-initialized. When `old_data` is `nil`, this behaves like
`dynamic_arena_alloc()`. Returns a slice over the resized region.
*/
@(require_results)
dynamic_arena_resize_bytes :: proc(
	a: ^Dynamic_Arena,
	old_data: []byte,
	size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if size == 0 {
		// NOTE: This allocator has no Free mode.
		return nil, nil
	}
	data, err := dynamic_arena_resize_bytes_non_zeroed(a, old_data, size, loc)
	if data != nil {
		switch {
		case old_data == nil:
			// Fresh allocation: zero everything.
			zero_slice(data)
		case size > len(old_data):
			// Growth: zero only the newly exposed tail.
			zero_slice(data[len(old_data):])
		}
	}
	return data, err
}
/*
Resize an allocation owned by a dynamic arena allocator, without
zero-initialization.
Resizes the region at `old_memory` of `old_size` bytes to `size` bytes; newly
allocated memory is not explicitly zeroed. When `old_memory` is `nil`, this
behaves like `dynamic_arena_alloc()`. Returns a pointer to the resized region.
*/
@(require_results)
dynamic_arena_resize_non_zeroed :: proc(
	a: ^Dynamic_Arena,
	old_memory: rawptr,
	old_size: int,
	size: int,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	data, err := dynamic_arena_resize_bytes_non_zeroed(a, byte_slice(old_memory, old_size), size, loc)
	return raw_data(data), err
}
/*
Resize an allocation owned by a dynamic arena allocator, without
zero-initialization.
Resizes the region `old_data` to `size` bytes; newly allocated memory is not
explicitly zeroed. When `old_data` is `nil`, this behaves like
`dynamic_arena_alloc()`. Returns a slice over the resized region.
*/
@(require_results)
dynamic_arena_resize_bytes_non_zeroed :: proc(
	a: ^Dynamic_Arena,
	old_data: []byte,
	size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	if size == 0 {
		// NOTE: This allocator has no Free mode.
		return nil, nil
	}
	prev_ptr := raw_data(old_data)
	prev_size := len(old_data)
	// Shrinking (or staying the same size) reuses the existing region.
	if size <= prev_size {
		// sanitizer.address_poison(old_data[size:])
		return byte_slice(prev_ptr, size), nil
	}
	// Growing: no per-allocation metadata is kept, so a true in-place resize
	// is impossible — allocate a new region and copy the old contents over.
	data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
	if err == nil {
		runtime.copy(data, byte_slice(prev_ptr, prev_size))
	}
	return data, err
}
/*
Allocator procedure for the dynamic arena.
Dispatches `Allocator_Mode` requests to the corresponding `dynamic_arena_*`
procedures. `.Free` is not supported, since individual allocations cannot be
freed. `.Query_Info` reports the arena's block size and alignment for any
non-nil pointer — NOTE(review): it does not check that the pointer actually
belongs to this arena; confirm whether callers rely on that.
*/
dynamic_arena_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size: int,
	alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	arena := (^Dynamic_Arena)(allocator_data)
	switch mode {
	case .Alloc:
		return dynamic_arena_alloc_bytes(arena, size, loc)
	case .Alloc_Non_Zeroed:
		return dynamic_arena_alloc_bytes_non_zeroed(arena, size, loc)
	case .Free:
		// Individual allocations cannot be freed in this allocator.
		return nil, .Mode_Not_Implemented
	case .Free_All:
		dynamic_arena_free_all(arena, loc)
	case .Resize:
		return dynamic_arena_resize_bytes(arena, byte_slice(old_memory, old_size), size, loc)
	case .Resize_Non_Zeroed:
		return dynamic_arena_resize_bytes_non_zeroed(arena, byte_slice(old_memory, old_size), size, loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info}
		}
		return nil, nil
	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			// Reported size is the arena's block size — per-allocation sizes
			// are not tracked.
			info.size = arena.block_size
			info.alignment = arena.alignment
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}
	return nil, nil
}
/*
Header of the buddy block.
Every block in the backing buffer begins with this header; the user payload
follows it. `size` is the total size of the block in bytes, header included.
*/
Buddy_Block :: struct #align(align_of(uint)) {
	size:    uint, // Total block size in bytes, including this header.
	is_free: bool, // Whether the block is currently available for allocation.
}
/*
Obtain the next buddy block: the block that starts `block.size` bytes past
`block`'s own address.
*/
@(require_results, no_sanitize_address)
buddy_block_next :: proc(block: ^Buddy_Block) -> ^Buddy_Block {
	base := ([^]byte)(block)
	return (^Buddy_Block)(base[block.size:])
}
/*
Split the block into two, by truncating the given block to a given size.
Repeatedly halves `block`, marking each split-off right half as a free block,
until the block's size no longer exceeds `size`. Returns the resulting block,
or `nil` when the request cannot be satisfied.
NOTE(review): callers pass sizes produced by `buddy_block_size_required`
(powers of two) no larger than the block, so the halving loop terminates with
`block.size == size`; with a non-power-of-two `size` the final `size <=
block.size` check could fail — confirm this invariant is maintained.
*/
@(require_results, no_sanitize_address)
buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block {
	block := block
	if block != nil && size != 0 {
		// Recursive split: halve until the block is the smallest power-of-two
		// size that still holds `size`.
		for size < block.size {
			sz := block.size >> 1
			block.size = sz
			// The right half becomes a new free block.
			block = buddy_block_next(block)
			block.size = sz
			block.is_free = true
		}
		if size <= block.size {
			return block
		}
	}
	// Block cannot fit the requested allocation size
	return nil
}
/*
Coalesce contiguous blocks in a range of blocks into one.
Walks the block list in `[head, tail)`, merging adjacent free buddies of equal
size into a single block of twice the size. The outer loop repeats the sweep
until a full pass performs no merge, since each merge can enable a further
merge at the next size up.
*/
@(no_sanitize_address)
buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) {
	for {
		// Keep looping until there are no more buddies to coalesce
		block := head
		buddy := buddy_block_next(block)
		no_coalescence := true
		for block < tail && buddy < tail { // make sure the buddies are within the range
			if block.is_free && buddy.is_free && block.size == buddy.size {
				// Coalesce buddies into one
				block.size <<= 1
				block = buddy_block_next(block)
				if block < tail {
					buddy = buddy_block_next(block)
					no_coalescence = false
				}
			} else if block.size < buddy.size {
				// The buddy block is split into smaller blocks
				block = buddy
				buddy = buddy_block_next(buddy)
			} else {
				// Skip past the pair without merging.
				block = buddy_block_next(buddy)
				if block < tail {
					// Leave the buddy block for the next iteration
					buddy = buddy_block_next(block)
				}
			}
		}
		if no_coalescence {
			return
		}
	}
}
/*
Find the best block for storing a given size in a range of blocks.
Performs a best-fit scan over `[head, tail)`: among free blocks that can hold
`size` bytes, the smallest is chosen and then cut down to `size` via
`buddy_block_split`. Free buddy pairs of equal size are opportunistically
merged during the scan to reduce fragmentation. Returns `nil` when no block
fits.
*/
@(require_results, no_sanitize_address)
buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block {
	assert(size != 0)
	best_block: ^Buddy_Block
	block := head // left
	buddy := buddy_block_next(block) // right
	// The entire memory section between head and tail is free,
	// just call 'buddy_block_split' to get the allocation
	if buddy == tail && block.is_free {
		return buddy_block_split(block, size)
	}
	// Find the block which is the 'best_block' to requested allocation sized
	for block < tail && buddy < tail { // make sure the buddies are within the range
		// If both buddies are free, coalesce them together
		// NOTE: this is an optimization to reduce fragmentation
		// this could be completely ignored
		if block.is_free && buddy.is_free && block.size == buddy.size {
			block.size <<= 1
			if size <= block.size && (best_block == nil || block.size <= best_block.size) {
				best_block = block
			}
			block = buddy_block_next(buddy)
			if block < tail {
				// Delay the buddy block for the next iteration
				buddy = buddy_block_next(block)
			}
			continue
		}
		if block.is_free && size <= block.size &&
			(best_block == nil || block.size <= best_block.size) {
			best_block = block
		}
		if buddy.is_free && size <= buddy.size &&
			(best_block == nil || buddy.size < best_block.size) {
			// If each buddy are the same size, then it makes more sense
			// to pick the buddy as it "bounces around" less
			best_block = buddy
		}
		if block.size <= buddy.size {
			block = buddy_block_next(buddy)
			if (block < tail) {
				// Delay the buddy block for the next iteration
				buddy = buddy_block_next(block)
			}
		} else {
			// Buddy was split into smaller blocks
			block = buddy
			buddy = buddy_block_next(buddy)
		}
	}
	if best_block != nil {
		// This will handle the case if the 'best_block' is also the perfect fit
		return buddy_block_split(best_block, size)
	}
	// Maybe out of memory
	return nil
}
/*
The buddy allocator data.
`head` is the first block of the backing buffer; `tail` points one past the
end of the buffer (set to `buddy_block_next(head)` at initialization, when the
head block spans the whole buffer).
*/
Buddy_Allocator :: struct {
	head: ^Buddy_Block,           // First block of the backing buffer.
	tail: ^Buddy_Block `fmt:"-"`, // One past the end of the backing buffer.
	alignment: uint,              // Effective alignment; at least size_of(Buddy_Block) after init.
}
/*
Buddy allocator.
Splits the backing buffer into power-of-two regions called buddy blocks;
initially one block spans the whole buffer. Each allocation finds the smallest
free block that fits the request and splits it down to the needed size. When
no block fits, contiguous free blocks are coalesced and the search is retried.
*/
@(require_results)
buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator {
	result := Allocator {
		data      = b,
		procedure = buddy_allocator_proc,
	}
	return result
}
/*
Initialize a buddy allocator.
This procedure initializes the buddy allocator `b` with a backing buffer `data`
and block alignment specified by `alignment`.
`alignment` may be any power of two, but the backing buffer must be aligned to
at least `size_of(Buddy_Block)`.
*/
buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint, loc := #caller_location) {
	assert(data != nil)
	assert(is_power_of_two(uintptr(len(data))), "Size of the backing buffer must be power of two", loc)
	assert(is_power_of_two(uintptr(alignment)), "Alignment must be a power of two", loc)
	alignment := alignment
	// Every block must at least be able to hold its own header.
	if alignment < size_of(Buddy_Block) {
		alignment = size_of(Buddy_Block)
	}
	ptr := raw_data(data)
	assert(uintptr(ptr) % uintptr(alignment) == 0, "The data is not aligned to the minimum alignment, which must be at least `size_of(Buddy_Block)`.", loc)
	b.head = (^Buddy_Block)(ptr)
	// `size` is a `uint` while `len` yields an `int`; Odin performs no
	// implicit numeric conversions, so convert explicitly.
	b.head.size = uint(len(data))
	b.head.is_free = true
	// With the head spanning the whole buffer, `next` is one past the end.
	b.tail = buddy_block_next(b.head)
	b.alignment = alignment
	assert(uint(len(data)) >= 2 * buddy_block_size_required(b, 1), "The size of the backing buffer must be large enough to hold at least two 1-byte allocations given the alignment requirements, otherwise it cannot split.", loc)
	// sanitizer.address_poison(data)
}
/*
Get required block size to fit in the allocation as well as the alignment
padding: `size` plus one alignment step (which covers the block header),
rounded up to a power of two.
*/
@(require_results)
buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint {
	assert(size > 0)
	// `b.alignment` is at least `size_of(Buddy_Block)` after initialization,
	// so this also reserves room for the block header.
	required := size + b.alignment
	if intrinsics.count_ones(required) == 1 {
		// Already a power of two.
		return required
	}
	// Round up to the next power of two.
	return 1 << (size_of(uint) * 8 - intrinsics.count_leading_zeros(required))
}
/*
Allocate memory from a buddy allocator.
Allocates `size` bytes, aligned to the alignment fixed at initialization, and
zero-initialized. Returns a pointer to the allocated region.
*/
@(require_results, no_sanitize_address)
buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) {
	data, err := buddy_allocator_alloc_bytes(b, size)
	return raw_data(data), err
}
/*
Allocate memory from a buddy allocator.
Allocates `size` bytes, aligned to the alignment fixed at initialization, and
zero-initialized. Returns a slice over the allocated region.
*/
@(require_results, no_sanitize_address)
buddy_allocator_alloc_bytes :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) {
	data, err := buddy_allocator_alloc_bytes_non_zeroed(b, size)
	if data != nil {
		zero_slice(data)
	}
	return data, err
}
/*
Allocate non-initialized memory from a buddy allocator.
Allocates `size` bytes, aligned to the alignment fixed at initialization; the
memory is not explicitly zeroed. Returns a pointer to the allocated region.
*/
@(require_results, no_sanitize_address)
buddy_allocator_alloc_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) {
	data, err := buddy_allocator_alloc_bytes_non_zeroed(b, size)
	return raw_data(data), err
}
/*
Allocate non-initialized memory from a buddy allocator.
Allocates `size` bytes, aligned to the alignment fixed at initialization,
without zero-initialization. Returns a slice over the allocated region.
*/
@(require_results, no_sanitize_address)
buddy_allocator_alloc_bytes_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) {
	if size == 0 {
		return nil, nil
	}
	actual_size := buddy_block_size_required(b, size)
	found := buddy_block_find_best(b.head, b.tail, actual_size)
	if found == nil {
		// Coalesce all free buddy blocks and retry once before giving up.
		buddy_block_coalescence(b.head, b.tail)
		found = buddy_block_find_best(b.head, b.tail, actual_size)
	}
	if found == nil {
		return nil, .Out_Of_Memory
	}
	found.is_free = false
	// User memory begins one alignment step past the block header.
	data := ([^]byte)(found)[b.alignment:][:size]
	assert(cast(uintptr)raw_data(data)+cast(uintptr)(size-1) < cast(uintptr)buddy_block_next(found), "Buddy_Allocator has made an allocation which overlaps a block header.")
	// ensure_poisoned(data)
	// sanitizer.address_unpoison(data)
	return data, nil
}
/*
Free memory back to the buddy allocator.
This procedure frees the memory region allocated at pointer `ptr`. A `nil`
pointer is a no-op; a pointer outside the allocator's backing buffer yields
`.Invalid_Pointer`. The containing block is marked free and contiguous free
buddies are then coalesced.
NOTE(review): the range check accepts `ptr == tail` (one past the buffer) —
confirm whether that should be a strict `<`.
*/
@(no_sanitize_address)
buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Error {
	if ptr != nil {
		if !(b.head <= ptr && ptr <= b.tail) {
			return .Invalid_Pointer
		}
		// The block header sits `b.alignment` bytes before the user pointer.
		block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
		// sanitizer.address_poison(ptr, block.size)
		block.is_free = true
		buddy_block_coalescence(b.head, b.tail)
	}
	return nil
}
/*
Free all memory back to the buddy allocator.
Re-initializes the allocator over its own backing buffer, resetting it to a
single free block spanning the whole buffer.
*/
@(no_sanitize_address)
buddy_allocator_free_all :: proc(b: ^Buddy_Allocator) {
	saved_alignment := b.alignment
	begin := ([^]byte)(b.head)
	end := ([^]byte)(b.tail)
	// `[head, tail)` is exactly the original backing buffer.
	buddy_allocator_init(b, begin[:ptr_sub(end, begin)], saved_alignment)
}
/*
Allocator procedure for the buddy allocator.
Dispatches `Allocator_Mode` requests to the `buddy_allocator_*` procedures.
Resizes are handled generically (allocate, copy, free) via the default resize
helpers. NOTE(review): the requested `alignment` is not forwarded to the
allocation paths — the allocator's fixed alignment applies; confirm callers
never need a stricter one. `.Query_Info` reports the containing block's size,
which may exceed the originally requested size.
*/
@(no_sanitize_address)
buddy_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	b := (^Buddy_Allocator)(allocator_data)
	switch mode {
	case .Alloc:
		return buddy_allocator_alloc_bytes(b, uint(size))
	case .Alloc_Non_Zeroed:
		return buddy_allocator_alloc_bytes_non_zeroed(b, uint(size))
	case .Resize:
		return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b), loc)
	case .Resize_Non_Zeroed:
		return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b), loc)
	case .Free:
		return nil, buddy_allocator_free(b, old_memory)
	case .Free_All:
		buddy_allocator_free_all(b)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Query_Features, .Alloc, .Alloc_Non_Zeroed, .Resize, .Resize_Non_Zeroed, .Free, .Free_All, .Query_Info}
		}
		return nil, nil
	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			ptr := info.pointer
			if !(b.head <= ptr && ptr <= b.tail) {
				return nil, .Invalid_Pointer
			}
			// The block header sits `b.alignment` bytes before the pointer.
			block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
			info.size = int(block.size)
			info.alignment = int(b.alignment)
			return byte_slice(info, size_of(info^)), nil
		}
		return nil, nil
	}
	return nil, nil
}
// An allocator that keeps track of allocation sizes and passes them along to resizes.
// This is useful if you are using a library that needs an equivalent of `realloc` but
// want to use the Odin allocator interface.
//
// Wrap your allocator in this one whenever the underlying allocator relies on being
// given the old size to work.
//
// The overhead of this allocator is an extra max(alignment, size_of(Header)) bytes
// allocated for each allocation; these bytes store a header (size and alignment)
// immediately before the pointer returned to the user.
Compat_Allocator :: struct {
	parent: Allocator, // The wrapped allocator that performs the real allocations.
}
// Initialize a `Compat_Allocator`, recording `allocator` as the parent that
// will service the actual allocations.
compat_allocator_init :: proc(rra: ^Compat_Allocator, allocator := context.allocator) {
	rra.parent = allocator
}
// Obtain an `Allocator` backed by the given `Compat_Allocator`.
@(require_results)
compat_allocator :: proc(rra: ^Compat_Allocator) -> Allocator {
	result := Allocator {
		procedure = compat_allocator_proc,
		data      = rra,
	}
	return result
}
/*
Allocator procedure for `Compat_Allocator`.
Stores a `Header` (size and alignment) immediately before each pointer handed
to the user, so `.Free` and `.Resize` can reconstruct the original size and
alignment the parent allocator needs.
*/
compat_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int,
	location := #caller_location) -> (data: []byte, err: Allocator_Error) {
	// Bookkeeping stored immediately before each returned pointer.
	Header :: struct {
		size:      int,
		alignment: int,
	}
	// Read the header for an allocation previously made through this allocator.
	@(no_sanitize_address)
	get_unpoisoned_header :: #force_inline proc(ptr: rawptr) -> Header {
		header := ([^]Header)(ptr)[-1]
		// a := max(header.alignment, size_of(Header))
		// sanitizer.address_unpoison(rawptr(uintptr(ptr)-uintptr(a)), a)
		return header
	}
	rra := (^Compat_Allocator)(allocator_data)
	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		// Reserve room for the header in front of the user region; using
		// max(alignment, size_of(Header)) keeps the user pointer aligned.
		a := max(alignment, size_of(Header))
		req_size := size + a
		assert(req_size >= 0, "overflow")
		allocation := rra.parent.procedure(rra.parent.data, mode, req_size, alignment, old_memory, old_size, location) or_return
		#no_bounds_check data = allocation[a:]
		([^]Header)(raw_data(data))[-1] = {
			size      = size,
			alignment = alignment,
		}
		// sanitizer.address_poison(raw_data(allocation), a)
		return
	case .Free:
		header := get_unpoisoned_header(old_memory)
		a := max(header.alignment, size_of(Header))
		orig_ptr := rawptr(uintptr(old_memory)-uintptr(a))
		orig_size := header.size + a
		return rra.parent.procedure(rra.parent.data, mode, orig_size, header.alignment, orig_ptr, orig_size, location)
	case .Resize, .Resize_Non_Zeroed:
		header := get_unpoisoned_header(old_memory)
		orig_a := max(header.alignment, size_of(Header))
		orig_ptr := rawptr(uintptr(old_memory)-uintptr(orig_a))
		orig_size := header.size + orig_a
		new_alignment := max(header.alignment, alignment)
		// FIX: was `size_of(header)` (the variable); use the type `Header`
		// for consistency with the `.Alloc` path.
		a := max(new_alignment, size_of(Header))
		req_size := size + a
		// FIX: was `assert(size >= 0)`, which missed overflow in `size + a`.
		assert(req_size >= 0, "overflow")
		allocation := rra.parent.procedure(rra.parent.data, mode, req_size, new_alignment, orig_ptr, orig_size, location) or_return
		#no_bounds_check data = allocation[a:]
		([^]Header)(raw_data(data))[-1] = {
			size      = size,
			alignment = new_alignment,
		}
		// sanitizer.address_poison(raw_data(allocation), a)
		return
	case .Free_All:
		return rra.parent.procedure(rra.parent.data, mode, size, alignment, old_memory, old_size, location)
	case .Query_Info:
		info := (^Allocator_Query_Info)(old_memory)
		if info != nil && info.pointer != nil {
			header := get_unpoisoned_header(info.pointer)
			info.size = header.size
			info.alignment = header.alignment
		}
		return
	case .Query_Features:
		data, err = rra.parent.procedure(rra.parent.data, mode, size, alignment, old_memory, old_size, location)
		// FIX: the original added `.Query_Info` only when the parent call
		// FAILED (`err != nil`) and dereferenced `set` without a nil check.
		// Advertise `.Query_Info` — implemented by this wrapper itself via
		// the header — when the parent reported its features successfully.
		if err == nil {
			if set := (^Allocator_Mode_Set)(old_memory); set != nil {
				set^ += {.Query_Info}
			}
		}
		return
	case: unreachable()
	}
}
|