vk_mem_alloc.h 696 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
26221262312624126251262612627126281262912630126311263212633126341263512636126371263812639126401264112642126431264412645126461264712648126491265012651126521265312654126551265612657126581265912660126611266212663126641266512666126671266812669126701267112672126731267412675126761267712678126791268012681126821268312684126851268612687126881268912690126911269212693126941269512696126971269812699127001270112702127031270412705127061270712708127091271012711127121271312714127151271612717127181271912720127211272212723127241272512726127271272812729127301273112732127331273412735127361273712738127391274012741127421274312744127451274612747127481274912750127511275212753127541275512756127571275812759127601276112762127631276412765127661276712768127691277012771127721277312774127751277612777127781277912780127811278212783127841278512786127871278812789127901279112792127931279412795127961279712798127991280012801128021280312804128051280612807128081280912810128111281212813128141281512816128171281812819128201282112822128231282412825128261282712828128291283012831128321283312834128351283612837128381283912840128411284212843128441284512846128471284812849128501285112852128531285412855128561285712858128591286012861128621286312864128651286612867128681286912870128711287212873128741287512876128771287812879128801288112882128831288412885128861288712888128891289012891128921289312894128951289612897128981289912900129011290212903129041290512906129071290812909129101291112912129131291412915129161291712918129191292012921129221292312924129251292612927129281292912930129311293212933129341293512936129371293812939129401294112942129431294412945129461294712948129491295012951129521295312954129551295612957129581295912960129611296212963129641296512966129671296812969129701297112972129731297412975129761297712978129791298012981129821298312984129851298612987129881298912990129911299212993129941299512996129971299812999130001300113002130031300413005130061300713008130091301013011130121301313014130151301613017130181301913020130211
30221302313024130251302613027130281302913030130311303213033130341303513036130371303813039130401304113042130431304413045130461304713048130491305013051130521305313054130551305613057130581305913060130611306213063130641306513066130671306813069130701307113072130731307413075130761307713078130791308013081130821308313084130851308613087130881308913090130911309213093130941309513096130971309813099131001310113102131031310413105131061310713108131091311013111131121311313114131151311613117131181311913120131211312213123131241312513126131271312813129131301313113132131331313413135131361313713138131391314013141131421314313144131451314613147131481314913150131511315213153131541315513156131571315813159131601316113162131631316413165131661316713168131691317013171131721317313174131751317613177131781317913180131811318213183131841318513186131871318813189131901319113192131931319413195131961319713198131991320013201132021320313204132051320613207132081320913210132111321213213132141321513216132171321813219132201322113222132231322413225132261322713228132291323013231132321323313234132351323613237132381323913240132411324213243132441324513246132471324813249132501325113252132531325413255132561325713258132591326013261132621326313264132651326613267132681326913270132711327213273132741327513276132771327813279132801328113282132831328413285132861328713288132891329013291132921329313294132951329613297132981329913300133011330213303133041330513306133071330813309133101331113312133131331413315133161331713318133191332013321133221332313324133251332613327133281332913330133311333213333133341333513336133371333813339133401334113342133431334413345133461334713348133491335013351133521335313354133551335613357133581335913360133611336213363133641336513366133671336813369133701337113372133731337413375133761337713378133791338013381133821338313384133851338613387133881338913390133911339213393133941339513396133971339813399134001340113402134031340413405134061340713408134091341013411134121341313414134151341613417134181341913420134211
34221342313424134251342613427134281342913430134311343213433134341343513436134371343813439134401344113442134431344413445134461344713448134491345013451134521345313454134551345613457134581345913460134611346213463134641346513466134671346813469134701347113472134731347413475134761347713478134791348013481134821348313484134851348613487134881348913490134911349213493134941349513496134971349813499135001350113502135031350413505135061350713508135091351013511135121351313514135151351613517135181351913520135211352213523135241352513526135271352813529135301353113532135331353413535135361353713538135391354013541135421354313544135451354613547135481354913550135511355213553135541355513556135571355813559135601356113562135631356413565135661356713568135691357013571135721357313574135751357613577135781357913580135811358213583135841358513586135871358813589135901359113592135931359413595135961359713598135991360013601136021360313604136051360613607136081360913610136111361213613136141361513616136171361813619136201362113622136231362413625136261362713628136291363013631136321363313634136351363613637136381363913640136411364213643136441364513646136471364813649136501365113652136531365413655136561365713658136591366013661136621366313664136651366613667136681366913670136711367213673136741367513676136771367813679136801368113682136831368413685136861368713688136891369013691136921369313694136951369613697136981369913700137011370213703137041370513706137071370813709137101371113712137131371413715137161371713718137191372013721137221372313724137251372613727137281372913730137311373213733137341373513736137371373813739137401374113742137431374413745137461374713748137491375013751137521375313754137551375613757137581375913760137611376213763137641376513766137671376813769137701377113772137731377413775137761377713778137791378013781137821378313784137851378613787137881378913790137911379213793137941379513796137971379813799138001380113802138031380413805138061380713808138091381013811138121381313814138151381613817138181381913820138211
38221382313824138251382613827138281382913830138311383213833138341383513836138371383813839138401384113842138431384413845138461384713848138491385013851138521385313854138551385613857138581385913860138611386213863138641386513866138671386813869138701387113872138731387413875138761387713878138791388013881138821388313884138851388613887138881388913890138911389213893138941389513896138971389813899139001390113902139031390413905139061390713908139091391013911139121391313914139151391613917139181391913920139211392213923139241392513926139271392813929139301393113932139331393413935139361393713938139391394013941139421394313944139451394613947139481394913950139511395213953139541395513956139571395813959139601396113962139631396413965139661396713968139691397013971139721397313974139751397613977139781397913980139811398213983139841398513986139871398813989139901399113992139931399413995139961399713998139991400014001140021400314004140051400614007140081400914010140111401214013140141401514016140171401814019140201402114022140231402414025140261402714028140291403014031140321403314034140351403614037140381403914040140411404214043140441404514046140471404814049140501405114052140531405414055140561405714058140591406014061140621406314064140651406614067140681406914070140711407214073140741407514076140771407814079140801408114082140831408414085140861408714088140891409014091140921409314094140951409614097140981409914100141011410214103141041410514106141071410814109141101411114112141131411414115141161411714118141191412014121141221412314124141251412614127141281412914130141311413214133141341413514136141371413814139141401414114142141431414414145141461414714148141491415014151141521415314154141551415614157141581415914160141611416214163141641416514166141671416814169141701417114172141731417414175141761417714178141791418014181141821418314184141851418614187141881418914190141911419214193141941419514196141971419814199142001420114202142031420414205142061420714208142091421014211142121421314214142151421614217142181421914220142211
42221422314224142251422614227142281422914230142311423214233142341423514236142371423814239142401424114242142431424414245142461424714248142491425014251142521425314254142551425614257142581425914260142611426214263142641426514266142671426814269142701427114272142731427414275142761427714278142791428014281142821428314284142851428614287142881428914290142911429214293142941429514296142971429814299143001430114302143031430414305143061430714308143091431014311143121431314314143151431614317143181431914320143211432214323143241432514326143271432814329143301433114332143331433414335143361433714338143391434014341143421434314344143451434614347143481434914350143511435214353143541435514356143571435814359143601436114362143631436414365143661436714368143691437014371143721437314374143751437614377143781437914380143811438214383143841438514386143871438814389143901439114392143931439414395143961439714398143991440014401144021440314404144051440614407144081440914410144111441214413144141441514416144171441814419144201442114422144231442414425144261442714428144291443014431144321443314434144351443614437144381443914440144411444214443144441444514446144471444814449144501445114452144531445414455144561445714458144591446014461144621446314464144651446614467144681446914470144711447214473144741447514476144771447814479144801448114482144831448414485144861448714488144891449014491144921449314494144951449614497144981449914500145011450214503145041450514506145071450814509145101451114512145131451414515145161451714518145191452014521145221452314524145251452614527145281452914530145311453214533145341453514536145371453814539145401454114542145431454414545145461454714548145491455014551145521455314554145551455614557145581455914560145611456214563145641456514566145671456814569145701457114572145731457414575145761457714578145791458014581145821458314584145851458614587145881458914590145911459214593145941459514596145971459814599146001460114602146031460414605146061460714608146091461014611146121461314614146151461614617146181461914620146211
46221462314624146251462614627146281462914630146311463214633146341463514636146371463814639146401464114642146431464414645146461464714648146491465014651146521465314654146551465614657146581465914660146611466214663146641466514666146671466814669146701467114672146731467414675146761467714678146791468014681146821468314684146851468614687146881468914690146911469214693146941469514696146971469814699147001470114702147031470414705147061470714708147091471014711147121471314714147151471614717147181471914720147211472214723147241472514726147271472814729147301473114732147331473414735147361473714738147391474014741147421474314744147451474614747147481474914750147511475214753147541475514756147571475814759147601476114762147631476414765147661476714768147691477014771147721477314774147751477614777147781477914780147811478214783147841478514786147871478814789147901479114792147931479414795147961479714798147991480014801148021480314804148051480614807148081480914810148111481214813148141481514816148171481814819148201482114822148231482414825148261482714828148291483014831148321483314834148351483614837148381483914840148411484214843148441484514846148471484814849148501485114852148531485414855148561485714858148591486014861148621486314864148651486614867148681486914870148711487214873148741487514876148771487814879148801488114882148831488414885148861488714888148891489014891148921489314894148951489614897148981489914900149011490214903149041490514906149071490814909149101491114912149131491414915149161491714918149191492014921149221492314924149251492614927149281492914930149311493214933149341493514936149371493814939149401494114942149431494414945149461494714948149491495014951149521495314954149551495614957149581495914960149611496214963149641496514966149671496814969149701497114972149731497414975149761497714978149791498014981149821498314984149851498614987149881498914990149911499214993149941499514996149971499814999150001500115002150031500415005150061500715008150091501015011150121501315014150151501615017150181501915020150211
50221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211
54221542315424154251542615427154281542915430154311543215433154341543515436154371543815439154401544115442154431544415445154461544715448154491545015451154521545315454154551545615457154581545915460154611546215463154641546515466154671546815469154701547115472154731547415475154761547715478154791548015481154821548315484154851548615487154881548915490154911549215493154941549515496154971549815499155001550115502155031550415505155061550715508155091551015511155121551315514155151551615517155181551915520155211552215523155241552515526155271552815529155301553115532155331553415535155361553715538155391554015541155421554315544155451554615547155481554915550155511555215553155541555515556155571555815559155601556115562155631556415565155661556715568155691557015571155721557315574155751557615577155781557915580155811558215583155841558515586155871558815589155901559115592155931559415595155961559715598155991560015601156021560315604156051560615607156081560915610156111561215613156141561515616156171561815619156201562115622156231562415625156261562715628156291563015631156321563315634156351563615637156381563915640156411564215643156441564515646156471564815649156501565115652156531565415655156561565715658156591566015661156621566315664156651566615667156681566915670156711567215673156741567515676156771567815679156801568115682156831568415685156861568715688156891569015691156921569315694156951569615697156981569915700157011570215703157041570515706157071570815709157101571115712157131571415715157161571715718157191572015721157221572315724157251572615727157281572915730157311573215733157341573515736157371573815739157401574115742157431574415745157461574715748157491575015751157521575315754157551575615757157581575915760157611576215763157641576515766157671576815769157701577115772157731577415775157761577715778157791578015781157821578315784157851578615787157881578915790157911579215793157941579515796157971579815799158001580115802158031580415805158061580715808158091581015811158121581315814158151581615817158181581915820158211
58221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935159361593715938159391594015941159421594315944159451594615947159481594915950159511595215953159541595515956159571595815959159601596115962159631596415965159661596715968159691597015971159721597315974159751597615977159781597915980159811598215983159841598515986159871598815989159901599115992159931599415995159961599715998159991600016001160021600316004160051600616007160081600916010160111601216013160141601516016160171601816019160201602116022160231602416025160261602716028160291603016031160321603316034160351603616037160381603916040160411604216043160441604516046160471604816049160501605116052160531605416055160561605716058160591606016061160621606316064160651606616067160681606916070160711607216073160741607516076160771607816079160801608116082160831608416085160861608716088160891609016091160921609316094160951609616097160981609916100161011610216103161041610516106161071610816109161101611116112161131611416115161161611716118161191612016121161221612316124161251612616127161281612916130161311613216133161341613516136161371613816139161401614116142161431614416145161461614716148161491615016151161521615316154161551615616157161581615916160161611616216163161641616516166161671616816169161701617116172161731617416175161761617716178161791618016181161821618316184161851618616187161881618916190161911619216193161941619516196161971619816199162001620116202162031620416205162061620716208162091621016211162121621316214162151621616217162181621916220162211
62221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211
66221662316624166251662616627166281662916630166311663216633166341663516636166371663816639166401664116642166431664416645166461664716648166491665016651166521665316654166551665616657166581665916660166611666216663166641666516666166671666816669166701667116672166731667416675166761667716678166791668016681166821668316684166851668616687166881668916690166911669216693166941669516696166971669816699167001670116702167031670416705167061670716708167091671016711167121671316714167151671616717167181671916720167211672216723167241672516726167271672816729167301673116732167331673416735167361673716738167391674016741167421674316744167451674616747167481674916750167511675216753167541675516756167571675816759167601676116762167631676416765167661676716768167691677016771167721677316774167751677616777167781677916780167811678216783167841678516786167871678816789167901679116792167931679416795167961679716798167991680016801168021680316804168051680616807168081680916810168111681216813168141681516816168171681816819168201682116822168231682416825168261682716828168291683016831168321683316834168351683616837168381683916840168411684216843168441684516846168471684816849168501685116852168531685416855168561685716858168591686016861168621686316864168651686616867168681686916870168711687216873168741687516876168771687816879168801688116882168831688416885168861688716888168891689016891168921689316894168951689616897168981689916900169011690216903169041690516906169071690816909169101691116912169131691416915169161691716918169191692016921169221692316924169251692616927169281692916930169311693216933169341693516936169371693816939169401694116942169431694416945169461694716948169491695016951169521695316954169551695616957169581695916960169611696216963169641696516966169671696816969169701697116972169731697416975169761697716978169791698016981169821698316984169851698616987169881698916990169911699216993169941699516996169971699816999170001700117002170031700417005170061700717008170091701017011170121701317014170151701617017170181701917020170211
70221702317024170251702617027170281702917030170311703217033170341703517036170371703817039170401704117042170431704417045170461704717048170491705017051170521705317054170551705617057170581705917060170611706217063170641706517066170671706817069170701707117072170731707417075170761707717078170791708017081170821708317084170851708617087170881708917090170911709217093170941709517096170971709817099171001710117102171031710417105171061710717108171091711017111171121711317114171151711617117171181711917120171211712217123171241712517126171271712817129171301713117132171331713417135171361713717138171391714017141171421714317144171451714617147171481714917150171511715217153171541715517156171571715817159171601716117162171631716417165171661716717168171691717017171171721717317174171751717617177171781717917180171811718217183171841718517186171871718817189171901719117192171931719417195171961719717198171991720017201172021720317204172051720617207172081720917210172111721217213172141721517216172171721817219172201722117222172231722417225172261722717228172291723017231172321723317234172351723617237172381723917240172411724217243172441724517246172471724817249172501725117252172531725417255172561725717258172591726017261172621726317264172651726617267172681726917270172711727217273172741727517276172771727817279172801728117282172831728417285172861728717288172891729017291172921729317294172951729617297172981729917300173011730217303173041730517306173071730817309173101731117312173131731417315173161731717318173191732017321173221732317324173251732617327173281732917330173311733217333173341733517336173371733817339173401734117342173431734417345173461734717348173491735017351173521735317354173551735617357173581735917360173611736217363173641736517366173671736817369173701737117372173731737417375173761737717378173791738017381173821738317384173851738617387173881738917390173911739217393173941739517396173971739817399174001740117402174031740417405174061740717408174091741017411174121741317414174151741617417174181741917420174211
74221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211
78221782317824178251782617827178281782917830178311783217833178341783517836178371783817839178401784117842178431784417845178461784717848178491785017851178521785317854178551785617857178581785917860178611786217863178641786517866178671786817869178701787117872178731787417875178761787717878178791788017881178821788317884178851788617887178881788917890178911789217893178941789517896178971789817899179001790117902179031790417905179061790717908179091791017911179121791317914179151791617917179181791917920179211792217923179241792517926179271792817929179301793117932179331793417935179361793717938179391794017941179421794317944179451794617947179481794917950179511795217953179541795517956179571795817959179601796117962179631796417965179661796717968179691797017971179721797317974179751797617977179781797917980179811798217983179841798517986179871798817989179901799117992179931799417995179961799717998179991800018001180021800318004180051800618007180081800918010180111801218013180141801518016180171801818019180201802118022180231802418025180261802718028180291803018031180321803318034180351803618037180381803918040180411804218043180441804518046180471804818049180501805118052180531805418055180561805718058180591806018061180621806318064180651806618067180681806918070180711807218073180741807518076180771807818079180801808118082180831808418085180861808718088180891809018091180921809318094180951809618097180981809918100181011810218103181041810518106181071810818109181101811118112181131811418115181161811718118181191812018121181221812318124181251812618127181281812918130181311813218133181341813518136181371813818139181401814118142181431814418145181461814718148181491815018151181521815318154181551815618157181581815918160181611816218163181641816518166181671816818169181701817118172181731817418175181761817718178181791818018181181821818318184181851818618187181881818918190181911819218193181941819518196181971819818199182001820118202182031820418205182061820718208182091821018211182121821318214182151821618217182181821918220182211
82221822318224182251822618227182281822918230182311823218233182341823518236182371823818239182401824118242182431824418245182461824718248182491825018251182521825318254182551825618257182581825918260182611826218263182641826518266182671826818269182701827118272182731827418275182761827718278182791828018281182821828318284182851828618287182881828918290182911829218293182941829518296182971829818299183001830118302183031830418305183061830718308183091831018311183121831318314183151831618317183181831918320183211832218323183241832518326183271832818329183301833118332183331833418335183361833718338183391834018341183421834318344183451834618347183481834918350183511835218353183541835518356183571835818359183601836118362183631836418365183661836718368183691837018371183721837318374183751837618377183781837918380183811838218383183841838518386183871838818389183901839118392183931839418395183961839718398183991840018401184021840318404184051840618407184081840918410184111841218413184141841518416184171841818419184201842118422184231842418425184261842718428184291843018431184321843318434184351843618437184381843918440184411844218443184441844518446184471844818449184501845118452184531845418455184561845718458184591846018461184621846318464184651846618467184681846918470184711847218473184741847518476184771847818479184801848118482184831848418485184861848718488184891849018491184921849318494184951849618497184981849918500185011850218503185041850518506185071850818509185101851118512185131851418515185161851718518185191852018521185221852318524185251852618527185281852918530185311853218533185341853518536185371853818539185401854118542185431854418545185461854718548185491855018551185521855318554185551855618557185581855918560185611856218563185641856518566185671856818569185701857118572185731857418575185761857718578185791858018581185821858318584185851858618587185881858918590185911859218593185941859518596185971859818599186001860118602186031860418605186061860718608186091861018611186121861318614186151861618617186181861918620186211
8622186231862418625186261862718628186291863018631186321863318634186351863618637186381863918640186411864218643186441864518646186471864818649186501865118652186531865418655186561865718658186591866018661186621866318664186651866618667186681866918670186711867218673186741867518676186771867818679186801868118682186831868418685186861868718688186891869018691186921869318694186951869618697186981869918700187011870218703187041870518706187071870818709
  1. //
  2. // Copyright (c) 2017-2024 Advanced Micro Devices, Inc. All rights reserved.
  3. //
  4. // Permission is hereby granted, free of charge, to any person obtaining a copy
  5. // of this software and associated documentation files (the "Software"), to deal
  6. // in the Software without restriction, including without limitation the rights
  7. // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  8. // copies of the Software, and to permit persons to whom the Software is
  9. // furnished to do so, subject to the following conditions:
  10. //
  11. // The above copyright notice and this permission notice shall be included in
  12. // all copies or substantial portions of the Software.
  13. //
  14. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  17. // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  18. // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  19. // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  20. // THE SOFTWARE.
  21. //
  22. #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
  23. #define AMD_VULKAN_MEMORY_ALLOCATOR_H
  24. /** \mainpage Vulkan Memory Allocator
  25. <b>Version 3.1.0</b>
  26. Copyright (c) 2017-2024 Advanced Micro Devices, Inc. All rights reserved. \n
  27. License: MIT \n
  28. See also: [product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/),
  29. [repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
  30. <b>API documentation divided into groups:</b> [Topics](topics.html)
  31. <b>General documentation chapters:</b>
  32. - <b>User guide</b>
  33. - \subpage quick_start
  34. - [Project setup](@ref quick_start_project_setup)
  35. - [Initialization](@ref quick_start_initialization)
  36. - [Resource allocation](@ref quick_start_resource_allocation)
  37. - \subpage choosing_memory_type
  38. - [Usage](@ref choosing_memory_type_usage)
  39. - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
  40. - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
  41. - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
  42. - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
  43. - \subpage memory_mapping
  44. - [Copy functions](@ref memory_mapping_copy_functions)
  45. - [Mapping functions](@ref memory_mapping_mapping_functions)
  46. - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
  47. - [Cache flush and invalidate](@ref memory_mapping_cache_control)
  48. - \subpage staying_within_budget
  49. - [Querying for budget](@ref staying_within_budget_querying_for_budget)
  50. - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
  51. - \subpage resource_aliasing
  52. - \subpage custom_memory_pools
  53. - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
  54. - [When not to use custom pools](@ref custom_memory_pools_when_not_use)
  55. - [Linear allocation algorithm](@ref linear_algorithm)
  56. - [Free-at-once](@ref linear_algorithm_free_at_once)
  57. - [Stack](@ref linear_algorithm_stack)
  58. - [Double stack](@ref linear_algorithm_double_stack)
  59. - [Ring buffer](@ref linear_algorithm_ring_buffer)
  60. - \subpage defragmentation
  61. - \subpage statistics
  62. - [Numeric statistics](@ref statistics_numeric_statistics)
  63. - [JSON dump](@ref statistics_json_dump)
  64. - \subpage allocation_annotation
  65. - [Allocation user data](@ref allocation_user_data)
  66. - [Allocation names](@ref allocation_names)
  67. - \subpage virtual_allocator
  68. - \subpage debugging_memory_usage
  69. - [Memory initialization](@ref debugging_memory_usage_initialization)
  70. - [Margins](@ref debugging_memory_usage_margins)
  71. - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
  72. - [Leak detection features](@ref debugging_memory_usage_leak_detection)
  73. - \subpage other_api_interop
  74. - \subpage usage_patterns
  75. - [GPU-only resource](@ref usage_patterns_gpu_only)
  76. - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)
  77. - [Readback](@ref usage_patterns_readback)
  78. - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)
  79. - [Other use cases](@ref usage_patterns_other_use_cases)
  80. - \subpage configuration
  81. - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
  82. - [Custom host memory allocator](@ref custom_memory_allocator)
  83. - [Device memory allocation callbacks](@ref allocation_callbacks)
  84. - [Device heap memory limit](@ref heap_memory_limit)
  85. - <b>Extension support</b>
  86. - \subpage vk_khr_dedicated_allocation
  87. - \subpage enabling_buffer_device_address
  88. - \subpage vk_ext_memory_priority
  89. - \subpage vk_amd_device_coherent_memory
  90. - \subpage general_considerations
  91. - [Thread safety](@ref general_considerations_thread_safety)
  92. - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)
  93. - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
  94. - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
  95. - [Features not supported](@ref general_considerations_features_not_supported)
  96. \defgroup group_init Library initialization
  97. \brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object.
  98. \defgroup group_alloc Memory allocation
  99. \brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images.
  100. Most basic ones being: vmaCreateBuffer(), vmaCreateImage().
  101. \defgroup group_virtual Virtual allocator
  102. \brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
  103. for user-defined purpose without allocating any real GPU memory.
  104. \defgroup group_stats Statistics
  105. \brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format.
  106. See documentation chapter: \ref statistics.
  107. */
  108. #include "drivers/vulkan/godot_vulkan.h"
  109. #ifdef __cplusplus
  110. extern "C" {
  111. #endif
  // Detect the highest Vulkan core version exposed by the included Vulkan headers,
  // unless the user pre-defined VMA_VULKAN_VERSION. Encoded as major*1000000 +
  // minor*1000, e.g. 1003000 = Vulkan 1.3, 1000000 = Vulkan 1.0.
  112. #if !defined(VMA_VULKAN_VERSION)
  113. #if defined(VK_VERSION_1_3)
  114. #define VMA_VULKAN_VERSION 1003000
  115. #elif defined(VK_VERSION_1_2)
  116. #define VMA_VULKAN_VERSION 1002000
  117. #elif defined(VK_VERSION_1_1)
  118. #define VMA_VULKAN_VERSION 1001000
  119. #else
  120. #define VMA_VULKAN_VERSION 1000000
  121. #endif
  122. #endif
  // On Android builds where the Vulkan headers declare no prototypes
  // (VK_NO_PROTOTYPES) but VMA is configured to use statically available
  // functions (VMA_STATIC_VULKAN_FUNCTIONS), declare the global Vulkan
  // function pointers used by the library. Being `extern`, they are expected
  // to be defined elsewhere — presumably by the application; verify against
  // the build setup that loads them (e.g. via vkGetInstanceProcAddr).
  123. #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
  124. extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
  125. extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
  126. extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
  127. extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
  128. extern PFN_vkAllocateMemory vkAllocateMemory;
  129. extern PFN_vkFreeMemory vkFreeMemory;
  130. extern PFN_vkMapMemory vkMapMemory;
  131. extern PFN_vkUnmapMemory vkUnmapMemory;
  132. extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
  133. extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
  134. extern PFN_vkBindBufferMemory vkBindBufferMemory;
  135. extern PFN_vkBindImageMemory vkBindImageMemory;
  136. extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
  137. extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
  138. extern PFN_vkCreateBuffer vkCreateBuffer;
  139. extern PFN_vkDestroyBuffer vkDestroyBuffer;
  140. extern PFN_vkCreateImage vkCreateImage;
  141. extern PFN_vkDestroyImage vkDestroyImage;
  142. extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
  // Functions promoted to core in Vulkan 1.1.
  143. #if VMA_VULKAN_VERSION >= 1001000
  144. extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
  145. extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
  146. extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
  147. extern PFN_vkBindImageMemory2 vkBindImageMemory2;
  148. extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
  149. #endif // #if VMA_VULKAN_VERSION >= 1001000
  150. #endif // #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
  // Defined to 1 when VK_KHR_get_memory_requirements2 and VK_KHR_dedicated_allocation
  // device extensions are defined in Vulkan headers.
  151. #if !defined(VMA_DEDICATED_ALLOCATION)
  152. #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
  153. #define VMA_DEDICATED_ALLOCATION 1
  154. #else
  155. #define VMA_DEDICATED_ALLOCATION 0
  156. #endif
  157. #endif
  // Defined to 1 when VK_KHR_bind_memory2 device extension is defined in Vulkan headers.
  158. #if !defined(VMA_BIND_MEMORY2)
  159. #if VK_KHR_bind_memory2
  160. #define VMA_BIND_MEMORY2 1
  161. #else
  162. #define VMA_BIND_MEMORY2 0
  163. #endif
  164. #endif
  // Defined to 1 when VK_EXT_memory_budget device extension is defined in Vulkan
  // headers, which additionally requires VK_KHR_get_physical_device_properties2
  // or a Vulkan >= 1.1 header (VMA_VULKAN_VERSION >= 1001000).
  165. #if !defined(VMA_MEMORY_BUDGET)
  166. #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
  167. #define VMA_MEMORY_BUDGET 1
  168. #else
  169. #define VMA_MEMORY_BUDGET 0
  170. #endif
  171. #endif
  172. // Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
  173. #if !defined(VMA_BUFFER_DEVICE_ADDRESS)
  174. #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
  175. #define VMA_BUFFER_DEVICE_ADDRESS 1
  176. #else
  177. #define VMA_BUFFER_DEVICE_ADDRESS 0
  178. #endif
  179. #endif
  180. // Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
  181. #if !defined(VMA_MEMORY_PRIORITY)
  182. #if VK_EXT_memory_priority
  183. #define VMA_MEMORY_PRIORITY 1
  184. #else
  185. #define VMA_MEMORY_PRIORITY 0
  186. #endif
  187. #endif
  188. // Defined to 1 when VK_KHR_maintenance4 device extension is defined in Vulkan headers.
  189. #if !defined(VMA_KHR_MAINTENANCE4)
  190. #if VK_KHR_maintenance4
  191. #define VMA_KHR_MAINTENANCE4 1
  192. #else
  193. #define VMA_KHR_MAINTENANCE4 0
  194. #endif
  195. #endif
  196. // Defined to 1 when VK_KHR_maintenance5 device extension is defined in Vulkan headers.
  197. #if !defined(VMA_KHR_MAINTENANCE5)
  198. #if VK_KHR_maintenance5
  199. #define VMA_KHR_MAINTENANCE5 1
  200. #else
  201. #define VMA_KHR_MAINTENANCE5 0
  202. #endif
  203. #endif
  204. // Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
  205. #if !defined(VMA_EXTERNAL_MEMORY)
  206. #if VK_KHR_external_memory
  207. #define VMA_EXTERNAL_MEMORY 1
  208. #else
  209. #define VMA_EXTERNAL_MEMORY 0
  210. #endif
  211. #endif
  212. // Define these macros to decorate all public functions with additional code,
  213. // before and after returned type, appropriately. This may be useful for
  214. // exporting the functions when compiling VMA as a separate library. Example:
  215. // #define VMA_CALL_PRE __declspec(dllexport)
  216. // #define VMA_CALL_POST __cdecl
  217. #ifndef VMA_CALL_PRE
  218. #define VMA_CALL_PRE
  219. #endif
  220. #ifndef VMA_CALL_POST
  221. #define VMA_CALL_POST
  222. #endif
  223. // Define this macro to decorate pNext pointers with an attribute specifying the Vulkan
  224. // structure that will be extended via the pNext chain.
  225. #ifndef VMA_EXTENDS_VK_STRUCT
  226. #define VMA_EXTENDS_VK_STRUCT(vkStruct)
  227. #endif
  228. // Define this macro to decorate pointers with an attribute specifying the
  229. // length of the array they point to if they are not null.
  230. //
  231. // The length may be one of
  232. // - The name of another parameter in the argument list where the pointer is declared
  233. // - The name of another member in the struct where the pointer is declared
  234. // - The name of a member of a struct type, meaning the value of that member in
  235. // the context of the call. For example
  236. // VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
  237. // this means the number of memory heaps available in the device associated
  238. // with the VmaAllocator being dealt with.
  239. #ifndef VMA_LEN_IF_NOT_NULL
  240. #define VMA_LEN_IF_NOT_NULL(len)
  241. #endif
  242. // The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
  243. // see: https://clang.llvm.org/docs/AttributeReference.html#nullable
  244. #ifndef VMA_NULLABLE
  245. #ifdef __clang__
  246. #define VMA_NULLABLE _Nullable
  247. #else
  248. #define VMA_NULLABLE
  249. #endif
  250. #endif
  251. // The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
  252. // see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
  253. #ifndef VMA_NOT_NULL
  254. #ifdef __clang__
  255. #define VMA_NOT_NULL _Nonnull
  256. #else
  257. #define VMA_NOT_NULL
  258. #endif
  259. #endif
  260. // If non-dispatchable handles are represented as pointers then we can give
  261. // them nullability annotations
  262. #ifndef VMA_NOT_NULL_NON_DISPATCHABLE
  263. #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
  264. #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
  265. #else
  266. #define VMA_NOT_NULL_NON_DISPATCHABLE
  267. #endif
  268. #endif
  269. #ifndef VMA_NULLABLE_NON_DISPATCHABLE
  270. #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
  271. #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
  272. #else
  273. #define VMA_NULLABLE_NON_DISPATCHABLE
  274. #endif
  275. #endif
  276. #ifndef VMA_STATS_STRING_ENABLED
  277. #define VMA_STATS_STRING_ENABLED 1
  278. #endif
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
//    INTERFACE
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE.
  287. #ifndef _VMA_ENUM_DECLARATIONS
  288. /**
  289. \addtogroup group_init
  290. @{
  291. */
  292. /// Flags for created #VmaAllocator.
  293. typedef enum VmaAllocatorCreateFlagBits
  294. {
  295. /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
  296. Using this flag may increase performance because internal mutexes are not used.
  297. */
  298. VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
  299. /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
  300. The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
  301. When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
  302. Using this extension will automatically allocate dedicated blocks of memory for
  303. some buffers and images instead of suballocating place for them out of bigger
  304. memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
  305. flag) when it is recommended by the driver. It may improve performance on some
  306. GPUs.
  307. You may set this flag only if you found out that following device extensions are
  308. supported, you enabled them while creating Vulkan device passed as
  309. VmaAllocatorCreateInfo::device, and you want them to be used internally by this
  310. library:
  311. - VK_KHR_get_memory_requirements2 (device extension)
  312. - VK_KHR_dedicated_allocation (device extension)
  313. When this flag is set, you can experience following warnings reported by Vulkan
  314. validation layer. You can ignore them.
  315. > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
  316. */
  317. VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
  318. /**
  319. Enables usage of VK_KHR_bind_memory2 extension.
  320. The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
  321. When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
  322. You may set this flag only if you found out that this device extension is supported,
  323. you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
  324. and you want it to be used internally by this library.
  325. The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
  326. which allow to pass a chain of `pNext` structures while binding.
  327. This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
  328. */
  329. VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
  330. /**
  331. Enables usage of VK_EXT_memory_budget extension.
  332. You may set this flag only if you found out that this device extension is supported,
  333. you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
  334. and you want it to be used internally by this library, along with another instance extension
  335. VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
  336. The extension provides query for current memory usage and budget, which will probably
  337. be more accurate than an estimation used by the library otherwise.
  338. */
  339. VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
  340. /**
  341. Enables usage of VK_AMD_device_coherent_memory extension.
  342. You may set this flag only if you:
  343. - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
  344. - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
  345. - want it to be used internally by this library.
  346. The extension and accompanying device feature provide access to memory types with
  347. `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
  348. They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.
  349. When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
  350. To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type,
  351. returning `VK_ERROR_FEATURE_NOT_PRESENT`.
  352. */
  353. VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
  354. /**
  355. Enables usage of "buffer device address" feature, which allows you to use function
  356. `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader.
  357. You may set this flag only if you:
  358. 1. (For Vulkan version < 1.2) Found as available and enabled device extension
  359. VK_KHR_buffer_device_address.
  360. This extension is promoted to core Vulkan 1.2.
  361. 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.
  362. When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
  363. The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
  364. allocated memory blocks wherever it might be needed.
  365. For more information, see documentation chapter \ref enabling_buffer_device_address.
  366. */
  367. VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
  368. /**
  369. Enables usage of VK_EXT_memory_priority extension in the library.
  370. You may set this flag only if you found available and enabled this device extension,
  371. along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
  372. while creating Vulkan device passed as VmaAllocatorCreateInfo::device.
  373. When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
  374. are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.
  375. A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
  376. Larger values are higher priority. The granularity of the priorities is implementation-dependent.
  377. It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
  378. The value to be used for default priority is 0.5.
  379. For more details, see the documentation of the VK_EXT_memory_priority extension.
  380. */
  381. VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,
  382. /**
  383. Enables usage of VK_KHR_maintenance4 extension in the library.
  384. You may set this flag only if you found available and enabled this device extension,
  385. while creating Vulkan device passed as VmaAllocatorCreateInfo::device.
  386. */
  387. VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT = 0x00000080,
  388. /**
  389. Enables usage of VK_KHR_maintenance5 extension in the library.
  390. You should set this flag if you found available and enabled this device extension,
  391. while creating Vulkan device passed as VmaAllocatorCreateInfo::device.
  392. */
  393. VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT = 0x00000100,
  394. VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
  395. } VmaAllocatorCreateFlagBits;
  396. /// See #VmaAllocatorCreateFlagBits.
  397. typedef VkFlags VmaAllocatorCreateFlags;
  398. /** @} */
  399. /**
  400. \addtogroup group_alloc
  401. @{
  402. */
  403. /// \brief Intended usage of the allocated memory.
  404. typedef enum VmaMemoryUsage
  405. {
  406. /** No intended memory usage specified.
  407. Use other members of VmaAllocationCreateInfo to specify your requirements.
  408. */
  409. VMA_MEMORY_USAGE_UNKNOWN = 0,
  410. /**
  411. \deprecated Obsolete, preserved for backward compatibility.
  412. Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
  413. */
  414. VMA_MEMORY_USAGE_GPU_ONLY = 1,
  415. /**
  416. \deprecated Obsolete, preserved for backward compatibility.
  417. Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
  418. */
  419. VMA_MEMORY_USAGE_CPU_ONLY = 2,
  420. /**
  421. \deprecated Obsolete, preserved for backward compatibility.
  422. Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
  423. */
  424. VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
  425. /**
  426. \deprecated Obsolete, preserved for backward compatibility.
  427. Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
  428. */
  429. VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
  430. /**
  431. \deprecated Obsolete, preserved for backward compatibility.
  432. Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
  433. */
  434. VMA_MEMORY_USAGE_CPU_COPY = 5,
  435. /**
  436. Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
  437. Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.
  438. Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
  439. Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
  440. */
  441. VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
  442. /**
  443. Selects best memory type automatically.
  444. This flag is recommended for most common use cases.
  445. When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
  446. you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
  447. in VmaAllocationCreateInfo::flags.
  448. It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
  449. vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
  450. and not with generic memory allocation functions.
  451. */
  452. VMA_MEMORY_USAGE_AUTO = 7,
  453. /**
  454. Selects best memory type automatically with preference for GPU (device) memory.
  455. When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
  456. you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
  457. in VmaAllocationCreateInfo::flags.
  458. It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
  459. vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
  460. and not with generic memory allocation functions.
  461. */
  462. VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8,
  463. /**
  464. Selects best memory type automatically with preference for CPU (host) memory.
  465. When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
  466. you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
  467. in VmaAllocationCreateInfo::flags.
  468. It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
  469. vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
  470. and not with generic memory allocation functions.
  471. */
  472. VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9,
  473. VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
  474. } VmaMemoryUsage;
  475. /// Flags to be passed as VmaAllocationCreateInfo::flags.
  476. typedef enum VmaAllocationCreateFlagBits
  477. {
  478. /** \brief Set this flag if the allocation should have its own memory block.
  479. Use it for special, big resources, like fullscreen images used as attachments.
  480. If you use this flag while creating a buffer or an image, `VkMemoryDedicatedAllocateInfo`
  481. structure is applied if possible.
  482. */
  483. VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
  484. /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
  485. If new allocation cannot be placed in any of the existing blocks, allocation
  486. fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
  487. You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
  488. #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
  489. */
  490. VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
  491. /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
  492. Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
  493. It is valid to use this flag for allocation made from memory type that is not
  494. `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
  495. useful if you need an allocation that is efficient to use on GPU
  496. (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
  497. support it (e.g. Intel GPU).
  498. */
  499. VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
  500. /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.
  501. Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
  502. null-terminated string. Instead of copying pointer value, a local copy of the
  503. string is made and stored in allocation's `pName`. The string is automatically
  504. freed together with the allocation. It is also used in vmaBuildStatsString().
  505. */
  506. VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
  507. /** Allocation will be created from upper stack in a double stack pool.
  508. This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
  509. */
  510. VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
  511. /** Create both buffer/image and allocation, but don't bind them together.
  512. It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions.
  513. The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
  514. Otherwise it is ignored.
  515. If you want to make sure the new buffer/image is not tied to the new memory allocation
  516. through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
  517. use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
  518. */
  519. VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
  520. /** Create allocation only if additional device memory required for it, if any, won't exceed
  521. memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
  522. */
  523. VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
  524. /** \brief Set this flag if the allocated memory will have aliasing resources.
  525. Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
  526. Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
  527. */
  528. VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,
  529. /**
  530. Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
  531. - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
  532. you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
  533. - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
  534. This includes allocations created in \ref custom_memory_pools.
  535. Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
  536. never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.
  537. \warning Violating this declaration may work correctly, but will likely be very slow.
  538. Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`
  539. Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
  540. */
  541. VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,
  542. /**
  543. Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
  544. - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
  545. you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
  546. - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
  547. This includes allocations created in \ref custom_memory_pools.
  548. Declares that mapped memory can be read, written, and accessed in random order,
  549. so a `HOST_CACHED` memory type is preferred.
  550. */
  551. VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800,
  552. /**
  553. Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
  554. it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected
  555. if it may improve performance.
  556. By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
  557. (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
  558. issue an explicit transfer to write/read your data.
  559. To prepare for this possibility, don't forget to add appropriate flags like
  560. `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image.
  561. */
  562. VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000,
  563. /** Allocation strategy that chooses smallest possible free range for the allocation
  564. to minimize memory usage and fragmentation, possibly at the expense of allocation time.
  565. */
  566. VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000,
  567. /** Allocation strategy that chooses first suitable free range for the allocation -
  568. not necessarily in terms of the smallest offset but the one that is easiest and fastest to find
  569. to minimize allocation time, possibly at the expense of allocation quality.
  570. */
  571. VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,
  572. /** Allocation strategy that chooses always the lowest offset in available space.
  573. This is not the most efficient strategy but achieves highly packed data.
  574. Used internally by defragmentation, not recommended in typical usage.
  575. */
  576. VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000,
  577. /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
  578. */
  579. VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
  580. /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
  581. */
  582. VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
  583. /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
  584. */
  585. VMA_ALLOCATION_CREATE_STRATEGY_MASK =
  586. VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT |
  587. VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT |
  588. VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
  589. VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
  590. } VmaAllocationCreateFlagBits;
  591. /// See #VmaAllocationCreateFlagBits.
  592. typedef VkFlags VmaAllocationCreateFlags;
  593. /// Flags to be passed as VmaPoolCreateInfo::flags.
  594. typedef enum VmaPoolCreateFlagBits
  595. {
  596. /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
  597. This is an optional optimization flag.
  598. If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
  599. vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator
  600. knows exact type of your allocations so it can handle Buffer-Image Granularity
  601. in the optimal way.
  602. If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
  603. exact type of such allocations is not known, so allocator must be conservative
  604. in handling Buffer-Image Granularity, which can lead to suboptimal allocation
  605. (wasted memory). In that case, if you can make sure you always allocate only
  606. buffers and linear images or only optimal images out of this pool, use this flag
  607. to make allocator disregard Buffer-Image Granularity and so make allocations
  608. faster and more optimal.
  609. */
  610. VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
  611. /** \brief Enables alternative, linear allocation algorithm in this pool.
  612. Specify this flag to enable linear allocation algorithm, which always creates
  613. new allocations after last one and doesn't reuse space from allocations freed in
  614. between. It trades memory consumption for simplified algorithm and data
  615. structure, which has better performance and uses less memory for metadata.
  616. By using this flag, you can achieve behavior of free-at-once, stack,
  617. ring buffer, and double stack.
  618. For details, see documentation chapter \ref linear_algorithm.
  619. */
  620. VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
  621. /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
  622. */
  623. VMA_POOL_CREATE_ALGORITHM_MASK =
  624. VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,
  625. VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
  626. } VmaPoolCreateFlagBits;
  627. /// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
  628. typedef VkFlags VmaPoolCreateFlags;
  629. /// Flags to be passed as VmaDefragmentationInfo::flags.
  630. typedef enum VmaDefragmentationFlagBits
  631. {
  632. /* \brief Use simple but fast algorithm for defragmentation.
  633. May not achieve best results but will require least time to compute and least allocations to copy.
  634. */
  635. VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
  636. /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
  637. Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
  638. */
  639. VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
  640. /* \brief Perform full defragmentation of memory.
  641. Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
  642. */
  643. VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
  644. /** \brief Use the most roboust algorithm at the cost of time to compute and number of copies to make.
  645. Only available when bufferImageGranularity is greater than 1, since it aims to reduce
  646. alignment issues between different types of resources.
  647. Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
  648. */
  649. VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,
  650. /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
  651. VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
  652. VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
  653. VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
  654. VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
  655. VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,
  656. VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
  657. } VmaDefragmentationFlagBits;
  658. /// See #VmaDefragmentationFlagBits.
  659. typedef VkFlags VmaDefragmentationFlags;
  660. /// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
  661. typedef enum VmaDefragmentationMoveOperation
  662. {
  663. /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
  664. VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
  665. /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
  666. VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,
  667. /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
  668. VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,
  669. } VmaDefragmentationMoveOperation;
  670. /** @} */
  671. /**
  672. \addtogroup group_virtual
  673. @{
  674. */
  675. /// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
  676. typedef enum VmaVirtualBlockCreateFlagBits
  677. {
  678. /** \brief Enables alternative, linear allocation algorithm in this virtual block.
  679. Specify this flag to enable linear allocation algorithm, which always creates
  680. new allocations after last one and doesn't reuse space from allocations freed in
  681. between. It trades memory consumption for simplified algorithm and data
  682. structure, which has better performance and uses less memory for metadata.
  683. By using this flag, you can achieve behavior of free-at-once, stack,
  684. ring buffer, and double stack.
  685. For details, see documentation chapter \ref linear_algorithm.
  686. */
  687. VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,
  688. /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags.
  689. */
  690. VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =
  691. VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,
  692. VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
  693. } VmaVirtualBlockCreateFlagBits;
  694. /// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
  695. typedef VkFlags VmaVirtualBlockCreateFlags;
  696. /// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
  697. typedef enum VmaVirtualAllocationCreateFlagBits
  698. {
  699. /** \brief Allocation will be created from upper stack in a double stack pool.
  700. This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
  701. */
  702. VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
  703. /** \brief Allocation strategy that tries to minimize memory usage.
  704. */
  705. VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
  706. /** \brief Allocation strategy that tries to minimize allocation time.
  707. */
  708. VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
  709. /** Allocation strategy that chooses always the lowest offset in available space.
  710. This is not the most efficient strategy but achieves highly packed data.
  711. */
  712. VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
  713. /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags.
  714. These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
  715. */
  716. VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK,
  717. VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
  718. } VmaVirtualAllocationCreateFlagBits;
  719. /// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
  720. typedef VkFlags VmaVirtualAllocationCreateFlags;
  721. /** @} */
  722. #endif // _VMA_ENUM_DECLARATIONS
  723. #ifndef _VMA_DATA_TYPES_DECLARATIONS
  724. /**
  725. \addtogroup group_init
  726. @{ */
  727. /** \struct VmaAllocator
  728. \brief Represents main object of this library initialized.
  729. Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
  730. Call function vmaDestroyAllocator() to destroy it.
  731. It is recommended to create just one object of this type per `VkDevice` object,
  732. right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed.
  733. */
  734. VK_DEFINE_HANDLE(VmaAllocator)
  735. /** @} */
  736. /**
  737. \addtogroup group_alloc
  738. @{
  739. */
  740. /** \struct VmaPool
  741. \brief Represents custom memory pool
  742. Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
  743. Call function vmaDestroyPool() to destroy it.
  744. For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
  745. */
  746. VK_DEFINE_HANDLE(VmaPool)
  747. /** \struct VmaAllocation
  748. \brief Represents single memory allocation.
  749. It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
  750. plus unique offset.
  751. There are multiple ways to create such object.
  752. You need to fill structure VmaAllocationCreateInfo.
  753. For more information see [Choosing memory type](@ref choosing_memory_type).
  754. Although the library provides convenience functions that create Vulkan buffer or image,
  755. allocate memory for it and bind them together,
  756. binding of the allocation to a buffer or an image is out of scope of the allocation itself.
  757. Allocation object can exist without buffer/image bound,
  758. binding can be done manually by the user, and destruction of it can be done
  759. independently of destruction of the allocation.
  760. The object also remembers its size and some other information.
  761. To retrieve this information, use function vmaGetAllocationInfo() and inspect
  762. returned structure VmaAllocationInfo.
  763. */
  764. VK_DEFINE_HANDLE(VmaAllocation)
  765. /** \struct VmaDefragmentationContext
  766. \brief An opaque object that represents started defragmentation process.
  767. Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
  768. Call function vmaEndDefragmentation() to destroy it.
  769. */
  770. VK_DEFINE_HANDLE(VmaDefragmentationContext)
  771. /** @} */
  772. /**
  773. \addtogroup group_virtual
  774. @{
  775. */
  776. /** \struct VmaVirtualAllocation
  777. \brief Represents single memory allocation done inside VmaVirtualBlock.
Use it as a unique identifier of a virtual allocation within a single block.
Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
  780. */
  781. VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation)
  782. /** @} */
  783. /**
  784. \addtogroup group_virtual
  785. @{
  786. */
  787. /** \struct VmaVirtualBlock
  788. \brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory.
  789. Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
  790. For more information, see documentation chapter \ref virtual_allocator.
  791. This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally.
  792. */
  793. VK_DEFINE_HANDLE(VmaVirtualBlock)
  794. /** @} */
  795. /**
  796. \addtogroup group_init
  797. @{
  798. */
/** \brief Callback function called after successful `vkAllocateMemory`.
\param allocator Allocator object that performed the allocation.
\param memoryType Index of the Vulkan memory type that `memory` was allocated from.
\param memory Handle of the newly allocated `VkDeviceMemory` block.
\param size Size of the allocated block, in bytes.
\param pUserData Custom general-purpose pointer; see VmaDeviceMemoryCallbacks::pUserData.
*/
typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)(
VmaAllocator VMA_NOT_NULL allocator,
uint32_t memoryType,
VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
VkDeviceSize size,
void* VMA_NULLABLE pUserData);
/** \brief Callback function called before `vkFreeMemory`.
\param allocator Allocator object that is about to free the memory.
\param memoryType Index of the Vulkan memory type that `memory` was allocated from.
\param memory Handle of the `VkDeviceMemory` block about to be freed.
\param size Size of the block, in bytes.
\param pUserData Custom general-purpose pointer; see VmaDeviceMemoryCallbacks::pUserData.
*/
typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)(
VmaAllocator VMA_NOT_NULL allocator,
uint32_t memoryType,
VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
VkDeviceSize size,
void* VMA_NULLABLE pUserData);
/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
Provided for informative purposes, e.g. to gather statistics about the number of
allocations or total amount of memory allocated in Vulkan.
Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
*/
typedef struct VmaDeviceMemoryCallbacks
{
/// Optional, can be null. Called after each successful `vkAllocateMemory`.
PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
/// Optional, can be null. Called before each `vkFreeMemory`.
PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
/// Optional, can be null. Custom general-purpose pointer available to the callbacks.
void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;
/** \brief Pointers to some Vulkan functions - a subset used by the library.
Used in VmaAllocatorCreateInfo::pVulkanFunctions.
*/
typedef struct VmaVulkanFunctions
{
/// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr;
/// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr;
// Core Vulkan 1.0 functions used by the library.
PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
// Functions from Vulkan 1.1 core or extensions, compiled in conditionally.
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
/// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
/// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
/// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension.
PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
/// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension.
PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
/// Fetch from "vkGetPhysicalDeviceMemoryProperties2" on Vulkan >= 1.1, but you can also fetch it from "vkGetPhysicalDeviceMemoryProperties2KHR" if you enabled extension VK_KHR_get_physical_device_properties2.
PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000
/// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
PFN_vkGetDeviceBufferMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
/// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
PFN_vkGetDeviceImageMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
#endif
} VmaVulkanFunctions;
/// Description of an #VmaAllocator to be created.
typedef struct VmaAllocatorCreateInfo
{
/// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
VmaAllocatorCreateFlags flags;
/// Vulkan physical device.
/** It must be valid throughout whole lifetime of created allocator. */
VkPhysicalDevice VMA_NOT_NULL physicalDevice;
/// Vulkan device.
/** It must be valid throughout whole lifetime of created allocator. */
VkDevice VMA_NOT_NULL device;
/// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
/** Set to 0 to use default, which is currently 256 MiB. */
VkDeviceSize preferredLargeHeapBlockSize;
/// Custom CPU memory allocation callbacks. Optional.
/** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
/// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
/** Optional, can be null. */
const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
/** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
If not NULL, it must be a pointer to an array of
`VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
maximum number of bytes that can be allocated out of particular Vulkan memory
heap.
Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
heap. This is also the default in case of `pHeapSizeLimit` = NULL.
If there is a limit defined for a heap:
- If user tries to allocate more memory from that heap using this allocator,
the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
- If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
value of this limit will be reported instead when using vmaGetMemoryProperties().
Warning! Using this feature may not be equivalent to installing a GPU with
smaller amount of memory, because graphics driver doesn't necessarily fail new
allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
exceeded. It may return success and just silently migrate some device memory
blocks to system RAM. This driver behavior can also be controlled using
VK_AMD_memory_overallocation_behavior extension.
*/
const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
/** \brief Pointers to Vulkan functions. Can be null.
For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
*/
const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
/** \brief Handle to Vulkan instance object.
Starting from version 3.0.0 this member is no longer optional, it must be set!
*/
VkInstance VMA_NOT_NULL instance;
/** \brief Optional. Vulkan version that the application uses.
It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
The patch version number specified is ignored. Only the major and minor versions are considered.
Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation.
Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
It must match the Vulkan version used by the application and supported on the selected physical device,
so it must be no higher than `VkApplicationInfo::apiVersion` passed to `vkCreateInstance`
and no higher than `VkPhysicalDeviceProperties::apiVersion` found on the physical device used.
*/
uint32_t vulkanApiVersion;
#if VMA_EXTERNAL_MEMORY
/** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
elements, defining external memory handle types of particular Vulkan memory type,
to be passed using `VkExportMemoryAllocateInfoKHR`.
Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
*/
const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
#endif // #if VMA_EXTERNAL_MEMORY
} VmaAllocatorCreateInfo;
/// Information about an existing #VmaAllocator object.
typedef struct VmaAllocatorInfo
{
/** \brief Handle to Vulkan instance object.
This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
*/
VkInstance VMA_NOT_NULL instance;
/** \brief Handle to Vulkan physical device object.
This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
*/
VkPhysicalDevice VMA_NOT_NULL physicalDevice;
/** \brief Handle to Vulkan device object.
This is the same value as has been passed through VmaAllocatorCreateInfo::device.
*/
VkDevice VMA_NOT_NULL device;
} VmaAllocatorInfo;
  961. /** @} */
  962. /**
  963. \addtogroup group_stats
  964. @{
  965. */
/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total.
These are fast to calculate.
See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
*/
typedef struct VmaStatistics
{
/** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
*/
uint32_t blockCount;
/** \brief Number of #VmaAllocation objects allocated.
Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
*/
uint32_t allocationCount;
/** \brief Number of bytes allocated in `VkDeviceMemory` blocks.
\note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
(e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
"allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
*/
VkDeviceSize blockBytes;
/** \brief Total number of bytes occupied by all #VmaAllocation objects.
Always less than or equal to `blockBytes`.
Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
but unused by any #VmaAllocation.
*/
VkDeviceSize allocationBytes;
} VmaStatistics;
/** \brief More detailed statistics than #VmaStatistics.
These are slower to calculate. Use for debugging purposes.
See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
Previous version of the statistics API provided averages, but they have been removed
because they can be easily calculated as:
\code
VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
\endcode
\note The divisions in the example above are valid only when the respective counts are nonzero.
*/
typedef struct VmaDetailedStatistics
{
/// Basic statistics.
VmaStatistics statistics;
/// Number of free ranges of memory between allocations.
uint32_t unusedRangeCount;
/// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
VkDeviceSize allocationSizeMin;
/// Largest allocation size. 0 if there are 0 allocations.
VkDeviceSize allocationSizeMax;
/// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
VkDeviceSize unusedRangeSizeMin;
/// Largest empty range size. 0 if there are 0 empty ranges.
VkDeviceSize unusedRangeSizeMax;
} VmaDetailedStatistics;
/** \brief General statistics from current state of the Allocator -
total memory usage across all memory heaps and types.
These are slower to calculate. Use for debugging purposes.
See function vmaCalculateStatistics().
*/
typedef struct VmaTotalStatistics
{
/// Detailed statistics per memory type, indexed by Vulkan memory type index.
VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES];
/// Detailed statistics per memory heap, indexed by Vulkan memory heap index.
VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS];
/// Detailed statistics aggregated over all memory heaps and types.
VmaDetailedStatistics total;
} VmaTotalStatistics;
/** \brief Statistics of current memory usage and available budget for a specific memory heap.
These are fast to calculate.
See function vmaGetHeapBudgets().
*/
typedef struct VmaBudget
{
/** \brief Statistics fetched from the library.
*/
VmaStatistics statistics;
/** \brief Estimated current memory usage of the program, in bytes.
Fetched from system using VK_EXT_memory_budget extension if enabled.
It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects
also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
`VkDeviceMemory` blocks allocated outside of this library, if any.
*/
VkDeviceSize usage;
/** \brief Estimated amount of memory available to the program, in bytes.
Fetched from system using VK_EXT_memory_budget extension if enabled.
It might be different (most probably smaller) than the size of the respective heap reported in `VkMemoryHeap::size` due to factors
external to the program, decided by the operating system.
Difference `budget - usage` is the amount of additional memory that can probably
be allocated without problems. Exceeding the budget may result in various problems.
*/
VkDeviceSize budget;
} VmaBudget;
  1054. /** @} */
  1055. /**
  1056. \addtogroup group_alloc
  1057. @{
  1058. */
/** \brief Parameters of new #VmaAllocation.
To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
*/
typedef struct VmaAllocationCreateInfo
{
/// Use #VmaAllocationCreateFlagBits enum.
VmaAllocationCreateFlags flags;
/** \brief Intended usage of memory.
You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
If `pool` is not null, this member is ignored.
*/
VmaMemoryUsage usage;
/** \brief Flags that must be set in a Memory Type chosen for an allocation.
Leave 0 if you specify memory requirements in another way. \n
If `pool` is not null, this member is ignored.*/
VkMemoryPropertyFlags requiredFlags;
/** \brief Flags that preferably should be set in a memory type chosen for an allocation.
Set to 0 if no additional flags are preferred. \n
If `pool` is not null, this member is ignored. */
VkMemoryPropertyFlags preferredFlags;
/** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
it meets other requirements specified by this structure, with no further
restrictions on memory type index. \n
If `pool` is not null, this member is ignored.
*/
uint32_t memoryTypeBits;
/** \brief Pool that this allocation should be created in.
Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
`usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
*/
VmaPool VMA_NULLABLE pool;
/** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
null or pointer to a null-terminated string. The string will be then copied to
internal buffer, so it doesn't need to be valid after allocation call.
*/
void* VMA_NULLABLE pUserData;
/** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
*/
float priority;
} VmaAllocationCreateInfo;
/// Describes parameters of a #VmaPool to be created.
typedef struct VmaPoolCreateInfo
{
/** \brief Vulkan memory type index to allocate this pool from.
*/
uint32_t memoryTypeIndex;
/** \brief Use combination of #VmaPoolCreateFlagBits.
*/
VmaPoolCreateFlags flags;
/** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
Specify nonzero to set explicit, constant size of memory blocks used by this
pool.
Leave 0 to use default and let the library manage block sizes automatically.
Sizes of particular blocks may vary.
In this case, the pool will also support dedicated allocations.
*/
VkDeviceSize blockSize;
/** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
*/
size_t minBlockCount;
/** \brief Maximum number of blocks that can be allocated in this pool. Optional.
Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated
throughout whole lifetime of this pool.
*/
size_t maxBlockCount;
/** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
Otherwise, this variable is ignored.
*/
float priority;
/** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough,
e.g. when doing interop with OpenGL.
*/
VkDeviceSize minAllocationAlignment;
/** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool.
Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
can be attached automatically by this library when using other, more convenient features of it.
*/
void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext;
} VmaPoolCreateInfo;
  1151. /** @} */
  1152. /**
  1153. \addtogroup group_alloc
  1154. @{
  1155. */
/**
Parameters of #VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
There is also an extended version of this structure that carries additional parameters: #VmaAllocationInfo2.
*/
typedef struct VmaAllocationInfo
{
/** \brief Memory type index that this allocation was allocated from.
It never changes.
*/
uint32_t memoryType;
/** \brief Handle to Vulkan memory object.
Same memory object can be shared by multiple allocations.
It can change after the allocation is moved during \ref defragmentation.
*/
VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
/** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function
vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,
not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation
and apply this offset automatically.
It can change after the allocation is moved during \ref defragmentation.
*/
VkDeviceSize offset;
/** \brief Size of this allocation, in bytes.
It never changes.
\note Allocation size returned in this variable may be greater than the size
requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the
allocation is accessible for operations on memory e.g. using a pointer after
mapping with vmaMapMemory(), but operations on the resource e.g. using
`vkCmdCopyBuffer` must be limited to the size of the resource.
*/
VkDeviceSize size;
/** \brief Pointer to the beginning of this allocation as mapped data.
If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
It can change after call to vmaMapMemory(), vmaUnmapMemory().
It can also change after the allocation is moved during \ref defragmentation.
*/
void* VMA_NULLABLE pMappedData;
/** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
It can change after call to vmaSetAllocationUserData() for this allocation.
*/
void* VMA_NULLABLE pUserData;
/** \brief Custom allocation name that was set with vmaSetAllocationName().
It can change after call to vmaSetAllocationName() for this allocation.
Another way to set a custom name is to pass it in VmaAllocationCreateInfo::pUserData with
additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
*/
const char* VMA_NULLABLE pName;
} VmaAllocationInfo;
/// Extended parameters of a #VmaAllocation object that can be retrieved using function vmaGetAllocationInfo2().
typedef struct VmaAllocationInfo2
{
/** \brief Basic parameters of the allocation.
If you need only these, you can use function vmaGetAllocationInfo() and structure #VmaAllocationInfo instead.
*/
VmaAllocationInfo allocationInfo;
/** \brief Size of the `VkDeviceMemory` block that the allocation belongs to.
In case of an allocation with dedicated memory, it will be equal to `allocationInfo.size`.
*/
VkDeviceSize blockSize;
/** \brief `VK_TRUE` if the allocation has dedicated memory, `VK_FALSE` if it was placed as part of a larger memory block.
When `VK_TRUE`, it also means `VkMemoryDedicatedAllocateInfo` was used when creating the allocation
(if VK_KHR_dedicated_allocation extension or Vulkan version >= 1.1 is enabled).
*/
VkBool32 dedicatedMemory;
} VmaAllocationInfo2;
/** Callback function called during vmaBeginDefragmentation() to check custom criterion about ending current defragmentation pass.
Should return true if the defragmentation needs to stop the current pass.
\param pUserData Value of VmaDefragmentationInfo::pBreakCallbackUserData.
*/
typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData);
/** \brief Parameters for defragmentation.
To be used with function vmaBeginDefragmentation().
*/
typedef struct VmaDefragmentationInfo
{
/// \brief Use combination of #VmaDefragmentationFlagBits.
VmaDefragmentationFlags flags;
/** \brief Custom pool to be defragmented.
If null then default pools will undergo defragmentation process.
*/
VmaPool VMA_NULLABLE pool;
/** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places.
`0` means no limit.
*/
VkDeviceSize maxBytesPerPass;
/** \brief Maximum number of allocations that can be moved during single pass to a different place.
`0` means no limit.
*/
uint32_t maxAllocationsPerPass;
/** \brief Optional custom callback for stopping vmaBeginDefragmentation().
It has to return true to break the current defragmentation pass.
*/
PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback;
/// \brief Optional data to pass to custom callback for stopping pass of defragmentation.
void* VMA_NULLABLE pBreakCallbackUserData;
} VmaDefragmentationInfo;
/// Single move of an allocation to be done for defragmentation.
typedef struct VmaDefragmentationMove
{
/// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
VmaDefragmentationMoveOperation operation;
/// Allocation that should be moved. After vmaEndDefragmentationPass() it points to the destination memory.
VmaAllocation VMA_NOT_NULL srcAllocation;
/** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
\warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
*/
VmaAllocation VMA_NOT_NULL dstTmpAllocation;
} VmaDefragmentationMove;
/** \brief Parameters for incremental defragmentation steps.
To be used with function vmaBeginDefragmentationPass().
*/
typedef struct VmaDefragmentationPassMoveInfo
{
/// Number of elements in the `pMoves` array.
uint32_t moveCount;
/** \brief Array of moves to be performed by the user in the current defragmentation pass.
Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
For each element, you should:
1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstTmpAllocation, binding it to the destination memory using e.g. vmaBindBufferMemory().
2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
3. Make sure these commands finished executing on the GPU.
4. Destroy the old buffer/image.
Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass().
After this call, the allocation will point to the new place in memory.
Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
Alternatively, if you decide you want to completely remove the allocation:
1. Destroy its buffer/image.
2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
Then, after vmaEndDefragmentationPass() the allocation will be freed.
*/
VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
} VmaDefragmentationPassMoveInfo;
/// Statistics returned for a defragmentation process by function vmaEndDefragmentation().
typedef struct VmaDefragmentationStats
{
/// Total number of bytes that have been copied while moving allocations to different places.
VkDeviceSize bytesMoved;
/// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
VkDeviceSize bytesFreed;
/// Number of allocations that have been moved to different places.
uint32_t allocationsMoved;
/// Number of empty `VkDeviceMemory` objects that have been released to the system.
uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;
  1303. /** @} */
  1304. /**
  1305. \addtogroup group_virtual
  1306. @{
  1307. */
/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
typedef struct VmaVirtualBlockCreateInfo
{
/** \brief Total size of the virtual block.
Sizes can be expressed in bytes or any units you want as long as you are consistent in using them.
For example, if you allocate from some array of structures, 1 can mean single instance of entire structure.
*/
VkDeviceSize size;
/** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
*/
VmaVirtualBlockCreateFlags flags;
/** \brief Custom CPU memory allocation callbacks. Optional.
Can be null. When specified, they will be used for all CPU-side memory allocations.
*/
const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
} VmaVirtualBlockCreateInfo;
/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
typedef struct VmaVirtualAllocationCreateInfo
{
/** \brief Size of the allocation.
Cannot be zero.
*/
VkDeviceSize size;
/** \brief Required alignment of the allocation. Optional.
Must be power of two. Special value 0 has the same meaning as 1 - it means no special alignment is required, so the allocation can start at any offset.
*/
VkDeviceSize alignment;
/** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
*/
VmaVirtualAllocationCreateFlags flags;
/** \brief Custom pointer to be associated with the allocation. Optional.
It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
*/
void* VMA_NULLABLE pUserData;
} VmaVirtualAllocationCreateInfo;
/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
typedef struct VmaVirtualAllocationInfo
{
/** \brief Offset of the allocation.
Offset at which the allocation was made.
*/
VkDeviceSize offset;
/** \brief Size of the allocation.
Same value as passed in VmaVirtualAllocationCreateInfo::size.
*/
VkDeviceSize size;
/** \brief Custom pointer associated with the allocation.
Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
*/
void* VMA_NULLABLE pUserData;
} VmaVirtualAllocationInfo;
  1359. /** @} */
  1360. #endif // _VMA_DATA_TYPES_DECLARATIONS
  1361. #ifndef _VMA_FUNCTION_HEADERS
  1362. /**
  1363. \addtogroup group_init
  1364. @{
  1365. */
  1366. /// Creates #VmaAllocator object.
  1367. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
  1368. const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
  1369. VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator);
  1370. /// Destroys allocator object.
  1371. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
  1372. VmaAllocator VMA_NULLABLE allocator);
  1373. /** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
  1374. It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to
  1375. `VkPhysicalDevice`, `VkDevice` etc. every time using this function.
  1376. */
  1377. VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(
  1378. VmaAllocator VMA_NOT_NULL allocator,
  1379. VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
  1380. /**
  1381. PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
  1382. You can access it here, without fetching it again on your own.
  1383. */
  1384. VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
  1385. VmaAllocator VMA_NOT_NULL allocator,
  1386. const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
  1387. /**
  1388. PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
  1389. You can access it here, without fetching it again on your own.
  1390. */
  1391. VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
  1392. VmaAllocator VMA_NOT_NULL allocator,
  1393. const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
  1394. /**
  1395. \brief Given Memory Type Index, returns Property Flags of this memory type.
  1396. This is just a convenience function. Same information can be obtained using
  1397. vmaGetMemoryProperties().
  1398. */
  1399. VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
  1400. VmaAllocator VMA_NOT_NULL allocator,
  1401. uint32_t memoryTypeIndex,
  1402. VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
  1403. /** \brief Sets index of the current frame.
  1404. */
  1405. VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
  1406. VmaAllocator VMA_NOT_NULL allocator,
  1407. uint32_t frameIndex);
  1408. /** @} */
  1409. /**
  1410. \addtogroup group_stats
  1411. @{
  1412. */
  1413. /** \brief Retrieves statistics from current state of the Allocator.
  1414. This function is called "calculate" not "get" because it has to traverse all
  1415. internal data structures, so it may be quite slow. Use it for debugging purposes.
  1416. For faster but more brief statistics suitable to be called every frame or every allocation,
  1417. use vmaGetHeapBudgets().
  1418. Note that when using allocator from multiple threads, returned information may immediately
  1419. become outdated.
  1420. */
  1421. VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
  1422. VmaAllocator VMA_NOT_NULL allocator,
  1423. VmaTotalStatistics* VMA_NOT_NULL pStats);
  1424. /** \brief Retrieves lazily allocated bytes
  1425. This function is called "calculate" not "get" because it has to traverse all
  1426. internal data structures, so it may be quite slow. Use it for debugging purposes.
  1427. For faster but more brief statistics suitable to be called every frame or every allocation,
  1428. use vmaGetHeapBudgets().
  1429. Note that when using allocator from multiple threads, returned information may immediately
  1430. become outdated.
  1431. */
  1432. VMA_CALL_PRE uint64_t VMA_CALL_POST vmaCalculateLazilyAllocatedBytes(
  1433. VmaAllocator VMA_NOT_NULL allocator);
  1434. /** \brief Retrieves information about current memory usage and budget for all memory heaps.
  1435. \param allocator
  1436. \param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used.
  1437. This function is called "get" not "calculate" because it is very fast, suitable to be called
  1438. every frame or every allocation. For more detailed statistics use vmaCalculateStatistics().
  1439. Note that when using allocator from multiple threads, returned information may immediately
  1440. become outdated.
  1441. */
  1442. VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
  1443. VmaAllocator VMA_NOT_NULL allocator,
  1444. VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
  1445. /** @} */
  1446. /**
  1447. \addtogroup group_alloc
  1448. @{
  1449. */
  1450. /**
  1451. \brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
  1452. This algorithm tries to find a memory type that:
  1453. - Is allowed by memoryTypeBits.
  1454. - Contains all the flags from pAllocationCreateInfo->requiredFlags.
  1455. - Matches intended usage.
  1456. - Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
  1457. \return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
  1458. from this function or any other allocating function probably means that your
  1459. device doesn't support any memory type with requested features for the specific
  1460. type of resource you want to use it for. Please check parameters of your
  1461. resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
  1462. */
  1463. VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
  1464. VmaAllocator VMA_NOT_NULL allocator,
  1465. uint32_t memoryTypeBits,
  1466. const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
  1467. uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
  1468. /**
  1469. \brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
  1470. It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
  1471. It internally creates a temporary, dummy buffer that never has memory bound.
  1472. */
  1473. VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
  1474. VmaAllocator VMA_NOT_NULL allocator,
  1475. const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
  1476. const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
  1477. uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
  1478. /**
  1479. \brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
  1480. It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
  1481. It internally creates a temporary, dummy image that never has memory bound.
  1482. */
  1483. VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
  1484. VmaAllocator VMA_NOT_NULL allocator,
  1485. const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
  1486. const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
  1487. uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
  1488. /** \brief Allocates Vulkan device memory and creates #VmaPool object.
  1489. \param allocator Allocator object.
  1490. \param pCreateInfo Parameters of pool to create.
  1491. \param[out] pPool Handle to created pool.
  1492. */
  1493. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
  1494. VmaAllocator VMA_NOT_NULL allocator,
  1495. const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
  1496. VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool);
  1497. /** \brief Destroys #VmaPool object and frees Vulkan device memory.
  1498. */
  1499. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
  1500. VmaAllocator VMA_NOT_NULL allocator,
  1501. VmaPool VMA_NULLABLE pool);
  1502. /** @} */
  1503. /**
  1504. \addtogroup group_stats
  1505. @{
  1506. */
  1507. /** \brief Retrieves statistics of existing #VmaPool object.
  1508. \param allocator Allocator object.
  1509. \param pool Pool object.
  1510. \param[out] pPoolStats Statistics of specified pool.
  1511. */
  1512. VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
  1513. VmaAllocator VMA_NOT_NULL allocator,
  1514. VmaPool VMA_NOT_NULL pool,
  1515. VmaStatistics* VMA_NOT_NULL pPoolStats);
  1516. /** \brief Retrieves detailed statistics of existing #VmaPool object.
  1517. \param allocator Allocator object.
  1518. \param pool Pool object.
  1519. \param[out] pPoolStats Statistics of specified pool.
  1520. */
  1521. VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
  1522. VmaAllocator VMA_NOT_NULL allocator,
  1523. VmaPool VMA_NOT_NULL pool,
  1524. VmaDetailedStatistics* VMA_NOT_NULL pPoolStats);
  1525. /** @} */
  1526. /**
  1527. \addtogroup group_alloc
  1528. @{
  1529. */
  1530. /** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
  1531. Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
  1532. `VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
  1533. `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
  1534. Possible return values:
  1535. - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
  1536. - `VK_SUCCESS` - corruption detection has been performed and succeeded.
  1537. - `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
  1538. `VMA_ASSERT` is also fired in that case.
  1539. - Other value: Error returned by Vulkan, e.g. memory mapping failure.
  1540. */
  1541. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(
  1542. VmaAllocator VMA_NOT_NULL allocator,
  1543. VmaPool VMA_NOT_NULL pool);
  1544. /** \brief Retrieves name of a custom pool.
  1545. After the call `ppName` is either null or points to an internally-owned null-terminated string
  1546. containing name of the pool that was previously set. The pointer becomes invalid when the pool is
  1547. destroyed or its name is changed using vmaSetPoolName().
  1548. */
  1549. VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
  1550. VmaAllocator VMA_NOT_NULL allocator,
  1551. VmaPool VMA_NOT_NULL pool,
  1552. const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
  1553. /** \brief Sets name of a custom pool.
  1554. `pName` can be either null or pointer to a null-terminated string with new name for the pool.
  1555. Function makes internal copy of the string, so it can be changed or freed immediately after this call.
  1556. */
  1557. VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
  1558. VmaAllocator VMA_NOT_NULL allocator,
  1559. VmaPool VMA_NOT_NULL pool,
  1560. const char* VMA_NULLABLE pName);
  1561. /** \brief General purpose memory allocation.
  1562. \param allocator
  1563. \param pVkMemoryRequirements
  1564. \param pCreateInfo
  1565. \param[out] pAllocation Handle to allocated memory.
  1566. \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
  1567. You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
  1568. It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
  1569. vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
  1570. */
  1571. VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
  1572. VmaAllocator VMA_NOT_NULL allocator,
  1573. const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
  1574. const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
  1575. VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
  1576. VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
  1577. /** \brief General purpose memory allocation for multiple allocation objects at once.
  1578. \param allocator Allocator object.
  1579. \param pVkMemoryRequirements Memory requirements for each allocation.
  1580. \param pCreateInfo Creation parameters for each allocation.
  1581. \param allocationCount Number of allocations to make.
  1582. \param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
  1583. \param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
  1584. You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
  1585. Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
  1586. It is just a general purpose allocation function able to make multiple allocations at once.
  1587. It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
  1588. All allocations are made using same parameters. All of them are created out of the same memory pool and type.
  1589. If any allocation fails, all allocations already made within this function call are also freed, so that when
  1590. returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`.
  1591. */
  1592. VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
  1593. VmaAllocator VMA_NOT_NULL allocator,
  1594. const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
  1595. const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
  1596. size_t allocationCount,
  1597. VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
  1598. VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
  1599. /** \brief Allocates memory suitable for given `VkBuffer`.
  1600. \param allocator
  1601. \param buffer
  1602. \param pCreateInfo
  1603. \param[out] pAllocation Handle to allocated memory.
  1604. \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
  1605. It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
  1606. This is a special-purpose function. In most cases you should use vmaCreateBuffer().
  1607. You must free the allocation using vmaFreeMemory() when no longer needed.
  1608. */
  1609. VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
  1610. VmaAllocator VMA_NOT_NULL allocator,
  1611. VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
  1612. const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
  1613. VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
  1614. VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
  1615. /** \brief Allocates memory suitable for given `VkImage`.
  1616. \param allocator
  1617. \param image
  1618. \param pCreateInfo
  1619. \param[out] pAllocation Handle to allocated memory.
  1620. \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
  1622. This is a special-purpose function. In most cases you should use vmaCreateImage().
  1623. You must free the allocation using vmaFreeMemory() when no longer needed.
  1624. */
  1625. VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
  1626. VmaAllocator VMA_NOT_NULL allocator,
  1627. VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
  1628. const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
  1629. VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
  1630. VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
  1631. /** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
  1632. Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
  1633. */
  1634. VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
  1635. VmaAllocator VMA_NOT_NULL allocator,
  1636. const VmaAllocation VMA_NULLABLE allocation);
  1637. /** \brief Frees memory and destroys multiple allocations.
  1638. Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
  1639. It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
  1640. vmaAllocateMemoryPages() and other functions.
  1641. It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
  1642. Allocations in `pAllocations` array can come from any memory pools and types.
  1643. Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
  1644. */
  1645. VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
  1646. VmaAllocator VMA_NOT_NULL allocator,
  1647. size_t allocationCount,
  1648. const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
  1649. /** \brief Returns current information about specified allocation.
  1650. Current parameters of given allocation are returned in `pAllocationInfo`.
  1651. Although this function doesn't lock any mutex, so it should be quite efficient,
  1652. you should avoid calling it too often.
  1653. You can retrieve same VmaAllocationInfo structure while creating your resource, from function
  1654. vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
  1655. (e.g. due to defragmentation).
  1656. There is also a new function vmaGetAllocationInfo2() that offers extended information
  1657. about the allocation, returned using new structure #VmaAllocationInfo2.
  1658. */
  1659. VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
  1660. VmaAllocator VMA_NOT_NULL allocator,
  1661. VmaAllocation VMA_NOT_NULL allocation,
  1662. VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
  1663. /** \brief Returns extended information about specified allocation.
  1664. Current parameters of given allocation are returned in `pAllocationInfo`.
  1665. Extended parameters in structure #VmaAllocationInfo2 include memory block size
  1666. and a flag telling whether the allocation has dedicated memory.
  1667. It can be useful e.g. for interop with OpenGL.
  1668. */
  1669. VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2(
  1670. VmaAllocator VMA_NOT_NULL allocator,
  1671. VmaAllocation VMA_NOT_NULL allocation,
  1672. VmaAllocationInfo2* VMA_NOT_NULL pAllocationInfo);
  1673. /** \brief Sets pUserData in given allocation to new value.
  1674. The value of pointer `pUserData` is copied to allocation's `pUserData`.
  1675. It is opaque, so you can use it however you want - e.g.
as a pointer, ordinal number or some handle to your own data.
  1677. */
  1678. VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
  1679. VmaAllocator VMA_NOT_NULL allocator,
  1680. VmaAllocation VMA_NOT_NULL allocation,
  1681. void* VMA_NULLABLE pUserData);
  1682. /** \brief Sets pName in given allocation to new value.
  1683. `pName` must be either null, or pointer to a null-terminated string. The function
  1684. makes local copy of the string and sets it as allocation's `pName`. String
  1685. passed as pName doesn't need to be valid for whole lifetime of the allocation -
  1686. you can free it after this call. String previously pointed by allocation's
  1687. `pName` is freed from memory.
  1688. */
  1689. VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
  1690. VmaAllocator VMA_NOT_NULL allocator,
  1691. VmaAllocation VMA_NOT_NULL allocation,
  1692. const char* VMA_NULLABLE pName);
  1693. /**
  1694. \brief Given an allocation, returns Property Flags of its memory type.
  1695. This is just a convenience function. Same information can be obtained using
  1696. vmaGetAllocationInfo() + vmaGetMemoryProperties().
  1697. */
  1698. VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
  1699. VmaAllocator VMA_NOT_NULL allocator,
  1700. VmaAllocation VMA_NOT_NULL allocation,
  1701. VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
  1702. /** \brief Maps memory represented by given allocation and returns pointer to it.
  1703. Maps memory represented by given allocation to make it accessible to CPU code.
  1704. When succeeded, `*ppData` contains pointer to first byte of this memory.
  1705. \warning
  1706. If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is
  1707. correctly offsetted to the beginning of region assigned to this particular allocation.
  1708. Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
  1709. You should not add VmaAllocationInfo::offset to it!
  1710. Mapping is internally reference-counted and synchronized, so despite raw Vulkan
  1711. function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory`
  1712. multiple times simultaneously, it is safe to call this function on allocations
  1713. assigned to the same memory block. Actual Vulkan memory will be mapped on first
  1714. mapping and unmapped on last unmapping.
  1715. If the function succeeded, you must call vmaUnmapMemory() to unmap the
  1716. allocation when mapping is no longer needed or before freeing the allocation, at
  1717. the latest.
It is also safe to call this function multiple times on the same allocation. You
  1719. must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
  1720. It is also safe to call this function on allocation created with
  1721. #VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
  1722. You must still call vmaUnmapMemory() same number of times as you called
  1723. vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
  1724. "0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
  1725. This function fails when used on allocation made in memory type that is not
  1726. `HOST_VISIBLE`.
  1727. This function doesn't automatically flush or invalidate caches.
If the allocation is made from a memory type that is not `HOST_COHERENT`,
  1729. you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
  1730. */
  1731. VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
  1732. VmaAllocator VMA_NOT_NULL allocator,
  1733. VmaAllocation VMA_NOT_NULL allocation,
  1734. void* VMA_NULLABLE* VMA_NOT_NULL ppData);
  1735. /** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
  1736. For details, see description of vmaMapMemory().
  1737. This function doesn't automatically flush or invalidate caches.
If the allocation is made from a memory type that is not `HOST_COHERENT`,
  1739. you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
  1740. */
  1741. VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
  1742. VmaAllocator VMA_NOT_NULL allocator,
  1743. VmaAllocation VMA_NOT_NULL allocation);
  1744. /** \brief Flushes memory of given allocation.
  1745. Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
  1746. It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
  1747. Unmap operation doesn't do that automatically.
  1748. - `offset` must be relative to the beginning of allocation.
- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
  1750. - `offset` and `size` don't have to be aligned.
They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
  1752. - If `size` is 0, this call is ignored.
  1753. - If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
  1754. this call is ignored.
  1755. Warning! `offset` and `size` are relative to the contents of given `allocation`.
  1756. If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
  1757. Do not pass allocation's offset as `offset`!!!
  1758. This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
  1759. called, otherwise `VK_SUCCESS`.
  1760. */
  1761. VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
  1762. VmaAllocator VMA_NOT_NULL allocator,
  1763. VmaAllocation VMA_NOT_NULL allocation,
  1764. VkDeviceSize offset,
  1765. VkDeviceSize size);
  1766. /** \brief Invalidates memory of given allocation.
  1767. Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
  1768. It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
  1769. Map operation doesn't do that automatically.
  1770. - `offset` must be relative to the beginning of allocation.
- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
  1772. - `offset` and `size` don't have to be aligned.
They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
  1774. - If `size` is 0, this call is ignored.
  1775. - If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
  1776. this call is ignored.
  1777. Warning! `offset` and `size` are relative to the contents of given `allocation`.
  1778. If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
  1779. Do not pass allocation's offset as `offset`!!!
  1780. This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
  1781. it is called, otherwise `VK_SUCCESS`.
  1782. */
  1783. VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
  1784. VmaAllocator VMA_NOT_NULL allocator,
  1785. VmaAllocation VMA_NOT_NULL allocation,
  1786. VkDeviceSize offset,
  1787. VkDeviceSize size);
  1788. /** \brief Flushes memory of given set of allocations.
  1789. Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
  1790. For more information, see documentation of vmaFlushAllocation().
  1791. \param allocator
  1792. \param allocationCount
  1793. \param allocations
  1794. \param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
  1795. \param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
  1796. This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
  1797. called, otherwise `VK_SUCCESS`.
  1798. */
  1799. VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
  1800. VmaAllocator VMA_NOT_NULL allocator,
  1801. uint32_t allocationCount,
  1802. const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
  1803. const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
  1804. const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
  1805. /** \brief Invalidates memory of given set of allocations.
  1806. Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
  1807. For more information, see documentation of vmaInvalidateAllocation().
  1808. \param allocator
  1809. \param allocationCount
  1810. \param allocations
  1811. \param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
  1812. \param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
  1813. This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
  1814. called, otherwise `VK_SUCCESS`.
  1815. */
  1816. VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
  1817. VmaAllocator VMA_NOT_NULL allocator,
  1818. uint32_t allocationCount,
  1819. const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
  1820. const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
  1821. const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
  1822. /** \brief Maps the allocation temporarily if needed, copies data from specified host pointer to it, and flushes the memory from the host caches if needed.
  1823. \param allocator
\param pSrcHostPointer Pointer to the host data that becomes the source of the copy.
\param dstAllocation Handle to the allocation that becomes the destination of the copy.
  1826. \param dstAllocationLocalOffset Offset within `dstAllocation` where to write copied data, in bytes.
  1827. \param size Number of bytes to copy.
  1828. This is a convenience function that allows to copy data from a host pointer to an allocation easily.
  1829. Same behavior can be achieved by calling vmaMapMemory(), `memcpy()`, vmaUnmapMemory(), vmaFlushAllocation().
  1830. This function can be called only for allocations created in a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
  1831. It can be ensured e.g. by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
  1832. #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
  1833. Otherwise, the function will fail and generate a Validation Layers error.
  1834. `dstAllocationLocalOffset` is relative to the contents of given `dstAllocation`.
  1835. If you mean whole allocation, you should pass 0.
Do not pass allocation's offset within device memory block as this parameter!
  1837. */
  1838. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation(
  1839. VmaAllocator VMA_NOT_NULL allocator,
  1840. const void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pSrcHostPointer,
  1841. VmaAllocation VMA_NOT_NULL dstAllocation,
  1842. VkDeviceSize dstAllocationLocalOffset,
  1843. VkDeviceSize size);
  1844. /** \brief Invalidates memory in the host caches if needed, maps the allocation temporarily if needed, and copies data from it to a specified host pointer.
  1845. \param allocator
  1846. \param srcAllocation Handle to the allocation that becomes source of the copy.
  1847. \param srcAllocationLocalOffset Offset within `srcAllocation` where to read copied data, in bytes.
\param pDstHostPointer Pointer to the host memory that becomes the destination of the copy.
  1849. \param size Number of bytes to copy.
  1850. This is a convenience function that allows to copy data from an allocation to a host pointer easily.
  1851. Same behavior can be achieved by calling vmaInvalidateAllocation(), vmaMapMemory(), `memcpy()`, vmaUnmapMemory().
  1852. This function should be called only for allocations created in a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
  1853. and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT` flag.
  1854. It can be ensured e.g. by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
  1855. Otherwise, the function may fail and generate a Validation Layers error.
  1856. It may also work very slowly when reading from an uncached memory.
  1857. `srcAllocationLocalOffset` is relative to the contents of given `srcAllocation`.
  1858. If you mean whole allocation, you should pass 0.
  1859. Do not pass allocation's offset within device memory block as this parameter!
  1860. */
  1861. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory(
  1862. VmaAllocator VMA_NOT_NULL allocator,
  1863. VmaAllocation VMA_NOT_NULL srcAllocation,
  1864. VkDeviceSize srcAllocationLocalOffset,
  1865. void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pDstHostPointer,
  1866. VkDeviceSize size);
  1867. /** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
  1868. \param allocator
  1869. \param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
  1870. Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
  1871. `VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
  1872. `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
  1873. Possible return values:
  1874. - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
  1875. - `VK_SUCCESS` - corruption detection has been performed and succeeded.
  1876. - `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
  1877. `VMA_ASSERT` is also fired in that case.
  1878. - Other value: Error returned by Vulkan, e.g. memory mapping failure.
  1879. */
  1880. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
  1881. VmaAllocator VMA_NOT_NULL allocator,
  1882. uint32_t memoryTypeBits);
  1883. /** \brief Begins defragmentation process.
  1884. \param allocator Allocator object.
  1885. \param pInfo Structure filled with parameters of defragmentation.
  1886. \param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
  1887. \returns
  1888. - `VK_SUCCESS` if defragmentation can begin.
  1889. - `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
  1890. For more information about defragmentation, see documentation chapter:
  1891. [Defragmentation](@ref defragmentation).
  1892. */
  1893. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
  1894. VmaAllocator VMA_NOT_NULL allocator,
  1895. const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,
  1896. VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);
  1897. /** \brief Ends defragmentation process.
  1898. \param allocator Allocator object.
  1899. \param context Context object that has been created by vmaBeginDefragmentation().
  1900. \param[out] pStats Optional stats for the defragmentation. Can be null.
  1901. Use this function to finish defragmentation started by vmaBeginDefragmentation().
  1902. */
  1903. VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
  1904. VmaAllocator VMA_NOT_NULL allocator,
  1905. VmaDefragmentationContext VMA_NOT_NULL context,
  1906. VmaDefragmentationStats* VMA_NULLABLE pStats);
  1907. /** \brief Starts single defragmentation pass.
  1908. \param allocator Allocator object.
  1909. \param context Context object that has been created by vmaBeginDefragmentation().
  1910. \param[out] pPassInfo Computed information for current pass.
  1911. \returns
  1912. - `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation.
  1913. - `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
  1914. and then preferably try another pass with vmaBeginDefragmentationPass().
  1915. */
  1916. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
  1917. VmaAllocator VMA_NOT_NULL allocator,
  1918. VmaDefragmentationContext VMA_NOT_NULL context,
  1919. VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
  1920. /** \brief Ends single defragmentation pass.
  1921. \param allocator Allocator object.
  1922. \param context Context object that has been created by vmaBeginDefragmentation().
  1923. \param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you.
  1924. Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible.
  1925. Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
  1926. After this call:
  1927. - Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
  1928. (which is the default) will be pointing to the new destination place.
  1929. - Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
  1930. will be freed.
  1931. If no more moves are possible you can end whole defragmentation.
  1932. */
  1933. VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
  1934. VmaAllocator VMA_NOT_NULL allocator,
  1935. VmaDefragmentationContext VMA_NOT_NULL context,
  1936. VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
  1937. /** \brief Binds buffer to allocation.
  1938. Binds specified buffer to region of memory represented by specified allocation.
  1939. Gets `VkDeviceMemory` handle and offset from the allocation.
  1940. If you want to create a buffer, allocate memory for it and bind them together separately,
  1941. you should use this function for binding instead of standard `vkBindBufferMemory()`,
  1942. because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
  1943. allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
  1944. (which is illegal in Vulkan).
  1945. It is recommended to use function vmaCreateBuffer() instead of this one.
  1946. */
  1947. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
  1948. VmaAllocator VMA_NOT_NULL allocator,
  1949. VmaAllocation VMA_NOT_NULL allocation,
  1950. VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
  1951. /** \brief Binds buffer to allocation with additional parameters.
  1952. \param allocator
  1953. \param allocation
  1954. \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
  1955. \param buffer
  1956. \param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
  1957. This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
  1958. If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
  1959. or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
  1960. */
  1961. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
  1962. VmaAllocator VMA_NOT_NULL allocator,
  1963. VmaAllocation VMA_NOT_NULL allocation,
  1964. VkDeviceSize allocationLocalOffset,
  1965. VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
  1966. const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext);
  1967. /** \brief Binds image to allocation.
  1968. Binds specified image to region of memory represented by specified allocation.
  1969. Gets `VkDeviceMemory` handle and offset from the allocation.
  1970. If you want to create an image, allocate memory for it and bind them together separately,
  1971. you should use this function for binding instead of standard `vkBindImageMemory()`,
  1972. because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
  1973. allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
  1974. (which is illegal in Vulkan).
  1975. It is recommended to use function vmaCreateImage() instead of this one.
  1976. */
  1977. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
  1978. VmaAllocator VMA_NOT_NULL allocator,
  1979. VmaAllocation VMA_NOT_NULL allocation,
  1980. VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
  1981. /** \brief Binds image to allocation with additional parameters.
  1982. \param allocator
  1983. \param allocation
  1984. \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
  1985. \param image
  1986. \param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
  1987. This function is similar to vmaBindImageMemory(), but it provides additional parameters.
  1988. If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
  1989. or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
  1990. */
  1991. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
  1992. VmaAllocator VMA_NOT_NULL allocator,
  1993. VmaAllocation VMA_NOT_NULL allocation,
  1994. VkDeviceSize allocationLocalOffset,
  1995. VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
  1996. const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext);
  1997. /** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
  1998. \param allocator
  1999. \param pBufferCreateInfo
  2000. \param pAllocationCreateInfo
  2001. \param[out] pBuffer Buffer that was created.
  2002. \param[out] pAllocation Allocation that was created.
  2003. \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
  2004. This function automatically:
  2005. -# Creates buffer.
  2006. -# Allocates appropriate memory for it.
  2007. -# Binds the buffer with the memory.
  2008. If any of these operations fail, buffer and allocation are not created,
  2009. returned value is negative error code, `*pBuffer` and `*pAllocation` are null.
  2010. If the function succeeded, you must destroy both buffer and allocation when you
  2011. no longer need them using either convenience function vmaDestroyBuffer() or
  2012. separately, using `vkDestroyBuffer()` and vmaFreeMemory().
  2013. If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
  2014. VK_KHR_dedicated_allocation extension is used internally to query driver whether
  2015. it requires or prefers the new buffer to have dedicated allocation. If yes,
  2016. and if dedicated allocation is possible
  2017. (#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
  2018. allocation for this buffer, just like when using
  2019. #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
  2020. \note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
  2021. although recommended as a good practice, is out of scope of this library and could be implemented
  2022. by the user as a higher-level logic on top of VMA.
  2023. */
  2024. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
  2025. VmaAllocator VMA_NOT_NULL allocator,
  2026. const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
  2027. const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
  2028. VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
  2029. VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
  2030. VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
  2031. /** \brief Creates a buffer with additional minimum alignment.
  2032. Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom,
  2033. minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.
  2034. for interop with OpenGL.
  2035. */
  2036. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
  2037. VmaAllocator VMA_NOT_NULL allocator,
  2038. const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
  2039. const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
  2040. VkDeviceSize minAlignment,
  2041. VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
  2042. VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
  2043. VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
  2044. /** \brief Creates a new `VkBuffer`, binds already created memory for it.
  2045. \param allocator
  2046. \param allocation Allocation that provides memory to be used for binding new buffer to it.
  2047. \param pBufferCreateInfo
  2048. \param[out] pBuffer Buffer that was created.
  2049. This function automatically:
  2050. -# Creates buffer.
  2051. -# Binds the buffer with the supplied memory.
  2052. If any of these operations fail, buffer is not created,
  2053. returned value is negative error code and `*pBuffer` is null.
  2054. If the function succeeded, you must destroy the buffer when you
  2055. no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
  2056. allocation you can use convenience function vmaDestroyBuffer().
  2057. \note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2().
  2058. */
  2059. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
  2060. VmaAllocator VMA_NOT_NULL allocator,
  2061. VmaAllocation VMA_NOT_NULL allocation,
  2062. const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
  2063. VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
  2064. /** \brief Creates a new `VkBuffer`, binds already created memory for it.
  2065. \param allocator
  2066. \param allocation Allocation that provides memory to be used for binding new buffer to it.
  2067. \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0.
  2068. \param pBufferCreateInfo
  2069. \param[out] pBuffer Buffer that was created.
  2070. This function automatically:
  2071. -# Creates buffer.
  2072. -# Binds the buffer with the supplied memory.
  2073. If any of these operations fail, buffer is not created,
  2074. returned value is negative error code and `*pBuffer` is null.
  2075. If the function succeeded, you must destroy the buffer when you
  2076. no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
  2077. allocation you can use convenience function vmaDestroyBuffer().
  2078. \note This is a new version of the function augmented with parameter `allocationLocalOffset`.
  2079. */
  2080. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
  2081. VmaAllocator VMA_NOT_NULL allocator,
  2082. VmaAllocation VMA_NOT_NULL allocation,
  2083. VkDeviceSize allocationLocalOffset,
  2084. const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
  2085. VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
  2086. /** \brief Destroys Vulkan buffer and frees allocated memory.
  2087. This is just a convenience function equivalent to:
  2088. \code
  2089. vkDestroyBuffer(device, buffer, allocationCallbacks);
  2090. vmaFreeMemory(allocator, allocation);
  2091. \endcode
  2092. It is safe to pass null as buffer and/or allocation.
  2093. */
  2094. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
  2095. VmaAllocator VMA_NOT_NULL allocator,
  2096. VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
  2097. VmaAllocation VMA_NULLABLE allocation);
  2098. /// Function similar to vmaCreateBuffer().
  2099. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
  2100. VmaAllocator VMA_NOT_NULL allocator,
  2101. const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
  2102. const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
  2103. VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
  2104. VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
  2105. VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
  2106. /// Function similar to vmaCreateAliasingBuffer() but for images.
  2107. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
  2108. VmaAllocator VMA_NOT_NULL allocator,
  2109. VmaAllocation VMA_NOT_NULL allocation,
  2110. const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
  2111. VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
  2112. /// Function similar to vmaCreateAliasingBuffer2() but for images.
  2113. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
  2114. VmaAllocator VMA_NOT_NULL allocator,
  2115. VmaAllocation VMA_NOT_NULL allocation,
  2116. VkDeviceSize allocationLocalOffset,
  2117. const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
  2118. VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
  2119. /** \brief Destroys Vulkan image and frees allocated memory.
  2120. This is just a convenience function equivalent to:
  2121. \code
  2122. vkDestroyImage(device, image, allocationCallbacks);
  2123. vmaFreeMemory(allocator, allocation);
  2124. \endcode
  2125. It is safe to pass null as image and/or allocation.
  2126. */
  2127. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
  2128. VmaAllocator VMA_NOT_NULL allocator,
  2129. VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
  2130. VmaAllocation VMA_NULLABLE allocation);
  2131. /** @} */
  2132. /**
  2133. \addtogroup group_virtual
  2134. @{
  2135. */
  2136. /** \brief Creates new #VmaVirtualBlock object.
  2137. \param pCreateInfo Parameters for creation.
  2138. \param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
  2139. */
  2140. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
  2141. const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
  2142. VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
  2143. /** \brief Destroys #VmaVirtualBlock object.
  2144. Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
  2145. You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
  2146. if you are sure this is what you want. If you do neither, an assert is called.
  2147. If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
  2148. don't forget to free them.
  2149. */
  2150. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
  2151. VmaVirtualBlock VMA_NULLABLE virtualBlock);
/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
  2153. */
  2154. VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
  2155. VmaVirtualBlock VMA_NOT_NULL virtualBlock);
  2156. /** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
  2157. */
  2158. VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
  2159. VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  2160. VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
  2161. /** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
  2162. If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
  2163. (despite the function doesn't ever allocate actual GPU memory).
`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
  2165. \param virtualBlock Virtual block
  2166. \param pCreateInfo Parameters for the allocation
  2167. \param[out] pAllocation Returned handle of the new allocation
  2168. \param[out] pOffset Returned offset of the new allocation. Optional, can be null.
  2169. */
  2170. VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
  2171. VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  2172. const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
  2173. VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
  2174. VkDeviceSize* VMA_NULLABLE pOffset);
  2175. /** \brief Frees virtual allocation inside given #VmaVirtualBlock.
  2176. It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
  2177. */
  2178. VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
  2179. VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  2180. VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
  2181. /** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
  2182. You must either call this function or free each virtual allocation individually with vmaVirtualFree()
  2183. before destroying a virtual block. Otherwise, an assert is called.
  2184. If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
  2185. don't forget to free it as well.
  2186. */
  2187. VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
  2188. VmaVirtualBlock VMA_NOT_NULL virtualBlock);
  2189. /** \brief Changes custom pointer associated with given virtual allocation.
  2190. */
  2191. VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
  2192. VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  2193. VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
  2194. void* VMA_NULLABLE pUserData);
  2195. /** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
  2196. This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
  2197. */
  2198. VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
  2199. VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  2200. VmaStatistics* VMA_NOT_NULL pStats);
  2201. /** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
  2202. This function is slow to call. Use for debugging purposes.
  2203. For less detailed statistics, see vmaGetVirtualBlockStatistics().
  2204. */
  2205. VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
  2206. VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  2207. VmaDetailedStatistics* VMA_NOT_NULL pStats);
  2208. /** @} */
  2209. #if VMA_STATS_STRING_ENABLED
  2210. /**
  2211. \addtogroup group_stats
  2212. @{
  2213. */
  2214. /** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
  2215. \param virtualBlock Virtual block.
  2216. \param[out] ppStatsString Returned string.
  2217. \param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
  2218. Returned string must be freed using vmaFreeVirtualBlockStatsString().
  2219. */
  2220. VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
  2221. VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  2222. char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
  2223. VkBool32 detailedMap);
  2224. /// Frees a string returned by vmaBuildVirtualBlockStatsString().
  2225. VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
  2226. VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  2227. char* VMA_NULLABLE pStatsString);
  2228. /** \brief Builds and returns statistics as a null-terminated string in JSON format.
  2229. \param allocator
  2230. \param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
  2231. \param detailedMap
  2232. */
  2233. VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
  2234. VmaAllocator VMA_NOT_NULL allocator,
  2235. char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
  2236. VkBool32 detailedMap);
  2237. VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
  2238. VmaAllocator VMA_NOT_NULL allocator,
  2239. char* VMA_NULLABLE pStatsString);
  2240. /** @} */
  2241. #endif // VMA_STATS_STRING_ENABLED
  2242. #endif // _VMA_FUNCTION_HEADERS
  2243. #ifdef __cplusplus
  2244. }
  2245. #endif
  2246. #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
  2247. ////////////////////////////////////////////////////////////////////////////////
  2248. ////////////////////////////////////////////////////////////////////////////////
  2249. //
  2250. // IMPLEMENTATION
  2251. //
  2252. ////////////////////////////////////////////////////////////////////////////////
  2253. ////////////////////////////////////////////////////////////////////////////////
  2254. // For Visual Studio IntelliSense.
  2255. #if defined(__cplusplus) && defined(__INTELLISENSE__)
  2256. #define VMA_IMPLEMENTATION
  2257. #endif
  2258. #ifdef VMA_IMPLEMENTATION
  2259. #undef VMA_IMPLEMENTATION
  2260. #include <cstdint>
  2261. #include <cstdlib>
  2262. #include <cstring>
  2263. #include <cinttypes>
  2264. #include <utility>
  2265. #include <type_traits>
  2266. #if !defined(VMA_CPP20)
  2267. #if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
  2268. #define VMA_CPP20 1
  2269. #else
  2270. #define VMA_CPP20 0
  2271. #endif
  2272. #endif
  2273. #ifdef _MSC_VER
  2274. #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
  2275. #endif
  2276. #if VMA_CPP20
  2277. #include <bit>
  2278. #endif
  2279. #if VMA_STATS_STRING_ENABLED
  2280. #include <cstdio> // For snprintf
  2281. #endif
  2282. /*******************************************************************************
  2283. CONFIGURATION SECTION
  2284. Define some of these macros before each #include of this header or change them
here if you need other than the default behavior depending on your environment.
  2286. */
  2287. #ifndef _VMA_CONFIGURATION
  2288. /*
  2289. Define this macro to 1 to make the library fetch pointers to Vulkan functions
  2290. internally, like:
  2291. vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
  2292. */
  2293. #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
  2294. #define VMA_STATIC_VULKAN_FUNCTIONS 1
  2295. #endif
  2296. /*
  2297. Define this macro to 1 to make the library fetch pointers to Vulkan functions
  2298. internally, like:
  2299. vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
  2300. To use this feature in new versions of VMA you now have to pass
  2301. VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
  2302. VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
  2303. */
  2304. #if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
  2305. #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
  2306. #endif
  2307. #ifndef VMA_USE_STL_SHARED_MUTEX
  2308. #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
  2309. #define VMA_USE_STL_SHARED_MUTEX 1
  2310. // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
  2311. // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
  2312. #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
  2313. #define VMA_USE_STL_SHARED_MUTEX 1
  2314. #else
  2315. #define VMA_USE_STL_SHARED_MUTEX 0
  2316. #endif
  2317. #endif
  2318. /*
  2319. Define this macro to include custom header files without having to edit this file directly, e.g.:
  2320. // Inside of "my_vma_configuration_user_includes.h":
  2321. #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
  2322. #include "my_custom_min.h" // for my_custom_min
  2323. #include <algorithm>
  2324. #include <mutex>
  2325. // Inside a different file, which includes "vk_mem_alloc.h":
  2326. #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
  2327. #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
  2328. #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
  2329. #include "vk_mem_alloc.h"
  2330. ...
  2331. The following headers are used in this CONFIGURATION section only, so feel free to
  2332. remove them if not needed.
  2333. */
  2334. #if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
  2335. #include <cassert> // for assert
  2336. #include <algorithm> // for min, max, swap
  2337. #include <mutex>
  2338. #else
  2339. #include VMA_CONFIGURATION_USER_INCLUDES_H
  2340. #endif
  2341. #ifndef VMA_NULL
  2342. // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
  2343. #define VMA_NULL nullptr
  2344. #endif
  2345. #ifndef VMA_FALLTHROUGH
  2346. #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
  2347. #define VMA_FALLTHROUGH [[fallthrough]]
  2348. #else
  2349. #define VMA_FALLTHROUGH
  2350. #endif
  2351. #endif
  2352. // Normal assert to check for programmer's errors, especially in Debug configuration.
  2353. #ifndef VMA_ASSERT
  2354. #ifdef NDEBUG
  2355. #define VMA_ASSERT(expr)
  2356. #else
  2357. #define VMA_ASSERT(expr) assert(expr)
  2358. #endif
  2359. #endif
  2360. // Assert that will be called very often, like inside data structures e.g. operator[].
  2361. // Making it non-empty can make program slow.
  2362. #ifndef VMA_HEAVY_ASSERT
  2363. #ifdef NDEBUG
  2364. #define VMA_HEAVY_ASSERT(expr)
  2365. #else
  2366. #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
  2367. #endif
  2368. #endif
  2369. // Assert used for reporting memory leaks - unfreed allocations.
  2370. #ifndef VMA_ASSERT_LEAK
  2371. #define VMA_ASSERT_LEAK(expr) VMA_ASSERT(expr)
  2372. #endif
  2373. // If your compiler is not compatible with C++17 and definition of
  2374. // aligned_alloc() function is missing, uncommenting following line may help:
  2375. //#include <malloc.h>
#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
// Old Android (API < 16): neither aligned_alloc nor posix_memalign is available - use memalign().
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }
    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
// Apple, newer Android, and glibc builds that lack aligned_alloc: use posix_memalign().
#include <cstdlib>
#if defined(__APPLE__)
#include <AvailabilityMacros.h>
#endif
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
    // Therefore, for now disable this specific exception until a proper solution is found.
    //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
    //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
    //    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
    //    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
    //    // MAC_OS_X_VERSION_10_16), even though the function is marked
    //    // available for 10.15. That is why the preprocessor checks for 10.16 but
    //    // the __builtin_available checks for 10.15.
    //    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
    //    if (__builtin_available(macOS 10.15, iOS 13, *))
    //        return aligned_alloc(alignment, size);
    //#endif
    //#endif
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }
    // posix_memalign returns 0 on success and an error code (without setting errno) on failure.
    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#elif defined(_WIN32)
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // Note reversed parameter order compared to aligned_alloc(): _aligned_malloc(size, alignment).
    return _aligned_malloc(size, alignment);
}
#elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return aligned_alloc(alignment, size);
}
#else
// No known way to allocate aligned memory on this platform - fail loudly at runtime.
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system.");
    return VMA_NULL;
}
#endif
#if defined(_WIN32)
// Memory obtained from _aligned_malloc() must be released with _aligned_free().
static void vma_aligned_free(void* ptr)
{
    _aligned_free(ptr);
}
#else
// On all other platforms the aligned allocation above is compatible with plain free().
static void vma_aligned_free(void* VMA_NULLABLE ptr)
{
    free(ptr);
}
#endif
  2446. #ifndef VMA_ALIGN_OF
  2447. #define VMA_ALIGN_OF(type) (alignof(type))
  2448. #endif
  2449. #ifndef VMA_SYSTEM_ALIGNED_MALLOC
  2450. #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
  2451. #endif
  2452. #ifndef VMA_SYSTEM_ALIGNED_FREE
  2453. // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
  2454. #if defined(VMA_SYSTEM_FREE)
  2455. #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
  2456. #else
  2457. #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
  2458. #endif
  2459. #endif
  2460. #ifndef VMA_COUNT_BITS_SET
  2461. // Returns number of bits set to 1 in (v)
  2462. #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
  2463. #endif
  2464. #ifndef VMA_BITSCAN_LSB
  2465. // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
  2466. #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
  2467. #endif
  2468. #ifndef VMA_BITSCAN_MSB
  2469. // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
  2470. #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
  2471. #endif
  2472. #ifndef VMA_MIN
  2473. #define VMA_MIN(v1, v2) ((std::min)((v1), (v2)))
  2474. #endif
  2475. #ifndef VMA_MAX
  2476. #define VMA_MAX(v1, v2) ((std::max)((v1), (v2)))
  2477. #endif
  2478. #ifndef VMA_SORT
  2479. #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
  2480. #endif
  2481. #ifndef VMA_DEBUG_LOG_FORMAT
  2482. #define VMA_DEBUG_LOG_FORMAT(format, ...)
  2483. /*
  2484. #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \
  2485. printf((format), __VA_ARGS__); \
  2486. printf("\n"); \
  2487. } while(false)
  2488. */
  2489. #endif
  2490. #ifndef VMA_DEBUG_LOG
  2491. #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str))
  2492. #endif
  2493. #ifndef VMA_LEAK_LOG_FORMAT
  2494. #define VMA_LEAK_LOG_FORMAT(format, ...) VMA_DEBUG_LOG_FORMAT(format, __VA_ARGS__)
  2495. #endif
  2496. #ifndef VMA_CLASS_NO_COPY
  2497. #define VMA_CLASS_NO_COPY(className) \
  2498. private: \
  2499. className(const className&) = delete; \
  2500. className& operator=(const className&) = delete;
  2501. #endif
  2502. #ifndef VMA_CLASS_NO_COPY_NO_MOVE
  2503. #define VMA_CLASS_NO_COPY_NO_MOVE(className) \
  2504. private: \
  2505. className(const className&) = delete; \
  2506. className(className&&) = delete; \
  2507. className& operator=(const className&) = delete; \
  2508. className& operator=(className&&) = delete;
  2509. #endif
  2510. // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
  2511. #if VMA_STATS_STRING_ENABLED
// Writes the decimal representation of (num) into (outStr), a buffer of (strLen) bytes.
static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
{
    snprintf(outStr, strLen, "%" PRIu32, num);
}
// Writes the decimal representation of (num) into (outStr), a buffer of (strLen) bytes.
static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
{
    snprintf(outStr, strLen, "%" PRIu64, num);
}
// Writes the platform's "%p" representation of (ptr) into (outStr), a buffer of (strLen) bytes.
static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
{
    snprintf(outStr, strLen, "%p", ptr);
}
  2524. #endif
  2525. #ifndef VMA_MUTEX
// Simple non-recursive mutex wrapper around std::mutex.
// Define macro VMA_MUTEX before including the library to substitute a custom implementation.
class VmaMutex
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex)
public:
    VmaMutex() { }
    void Lock() { m_Mutex.lock(); }
    void Unlock() { m_Mutex.unlock(); }
    // Returns true if the lock was acquired without blocking.
    bool TryLock() { return m_Mutex.try_lock(); }
private:
    std::mutex m_Mutex;
};
  2537. #define VMA_MUTEX VmaMutex
  2538. #endif
  2539. // Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
#if VMA_USE_STL_SHARED_MUTEX
// Use std::shared_mutex from C++17.
#include <shared_mutex>
// Multiple readers may hold the lock concurrently; a writer is exclusive.
class VmaRWMutex
{
public:
    void LockRead() { m_Mutex.lock_shared(); }
    void UnlockRead() { m_Mutex.unlock_shared(); }
    bool TryLockRead() { return m_Mutex.try_lock_shared(); }
    void LockWrite() { m_Mutex.lock(); }
    void UnlockWrite() { m_Mutex.unlock(); }
    bool TryLockWrite() { return m_Mutex.try_lock(); }
private:
    std::shared_mutex m_Mutex;
};
#define VMA_RW_MUTEX VmaRWMutex
#elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
// Use SRWLOCK from WinAPI.
// Minimum supported client = Windows Vista, server = Windows Server 2008.
class VmaRWMutex
{
public:
    // SRWLOCK requires initialization before first use; it has no destroy function.
    VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    void LockRead() { AcquireSRWLockShared(&m_Lock); }
    void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
    void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
private:
    SRWLOCK m_Lock;
};
#define VMA_RW_MUTEX VmaRWMutex
#else
// Less efficient fallback: Use normal mutex. Readers exclude each other as well.
class VmaRWMutex
{
public:
    void LockRead() { m_Mutex.Lock(); }
    void UnlockRead() { m_Mutex.Unlock(); }
    bool TryLockRead() { return m_Mutex.TryLock(); }
    void LockWrite() { m_Mutex.Lock(); }
    void UnlockWrite() { m_Mutex.Unlock(); }
    bool TryLockWrite() { return m_Mutex.TryLock(); }
private:
    VMA_MUTEX m_Mutex;
};
#define VMA_RW_MUTEX VmaRWMutex
#endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX
  2591. /*
  2592. If providing your own implementation, you need to implement a subset of std::atomic.
  2593. */
  2594. #ifndef VMA_ATOMIC_UINT32
  2595. #include <atomic>
  2596. #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
  2597. #endif
  2598. #ifndef VMA_ATOMIC_UINT64
  2599. #include <atomic>
  2600. #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
  2601. #endif
  2602. #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
  2603. /**
  2604. Every allocation will have its own memory block.
  2605. Define to 1 for debugging purposes only.
  2606. */
  2607. #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
  2608. #endif
  2609. #ifndef VMA_MIN_ALIGNMENT
  2610. /**
  2611. Minimum alignment of all allocations, in bytes.
  2612. Set to more than 1 for debugging purposes. Must be power of two.
  2613. */
  2614. #ifdef VMA_DEBUG_ALIGNMENT // Old name
  2615. #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
  2616. #else
  2617. #define VMA_MIN_ALIGNMENT (1)
  2618. #endif
  2619. #endif
  2620. #ifndef VMA_DEBUG_MARGIN
  2621. /**
  2622. Minimum margin after every allocation, in bytes.
  2623. Set nonzero for debugging purposes only.
  2624. */
  2625. #define VMA_DEBUG_MARGIN (0)
  2626. #endif
  2627. #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
  2628. /**
  2629. Define this macro to 1 to automatically fill new allocations and destroyed
  2630. allocations with some bit pattern.
  2631. */
  2632. #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
  2633. #endif
  2634. #ifndef VMA_DEBUG_DETECT_CORRUPTION
  2635. /**
  2636. Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
  2637. enable writing magic value to the margin after every allocation and
  2638. validating it, so that memory corruptions (out-of-bounds writes) are detected.
  2639. */
  2640. #define VMA_DEBUG_DETECT_CORRUPTION (0)
  2641. #endif
  2642. #ifndef VMA_DEBUG_GLOBAL_MUTEX
  2643. /**
  2644. Set this to 1 for debugging purposes only, to enable single mutex protecting all
  2645. entry calls to the library. Can be useful for debugging multithreading issues.
  2646. */
  2647. #define VMA_DEBUG_GLOBAL_MUTEX (0)
  2648. #endif
  2649. #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
  2650. /**
  2651. Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
  2652. Set to more than 1 for debugging purposes only. Must be power of two.
  2653. */
  2654. #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
  2655. #endif
  2656. #ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
  2657. /*
  2658. Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
  2659. and return error instead of leaving up to Vulkan implementation what to do in such cases.
  2660. */
  2661. #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
  2662. #endif
  2663. #ifndef VMA_SMALL_HEAP_MAX_SIZE
  2664. /// Maximum size of a memory heap in Vulkan to consider it "small".
  2665. #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
  2666. #endif
  2667. #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
  2668. /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
  2669. #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
  2670. #endif
  2671. /*
  2672. Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called
  2673. or a persistently mapped allocation is created and destroyed several times in a row.
  2674. It keeps additional +1 mapping of a device memory block to prevent calling actual
  2675. vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
  2676. tools like RenderDoc.
  2677. */
  2678. #ifndef VMA_MAPPING_HYSTERESIS_ENABLED
  2679. #define VMA_MAPPING_HYSTERESIS_ENABLED 1
  2680. #endif
  2681. #define VMA_VALIDATE(cond) do { if(!(cond)) { \
  2682. VMA_ASSERT(0 && "Validation failed: " #cond); \
  2683. return false; \
  2684. } } while(false)
  2685. /*******************************************************************************
  2686. END OF CONFIGURATION
  2687. */
  2688. #endif // _VMA_CONFIGURATION
  2689. static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
  2690. static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
  2691. // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
  2692. static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
  2693. // Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.
  2694. static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
  2695. static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
  2696. static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
  2697. static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
  2698. static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
  2699. static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
  2700. static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
  2701. static const uint32_t VMA_VENDOR_ID_AMD = 4098;
  2702. // This one is tricky. Vulkan specification defines this code as available since
  2703. // Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
  2704. // See pull request #207.
  2705. #define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
  2706. #if VMA_STATS_STRING_ENABLED
  2707. // Correspond to values of enum VmaSuballocationType.
  2708. static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
  2709. {
  2710. "FREE",
  2711. "UNKNOWN",
  2712. "BUFFER",
  2713. "IMAGE_UNKNOWN",
  2714. "IMAGE_LINEAR",
  2715. "IMAGE_OPTIMAL",
  2716. };
  2717. #endif
  2718. static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
  2719. { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
  2720. #ifndef _VMA_ENUM_DECLARATIONS
// Kind of content occupying a suballocation: free space, a buffer, or an image with
// given tiling. Used e.g. by VmaIsBufferImageGranularityConflict() to decide whether
// VkPhysicalDeviceLimits::bufferImageGranularity must be respected between neighbors.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
// Kind of cache-maintenance operation to perform on a mapped memory range.
enum VMA_CACHE_OPERATION
{
    VMA_CACHE_FLUSH,
    VMA_CACHE_INVALIDATE
};
// Kind of allocation request, specific to the block-metadata algorithm that produced it.
enum class VmaAllocationRequestType
{
    Normal,
    TLSF,
    // Used by "Linear" algorithm.
    UpperAddress,
    EndOf1st,
    EndOf2nd,
};
  2745. #endif // _VMA_ENUM_DECLARATIONS
  2746. #ifndef _VMA_FORWARD_DECLARATIONS
  2747. // Opaque handle used by allocation algorithms to identify single allocation in any conforming way.
  2748. VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);
  2749. struct VmaMutexLock;
  2750. struct VmaMutexLockRead;
  2751. struct VmaMutexLockWrite;
  2752. template<typename T>
  2753. struct AtomicTransactionalIncrement;
  2754. template<typename T>
  2755. struct VmaStlAllocator;
  2756. template<typename T, typename AllocatorT>
  2757. class VmaVector;
  2758. template<typename T, typename AllocatorT, size_t N>
  2759. class VmaSmallVector;
  2760. template<typename T>
  2761. class VmaPoolAllocator;
  2762. template<typename T>
  2763. struct VmaListItem;
  2764. template<typename T>
  2765. class VmaRawList;
  2766. template<typename T, typename AllocatorT>
  2767. class VmaList;
  2768. template<typename ItemTypeTraits>
  2769. class VmaIntrusiveLinkedList;
  2770. #if VMA_STATS_STRING_ENABLED
  2771. class VmaStringBuilder;
  2772. class VmaJsonWriter;
  2773. #endif
  2774. class VmaDeviceMemoryBlock;
  2775. struct VmaDedicatedAllocationListItemTraits;
  2776. class VmaDedicatedAllocationList;
  2777. struct VmaSuballocation;
  2778. struct VmaSuballocationOffsetLess;
  2779. struct VmaSuballocationOffsetGreater;
  2780. struct VmaSuballocationItemSizeLess;
  2781. typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
  2782. struct VmaAllocationRequest;
  2783. class VmaBlockMetadata;
  2784. class VmaBlockMetadata_Linear;
  2785. class VmaBlockMetadata_TLSF;
  2786. class VmaBlockVector;
  2787. struct VmaPoolListItemTraits;
  2788. struct VmaCurrentBudgetData;
  2789. class VmaAllocationObjectAllocator;
  2790. #endif // _VMA_FORWARD_DECLARATIONS
  2791. #ifndef _VMA_FUNCTIONS
  2792. /*
  2793. Returns number of bits set to 1 in (v).
  2794. On specific platforms and compilers you can use intrinsics like:
  2795. Visual Studio:
  2796. return __popcnt(v);
  2797. GCC, Clang:
  2798. return static_cast<uint32_t>(__builtin_popcount(v));
  2799. Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
  2800. But you need to check in runtime whether user's CPU supports these, as some old processors don't.
  2801. */
  2802. static inline uint32_t VmaCountBitsSet(uint32_t v)
  2803. {
  2804. #if VMA_CPP20
  2805. return std::popcount(v);
  2806. #else
  2807. uint32_t c = v - ((v >> 1) & 0x55555555);
  2808. c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
  2809. c = ((c >> 4) + c) & 0x0F0F0F0F;
  2810. c = ((c >> 8) + c) & 0x00FF00FF;
  2811. c = ((c >> 16) + c) & 0x0000FFFF;
  2812. return c;
  2813. #endif
  2814. }
  2815. static inline uint8_t VmaBitScanLSB(uint64_t mask)
  2816. {
  2817. #if defined(_MSC_VER) && defined(_WIN64)
  2818. unsigned long pos;
  2819. if (_BitScanForward64(&pos, mask))
  2820. return static_cast<uint8_t>(pos);
  2821. return UINT8_MAX;
  2822. #elif VMA_CPP20
  2823. if(mask)
  2824. return static_cast<uint8_t>(std::countr_zero(mask));
  2825. return UINT8_MAX;
  2826. #elif defined __GNUC__ || defined __clang__
  2827. return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
  2828. #else
  2829. uint8_t pos = 0;
  2830. uint64_t bit = 1;
  2831. do
  2832. {
  2833. if (mask & bit)
  2834. return pos;
  2835. bit <<= 1;
  2836. } while (pos++ < 63);
  2837. return UINT8_MAX;
  2838. #endif
  2839. }
  2840. static inline uint8_t VmaBitScanLSB(uint32_t mask)
  2841. {
  2842. #ifdef _MSC_VER
  2843. unsigned long pos;
  2844. if (_BitScanForward(&pos, mask))
  2845. return static_cast<uint8_t>(pos);
  2846. return UINT8_MAX;
  2847. #elif VMA_CPP20
  2848. if(mask)
  2849. return static_cast<uint8_t>(std::countr_zero(mask));
  2850. return UINT8_MAX;
  2851. #elif defined __GNUC__ || defined __clang__
  2852. return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
  2853. #else
  2854. uint8_t pos = 0;
  2855. uint32_t bit = 1;
  2856. do
  2857. {
  2858. if (mask & bit)
  2859. return pos;
  2860. bit <<= 1;
  2861. } while (pos++ < 31);
  2862. return UINT8_MAX;
  2863. #endif
  2864. }
  2865. static inline uint8_t VmaBitScanMSB(uint64_t mask)
  2866. {
  2867. #if defined(_MSC_VER) && defined(_WIN64)
  2868. unsigned long pos;
  2869. if (_BitScanReverse64(&pos, mask))
  2870. return static_cast<uint8_t>(pos);
  2871. #elif VMA_CPP20
  2872. if(mask)
  2873. return 63 - static_cast<uint8_t>(std::countl_zero(mask));
  2874. #elif defined __GNUC__ || defined __clang__
  2875. if (mask)
  2876. return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
  2877. #else
  2878. uint8_t pos = 63;
  2879. uint64_t bit = 1ULL << 63;
  2880. do
  2881. {
  2882. if (mask & bit)
  2883. return pos;
  2884. bit >>= 1;
  2885. } while (pos-- > 0);
  2886. #endif
  2887. return UINT8_MAX;
  2888. }
  2889. static inline uint8_t VmaBitScanMSB(uint32_t mask)
  2890. {
  2891. #ifdef _MSC_VER
  2892. unsigned long pos;
  2893. if (_BitScanReverse(&pos, mask))
  2894. return static_cast<uint8_t>(pos);
  2895. #elif VMA_CPP20
  2896. if(mask)
  2897. return 31 - static_cast<uint8_t>(std::countl_zero(mask));
  2898. #elif defined __GNUC__ || defined __clang__
  2899. if (mask)
  2900. return 31 - static_cast<uint8_t>(__builtin_clz(mask));
  2901. #else
  2902. uint8_t pos = 31;
  2903. uint32_t bit = 1UL << 31;
  2904. do
  2905. {
  2906. if (mask & bit)
  2907. return pos;
  2908. bit >>= 1;
  2909. } while (pos-- > 0);
  2910. #endif
  2911. return UINT8_MAX;
  2912. }
  2913. /*
  2914. Returns true if given number is a power of two.
  2915. T must be unsigned integer number or signed integer but always nonnegative.
  2916. For 0 returns true.
  2917. */
  2918. template <typename T>
  2919. inline bool VmaIsPow2(T x)
  2920. {
  2921. return (x & (x - 1)) == 0;
  2922. }
  2923. // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
  2924. // Use types like uint32_t, uint64_t as T.
  2925. template <typename T>
  2926. static inline T VmaAlignUp(T val, T alignment)
  2927. {
  2928. VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
  2929. return (val + alignment - 1) & ~(alignment - 1);
  2930. }
  2931. // Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
  2932. // Use types like uint32_t, uint64_t as T.
  2933. template <typename T>
  2934. static inline T VmaAlignDown(T val, T alignment)
  2935. {
  2936. VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
  2937. return val & ~(alignment - 1);
  2938. }
  2939. // Division with mathematical rounding to nearest number.
  2940. template <typename T>
  2941. static inline T VmaRoundDiv(T x, T y)
  2942. {
  2943. return (x + (y / (T)2)) / y;
  2944. }
  2945. // Divide by 'y' and round up to nearest integer.
  2946. template <typename T>
  2947. static inline T VmaDivideRoundingUp(T x, T y)
  2948. {
  2949. return (x + y - (T)1) / y;
  2950. }
  2951. // Returns smallest power of 2 greater or equal to v.
  2952. static inline uint32_t VmaNextPow2(uint32_t v)
  2953. {
  2954. v--;
  2955. v |= v >> 1;
  2956. v |= v >> 2;
  2957. v |= v >> 4;
  2958. v |= v >> 8;
  2959. v |= v >> 16;
  2960. v++;
  2961. return v;
  2962. }
  2963. static inline uint64_t VmaNextPow2(uint64_t v)
  2964. {
  2965. v--;
  2966. v |= v >> 1;
  2967. v |= v >> 2;
  2968. v |= v >> 4;
  2969. v |= v >> 8;
  2970. v |= v >> 16;
  2971. v |= v >> 32;
  2972. v++;
  2973. return v;
  2974. }
  2975. // Returns largest power of 2 less or equal to v.
  2976. static inline uint32_t VmaPrevPow2(uint32_t v)
  2977. {
  2978. v |= v >> 1;
  2979. v |= v >> 2;
  2980. v |= v >> 4;
  2981. v |= v >> 8;
  2982. v |= v >> 16;
  2983. v = v ^ (v >> 1);
  2984. return v;
  2985. }
  2986. static inline uint64_t VmaPrevPow2(uint64_t v)
  2987. {
  2988. v |= v >> 1;
  2989. v |= v >> 2;
  2990. v |= v >> 4;
  2991. v |= v >> 8;
  2992. v |= v >> 16;
  2993. v |= v >> 32;
  2994. v = v ^ (v >> 1);
  2995. return v;
  2996. }
  2997. static inline bool VmaStrIsEmpty(const char* pStr)
  2998. {
  2999. return pStr == VMA_NULL || *pStr == '\0';
  3000. }
  3001. /*
  3002. Returns true if two memory blocks occupy overlapping pages.
  3003. ResourceA must be in less memory offset than ResourceB.
  3004. Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
  3005. chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
  3006. */
  3007. static inline bool VmaBlocksOnSamePage(
  3008. VkDeviceSize resourceAOffset,
  3009. VkDeviceSize resourceASize,
  3010. VkDeviceSize resourceBOffset,
  3011. VkDeviceSize pageSize)
  3012. {
  3013. VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
  3014. VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
  3015. VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
  3016. VkDeviceSize resourceBStart = resourceBOffset;
  3017. VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
  3018. return resourceAEndPage == resourceBStartPage;
  3019. }
  3020. /*
  3021. Returns true if given suballocation types could conflict and must respect
  3022. VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
  3023. or linear image and another one is optimal image. If type is unknown, behave
  3024. conservatively.
  3025. */
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair so that suballocType1 <= suballocType2; halves the number of cases below.
    if (suballocType1 > suballocType2)
    {
        std::swap(suballocType1, suballocType2);
    }
    switch (suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts with anything.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown resource type: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        // Buffer conflicts with optimal images (and images of unknown tiling, conservatively).
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Image of unknown tiling might be optimal - conservatively conflicts with every image type.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Both optimal: same tiling, no granularity conflict.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
// Fills the debug margin at (pData + offset) with VMA_CORRUPTION_DETECTION_MAGIC_VALUE,
// one uint32_t at a time. Compiles to a no-op unless both VMA_DEBUG_MARGIN and
// VMA_DEBUG_DETECT_CORRUPTION are enabled.
static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    // Integer division: any remainder of VMA_DEBUG_MARGIN beyond a multiple of 4 bytes is left untouched.
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for (size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}
// Checks that the debug margin at (pData + offset) still holds the magic pattern written
// by VmaWriteMagicValue(). Returns false on the first mismatch (memory corruption detected).
// Always returns true when corruption detection is disabled at compile time.
static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for (size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}
  3087. /*
  3088. Fills structure with parameters of an example buffer to be used for transfers
  3089. during GPU memory defragmentation.
  3090. */
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    // Zero the whole structure first so pNext, flags etc. are all null/0.
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    // The buffer is used only as a copy source/destination during defragmentation.
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}
  3098. /*
  3099. Performs binary search and returns iterator to first element that is greater or
  3100. equal to (key), according to comparison (cmp).
  3101. Cmp should return true if first argument is less than second argument.
  3102. Returned value is the found element, if present in the collection or place where
  3103. new element with value (key) should be inserted.
  3104. */
  3105. template <typename CmpLess, typename IterT, typename KeyT>
  3106. static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
  3107. {
  3108. size_t down = 0, up = size_t(end - beg);
  3109. while (down < up)
  3110. {
  3111. const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
  3112. if (cmp(*(beg + mid), key))
  3113. {
  3114. down = mid + 1;
  3115. }
  3116. else
  3117. {
  3118. up = mid;
  3119. }
  3120. }
  3121. return beg + down;
  3122. }
  3123. template<typename CmpLess, typename IterT, typename KeyT>
  3124. IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
  3125. {
  3126. IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
  3127. beg, end, value, cmp);
  3128. if (it == end ||
  3129. (!cmp(*it, value) && !cmp(value, *it)))
  3130. {
  3131. return it;
  3132. }
  3133. return end;
  3134. }
  3135. /*
  3136. Returns true if all pointers in the array are not-null and unique.
  3137. Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
  3138. T must be pointer type, e.g. VmaAllocation, VmaPool.
  3139. */
  3140. template<typename T>
  3141. static bool VmaValidatePointerArray(uint32_t count, const T* arr)
  3142. {
  3143. for (uint32_t i = 0; i < count; ++i)
  3144. {
  3145. const T iPtr = arr[i];
  3146. if (iPtr == VMA_NULL)
  3147. {
  3148. return false;
  3149. }
  3150. for (uint32_t j = i + 1; j < count; ++j)
  3151. {
  3152. if (iPtr == arr[j])
  3153. {
  3154. return false;
  3155. }
  3156. }
  3157. }
  3158. return true;
  3159. }
// Inserts (newStruct) at the front of the (mainStruct)->pNext chain.
// Both structures must have a pNext member, as Vulkan extensible structs do.
template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}
// Finds structure with s->sType == sType in mainStruct->pNext chain.
// Returns pointer to it. If not found, returns null.
template<typename FindT, typename MainT>
static inline const FindT* VmaPnextChainFind(const MainT* mainStruct, VkStructureType sType)
{
    // Walk the chain via the generic VkBaseInStructure view of each node.
    for(const VkBaseInStructure* s = (const VkBaseInStructure*)mainStruct->pNext;
        s != VMA_NULL; s = s->pNext)
    {
        if(s->sType == sType)
        {
            return (const FindT*)s;
        }
    }
    return VMA_NULL;
}
// An abstraction over buffer or image `usage` flags, depending on available extensions.
struct VmaBufferImageUsage
{
#if VMA_KHR_MAINTENANCE5
    // With VK_KHR_maintenance5 usage flags are 64-bit.
    typedef uint64_t BaseType; // VkFlags64
#else
    typedef uint32_t BaseType; // VkFlags32
#endif
    // Sentinel with Value == 0, meaning "usage not known"; defined out-of-line below.
    static const VmaBufferImageUsage UNKNOWN;
    BaseType Value;
    VmaBufferImageUsage() { *this = UNKNOWN; }
    explicit VmaBufferImageUsage(BaseType usage) : Value(usage) { }
    VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5);
    explicit VmaBufferImageUsage(const VkImageCreateInfo &createInfo);
    bool operator==(const VmaBufferImageUsage& rhs) const { return Value == rhs.Value; }
    bool operator!=(const VmaBufferImageUsage& rhs) const { return Value != rhs.Value; }
    bool Contains(BaseType flag) const { return (Value & flag) != 0; }
    // Returns true if any usage flag other than TRANSFER_SRC/TRANSFER_DST is set.
    bool ContainsDeviceAccess() const
    {
        // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_IMAGE_TRANSFER*.
        return (Value & ~BaseType(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
    }
};
const VmaBufferImageUsage VmaBufferImageUsage::UNKNOWN = VmaBufferImageUsage(0);
// Non-member swap, found via argument-dependent lookup.
static void swap(VmaBufferImageUsage& lhs, VmaBufferImageUsage& rhs) noexcept
{
    using std::swap;
    swap(lhs.Value, rhs.Value);
}
VmaBufferImageUsage::VmaBufferImageUsage(const VkBufferCreateInfo &createInfo,
    bool useKhrMaintenance5)
{
#if VMA_KHR_MAINTENANCE5
    if(useKhrMaintenance5)
    {
        // If VkBufferCreateInfo::pNext chain contains VkBufferUsageFlags2CreateInfoKHR,
        // take usage from it and ignore VkBufferCreateInfo::usage, per specification
        // of the VK_KHR_maintenance5 extension.
        const VkBufferUsageFlags2CreateInfoKHR* const usageFlags2 =
            VmaPnextChainFind<VkBufferUsageFlags2CreateInfoKHR>(&createInfo, VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR);
        if(usageFlags2)
        {
            this->Value = usageFlags2->usage;
            return;
        }
    }
#endif
    // Default path: take the plain 32-bit usage field.
    this->Value = (BaseType)createInfo.usage;
}
VmaBufferImageUsage::VmaBufferImageUsage(const VkImageCreateInfo &createInfo)
{
    // Maybe in the future there will be VK_KHR_maintenanceN extension with structure
    // VkImageUsageFlags2CreateInfoKHR, like the one for buffers...
    this->Value = (BaseType)createInfo.usage;
}
  3236. // This is the main algorithm that guides the selection of a memory type best for an allocation -
  3237. // converts usage to required/preferred/not preferred flags.
  3238. static bool FindMemoryPreferences(
  3239. bool isIntegratedGPU,
  3240. const VmaAllocationCreateInfo& allocCreateInfo,
  3241. VmaBufferImageUsage bufImgUsage,
  3242. VkMemoryPropertyFlags& outRequiredFlags,
  3243. VkMemoryPropertyFlags& outPreferredFlags,
  3244. VkMemoryPropertyFlags& outNotPreferredFlags)
  3245. {
  3246. outRequiredFlags = allocCreateInfo.requiredFlags;
  3247. outPreferredFlags = allocCreateInfo.preferredFlags;
  3248. outNotPreferredFlags = 0;
  3249. switch(allocCreateInfo.usage)
  3250. {
  3251. case VMA_MEMORY_USAGE_UNKNOWN:
  3252. break;
  3253. case VMA_MEMORY_USAGE_GPU_ONLY:
  3254. if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
  3255. {
  3256. outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  3257. }
  3258. break;
  3259. case VMA_MEMORY_USAGE_CPU_ONLY:
  3260. outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
  3261. break;
  3262. case VMA_MEMORY_USAGE_CPU_TO_GPU:
  3263. outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  3264. if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
  3265. {
  3266. outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  3267. }
  3268. break;
  3269. case VMA_MEMORY_USAGE_GPU_TO_CPU:
  3270. outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  3271. outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
  3272. break;
  3273. case VMA_MEMORY_USAGE_CPU_COPY:
  3274. outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  3275. break;
  3276. case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
  3277. outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
  3278. break;
  3279. case VMA_MEMORY_USAGE_AUTO:
  3280. case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:
  3281. case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:
  3282. {
  3283. if(bufImgUsage == VmaBufferImageUsage::UNKNOWN)
  3284. {
  3285. VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known."
  3286. " Maybe you use VkBufferUsageFlags2CreateInfoKHR but forgot to use VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT?" );
  3287. return false;
  3288. }
  3289. const bool deviceAccess = bufImgUsage.ContainsDeviceAccess();
  3290. const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
  3291. const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
  3292. const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
  3293. const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
  3294. const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
  3295. // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
  3296. if(hostAccessRandom)
  3297. {
  3298. // Prefer cached. Cannot require it, because some platforms don't have it (e.g. Raspberry Pi - see #362)!
  3299. outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
  3300. if (!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
  3301. {
  3302. // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
  3303. // Omitting HOST_VISIBLE here is intentional.
  3304. // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
  3305. // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list.
  3306. outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  3307. }
  3308. else
  3309. {
  3310. // Always CPU memory.
  3311. outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  3312. }
  3313. }
  3314. // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
  3315. else if(hostAccessSequentialWrite)
  3316. {
  3317. // Want uncached and write-combined.
  3318. outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
  3319. if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
  3320. {
  3321. outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  3322. }
  3323. else
  3324. {
  3325. outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  3326. // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
  3327. if(deviceAccess)
  3328. {
  3329. // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
  3330. if(preferHost)
  3331. outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  3332. else
  3333. outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  3334. }
  3335. // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
  3336. else
  3337. {
  3338. // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
  3339. if(preferDevice)
  3340. outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  3341. else
  3342. outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  3343. }
  3344. }
  3345. }
  3346. // No CPU access
  3347. else
  3348. {
  3349. // if(deviceAccess)
  3350. //
  3351. // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory,
  3352. // unless there is a clear preference from the user not to do so.
  3353. //
  3354. // else:
  3355. //
  3356. // No direct GPU access, no CPU access, just transfers.
  3357. // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
  3358. // a "swap file" copy to free some GPU memory (then better CPU memory).
  3359. // Up to the user to decide. If no preferece, assume the former and choose GPU memory.
  3360. if(preferHost)
  3361. outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  3362. else
  3363. outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  3364. }
  3365. break;
  3366. }
  3367. default:
  3368. VMA_ASSERT(0);
  3369. }
  3370. // Avoid DEVICE_COHERENT unless explicitly requested.
  3371. if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
  3372. (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
  3373. {
  3374. outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
  3375. }
  3376. return true;
  3377. }
  3378. ////////////////////////////////////////////////////////////////////////////////
  3379. // Memory allocation
  3380. static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
  3381. {
  3382. void* result = VMA_NULL;
  3383. if ((pAllocationCallbacks != VMA_NULL) &&
  3384. (pAllocationCallbacks->pfnAllocation != VMA_NULL))
  3385. {
  3386. result = (*pAllocationCallbacks->pfnAllocation)(
  3387. pAllocationCallbacks->pUserData,
  3388. size,
  3389. alignment,
  3390. VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
  3391. }
  3392. else
  3393. {
  3394. result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
  3395. }
  3396. VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
  3397. return result;
  3398. }
  3399. static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
  3400. {
  3401. if ((pAllocationCallbacks != VMA_NULL) &&
  3402. (pAllocationCallbacks->pfnFree != VMA_NULL))
  3403. {
  3404. (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
  3405. }
  3406. else
  3407. {
  3408. VMA_SYSTEM_ALIGNED_FREE(ptr);
  3409. }
  3410. }
  3411. template<typename T>
  3412. static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
  3413. {
  3414. return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
  3415. }
  3416. template<typename T>
  3417. static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
  3418. {
  3419. return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
  3420. }
// Allocates storage via VmaAllocate and placement-constructs a `type` there.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
// Allocates storage for `count` elements of `type` via VmaAllocateArray
// and placement-constructs at the start of it.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
  3423. template<typename T>
  3424. static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
  3425. {
  3426. ptr->~T();
  3427. VmaFree(pAllocationCallbacks, ptr);
  3428. }
  3429. template<typename T>
  3430. static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
  3431. {
  3432. if (ptr != VMA_NULL)
  3433. {
  3434. for (size_t i = count; i--; )
  3435. {
  3436. ptr[i].~T();
  3437. }
  3438. VmaFree(pAllocationCallbacks, ptr);
  3439. }
  3440. }
  3441. static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
  3442. {
  3443. if (srcStr != VMA_NULL)
  3444. {
  3445. const size_t len = strlen(srcStr);
  3446. char* const result = vma_new_array(allocs, char, len + 1);
  3447. memcpy(result, srcStr, len + 1);
  3448. return result;
  3449. }
  3450. return VMA_NULL;
  3451. }
  3452. #if VMA_STATS_STRING_ENABLED
  3453. static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
  3454. {
  3455. if (srcStr != VMA_NULL)
  3456. {
  3457. char* const result = vma_new_array(allocs, char, strLen + 1);
  3458. memcpy(result, srcStr, strLen);
  3459. result[strLen] = '\0';
  3460. return result;
  3461. }
  3462. return VMA_NULL;
  3463. }
  3464. #endif // VMA_STATS_STRING_ENABLED
  3465. static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
  3466. {
  3467. if (str != VMA_NULL)
  3468. {
  3469. const size_t len = strlen(str);
  3470. vma_delete_array(allocs, str, len + 1);
  3471. }
  3472. }
  3473. template<typename CmpLess, typename VectorT>
  3474. size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
  3475. {
  3476. const size_t indexToInsert = VmaBinaryFindFirstNotLess(
  3477. vector.data(),
  3478. vector.data() + vector.size(),
  3479. value,
  3480. CmpLess()) - vector.data();
  3481. VmaVectorInsert(vector, indexToInsert, value);
  3482. return indexToInsert;
  3483. }
  3484. template<typename CmpLess, typename VectorT>
  3485. bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
  3486. {
  3487. CmpLess comparator;
  3488. typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
  3489. vector.begin(),
  3490. vector.end(),
  3491. value,
  3492. comparator);
  3493. if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
  3494. {
  3495. size_t indexToRemove = it - vector.begin();
  3496. VmaVectorRemove(vector, indexToRemove);
  3497. return true;
  3498. }
  3499. return false;
  3500. }
  3501. #endif // _VMA_FUNCTIONS
  3502. #ifndef _VMA_STATISTICS_FUNCTIONS
  3503. static void VmaClearStatistics(VmaStatistics& outStats)
  3504. {
  3505. outStats.blockCount = 0;
  3506. outStats.allocationCount = 0;
  3507. outStats.blockBytes = 0;
  3508. outStats.allocationBytes = 0;
  3509. }
  3510. static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
  3511. {
  3512. inoutStats.blockCount += src.blockCount;
  3513. inoutStats.allocationCount += src.allocationCount;
  3514. inoutStats.blockBytes += src.blockBytes;
  3515. inoutStats.allocationBytes += src.allocationBytes;
  3516. }
  3517. static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
  3518. {
  3519. VmaClearStatistics(outStats.statistics);
  3520. outStats.unusedRangeCount = 0;
  3521. outStats.allocationSizeMin = VK_WHOLE_SIZE;
  3522. outStats.allocationSizeMax = 0;
  3523. outStats.unusedRangeSizeMin = VK_WHOLE_SIZE;
  3524. outStats.unusedRangeSizeMax = 0;
  3525. }
  3526. static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
  3527. {
  3528. inoutStats.statistics.allocationCount++;
  3529. inoutStats.statistics.allocationBytes += size;
  3530. inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
  3531. inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
  3532. }
  3533. static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
  3534. {
  3535. inoutStats.unusedRangeCount++;
  3536. inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
  3537. inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
  3538. }
  3539. static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
  3540. {
  3541. VmaAddStatistics(inoutStats.statistics, src.statistics);
  3542. inoutStats.unusedRangeCount += src.unusedRangeCount;
  3543. inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
  3544. inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
  3545. inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
  3546. inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
  3547. }
  3548. #endif // _VMA_STATISTICS_FUNCTIONS
  3549. #ifndef _VMA_MUTEX_LOCK
  3550. // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
  3551. struct VmaMutexLock
  3552. {
  3553. VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock)
  3554. public:
  3555. VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
  3556. m_pMutex(useMutex ? &mutex : VMA_NULL)
  3557. {
  3558. if (m_pMutex) { m_pMutex->Lock(); }
  3559. }
  3560. ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }
  3561. private:
  3562. VMA_MUTEX* m_pMutex;
  3563. };
  3564. // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
  3565. struct VmaMutexLockRead
  3566. {
  3567. VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead)
  3568. public:
  3569. VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
  3570. m_pMutex(useMutex ? &mutex : VMA_NULL)
  3571. {
  3572. if (m_pMutex) { m_pMutex->LockRead(); }
  3573. }
  3574. ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }
  3575. private:
  3576. VMA_RW_MUTEX* m_pMutex;
  3577. };
  3578. // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
  3579. struct VmaMutexLockWrite
  3580. {
  3581. VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite)
  3582. public:
  3583. VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
  3584. : m_pMutex(useMutex ? &mutex : VMA_NULL)
  3585. {
  3586. if (m_pMutex) { m_pMutex->LockWrite(); }
  3587. }
  3588. ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }
  3589. private:
  3590. VMA_RW_MUTEX* m_pMutex;
  3591. };
#if VMA_DEBUG_GLOBAL_MUTEX
// Debugging aid: when enabled, VMA_DEBUG_GLOBAL_MUTEX_LOCK serializes callers
// on one global mutex for the duration of the enclosing scope.
static VMA_MUTEX gDebugGlobalMutex;
#define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
// Expands to nothing when the global debug mutex is disabled.
#define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif
  3598. #endif // _VMA_MUTEX_LOCK
  3599. #ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
  3600. // An object that increments given atomic but decrements it back in the destructor unless Commit() is called.
  3601. template<typename AtomicT>
  3602. struct AtomicTransactionalIncrement
  3603. {
  3604. public:
  3605. using T = decltype(AtomicT().load());
  3606. ~AtomicTransactionalIncrement()
  3607. {
  3608. if(m_Atomic)
  3609. --(*m_Atomic);
  3610. }
  3611. void Commit() { m_Atomic = VMA_NULL; }
  3612. T Increment(AtomicT* atomic)
  3613. {
  3614. m_Atomic = atomic;
  3615. return m_Atomic->fetch_add(1);
  3616. }
  3617. private:
  3618. AtomicT* m_Atomic = VMA_NULL;
  3619. };
  3620. #endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
  3621. #ifndef _VMA_STL_ALLOCATOR
  3622. // STL-compatible allocator.
  3623. template<typename T>
  3624. struct VmaStlAllocator
  3625. {
  3626. const VkAllocationCallbacks* const m_pCallbacks;
  3627. typedef T value_type;
  3628. VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
  3629. template<typename U>
  3630. VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
  3631. VmaStlAllocator(const VmaStlAllocator&) = default;
  3632. VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;
  3633. T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
  3634. void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
  3635. template<typename U>
  3636. bool operator==(const VmaStlAllocator<U>& rhs) const
  3637. {
  3638. return m_pCallbacks == rhs.m_pCallbacks;
  3639. }
  3640. template<typename U>
  3641. bool operator!=(const VmaStlAllocator<U>& rhs) const
  3642. {
  3643. return m_pCallbacks != rhs.m_pCallbacks;
  3644. }
  3645. };
  3646. #endif // _VMA_STL_ALLOCATOR
  3647. #ifndef _VMA_VECTOR
/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;
    typedef T* iterator;
    typedef const T* const_iterator;

    VmaVector(const AllocatorT& allocator);
    VmaVector(size_t count, const AllocatorT& allocator);
    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
    // value is unused.
    VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
    VmaVector(const VmaVector<T, AllocatorT>& src);
    VmaVector& operator=(const VmaVector& rhs);
    // Releases storage only - element destructors are never run (T must be POD).
    ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
    const T* data() const { return m_pArray; }
    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }
    const_iterator cbegin() const { return m_pArray; }
    const_iterator cend() const { return m_pArray + m_Count; }
    const_iterator begin() const { return cbegin(); }
    const_iterator end() const { return cend(); }

    // pop_front/push_front are O(n): they shift all remaining elements by one.
    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
    void push_front(const T& src) { insert(0, src); }
    void push_back(const T& src);
    void reserve(size_t newCapacity, bool freeMemory = false);
    void resize(size_t newCount);
    void clear() { resize(0); }
    void shrink_to_fit();
    void insert(size_t index, const T& src);
    void remove(size_t index);

    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }

private:
    AllocatorT m_Allocator; // Wraps the VkAllocationCallbacks used for all storage.
    T* m_pArray;            // Heap storage; VMA_NULL when capacity is 0.
    size_t m_Count;         // Number of live elements.
    size_t m_Capacity;      // Number of allocated slots (>= m_Count).
};
  3698. #ifndef _VMA_VECTOR_FUNCTIONS
// Constructs an empty vector that will allocate through `allocator`.
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
    : m_Allocator(allocator),
    m_pArray(VMA_NULL),
    m_Count(0),
    m_Capacity(0) {}
// Constructs a vector with `count` elements (uninitialized - T is POD).
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
    : m_Allocator(allocator),
    m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    m_Count(count),
    m_Capacity(count) {}
// Copy constructor: allocates exactly src.m_Count slots and byte-copies the elements.
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
    : m_Allocator(src.m_Allocator),
    m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    m_Count(src.m_Count),
    m_Capacity(src.m_Count)
{
    if (m_Count != 0)
    {
        // T is POD, so memcpy is a valid copy.
        memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    }
}
  3723. template<typename T, typename AllocatorT>
  3724. VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
  3725. {
  3726. if (&rhs != this)
  3727. {
  3728. resize(rhs.m_Count);
  3729. if (m_Count != 0)
  3730. {
  3731. memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
  3732. }
  3733. }
  3734. return *this;
  3735. }
  3736. template<typename T, typename AllocatorT>
  3737. void VmaVector<T, AllocatorT>::push_back(const T& src)
  3738. {
  3739. const size_t newIndex = size();
  3740. resize(newIndex + 1);
  3741. m_pArray[newIndex] = src;
  3742. }
  3743. template<typename T, typename AllocatorT>
  3744. void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
  3745. {
  3746. newCapacity = VMA_MAX(newCapacity, m_Count);
  3747. if ((newCapacity < m_Capacity) && !freeMemory)
  3748. {
  3749. newCapacity = m_Capacity;
  3750. }
  3751. if (newCapacity != m_Capacity)
  3752. {
  3753. T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
  3754. if (m_Count != 0)
  3755. {
  3756. memcpy(newArray, m_pArray, m_Count * sizeof(T));
  3757. }
  3758. VmaFree(m_Allocator.m_pCallbacks, m_pArray);
  3759. m_Capacity = newCapacity;
  3760. m_pArray = newArray;
  3761. }
  3762. }
  3763. template<typename T, typename AllocatorT>
  3764. void VmaVector<T, AllocatorT>::resize(size_t newCount)
  3765. {
  3766. size_t newCapacity = m_Capacity;
  3767. if (newCount > m_Capacity)
  3768. {
  3769. newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
  3770. }
  3771. if (newCapacity != m_Capacity)
  3772. {
  3773. T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
  3774. const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
  3775. if (elementsToCopy != 0)
  3776. {
  3777. memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
  3778. }
  3779. VmaFree(m_Allocator.m_pCallbacks, m_pArray);
  3780. m_Capacity = newCapacity;
  3781. m_pArray = newArray;
  3782. }
  3783. m_Count = newCount;
  3784. }
  3785. template<typename T, typename AllocatorT>
  3786. void VmaVector<T, AllocatorT>::shrink_to_fit()
  3787. {
  3788. if (m_Capacity > m_Count)
  3789. {
  3790. T* newArray = VMA_NULL;
  3791. if (m_Count > 0)
  3792. {
  3793. newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
  3794. memcpy(newArray, m_pArray, m_Count * sizeof(T));
  3795. }
  3796. VmaFree(m_Allocator.m_pCallbacks, m_pArray);
  3797. m_Capacity = m_Count;
  3798. m_pArray = newArray;
  3799. }
  3800. }
  3801. template<typename T, typename AllocatorT>
  3802. void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
  3803. {
  3804. VMA_HEAVY_ASSERT(index <= m_Count);
  3805. const size_t oldCount = size();
  3806. resize(oldCount + 1);
  3807. if (index < oldCount)
  3808. {
  3809. memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
  3810. }
  3811. m_pArray[index] = src;
  3812. }
  3813. template<typename T, typename AllocatorT>
  3814. void VmaVector<T, AllocatorT>::remove(size_t index)
  3815. {
  3816. VMA_HEAVY_ASSERT(index < m_Count);
  3817. const size_t oldCount = size();
  3818. if (index < oldCount - 1)
  3819. {
  3820. memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
  3821. }
  3822. resize(oldCount - 1);
  3823. }
  3824. #endif // _VMA_VECTOR_FUNCTIONS
// Free-function adapter so generic code can insert into a VmaVector.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
// Free-function adapter so generic code can remove from a VmaVector.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
  3835. #endif // _VMA_VECTOR
  3836. #ifndef _VMA_SMALL_VECTOR
/*
This is a vector (a variable-sized array), optimized for the case when the array is small.
It contains some number of elements in-place, which allows it to avoid heap allocation
when the actual number of elements is below that threshold. This allows normal "small"
cases to be fast without losing generality for large inputs.
*/
template<typename T, typename AllocatorT, size_t N>
class VmaSmallVector
{
public:
    typedef T value_type;
    typedef T* iterator;

    VmaSmallVector(const AllocatorT& allocator);
    VmaSmallVector(size_t count, const AllocatorT& allocator);
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
    ~VmaSmallVector() = default;

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    // Active storage is the in-place array while m_Count <= N, the heap array above that.
    T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
    const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }

    iterator begin() { return data(); }
    iterator end() { return data() + m_Count; }

    // pop_front/push_front are O(n): they shift the whole contents by one element.
    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
    void push_front(const T& src) { insert(0, src); }
    void push_back(const T& src);
    void resize(size_t newCount, bool freeMemory = false);
    void clear(bool freeMemory = false);
    void insert(size_t index, const T& src);
    void remove(size_t index);

    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }

private:
    size_t m_Count;
    T m_StaticArray[N]; // Used when m_Count <= N
    VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
};
  3881. #ifndef _VMA_SMALL_VECTOR_FUNCTIONS
// Constructs an empty small-vector; the dynamic array starts empty too.
template<typename T, typename AllocatorT, size_t N>
VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)
    : m_Count(0),
    m_DynamicArray(allocator) {}
// Constructs with `count` elements; heap storage is allocated only when count > N.
template<typename T, typename AllocatorT, size_t N>
VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)
    : m_Count(count),
    m_DynamicArray(count > N ? count : 0, allocator) {}
  3890. template<typename T, typename AllocatorT, size_t N>
  3891. void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)
  3892. {
  3893. const size_t newIndex = size();
  3894. resize(newIndex + 1);
  3895. data()[newIndex] = src;
  3896. }
// Changes the element count, migrating contents between the in-place static
// array and the heap-backed dynamic array as the count crosses the threshold N.
// freeMemory additionally trims the dynamic array's capacity where applicable.
template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)
{
    if (newCount > N && m_Count > N)
    {
        // Any direction, staying in m_DynamicArray
        m_DynamicArray.resize(newCount);
        if (freeMemory)
        {
            m_DynamicArray.shrink_to_fit();
        }
    }
    else if (newCount > N && m_Count <= N)
    {
        // Growing, moving from m_StaticArray to m_DynamicArray
        m_DynamicArray.resize(newCount);
        if (m_Count > 0)
        {
            // Byte-copy of existing elements is valid because T is POD.
            memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
        }
    }
    else if (newCount <= N && m_Count > N)
    {
        // Shrinking, moving from m_DynamicArray to m_StaticArray
        if (newCount > 0)
        {
            memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
        }
        m_DynamicArray.resize(0);
        if (freeMemory)
        {
            m_DynamicArray.shrink_to_fit();
        }
    }
    else
    {
        // Any direction, staying in m_StaticArray - nothing to do here
    }
    m_Count = newCount;
}
// Removes all elements. freeMemory also releases the dynamic array's heap buffer.
template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)
{
    m_DynamicArray.clear();
    if (freeMemory)
    {
        m_DynamicArray.shrink_to_fit();
    }
    m_Count = 0;
}
  3947. template<typename T, typename AllocatorT, size_t N>
  3948. void VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)
  3949. {
  3950. VMA_HEAVY_ASSERT(index <= m_Count);
  3951. const size_t oldCount = size();
  3952. resize(oldCount + 1);
  3953. T* const dataPtr = data();
  3954. if (index < oldCount)
  3955. {
  3956. // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray.
  3957. memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
  3958. }
  3959. dataPtr[index] = src;
  3960. }
  3961. template<typename T, typename AllocatorT, size_t N>
  3962. void VmaSmallVector<T, AllocatorT, N>::remove(size_t index)
  3963. {
  3964. VMA_HEAVY_ASSERT(index < m_Count);
  3965. const size_t oldCount = size();
  3966. if (index < oldCount - 1)
  3967. {
  3968. // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray.
  3969. T* const dataPtr = data();
  3970. memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
  3971. }
  3972. resize(oldCount - 1);
  3973. }
  3974. #endif // _VMA_SMALL_VECTOR_FUNCTIONS
  3975. #endif // _VMA_SMALL_VECTOR
  3976. #ifndef _VMA_POOL_ALLOCATOR
  3977. /*
  3978. Allocator for objects of type T using a list of arrays (pools) to speed up
  3979. allocation. Number of elements that can be allocated is not bounded because
  3980. allocator can create multiple blocks.
  3981. */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator)
public:
    // firstBlockCapacity - item count of the first block; must be > 1.
    // Later blocks grow by a factor of 3/2 (see CreateNewBlock).
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    // Constructs a T in place from args and returns it.
    template<typename... Types> T* Alloc(Types&&... args);
    // Destroys *ptr and returns its slot to the owning block's free list.
    // ptr must have been returned by Alloc() of this allocator.
    void Free(T* ptr);
private:
    // A slot is either a live T or a link in its block's free list.
    union Item
    {
        uint32_t NextFreeIndex; // UINT32_MAX marks the end of the free list.
        alignas(T) char Value[sizeof(T)];
    };
    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex; // Head of this block's free list; UINT32_MAX when the block is full.
    };
    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;
    ItemBlock& CreateNewBlock();
};
  4008. #ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)
    : m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // Capacity 1 would pin the 3/2 growth factor at 1 forever (1*3/2 == 1),
    // so at least 2 is required.
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}
  4017. template<typename T>
  4018. VmaPoolAllocator<T>::~VmaPoolAllocator()
  4019. {
  4020. for (size_t i = m_ItemBlocks.size(); i--;)
  4021. vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
  4022. m_ItemBlocks.clear();
  4023. }
  4024. template<typename T>
  4025. template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
  4026. {
  4027. for (size_t i = m_ItemBlocks.size(); i--; )
  4028. {
  4029. ItemBlock& block = m_ItemBlocks[i];
  4030. // This block has some free items: Use first one.
  4031. if (block.FirstFreeIndex != UINT32_MAX)
  4032. {
  4033. Item* const pItem = &block.pItems[block.FirstFreeIndex];
  4034. block.FirstFreeIndex = pItem->NextFreeIndex;
  4035. T* result = (T*)&pItem->Value;
  4036. new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
  4037. return result;
  4038. }
  4039. }
  4040. // No block has free item: Create new one and use it.
  4041. ItemBlock& newBlock = CreateNewBlock();
  4042. Item* const pItem = &newBlock.pItems[0];
  4043. newBlock.FirstFreeIndex = pItem->NextFreeIndex;
  4044. T* result = (T*)&pItem->Value;
  4045. new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
  4046. return result;
  4047. }
  4048. template<typename T>
  4049. void VmaPoolAllocator<T>::Free(T* ptr)
  4050. {
  4051. // Search all memory blocks to find ptr.
  4052. for (size_t i = m_ItemBlocks.size(); i--; )
  4053. {
  4054. ItemBlock& block = m_ItemBlocks[i];
  4055. // Casting to union.
  4056. Item* pItemPtr;
  4057. memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
  4058. // Check if pItemPtr is in address range of this block.
  4059. if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
  4060. {
  4061. ptr->~T(); // Explicit destructor call.
  4062. const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
  4063. pItemPtr->NextFreeIndex = block.FirstFreeIndex;
  4064. block.FirstFreeIndex = index;
  4065. return;
  4066. }
  4067. }
  4068. VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
  4069. }
  4070. template<typename T>
  4071. typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
  4072. {
  4073. const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
  4074. m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
  4075. const ItemBlock newBlock =
  4076. {
  4077. vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
  4078. newBlockCapacity,
  4079. 0
  4080. };
  4081. m_ItemBlocks.push_back(newBlock);
  4082. // Setup singly-linked list of all free items in this block.
  4083. for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
  4084. newBlock.pItems[i].NextFreeIndex = i + 1;
  4085. newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
  4086. return m_ItemBlocks.back();
  4087. }
  4088. #endif // _VMA_POOL_ALLOCATOR_FUNCTIONS
  4089. #endif // _VMA_POOL_ALLOCATOR
  4090. #ifndef _VMA_RAW_LIST
// Node of VmaRawList: doubly-linked pointers plus the stored payload.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
  4098. // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;
    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    ~VmaRawList() = default;
    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }
    // Front()/Back() are meaningful only when the list is not empty.
    ItemType* Front() { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Front() const { return m_pFront; }
    const ItemType* Back() const { return m_pBack; }
    // Parameterless Push* return the new item; the caller sets its Value.
    ItemType* PushFront();
    ItemType* PushBack();
    ItemType* PushFront(const T& value);
    ItemType* PushBack(const T& value);
    void PopFront();
    void PopBack();
    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);
    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);
    void Clear();
    void Remove(ItemType* pItem);
private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool that all nodes come from.
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
  4136. #ifndef _VMA_RAW_LIST_FUNCTIONS
// Initializes an empty list. Nodes are pool-allocated with a first-block
// capacity of 128 items.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)
    : m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0) {}
  4144. template<typename T>
  4145. VmaListItem<T>* VmaRawList<T>::PushFront()
  4146. {
  4147. ItemType* const pNewItem = m_ItemAllocator.Alloc();
  4148. pNewItem->pPrev = VMA_NULL;
  4149. if (IsEmpty())
  4150. {
  4151. pNewItem->pNext = VMA_NULL;
  4152. m_pFront = pNewItem;
  4153. m_pBack = pNewItem;
  4154. m_Count = 1;
  4155. }
  4156. else
  4157. {
  4158. pNewItem->pNext = m_pFront;
  4159. m_pFront->pPrev = pNewItem;
  4160. m_pFront = pNewItem;
  4161. ++m_Count;
  4162. }
  4163. return pNewItem;
  4164. }
  4165. template<typename T>
  4166. VmaListItem<T>* VmaRawList<T>::PushBack()
  4167. {
  4168. ItemType* const pNewItem = m_ItemAllocator.Alloc();
  4169. pNewItem->pNext = VMA_NULL;
  4170. if(IsEmpty())
  4171. {
  4172. pNewItem->pPrev = VMA_NULL;
  4173. m_pFront = pNewItem;
  4174. m_pBack = pNewItem;
  4175. m_Count = 1;
  4176. }
  4177. else
  4178. {
  4179. pNewItem->pPrev = m_pBack;
  4180. m_pBack->pNext = pNewItem;
  4181. m_pBack = pNewItem;
  4182. ++m_Count;
  4183. }
  4184. return pNewItem;
  4185. }
  4186. template<typename T>
  4187. VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
  4188. {
  4189. ItemType* const pNewItem = PushFront();
  4190. pNewItem->Value = value;
  4191. return pNewItem;
  4192. }
  4193. template<typename T>
  4194. VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
  4195. {
  4196. ItemType* const pNewItem = PushBack();
  4197. pNewItem->Value = value;
  4198. return pNewItem;
  4199. }
  4200. template<typename T>
  4201. void VmaRawList<T>::PopFront()
  4202. {
  4203. VMA_HEAVY_ASSERT(m_Count > 0);
  4204. ItemType* const pFrontItem = m_pFront;
  4205. ItemType* const pNextItem = pFrontItem->pNext;
  4206. if (pNextItem != VMA_NULL)
  4207. {
  4208. pNextItem->pPrev = VMA_NULL;
  4209. }
  4210. m_pFront = pNextItem;
  4211. m_ItemAllocator.Free(pFrontItem);
  4212. --m_Count;
  4213. }
  4214. template<typename T>
  4215. void VmaRawList<T>::PopBack()
  4216. {
  4217. VMA_HEAVY_ASSERT(m_Count > 0);
  4218. ItemType* const pBackItem = m_pBack;
  4219. ItemType* const pPrevItem = pBackItem->pPrev;
  4220. if(pPrevItem != VMA_NULL)
  4221. {
  4222. pPrevItem->pNext = VMA_NULL;
  4223. }
  4224. m_pBack = pPrevItem;
  4225. m_ItemAllocator.Free(pBackItem);
  4226. --m_Count;
  4227. }
  4228. template<typename T>
  4229. void VmaRawList<T>::Clear()
  4230. {
  4231. if (IsEmpty() == false)
  4232. {
  4233. ItemType* pItem = m_pBack;
  4234. while (pItem != VMA_NULL)
  4235. {
  4236. ItemType* const pPrevItem = pItem->pPrev;
  4237. m_ItemAllocator.Free(pItem);
  4238. pItem = pPrevItem;
  4239. }
  4240. m_pFront = VMA_NULL;
  4241. m_pBack = VMA_NULL;
  4242. m_Count = 0;
  4243. }
  4244. }
  4245. template<typename T>
  4246. void VmaRawList<T>::Remove(ItemType* pItem)
  4247. {
  4248. VMA_HEAVY_ASSERT(pItem != VMA_NULL);
  4249. VMA_HEAVY_ASSERT(m_Count > 0);
  4250. if(pItem->pPrev != VMA_NULL)
  4251. {
  4252. pItem->pPrev->pNext = pItem->pNext;
  4253. }
  4254. else
  4255. {
  4256. VMA_HEAVY_ASSERT(m_pFront == pItem);
  4257. m_pFront = pItem->pNext;
  4258. }
  4259. if(pItem->pNext != VMA_NULL)
  4260. {
  4261. pItem->pNext->pPrev = pItem->pPrev;
  4262. }
  4263. else
  4264. {
  4265. VMA_HEAVY_ASSERT(m_pBack == pItem);
  4266. m_pBack = pItem->pPrev;
  4267. }
  4268. m_ItemAllocator.Free(pItem);
  4269. --m_Count;
  4270. }
  4271. template<typename T>
  4272. VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
  4273. {
  4274. if(pItem != VMA_NULL)
  4275. {
  4276. ItemType* const prevItem = pItem->pPrev;
  4277. ItemType* const newItem = m_ItemAllocator.Alloc();
  4278. newItem->pPrev = prevItem;
  4279. newItem->pNext = pItem;
  4280. pItem->pPrev = newItem;
  4281. if(prevItem != VMA_NULL)
  4282. {
  4283. prevItem->pNext = newItem;
  4284. }
  4285. else
  4286. {
  4287. VMA_HEAVY_ASSERT(m_pFront == pItem);
  4288. m_pFront = newItem;
  4289. }
  4290. ++m_Count;
  4291. return newItem;
  4292. }
  4293. else
  4294. return PushBack();
  4295. }
  4296. template<typename T>
  4297. VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
  4298. {
  4299. if(pItem != VMA_NULL)
  4300. {
  4301. ItemType* const nextItem = pItem->pNext;
  4302. ItemType* const newItem = m_ItemAllocator.Alloc();
  4303. newItem->pNext = nextItem;
  4304. newItem->pPrev = pItem;
  4305. pItem->pNext = newItem;
  4306. if(nextItem != VMA_NULL)
  4307. {
  4308. nextItem->pPrev = newItem;
  4309. }
  4310. else
  4311. {
  4312. VMA_HEAVY_ASSERT(m_pBack == pItem);
  4313. m_pBack = newItem;
  4314. }
  4315. ++m_Count;
  4316. return newItem;
  4317. }
  4318. else
  4319. return PushFront();
  4320. }
  4321. template<typename T>
  4322. VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
  4323. {
  4324. ItemType* const newItem = InsertBefore(pItem);
  4325. newItem->Value = value;
  4326. return newItem;
  4327. }
  4328. template<typename T>
  4329. VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
  4330. {
  4331. ItemType* const newItem = InsertAfter(pItem);
  4332. newItem->Value = value;
  4333. return newItem;
  4334. }
  4335. #endif // _VMA_RAW_LIST_FUNCTIONS
  4336. #endif // _VMA_RAW_LIST
  4337. #ifndef _VMA_LIST
// STL-style doubly linked list built on top of VmaRawList, adding
// forward/reverse and mutable/const iterators. AllocatorT is used only to
// obtain the VkAllocationCallbacks pointer (its m_pCallbacks member).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaList)
public:
    class reverse_iterator;
    class const_iterator;
    class const_reverse_iterator;
    // Mutable forward iterator. end() is represented by a null m_pItem.
    class iterator
    {
        friend class const_iterator;
        friend class VmaList<T, AllocatorT>;
    public:
        iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
        bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
        iterator operator++(int) { iterator result = *this; ++*this; return result; }
        iterator operator--(int) { iterator result = *this; --*this; return result; }
        iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
        iterator& operator--(); // Decrementing end() yields the last element.
    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    // Mutable reverse iterator: ++ walks toward the front of the list.
    // rend() is represented by a null m_pItem.
    class reverse_iterator
    {
        friend class const_reverse_iterator;
        friend class VmaList<T, AllocatorT>;
    public:
        reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
        bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
        reverse_iterator operator++(int) { reverse_iterator result = *this; ++*this; return result; }
        reverse_iterator operator--(int) { reverse_iterator result = *this; --*this; return result; }
        reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
        reverse_iterator& operator--(); // Decrementing rend() yields the front element.
    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;
        reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    // Read-only forward iterator.
    class const_iterator
    {
        friend class VmaList<T, AllocatorT>;
    public:
        const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        // Internal helper: casts away constness to obtain a mutable iterator.
        iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
        bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
        const_iterator operator++(int) { const_iterator result = *this; ++*this; return result; }
        const_iterator operator--(int) { const_iterator result = *this; --*this; return result; }
        const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
        const_iterator& operator--();
    private:
        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    // Read-only reverse iterator.
    class const_reverse_iterator
    {
        friend class VmaList<T, AllocatorT>;
    public:
        const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        // Internal helper: casts away constness to obtain a mutable iterator.
        reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
        bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
        const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++*this; return result; }
        const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --*this; return result; }
        const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
        const_reverse_iterator& operator--();
    private:
        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;
        const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}
    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }
    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }
    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    const_iterator begin() const { return cbegin(); }
    const_iterator end() const { return cend(); }
    reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
    reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }
    const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
    const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }
    const_reverse_iterator rbegin() const { return crbegin(); }
    const_reverse_iterator rend() const { return crend(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    // Inserts value before it; returns an iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    void clear() { m_RawList.Clear(); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
private:
    VmaRawList<T> m_RawList;
};
  4450. #ifndef _VMA_LIST_FUNCTIONS
  4451. template<typename T, typename AllocatorT>
  4452. typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
  4453. {
  4454. if (m_pItem != VMA_NULL)
  4455. {
  4456. m_pItem = m_pItem->pPrev;
  4457. }
  4458. else
  4459. {
  4460. VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
  4461. m_pItem = m_pList->Back();
  4462. }
  4463. return *this;
  4464. }
  4465. template<typename T, typename AllocatorT>
  4466. typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
  4467. {
  4468. if (m_pItem != VMA_NULL)
  4469. {
  4470. m_pItem = m_pItem->pNext;
  4471. }
  4472. else
  4473. {
  4474. VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
  4475. m_pItem = m_pList->Front();
  4476. }
  4477. return *this;
  4478. }
  4479. template<typename T, typename AllocatorT>
  4480. typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
  4481. {
  4482. if (m_pItem != VMA_NULL)
  4483. {
  4484. m_pItem = m_pItem->pPrev;
  4485. }
  4486. else
  4487. {
  4488. VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
  4489. m_pItem = m_pList->Back();
  4490. }
  4491. return *this;
  4492. }
  4493. template<typename T, typename AllocatorT>
  4494. typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
  4495. {
  4496. if (m_pItem != VMA_NULL)
  4497. {
  4498. m_pItem = m_pItem->pNext;
  4499. }
  4500. else
  4501. {
  4502. VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
  4503. m_pItem = m_pList->Back();
  4504. }
  4505. return *this;
  4506. }
  4507. #endif // _VMA_LIST_FUNCTIONS
  4508. #endif // _VMA_LIST
  4509. #ifndef _VMA_INTRUSIVE_LINKED_LIST
  4510. /*
  4511. Expected interface of ItemTypeTraits:
  4512. struct MyItemTypeTraits
  4513. {
  4514. typedef MyItem ItemType;
  4515. static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
  4516. static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
  4517. static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
  4518. static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
  4519. };
  4520. */
// Non-owning doubly linked list: prev/next pointers live inside the items
// themselves and are accessed via ItemTypeTraits (see the expected-interface
// comment above).
template<typename ItemTypeTraits>
class VmaIntrusiveLinkedList
{
public:
    typedef typename ItemTypeTraits::ItemType ItemType;
    static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
    static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
    // Movable, not copyable.
    VmaIntrusiveLinkedList() = default;
    VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src);
    VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
    VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
    VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
    // The list must be emptied (e.g. via RemoveAll) before destruction,
    // since items are not owned and cannot be released here.
    ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }
    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }
    ItemType* Front() { return m_Front; }
    ItemType* Back() { return m_Back; }
    const ItemType* Front() const { return m_Front; }
    const ItemType* Back() const { return m_Back; }
    void PushBack(ItemType* item);
    void PushFront(ItemType* item);
    ItemType* PopBack();
    ItemType* PopFront();
    // existingItem can be null - it means PushBack.
    void InsertBefore(ItemType* existingItem, ItemType* newItem);
    // existingItem can be null - it means PushFront.
    void InsertAfter(ItemType* existingItem, ItemType* newItem);
    void Remove(ItemType* item);
    void RemoveAll();
private:
    ItemType* m_Front = VMA_NULL;
    ItemType* m_Back = VMA_NULL;
    size_t m_Count = 0;
};
  4556. #ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
  4557. template<typename ItemTypeTraits>
  4558. VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
  4559. : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
  4560. {
  4561. src.m_Front = src.m_Back = VMA_NULL;
  4562. src.m_Count = 0;
  4563. }
  4564. template<typename ItemTypeTraits>
  4565. VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
  4566. {
  4567. if (&src != this)
  4568. {
  4569. VMA_HEAVY_ASSERT(IsEmpty());
  4570. m_Front = src.m_Front;
  4571. m_Back = src.m_Back;
  4572. m_Count = src.m_Count;
  4573. src.m_Front = src.m_Back = VMA_NULL;
  4574. src.m_Count = 0;
  4575. }
  4576. return *this;
  4577. }
  4578. template<typename ItemTypeTraits>
  4579. void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
  4580. {
  4581. VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
  4582. if (IsEmpty())
  4583. {
  4584. m_Front = item;
  4585. m_Back = item;
  4586. m_Count = 1;
  4587. }
  4588. else
  4589. {
  4590. ItemTypeTraits::AccessPrev(item) = m_Back;
  4591. ItemTypeTraits::AccessNext(m_Back) = item;
  4592. m_Back = item;
  4593. ++m_Count;
  4594. }
  4595. }
  4596. template<typename ItemTypeTraits>
  4597. void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
  4598. {
  4599. VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
  4600. if (IsEmpty())
  4601. {
  4602. m_Front = item;
  4603. m_Back = item;
  4604. m_Count = 1;
  4605. }
  4606. else
  4607. {
  4608. ItemTypeTraits::AccessNext(item) = m_Front;
  4609. ItemTypeTraits::AccessPrev(m_Front) = item;
  4610. m_Front = item;
  4611. ++m_Count;
  4612. }
  4613. }
  4614. template<typename ItemTypeTraits>
  4615. typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
  4616. {
  4617. VMA_HEAVY_ASSERT(m_Count > 0);
  4618. ItemType* const backItem = m_Back;
  4619. ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
  4620. if (prevItem != VMA_NULL)
  4621. {
  4622. ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
  4623. }
  4624. m_Back = prevItem;
  4625. --m_Count;
  4626. ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
  4627. ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
  4628. return backItem;
  4629. }
  4630. template<typename ItemTypeTraits>
  4631. typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
  4632. {
  4633. VMA_HEAVY_ASSERT(m_Count > 0);
  4634. ItemType* const frontItem = m_Front;
  4635. ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
  4636. if (nextItem != VMA_NULL)
  4637. {
  4638. ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
  4639. }
  4640. m_Front = nextItem;
  4641. --m_Count;
  4642. ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
  4643. ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
  4644. return frontItem;
  4645. }
  4646. template<typename ItemTypeTraits>
  4647. void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
  4648. {
  4649. VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
  4650. if (existingItem != VMA_NULL)
  4651. {
  4652. ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
  4653. ItemTypeTraits::AccessPrev(newItem) = prevItem;
  4654. ItemTypeTraits::AccessNext(newItem) = existingItem;
  4655. ItemTypeTraits::AccessPrev(existingItem) = newItem;
  4656. if (prevItem != VMA_NULL)
  4657. {
  4658. ItemTypeTraits::AccessNext(prevItem) = newItem;
  4659. }
  4660. else
  4661. {
  4662. VMA_HEAVY_ASSERT(m_Front == existingItem);
  4663. m_Front = newItem;
  4664. }
  4665. ++m_Count;
  4666. }
  4667. else
  4668. PushBack(newItem);
  4669. }
  4670. template<typename ItemTypeTraits>
  4671. void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
  4672. {
  4673. VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
  4674. if (existingItem != VMA_NULL)
  4675. {
  4676. ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
  4677. ItemTypeTraits::AccessNext(newItem) = nextItem;
  4678. ItemTypeTraits::AccessPrev(newItem) = existingItem;
  4679. ItemTypeTraits::AccessNext(existingItem) = newItem;
  4680. if (nextItem != VMA_NULL)
  4681. {
  4682. ItemTypeTraits::AccessPrev(nextItem) = newItem;
  4683. }
  4684. else
  4685. {
  4686. VMA_HEAVY_ASSERT(m_Back == existingItem);
  4687. m_Back = newItem;
  4688. }
  4689. ++m_Count;
  4690. }
  4691. else
  4692. return PushFront(newItem);
  4693. }
  4694. template<typename ItemTypeTraits>
  4695. void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
  4696. {
  4697. VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
  4698. if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
  4699. {
  4700. ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
  4701. }
  4702. else
  4703. {
  4704. VMA_HEAVY_ASSERT(m_Front == item);
  4705. m_Front = ItemTypeTraits::GetNext(item);
  4706. }
  4707. if (ItemTypeTraits::GetNext(item) != VMA_NULL)
  4708. {
  4709. ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
  4710. }
  4711. else
  4712. {
  4713. VMA_HEAVY_ASSERT(m_Back == item);
  4714. m_Back = ItemTypeTraits::GetPrev(item);
  4715. }
  4716. ItemTypeTraits::AccessPrev(item) = VMA_NULL;
  4717. ItemTypeTraits::AccessNext(item) = VMA_NULL;
  4718. --m_Count;
  4719. }
  4720. template<typename ItemTypeTraits>
  4721. void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
  4722. {
  4723. if (!IsEmpty())
  4724. {
  4725. ItemType* item = m_Back;
  4726. while (item != VMA_NULL)
  4727. {
  4728. ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
  4729. ItemTypeTraits::AccessPrev(item) = VMA_NULL;
  4730. ItemTypeTraits::AccessNext(item) = VMA_NULL;
  4731. item = prevItem;
  4732. }
  4733. m_Front = VMA_NULL;
  4734. m_Back = VMA_NULL;
  4735. m_Count = 0;
  4736. }
  4737. }
  4738. #endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
  4739. #endif // _VMA_INTRUSIVE_LINKED_LIST
  4740. #if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
// Minimal growable character buffer used to build the statistics strings.
class VmaStringBuilder
{
public:
    VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
    ~VmaStringBuilder() = default;
    // Length in characters. The buffer is not NUL-terminated by the Add()
    // overloads - use GetLength() together with GetData().
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }
    void AddNewLine() { Add('\n'); }
    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);
private:
    VmaVector<char, VmaStlAllocator<char>> m_Data;
};
  4757. #ifndef _VMA_STRING_BUILDER_FUNCTIONS
  4758. void VmaStringBuilder::Add(const char* pStr)
  4759. {
  4760. const size_t strLen = strlen(pStr);
  4761. if (strLen > 0)
  4762. {
  4763. const size_t oldCount = m_Data.size();
  4764. m_Data.resize(oldCount + strLen);
  4765. memcpy(m_Data.data() + oldCount, pStr, strLen);
  4766. }
  4767. }
  4768. void VmaStringBuilder::AddNumber(uint32_t num)
  4769. {
  4770. char buf[11];
  4771. buf[10] = '\0';
  4772. char* p = &buf[10];
  4773. do
  4774. {
  4775. *--p = '0' + (char)(num % 10);
  4776. num /= 10;
  4777. } while (num);
  4778. Add(p);
  4779. }
  4780. void VmaStringBuilder::AddNumber(uint64_t num)
  4781. {
  4782. char buf[21];
  4783. buf[20] = '\0';
  4784. char* p = &buf[20];
  4785. do
  4786. {
  4787. *--p = '0' + (char)(num % 10);
  4788. num /= 10;
  4789. } while (num);
  4790. Add(p);
  4791. }
// Appends the pointer value formatted as text by VmaPtrToStr
// (hexadecimal, "%p"-style).
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
  4798. #endif //_VMA_STRING_BUILDER_FUNCTIONS
  4799. #endif // _VMA_STRING_BUILDER
  4800. #if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
  4801. /*
  4802. Allows to conveniently build a correct JSON document to be written to the
  4803. VmaStringBuilder passed to the constructor.
  4804. */
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter)
public:
    // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();
    // Begins object by writing "{".
    // Inside an object, you must call pairs of WriteString and a value, e.g.:
    // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
    // Will write: { "A": 1, "B": 2 }
    void BeginObject(bool singleLine = false);
    // Ends object by writing "}".
    void EndObject();
    // Begins array by writing "[".
    // Inside an array, you can write a sequence of any values.
    void BeginArray(bool singleLine = false);
    // Ends array by writing "]".
    void EndArray();
    // Writes a string value inside "".
    // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
    void WriteString(const char* pStr);
    // Begins writing a string value.
    // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
    // WriteString to conveniently build the string content incrementally, made of
    // parts including numbers.
    void BeginString(const char* pStr = VMA_NULL);
    // Posts next part of an open string.
    void ContinueString(const char* pStr);
    // Posts next part of an open string. The number is converted to decimal characters.
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    // Posts next part of an open string. Pointer value is converted to characters
    // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
    void ContinueString_Pointer(const void* ptr);
    // Ends writing a string value by writing '"'.
    void EndString(const char* pStr = VMA_NULL);
    // Writes a number value.
    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    // Writes a boolean value - false or true.
    void WriteBool(bool b);
    // Writes a null value.
    void WriteNull();
private:
    // Kind of collection currently open at a given stack level.
    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // Per-nesting-level state tracked while the collection is open.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;     // Number of values written so far in this collection (in an object, names count as values).
        bool singleLineMode;     // true = no newlines/indentation inside this collection.
    };
    static const char* const INDENT;
    VmaStringBuilder& m_SB;      // Output target; not owned.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack; // Stack of currently open objects/arrays.
    bool m_InsideString;         // true between BeginString and EndString.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
const char* const VmaJsonWriter::INDENT = " ";
  4869. #ifndef _VMA_JSON_WRITER_FUNCTIONS
// Stores a reference to the output string builder. The allocation callbacks
// are used only for the internal collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
    : m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false) {}
VmaJsonWriter::~VmaJsonWriter()
{
    // The document must be complete: every BeginString matched by EndString,
    // every BeginObject/BeginArray matched by EndObject/EndArray.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
  4879. void VmaJsonWriter::BeginObject(bool singleLine)
  4880. {
  4881. VMA_ASSERT(!m_InsideString);
  4882. BeginValue(false);
  4883. m_SB.Add('{');
  4884. StackItem item;
  4885. item.type = COLLECTION_TYPE_OBJECT;
  4886. item.valueCount = 0;
  4887. item.singleLineMode = singleLine;
  4888. m_Stack.push_back(item);
  4889. }
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);
    // Closing brace goes on a new line, indented one level less than the contents.
    WriteIndent(true);
    m_SB.Add('}');
    // The collection being closed must actually be an object.
    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
  4898. void VmaJsonWriter::BeginArray(bool singleLine)
  4899. {
  4900. VMA_ASSERT(!m_InsideString);
  4901. BeginValue(false);
  4902. m_SB.Add('[');
  4903. StackItem item;
  4904. item.type = COLLECTION_TYPE_ARRAY;
  4905. item.valueCount = 0;
  4906. item.singleLineMode = singleLine;
  4907. m_Stack.push_back(item);
  4908. }
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);
    // Closing bracket goes on a new line, indented one level less than the contents.
    WriteIndent(true);
    m_SB.Add(']');
    // The collection being closed must actually be an array.
    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
// Convenience wrapper: writes a complete, escaped string value in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
// Opens a string value: writes the opening '"' and optionally the first part.
void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    // Empty or null pStr means the caller will supply content via ContinueString.
    if (pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}
  4933. void VmaJsonWriter::ContinueString(const char* pStr)
  4934. {
  4935. VMA_ASSERT(m_InsideString);
  4936. const size_t strLen = strlen(pStr);
  4937. for (size_t i = 0; i < strLen; ++i)
  4938. {
  4939. char ch = pStr[i];
  4940. if (ch == '\\')
  4941. {
  4942. m_SB.Add("\\\\");
  4943. }
  4944. else if (ch == '"')
  4945. {
  4946. m_SB.Add("\\\"");
  4947. }
  4948. else if ((uint8_t)ch >= 32)
  4949. {
  4950. m_SB.Add(ch);
  4951. }
  4952. else switch (ch)
  4953. {
  4954. case '\b':
  4955. m_SB.Add("\\b");
  4956. break;
  4957. case '\f':
  4958. m_SB.Add("\\f");
  4959. break;
  4960. case '\n':
  4961. m_SB.Add("\\n");
  4962. break;
  4963. case '\r':
  4964. m_SB.Add("\\r");
  4965. break;
  4966. case '\t':
  4967. m_SB.Add("\\t");
  4968. break;
  4969. default:
  4970. VMA_ASSERT(0 && "Character not currently supported.");
  4971. }
  4972. }
  4973. }
// Appends a 32-bit number, formatted in decimal, to the open string value.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
// Appends a 64-bit number, formatted in decimal, to the open string value.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
// Appends a pointer value, formatted "%p"-style, to the open string value.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
// Closes the open string value: optionally appends a final part, then writes
// the closing '"'.
void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if (pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}
// Writes a complete 32-bit number value (decimal, unquoted).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
// Writes a complete 64-bit number value (decimal, unquoted).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
// Writes a boolean value as the literal "true" or "false".
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}
// Writes the literal "null" value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
// Emits the separator/indentation appropriate before a new value in the
// current (innermost) collection:
// - In an object, values at even positions are names - they must be strings
//   and are preceded by ", " + indent (or just indent for the first one).
// - In an object, values at odd positions follow their name after ": ".
// - In an array, every value after the first is preceded by ", " + indent.
// Does nothing at document top level (empty stack).
void VmaJsonWriter::BeginValue(bool isString)
{
    if (!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        if (currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            // Object keys (even positions) must be strings.
            VMA_ASSERT(isString);
        }
        if (currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            // This value follows its key on the same line.
            m_SB.Add(": ");
        }
        else if (currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First value of the collection: indent only, no comma.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
// Writes a newline followed by one INDENT per open collection, unless the
// innermost collection is in single-line mode. oneLess is used when closing a
// collection so the '}' / ']' aligns with its opening line.
void VmaJsonWriter::WriteIndent(bool oneLess)
{
    if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
    {
        m_SB.AddNewLine();
        size_t count = m_Stack.size();
        if (count > 0 && oneLess)
        {
            --count;
        }
        for (size_t i = 0; i < count; ++i)
        {
            m_SB.Add(INDENT);
        }
    }
}
  5066. #endif // _VMA_JSON_WRITER_FUNCTIONS
// Writes a VmaDetailedStatistics structure as a JSON object.
// Allocation size min/max and unused-range size min/max are printed only when
// the respective count exceeds 1 (with 0 or 1 entries they carry no extra information).
static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
{
    json.BeginObject();
    json.WriteString("BlockCount");
    json.WriteNumber(stat.statistics.blockCount);
    json.WriteString("BlockBytes");
    json.WriteNumber(stat.statistics.blockBytes);
    json.WriteString("AllocationCount");
    json.WriteNumber(stat.statistics.allocationCount);
    json.WriteString("AllocationBytes");
    json.WriteNumber(stat.statistics.allocationBytes);
    json.WriteString("UnusedRangeCount");
    json.WriteNumber(stat.unusedRangeCount);
    if (stat.statistics.allocationCount > 1)
    {
        json.WriteString("AllocationSizeMin");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("AllocationSizeMax");
        json.WriteNumber(stat.allocationSizeMax);
    }
    if (stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSizeMin");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("UnusedRangeSizeMax");
        json.WriteNumber(stat.unusedRangeSizeMax);
    }
    json.EndObject();
}
  5096. #endif // _VMA_JSON_WRITER
  5097. #ifndef _VMA_MAPPING_HYSTERESIS
/*
Tracks Map/Unmap and Alloc/Free activity on a memory block and decides when to
hold an extra +1 mapping reference count ("hysteresis"), so the block is not
repeatedly mapped and unmapped when such calls alternate frequently.
m_MajorCounter accumulates events pushing towards a state switch;
m_MinorCounter accumulates opposing events and damps the switch back.
Compiled out (all methods become no-ops returning false) when
VMA_MAPPING_HYSTERESIS_ENABLED is 0.
*/
class VmaMappingHysteresis
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis)
public:
    VmaMappingHysteresis() = default;
    // Returns 0 or 1 - the extra mapping reference count currently held.
    uint32_t GetExtraMapping() const { return m_ExtraMapping; }
    // Call when Map was called.
    // Returns true if switched to extra +1 mapping reference count.
    bool PostMap()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 0)
        {
            ++m_MajorCounter;
            // Enough Map events observed - start holding the extra mapping.
            if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING)
            {
                m_ExtraMapping = 1;
                m_MajorCounter = 0;
                m_MinorCounter = 0;
                return true;
            }
        }
        else // m_ExtraMapping == 1
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
        return false;
    }
    // Call when Unmap was called.
    void PostUnmap()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 0)
            ++m_MajorCounter;
        else // m_ExtraMapping == 1
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
    }
    // Call when allocation was made from the memory block.
    void PostAlloc()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 1)
            ++m_MajorCounter;
        else // m_ExtraMapping == 0
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
    }
    // Call when allocation was freed from the memory block.
    // Returns true if switched to extra -1 mapping reference count.
    bool PostFree()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 1)
        {
            ++m_MajorCounter;
            // Drop the extra mapping only when Free events clearly dominate.
            if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING &&
                m_MajorCounter > m_MinorCounter + 1)
            {
                m_ExtraMapping = 0;
                m_MajorCounter = 0;
                m_MinorCounter = 0;
                return true;
            }
        }
        else // m_ExtraMapping == 0
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
        return false;
    }
private:
    // Threshold of major events required before switching state.
    static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7;
    uint32_t m_MinorCounter = 0;
    uint32_t m_MajorCounter = 0;
    uint32_t m_ExtraMapping = 0; // 0 or 1.
    // Registers an opposing event: it first catches up with the major counter,
    // then starts decaying both counters together.
    void PostMinorCounter()
    {
        if(m_MinorCounter < m_MajorCounter)
        {
            ++m_MinorCounter;
        }
        else if(m_MajorCounter > 0)
        {
            --m_MajorCounter;
            --m_MinorCounter;
        }
    }
};
  5185. #endif // _VMA_MAPPING_HYSTERESIS
  5186. #ifndef _VMA_DEVICE_MEMORY_BLOCK
  5187. /*
  5188. Represents a single block of device memory (`VkDeviceMemory`) with all the
  5189. data about its regions (aka suballocations, #VmaAllocation), assigned and free.
  5190. Thread-safety:
  5191. - Access to m_pMetadata must be externally synchronized.
  5192. - Map, Unmap, Bind* are synchronized internally.
  5193. */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block. Externally synchronized (see class comment).
    VmaBlockMetadata* m_pMetadata;
    VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    ~VmaDeviceMemoryBlock();
    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm,
        VkDeviceSize bufferImageGranularity);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);
    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }
    uint32_t GetMapRefCount() const { return m_MapCount; }
    // Call when allocation/free was made from m_pMetadata.
    // Used for m_MappingHysteresis.
    void PostAlloc(VmaAllocator hAllocator);
    void PostFree(VmaAllocator hAllocator);
    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkResult CheckCorruption(VmaAllocator hAllocator);
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);
    VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);
private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;
    /*
    Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_MapAndBindMutex;
    VmaMappingHysteresis m_MappingHysteresis;
    uint32_t m_MapCount;     // Mapping reference count; protected by m_MapAndBindMutex.
    void* m_pMappedData;     // Non-null while the block is mapped; protected by m_MapAndBindMutex.
};
  5258. #endif // _VMA_DEVICE_MEMORY_BLOCK
  5259. #ifndef _VMA_ALLOCATION_T
// Internal representation of a single allocation (#VmaAllocation handle).
// Either a suballocation inside a VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK)
// or a dedicated VkDeviceMemory object (ALLOCATION_TYPE_DEDICATED); the union
// below stores the data for whichever variant is active (selected by m_Type).
struct VmaAllocation_T
{
    friend struct VmaDedicatedAllocationListItemTraits;
    // Bit flags stored in m_Flags.
    enum FLAGS
    {
        FLAG_PERSISTENT_MAP = 0x01,   // Created with MAPPED flag - stays mapped.
        FLAG_MAPPING_ALLOWED = 0x02,  // Host access allowed - Map() is legal.
    };
public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };
    // This struct is allocated using VmaPoolAllocator.
    VmaAllocation_T(bool mappingAllowed);
    ~VmaAllocation_T();
    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VmaAllocHandle allocHandle,
        VkDeviceSize alignment,
        VkDeviceSize size,
        uint32_t memoryTypeIndex,
        VmaSuballocationType suballocationType,
        bool mapped);
    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size);
    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    const char* GetName() const { return m_pName; }
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    // Valid only for ALLOCATION_TYPE_BLOCK (asserted).
    VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; }
    bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; }
    void SetName(VmaAllocator hAllocator, const char* pName);
    void FreeName(VmaAllocator hAllocator);
    uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation);
    VmaAllocHandle GetAllocHandle() const;
    VkDeviceSize GetOffset() const;
    VmaPool GetParentPool() const;
    VkDeviceMemory GetMemory() const;
    void* GetMappedData() const;
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);
#if VMA_STATS_STRING_ENABLED
    VmaBufferImageUsage GetBufferImageUsage() const { return m_BufferImageUsage; }
    // Records buffer usage flags for statistics; may be set only once (asserted).
    void InitBufferUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5)
    {
        VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN);
        m_BufferImageUsage = VmaBufferImageUsage(createInfo, useKhrMaintenance5);
    }
    // Records image usage flags for statistics; may be set only once (asserted).
    void InitImageUsage(const VkImageCreateInfo &createInfo)
    {
        VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN);
        m_BufferImageUsage = VmaBufferImageUsage(createInfo);
    }
    void PrintParameters(class VmaJsonWriter& json) const;
#endif
private:
    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VmaAllocHandle m_AllocHandle;
    };
    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
        // Intrusive doubly-linked list links used by VmaDedicatedAllocationList.
        VmaAllocation_T* m_Prev;
        VmaAllocation_T* m_Next;
    };
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    char* m_pName;
    uint32_t m_MemoryTypeIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS
#if VMA_STATS_STRING_ENABLED
    VmaBufferImageUsage m_BufferImageUsage; // 0 if unknown.
#endif
};
  5368. #endif // _VMA_ALLOCATION_T
  5369. #ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
// Traits adapter that lets VmaIntrusiveLinkedList link VmaAllocation_T objects
// through the m_Prev/m_Next pointers of their DedicatedAllocation union member.
// Only valid for allocations of ALLOCATION_TYPE_DEDICATED (asserted).
struct VmaDedicatedAllocationListItemTraits
{
    typedef VmaAllocation_T ItemType;
    static ItemType* GetPrev(const ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Prev;
    }
    static ItemType* GetNext(const ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Next;
    }
    static ItemType*& AccessPrev(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Prev;
    }
    static ItemType*& AccessNext(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Next;
    }
};
  5394. #endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
  5395. #ifndef _VMA_DEDICATED_ALLOCATION_LIST
  5396. /*
  5397. Stores linked list of VmaAllocation_T objects.
  5398. Thread-safe, synchronized internally.
  5399. */
class VmaDedicatedAllocationList
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList)
public:
    VmaDedicatedAllocationList() {}
    ~VmaDedicatedAllocationList();
    // useMutex = false disables internal locking (for single-threaded allocators).
    void Init(bool useMutex) { m_UseMutex = useMutex; }
    bool Validate();
    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
    void AddStatistics(VmaStatistics& inoutStats);
#if VMA_STATS_STRING_ENABLED
    // Writes JSON array with the list of allocations.
    void BuildStatsString(VmaJsonWriter& json);
#endif
    bool IsEmpty();
    void Register(VmaAllocation alloc);
    void Unregister(VmaAllocation alloc);
private:
    typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
    bool m_UseMutex = true;
    VMA_RW_MUTEX m_Mutex; // Guards m_AllocationList when m_UseMutex is true.
    DedicatedAllocationLinkedList m_AllocationList;
};
  5423. #ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
VmaDedicatedAllocationList::~VmaDedicatedAllocationList()
{
    VMA_HEAVY_ASSERT(Validate());
    // A non-empty list at destruction means the application leaked dedicated allocations.
    if (!m_AllocationList.IsEmpty())
    {
        VMA_ASSERT_LEAK(false && "Unfreed dedicated allocations found!");
    }
}
  5432. bool VmaDedicatedAllocationList::Validate()
  5433. {
  5434. const size_t declaredCount = m_AllocationList.GetCount();
  5435. size_t actualCount = 0;
  5436. VmaMutexLockRead lock(m_Mutex, m_UseMutex);
  5437. for (VmaAllocation alloc = m_AllocationList.Front();
  5438. alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
  5439. {
  5440. ++actualCount;
  5441. }
  5442. VMA_VALIDATE(actualCount == declaredCount);
  5443. return true;
  5444. }
  5445. void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
  5446. {
  5447. for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item))
  5448. {
  5449. const VkDeviceSize size = item->GetSize();
  5450. inoutStats.statistics.blockCount++;
  5451. inoutStats.statistics.blockBytes += size;
  5452. VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize());
  5453. }
  5454. }
// Accumulates basic statistics over all dedicated allocations in the list.
// Each dedicated allocation counts as one block and one allocation of the same size.
void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats)
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount();
    inoutStats.blockCount += allocCount;
    inoutStats.allocationCount += allocCount;
    for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item))
    {
        const VkDeviceSize size = item->GetSize();
        inoutStats.blockBytes += size;
        inoutStats.allocationBytes += size;
    }
}
  5468. #if VMA_STATS_STRING_ENABLED
// Writes a JSON array with one single-line object per dedicated allocation.
void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    json.BeginArray();
    for (VmaAllocation alloc = m_AllocationList.Front();
        alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
    {
        json.BeginObject(true);
        alloc->PrintParameters(json);
        json.EndObject();
    }
    json.EndArray();
}
  5482. #endif // VMA_STATS_STRING_ENABLED
// Thread-safe emptiness check.
bool VmaDedicatedAllocationList::IsEmpty()
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    return m_AllocationList.IsEmpty();
}
// Adds a dedicated allocation to the list (thread-safe).
void VmaDedicatedAllocationList::Register(VmaAllocation alloc)
{
    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
    m_AllocationList.PushBack(alloc);
}
// Removes a dedicated allocation from the list (thread-safe).
void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc)
{
    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
    m_AllocationList.Remove(alloc);
}
  5498. #endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
  5499. #endif // _VMA_DEDICATED_ALLOCATION_LIST
  5500. #ifndef _VMA_SUBALLOCATION
  5501. /*
  5502. Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
  5503. allocated memory block or free.
  5504. */
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    void* userData;
    VmaSuballocationType type;
};
// Comparator for offsets.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Reverse comparator for offsets - orders suballocations from highest offset down.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
// Orders suballocation-list iterators by the size of the pointed-to suballocation.
// The heterogeneous overload taking VkDeviceSize enables binary search by raw size.
struct VmaSuballocationItemSizeLess
{
    bool operator()(const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
  5540. #endif // _VMA_SUBALLOCATION
  5541. #ifndef _VMA_ALLOCATION_REQUEST
  5542. /*
  5543. Parameters of planned allocation inside a VmaDeviceMemoryBlock.
  5544. item points to a FREE suballocation.
  5545. */
struct VmaAllocationRequest
{
    VmaAllocHandle allocHandle;   // Handle identifying where the allocation will be placed.
    VkDeviceSize size;            // Total size to consume, including any internal margins.
    VmaSuballocationList::iterator item; // FREE suballocation the request will carve from.
    void* customData;             // Metadata-implementation-specific payload.
    uint64_t algorithmData;       // Metadata-implementation-specific payload.
    VmaAllocationRequestType type;
};
  5555. #endif // _VMA_ALLOCATION_REQUEST
  5556. #ifndef _VMA_BLOCK_METADATA
  5557. /*
  5558. Data structure used for bookkeeping of allocations and unused ranges of memory
  5559. in a single VkDeviceMemory block.
  5560. */
// Abstract base class for the bookkeeping algorithms (linear, TLSF, ...) that
// manage allocations and free ranges within a single VkDeviceMemory block or
// virtual block.
class VmaBlockMetadata
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata)
public:
    // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object.
    VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata() = default;
    virtual void Init(VkDeviceSize size) { m_Size = size; }
    bool IsVirtual() const { return m_IsVirtual; }
    VkDeviceSize GetSize() const { return m_Size; }
    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    virtual size_t GetAllocationCount() const = 0;
    virtual size_t GetFreeRegionsCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;
    virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0;
    virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0;
    virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0;
    virtual VmaAllocHandle GetAllocationListBegin() const = 0;
    virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0;
    virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0;
    virtual void AddStatistics(VmaStatistics& inoutStats) const = 0;
#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif
    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) = 0;
    // Frees suballocation assigned to given memory region.
    virtual void Free(VmaAllocHandle allocHandle) = 0;
    // Frees all allocations.
    // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!
    virtual void Clear() = 0;
    virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;
    virtual void DebugLogAllAllocations() const = 0;
protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    // Virtual blocks never use a debug margin; real blocks use VMA_DEBUG_MARGIN.
    VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); }
    void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;
#if VMA_STATS_STRING_ENABLED
    // mapRefCount == UINT32_MAX means unspecified.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset, VkDeviceSize size, void* userData) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif
private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const VkDeviceSize m_BufferImageGranularity;
    const bool m_IsVirtual;
};
  5639. #ifndef _VMA_BLOCK_METADATA_FUNCTIONS
// Size starts at 0; the actual block size is set later via Init().
VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : m_Size(0),
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_BufferImageGranularity(bufferImageGranularity),
    m_IsVirtual(isVirtual) {}
// Logs one unfreed (leaked) allocation. For virtual blocks, userData is the
// application's opaque pointer; for real blocks it is the VmaAllocation,
// which is dereferenced to report its user data, name, and type.
void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
{
    if (IsVirtual())
    {
        VMA_LEAK_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p", offset, size, userData);
    }
    else
    {
        VMA_ASSERT(userData != VMA_NULL);
        VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);
        userData = allocation->GetUserData();
        const char* name = allocation->GetName();
#if VMA_STATS_STRING_ENABLED
        VMA_LEAK_LOG_FORMAT("UNFREED ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p; Name: %s; Type: %s; Usage: %" PRIu64,
            offset, size, userData, name ? name : "vma_empty",
            VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
            (uint64_t)allocation->GetBufferImageUsage().Value);
#else
        VMA_LEAK_LOG_FORMAT("UNFREED ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p; Name: %s; Type: %u",
            offset, size, userData, name ? name : "vma_empty",
            (unsigned)allocation->GetSuballocationType());
#endif // VMA_STATS_STRING_ENABLED
    }
}
  5670. #if VMA_STATS_STRING_ENABLED
  5671. void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
  5672. VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
  5673. {
  5674. json.WriteString("TotalBytes");
  5675. json.WriteNumber(GetSize());
  5676. json.WriteString("UnusedBytes");
  5677. json.WriteNumber(unusedBytes);
  5678. json.WriteString("Allocations");
  5679. json.WriteNumber((uint64_t)allocationCount);
  5680. json.WriteString("UnusedRanges");
  5681. json.WriteNumber((uint64_t)unusedRangeCount);
  5682. json.WriteString("Suballocations");
  5683. json.BeginArray();
  5684. }
  5685. void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
  5686. VkDeviceSize offset, VkDeviceSize size, void* userData) const
  5687. {
  5688. json.BeginObject(true);
  5689. json.WriteString("Offset");
  5690. json.WriteNumber(offset);
  5691. if (IsVirtual())
  5692. {
  5693. json.WriteString("Size");
  5694. json.WriteNumber(size);
  5695. if (userData)
  5696. {
  5697. json.WriteString("CustomData");
  5698. json.BeginString();
  5699. json.ContinueString_Pointer(userData);
  5700. json.EndString();
  5701. }
  5702. }
  5703. else
  5704. {
  5705. ((VmaAllocation)userData)->PrintParameters(json);
  5706. }
  5707. json.EndObject();
  5708. }
// Appends one JSON object describing a free range, reported as type FREE.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset, VkDeviceSize size) const
{
    json.BeginObject(true);
    json.WriteString("Offset");
    json.WriteNumber(offset);
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    json.WriteString("Size");
    json.WriteNumber(size);
    json.EndObject();
}
// Closes the "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
}
  5725. #endif // VMA_STATS_STRING_ENABLED
  5726. #endif // _VMA_BLOCK_METADATA_FUNCTIONS
  5727. #endif // _VMA_BLOCK_METADATA
  5728. #ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
  5729. // Before deleting object of this class remember to call 'Destroy()'
  5730. class VmaBlockBufferImageGranularity final
  5731. {
  5732. public:
  5733. struct ValidationContext
  5734. {
  5735. const VkAllocationCallbacks* allocCallbacks;
  5736. uint16_t* pageAllocs;
  5737. };
  5738. VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity);
  5739. ~VmaBlockBufferImageGranularity();
  5740. bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; }
  5741. void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size);
  5742. // Before destroying object you must call free it's memory
  5743. void Destroy(const VkAllocationCallbacks* pAllocationCallbacks);
  5744. void RoundupAllocRequest(VmaSuballocationType allocType,
  5745. VkDeviceSize& inOutAllocSize,
  5746. VkDeviceSize& inOutAllocAlignment) const;
  5747. bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
  5748. VkDeviceSize allocSize,
  5749. VkDeviceSize blockOffset,
  5750. VkDeviceSize blockSize,
  5751. VmaSuballocationType allocType) const;
  5752. void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size);
  5753. void FreePages(VkDeviceSize offset, VkDeviceSize size);
  5754. void Clear();
  5755. ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks,
  5756. bool isVirutal) const;
  5757. bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const;
  5758. bool FinishValidation(ValidationContext& ctx) const;
  5759. private:
  5760. static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;
  5761. struct RegionInfo
  5762. {
  5763. uint8_t allocType;
  5764. uint16_t allocCount;
  5765. };
  5766. VkDeviceSize m_BufferImageGranularity;
  5767. uint32_t m_RegionCount;
  5768. RegionInfo* m_RegionInfo;
  5769. uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }
  5770. uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }
  5771. uint32_t OffsetToPageIndex(VkDeviceSize offset) const;
  5772. void AllocPage(RegionInfo& page, uint8_t allocType);
  5773. };
  5774. #ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
// The region tracking array is allocated lazily in Init(), not here.
VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)
    : m_BufferImageGranularity(bufferImageGranularity),
    m_RegionCount(0),
    m_RegionInfo(VMA_NULL) {}
VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()
{
    // Destroy() must have been called already: the destructor has no access
    // to the allocation callbacks needed to free m_RegionInfo.
    VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!");
}
  5783. void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)
  5784. {
  5785. if (IsEnabled())
  5786. {
  5787. m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));
  5788. m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);
  5789. memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
  5790. }
  5791. }
  5792. void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)
  5793. {
  5794. if (m_RegionInfo)
  5795. {
  5796. vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);
  5797. m_RegionInfo = VMA_NULL;
  5798. }
  5799. }
  5800. void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,
  5801. VkDeviceSize& inOutAllocSize,
  5802. VkDeviceSize& inOutAllocAlignment) const
  5803. {
  5804. if (m_BufferImageGranularity > 1 &&
  5805. m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)
  5806. {
  5807. if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
  5808. allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
  5809. allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
  5810. {
  5811. inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);
  5812. inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);
  5813. }
  5814. }
  5815. }
  5816. bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
  5817. VkDeviceSize allocSize,
  5818. VkDeviceSize blockOffset,
  5819. VkDeviceSize blockSize,
  5820. VmaSuballocationType allocType) const
  5821. {
  5822. if (IsEnabled())
  5823. {
  5824. uint32_t startPage = GetStartPage(inOutAllocOffset);
  5825. if (m_RegionInfo[startPage].allocCount > 0 &&
  5826. VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))
  5827. {
  5828. inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);
  5829. if (blockSize < allocSize + inOutAllocOffset - blockOffset)
  5830. return true;
  5831. ++startPage;
  5832. }
  5833. uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);
  5834. if (endPage != startPage &&
  5835. m_RegionInfo[endPage].allocCount > 0 &&
  5836. VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))
  5837. {
  5838. return true;
  5839. }
  5840. }
  5841. return false;
  5842. }
  5843. void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)
  5844. {
  5845. if (IsEnabled())
  5846. {
  5847. uint32_t startPage = GetStartPage(offset);
  5848. AllocPage(m_RegionInfo[startPage], allocType);
  5849. uint32_t endPage = GetEndPage(offset, size);
  5850. if (startPage != endPage)
  5851. AllocPage(m_RegionInfo[endPage], allocType);
  5852. }
  5853. }
  5854. void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)
  5855. {
  5856. if (IsEnabled())
  5857. {
  5858. uint32_t startPage = GetStartPage(offset);
  5859. --m_RegionInfo[startPage].allocCount;
  5860. if (m_RegionInfo[startPage].allocCount == 0)
  5861. m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
  5862. uint32_t endPage = GetEndPage(offset, size);
  5863. if (startPage != endPage)
  5864. {
  5865. --m_RegionInfo[endPage].allocCount;
  5866. if (m_RegionInfo[endPage].allocCount == 0)
  5867. m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
  5868. }
  5869. }
  5870. }
// Resets all pages to zero allocations (type FREE); the array stays allocated.
void VmaBlockBufferImageGranularity::Clear()
{
    if (m_RegionInfo)
        memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
}
  5876. VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation(
  5877. const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const
  5878. {
  5879. ValidationContext ctx{ pAllocationCallbacks, VMA_NULL };
  5880. if (!isVirutal && IsEnabled())
  5881. {
  5882. ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount);
  5883. memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t));
  5884. }
  5885. return ctx;
  5886. }
  5887. bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx,
  5888. VkDeviceSize offset, VkDeviceSize size) const
  5889. {
  5890. if (IsEnabled())
  5891. {
  5892. uint32_t start = GetStartPage(offset);
  5893. ++ctx.pageAllocs[start];
  5894. VMA_VALIDATE(m_RegionInfo[start].allocCount > 0);
  5895. uint32_t end = GetEndPage(offset, size);
  5896. if (start != end)
  5897. {
  5898. ++ctx.pageAllocs[end];
  5899. VMA_VALIDATE(m_RegionInfo[end].allocCount > 0);
  5900. }
  5901. }
  5902. return true;
  5903. }
  5904. bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const
  5905. {
  5906. // Check proper page structure
  5907. if (IsEnabled())
  5908. {
  5909. VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!");
  5910. for (uint32_t page = 0; page < m_RegionCount; ++page)
  5911. {
  5912. VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount);
  5913. }
  5914. vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount);
  5915. ctx.pageAllocs = VMA_NULL;
  5916. }
  5917. return true;
  5918. }
// Maps a byte offset to its page index by shifting by the MSB position of
// the granularity - a division that is exact only when m_BufferImageGranularity
// is a power of two (assumed throughout this class - TODO confirm upstream).
uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const
{
    return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity));
}
  5923. void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)
  5924. {
  5925. // When current alloc type is free then it can be overridden by new type
  5926. if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))
  5927. page.allocType = allocType;
  5928. ++page.allocCount;
  5929. }
  5930. #endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
  5931. #endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
  5932. #ifndef _VMA_BLOCK_METADATA_LINEAR
  5933. /*
  5934. Allocations and their references in internal data structure look like this:
  5935. if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
  5936. 0 +-------+
  5937. | |
  5938. | |
  5939. | |
  5940. +-------+
  5941. | Alloc | 1st[m_1stNullItemsBeginCount]
  5942. +-------+
  5943. | Alloc | 1st[m_1stNullItemsBeginCount + 1]
  5944. +-------+
  5945. | ... |
  5946. +-------+
  5947. | Alloc | 1st[1st.size() - 1]
  5948. +-------+
  5949. | |
  5950. | |
  5951. | |
  5952. GetSize() +-------+
  5953. if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
  5954. 0 +-------+
  5955. | Alloc | 2nd[0]
  5956. +-------+
  5957. | Alloc | 2nd[1]
  5958. +-------+
  5959. | ... |
  5960. +-------+
  5961. | Alloc | 2nd[2nd.size() - 1]
  5962. +-------+
  5963. | |
  5964. | |
  5965. | |
  5966. +-------+
  5967. | Alloc | 1st[m_1stNullItemsBeginCount]
  5968. +-------+
  5969. | Alloc | 1st[m_1stNullItemsBeginCount + 1]
  5970. +-------+
  5971. | ... |
  5972. +-------+
  5973. | Alloc | 1st[1st.size() - 1]
  5974. +-------+
  5975. | |
  5976. GetSize() +-------+
  5977. if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
  5978. 0 +-------+
  5979. | |
  5980. | |
  5981. | |
  5982. +-------+
  5983. | Alloc | 1st[m_1stNullItemsBeginCount]
  5984. +-------+
  5985. | Alloc | 1st[m_1stNullItemsBeginCount + 1]
  5986. +-------+
  5987. | ... |
  5988. +-------+
  5989. | Alloc | 1st[1st.size() - 1]
  5990. +-------+
  5991. | |
  5992. | |
  5993. | |
  5994. +-------+
  5995. | Alloc | 2nd[2nd.size() - 1]
  5996. +-------+
  5997. | ... |
  5998. +-------+
  5999. | Alloc | 2nd[1]
  6000. +-------+
  6001. | Alloc | 2nd[0]
  6002. GetSize() +-------+
  6003. */
// Linear (ring-buffer / double-stack) block metadata algorithm.
// See the diagram above for the three layouts of the two suballocation vectors.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata_Linear() = default;

    VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
    bool IsEmpty() const override { return GetAllocationCount() == 0; }
    // Alloc handles encode offset + 1, so that handle value 0 can mean "null".
    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }

    void Init(VkDeviceSize size) override;
    bool Validate() const override;
    size_t GetAllocationCount() const override;
    size_t GetFreeRegionsCount() const override;

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
    void AddStatistics(VmaStatistics& inoutStats) const override;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const override;
#endif

    bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) override;

    VkResult CheckCorruption(const void* pBlockData) override;

    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) override;

    void Free(VmaAllocHandle allocHandle) override;
    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
    VmaAllocHandle GetAllocationListBegin() const override;
    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
    void Clear() override;
    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
    void DebugLogAllAllocations() const override;

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;
    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
    bool ShouldCompact1st() const;
    void CleanupAfterFree();

    bool CreateAllocationRequest_LowerAddress(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
    bool CreateAllocationRequest_UpperAddress(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
};
  6098. #ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
// Both suballocation vectors start empty; vector 0 is initially the "1st" one.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0) {}
// Sets the block size; the entire block starts out as free space.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
// Checks all internal invariants: suballocations in each vector are ordered
// by offset with at least debugMargin between them, null-item counters match
// the vectors' contents, per-item data agrees with the owning VmaAllocation
// (for non-virtual blocks), and m_SumFreeSize is consistent with the sum of
// used sizes. Returns false (via VMA_VALIDATE) on the first violation.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd is non-empty exactly when a mode is set; in ring-buffer mode 1st
    // may be empty only if 2nd is empty too.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if (!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
    }
    if (!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    const VkDeviceSize debugMargin = GetDebugMargin();
    VkDeviceSize offset = 0;

    // In ring-buffer mode the 2nd vector occupies the lowest offsets - walk it first.
    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for (size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // For non-virtual blocks, userData is the owning VmaAllocation.
            VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
            if (!IsVirtual())
            {
                VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
            }
            VMA_VALIDATE(suballoc.offset >= offset);

            if (!currFree)
            {
                if (!IsVirtual())
                {
                    // Alloc handle encodes offset + 1.
                    VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
                    VMA_VALIDATE(alloc->GetSize() == suballoc.size);
                }
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + debugMargin;
        }
        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading null items of the 1st vector must be fully-cleared entries.
    for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.userData == VMA_NULL);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;
    for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
        if (!IsVirtual())
        {
            VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
        }
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if (!currFree)
        {
            if (!IsVirtual())
            {
                // Alloc handle encodes offset + 1.
                VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
                VMA_VALIDATE(alloc->GetSize() == suballoc.size);
            }
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + debugMargin;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector occupies the highest offsets and is
    // stored back-to-front - walk it from the last index down.
    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for (size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
            if (!IsVirtual())
            {
                VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
            }
            VMA_VALIDATE(suballoc.offset >= offset);

            if (!currFree)
            {
                if (!IsVirtual())
                {
                    // Alloc handle encodes offset + 1.
                    VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
                    VMA_VALIDATE(alloc->GetSize() == suballoc.size);
                }
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + debugMargin;
        }
        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    return true;
}
  6241. size_t VmaBlockMetadata_Linear::GetAllocationCount() const
  6242. {
  6243. return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
  6244. AccessSuballocations2nd().size() - m_2ndNullItemsCount;
  6245. }
size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const
{
    // Function only used for defragmentation, which is disabled for this
    // algorithm - reaching here is a bug in the caller.
    VMA_ASSERT(0);
    return SIZE_MAX;
}
// Accumulates this block's detailed statistics (allocations and unused
// ranges) into inoutStats. Walks the address space in offset order in up to
// three phases: the 2nd vector in ring-buffer mode (lowest offsets), then the
// 1st vector, then the 2nd vector in double-stack mode (highest offsets,
// stored back-to-front).
void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    inoutStats.statistics.blockCount++;
    inoutStats.statistics.blockBytes += size;

    VkDeviceSize lastOffset = 0;

    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode 1st is non-empty (see Validate()); the first
        // used 1st item bounds the space occupied by the 2nd vector.
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while (lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAllocIndex to the end.
            while (nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                ++nextAlloc2ndIndex;
            }
            // Found non-null allocation.
            if (nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
                }
                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                if (lastOffset < freeSpace2ndTo1stEnd)
                {
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
                }
                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    // In double-stack mode the 1st vector's space ends where the upper stack begins.
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while (lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while (nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
        {
            ++nextAlloc1stIndex;
        }
        // Found non-null allocation.
        if (nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
            // 1. Process free space before this allocation.
            if (lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
            }
            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // There is free space from lastOffset to freeSpace1stTo2ndEnd.
            if (lastOffset < freeSpace1stTo2ndEnd)
            {
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
            }
            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Upper stack is stored back-to-front: highest index = lowest offset.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while (lastOffset < size)
        {
            // Find next non-null allocation or move nextAllocIndex to the end.
            while (nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                --nextAlloc2ndIndex;
            }
            // Found non-null allocation.
            if (nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
                }
                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                // There is free space from lastOffset to size.
                if (lastOffset < size)
                {
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
                }
                // End of loop.
                lastOffset = size;
            }
        }
    }
}
  6392. void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const
  6393. {
  6394. const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
  6395. const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
  6396. const VkDeviceSize size = GetSize();
  6397. const size_t suballoc1stCount = suballocations1st.size();
  6398. const size_t suballoc2ndCount = suballocations2nd.size();
  6399. inoutStats.blockCount++;
  6400. inoutStats.blockBytes += size;
  6401. inoutStats.allocationBytes += size - m_SumFreeSize;
  6402. VkDeviceSize lastOffset = 0;
  6403. if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
  6404. {
  6405. const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
  6406. size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
  6407. while (lastOffset < freeSpace2ndTo1stEnd)
  6408. {
  6409. // Find next non-null allocation or move nextAlloc2ndIndex to the end.
  6410. while (nextAlloc2ndIndex < suballoc2ndCount &&
  6411. suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
  6412. {
  6413. ++nextAlloc2ndIndex;
  6414. }
  6415. // Found non-null allocation.
  6416. if (nextAlloc2ndIndex < suballoc2ndCount)
  6417. {
  6418. const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
  6419. // Process this allocation.
  6420. // There is allocation with suballoc.offset, suballoc.size.
  6421. ++inoutStats.allocationCount;
  6422. // Prepare for next iteration.
  6423. lastOffset = suballoc.offset + suballoc.size;
  6424. ++nextAlloc2ndIndex;
  6425. }
  6426. // We are at the end.
  6427. else
  6428. {
  6429. // End of loop.
  6430. lastOffset = freeSpace2ndTo1stEnd;
  6431. }
  6432. }
  6433. }
  6434. size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
  6435. const VkDeviceSize freeSpace1stTo2ndEnd =
  6436. m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
  6437. while (lastOffset < freeSpace1stTo2ndEnd)
  6438. {
  6439. // Find next non-null allocation or move nextAllocIndex to the end.
  6440. while (nextAlloc1stIndex < suballoc1stCount &&
  6441. suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
  6442. {
  6443. ++nextAlloc1stIndex;
  6444. }
  6445. // Found non-null allocation.
  6446. if (nextAlloc1stIndex < suballoc1stCount)
  6447. {
  6448. const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
  6449. // Process this allocation.
  6450. // There is allocation with suballoc.offset, suballoc.size.
  6451. ++inoutStats.allocationCount;
  6452. // Prepare for next iteration.
  6453. lastOffset = suballoc.offset + suballoc.size;
  6454. ++nextAlloc1stIndex;
  6455. }
  6456. // We are at the end.
  6457. else
  6458. {
  6459. // End of loop.
  6460. lastOffset = freeSpace1stTo2ndEnd;
  6461. }
  6462. }
  6463. if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
  6464. {
  6465. size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
  6466. while (lastOffset < size)
  6467. {
  6468. // Find next non-null allocation or move nextAlloc2ndIndex to the end.
  6469. while (nextAlloc2ndIndex != SIZE_MAX &&
  6470. suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
  6471. {
  6472. --nextAlloc2ndIndex;
  6473. }
  6474. // Found non-null allocation.
  6475. if (nextAlloc2ndIndex != SIZE_MAX)
  6476. {
  6477. const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
  6478. // Process this allocation.
  6479. // There is allocation with suballoc.offset, suballoc.size.
  6480. ++inoutStats.allocationCount;
  6481. // Prepare for next iteration.
  6482. lastOffset = suballoc.offset + suballoc.size;
  6483. --nextAlloc2ndIndex;
  6484. }
  6485. // We are at the end.
  6486. else
  6487. {
  6488. // End of loop.
  6489. lastOffset = size;
  6490. }
  6491. }
  6492. }
  6493. }
  6494. #if VMA_STATS_STRING_ENABLED
// Writes a JSON description of this block: totals plus the ordered list of
// allocations and unused ranges. Address order is: 2nd vector (ring-buffer
// part), then 1st vector, then 2nd vector (double-stack part).
// Two passes over the same traversal: the first only counts items and bytes
// (PrintDetailedMap_Begin needs the totals up front), the second emits each
// allocation / unused range.
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // Ring-buffer part of the 2nd vector occupies [0, offset of first live 1st item).
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while (lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while (nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if (lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while (lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while (nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if (nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if (lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if (lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Double-stack part: iterate the 2nd vector backwards to visit items
        // in ascending address order; SIZE_MAX is the "before begin" sentinel.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while (lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while (nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if (lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    // Repeat the identical traversal, this time emitting JSON for each item.
    lastOffset = 0;

    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while (lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while (nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if (lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Reuses nextAlloc1stIndex / freeSpace1stTo2ndEnd computed in the first pass.
    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while (lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while (nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if (nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if (lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if (lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while (lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while (nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if (lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
  6767. #endif // VMA_STATS_STRING_ENABLED
  6768. bool VmaBlockMetadata_Linear::CreateAllocationRequest(
  6769. VkDeviceSize allocSize,
  6770. VkDeviceSize allocAlignment,
  6771. bool upperAddress,
  6772. VmaSuballocationType allocType,
  6773. uint32_t strategy,
  6774. VmaAllocationRequest* pAllocationRequest)
  6775. {
  6776. VMA_ASSERT(allocSize > 0);
  6777. VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
  6778. VMA_ASSERT(pAllocationRequest != VMA_NULL);
  6779. VMA_HEAVY_ASSERT(Validate());
  6780. if(allocSize > GetSize())
  6781. return false;
  6782. pAllocationRequest->size = allocSize;
  6783. return upperAddress ?
  6784. CreateAllocationRequest_UpperAddress(
  6785. allocSize, allocAlignment, allocType, strategy, pAllocationRequest) :
  6786. CreateAllocationRequest_LowerAddress(
  6787. allocSize, allocAlignment, allocType, strategy, pAllocationRequest);
  6788. }
  6789. VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
  6790. {
  6791. VMA_ASSERT(!IsVirtual());
  6792. SuballocationVectorType& suballocations1st = AccessSuballocations1st();
  6793. for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
  6794. {
  6795. const VmaSuballocation& suballoc = suballocations1st[i];
  6796. if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
  6797. {
  6798. if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
  6799. {
  6800. VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
  6801. return VK_ERROR_UNKNOWN_COPY;
  6802. }
  6803. }
  6804. }
  6805. SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
  6806. for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
  6807. {
  6808. const VmaSuballocation& suballoc = suballocations2nd[i];
  6809. if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
  6810. {
  6811. if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
  6812. {
  6813. VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
  6814. return VK_ERROR_UNKNOWN_COPY;
  6815. }
  6816. }
  6817. }
  6818. return VK_SUCCESS;
  6819. }
// Commits an allocation request previously produced by CreateAllocationRequest():
// appends the new suballocation to the appropriate vector, updates the 2nd-vector
// mode if this allocation starts/continues a double stack or ring buffer, and
// subtracts the size from m_SumFreeSize.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    void* userData)
{
    // Handles encode offset + 1 (so that 0 can serve as a null handle).
    const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
    const VmaSuballocation newSuballoc = { offset, request.size, userData, type };

    switch (request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
    {
        // Upper-address allocation turns (or keeps) the 2nd vector into a double stack.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    break;
    case VmaAllocationRequestType::EndOf1st:
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // New item must not overlap the current end of the 1st vector.
        VMA_ASSERT(suballocations1st.empty() ||
            offset >= suballocations1st.back().offset + suballocations1st.back().size);
        // Check if it fits before the end of the block.
        VMA_ASSERT(offset + request.size <= GetSize());

        suballocations1st.push_back(newSuballoc);
    }
    break;
    case VmaAllocationRequestType::EndOf2nd:
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();
        // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
        VMA_ASSERT(!suballocations1st.empty() &&
            offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

        switch (m_2ndVectorMode)
        {
        case SECOND_VECTOR_EMPTY:
            // First allocation from second part ring buffer.
            VMA_ASSERT(suballocations2nd.empty());
            m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
            break;
        case SECOND_VECTOR_RING_BUFFER:
            // 2-part ring buffer is already started.
            VMA_ASSERT(!suballocations2nd.empty());
            break;
        case SECOND_VECTOR_DOUBLE_STACK:
            VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
            break;
        default:
            VMA_ASSERT(0);
        }

        suballocations2nd.push_back(newSuballoc);
    }
    break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    // Account for the consumed space.
    m_SumFreeSize -= newSuballoc.size;
}
// Releases the allocation identified by allocHandle (which encodes offset + 1).
// Tries cheap O(1) fast paths first (first item of the 1st vector, last item of
// the vector that currently owns the end), then falls back to binary search;
// middle items are only marked free (null) and compacted later by
// CleanupAfterFree().
void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    VkDeviceSize offset = (VkDeviceSize)allocHandle - 1;

    if (!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if (firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.userData = VMA_NULL;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if (lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if (lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    VmaSuballocation refSuballoc;
    refSuballoc.offset = offset;
    // Rest of members stays uninitialized intentionally for better performance.

    // Item from the middle of 1st vector.
    {
        const SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if (it != suballocations1st.end())
        {
            // Keep the element in place but mark it free; compaction happens later.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->userData = VMA_NULL;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        // The 2nd vector is searched ascending in ring-buffer mode and
        // descending in double-stack mode, hence the comparator choice.
        const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if (it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->userData = VMA_NULL;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
  6962. void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
  6963. {
  6964. outInfo.offset = (VkDeviceSize)allocHandle - 1;
  6965. VmaSuballocation& suballoc = FindSuballocation(outInfo.offset);
  6966. outInfo.size = suballoc.size;
  6967. outInfo.pUserData = suballoc.userData;
  6968. }
  6969. void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
  6970. {
  6971. return FindSuballocation((VkDeviceSize)allocHandle - 1).userData;
  6972. }
  6973. VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const
  6974. {
  6975. // Function only used for defragmentation, which is disabled for this algorithm
  6976. VMA_ASSERT(0);
  6977. return VK_NULL_HANDLE;
  6978. }
  6979. VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const
  6980. {
  6981. // Function only used for defragmentation, which is disabled for this algorithm
  6982. VMA_ASSERT(0);
  6983. return VK_NULL_HANDLE;
  6984. }
  6985. VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const
  6986. {
  6987. // Function only used for defragmentation, which is disabled for this algorithm
  6988. VMA_ASSERT(0);
  6989. return 0;
  6990. }
  6991. void VmaBlockMetadata_Linear::Clear()
  6992. {
  6993. m_SumFreeSize = GetSize();
  6994. m_Suballocations0.clear();
  6995. m_Suballocations1.clear();
  6996. // Leaving m_1stVectorIndex unchanged - it doesn't matter.
  6997. m_2ndVectorMode = SECOND_VECTOR_EMPTY;
  6998. m_1stNullItemsBeginCount = 0;
  6999. m_1stNullItemsMiddleCount = 0;
  7000. m_2ndNullItemsCount = 0;
  7001. }
  7002. void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
  7003. {
  7004. VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
  7005. suballoc.userData = userData;
  7006. }
  7007. void VmaBlockMetadata_Linear::DebugLogAllAllocations() const
  7008. {
  7009. const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
  7010. for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
  7011. if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
  7012. DebugLogAllocation(it->offset, it->size, it->userData);
  7013. const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
  7014. for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
  7015. if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
  7016. DebugLogAllocation(it->offset, it->size, it->userData);
  7017. }
  7018. VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
  7019. {
  7020. const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
  7021. const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
  7022. VmaSuballocation refSuballoc;
  7023. refSuballoc.offset = offset;
  7024. // Rest of members stays uninitialized intentionally for better performance.
  7025. // Item from the 1st vector.
  7026. {
  7027. SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
  7028. suballocations1st.begin() + m_1stNullItemsBeginCount,
  7029. suballocations1st.end(),
  7030. refSuballoc,
  7031. VmaSuballocationOffsetLess());
  7032. if (it != suballocations1st.end())
  7033. {
  7034. return const_cast<VmaSuballocation&>(*it);
  7035. }
  7036. }
  7037. if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
  7038. {
  7039. // Rest of members stays uninitialized intentionally for better performance.
  7040. SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
  7041. VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
  7042. VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
  7043. if (it != suballocations2nd.end())
  7044. {
  7045. return const_cast<VmaSuballocation&>(*it);
  7046. }
  7047. }
  7048. VMA_ASSERT(0 && "Allocation not found in linear allocator!");
  7049. return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
  7050. }
  7051. bool VmaBlockMetadata_Linear::ShouldCompact1st() const
  7052. {
  7053. const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
  7054. const size_t suballocCount = AccessSuballocations1st().size();
  7055. return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
  7056. }
// Housekeeping executed after every Free(): extends the null prefix of the 1st
// vector, trims trailing/leading null items from both vectors, optionally
// compacts the 1st vector (see ShouldCompact1st()), and - when the 1st vector
// drains while a ring buffer exists - promotes the 2nd vector to become the
// new 1st by flipping m_1stVectorIndex.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if (IsEmpty())
    {
        // No live allocations remain: reset everything to the pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while (m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            // A middle null item adjacent to the prefix becomes part of the prefix.
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while (m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while (m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while (m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if (ShouldCompact1st())
        {
            // Rewrite the 1st vector in place, keeping only non-free items
            // in their original order, then drop the tail.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    ++srcIndex;
                }
                if (dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if (suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Fold the promoted vector's leading null items into the prefix count.
                while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which physical vector plays the role of "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
  7151. bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
  7152. VkDeviceSize allocSize,
  7153. VkDeviceSize allocAlignment,
  7154. VmaSuballocationType allocType,
  7155. uint32_t strategy,
  7156. VmaAllocationRequest* pAllocationRequest)
  7157. {
  7158. const VkDeviceSize blockSize = GetSize();
  7159. const VkDeviceSize debugMargin = GetDebugMargin();
  7160. const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
  7161. SuballocationVectorType& suballocations1st = AccessSuballocations1st();
  7162. SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
  7163. if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
  7164. {
  7165. // Try to allocate at the end of 1st vector.
  7166. VkDeviceSize resultBaseOffset = 0;
  7167. if (!suballocations1st.empty())
  7168. {
  7169. const VmaSuballocation& lastSuballoc = suballocations1st.back();
  7170. resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
  7171. }
  7172. // Start from offset equal to beginning of free space.
  7173. VkDeviceSize resultOffset = resultBaseOffset;
  7174. // Apply alignment.
  7175. resultOffset = VmaAlignUp(resultOffset, allocAlignment);
  7176. // Check previous suballocations for BufferImageGranularity conflicts.
  7177. // Make bigger alignment if necessary.
  7178. if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
  7179. {
  7180. bool bufferImageGranularityConflict = false;
  7181. for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
  7182. {
  7183. const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
  7184. if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
  7185. {
  7186. if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
  7187. {
  7188. bufferImageGranularityConflict = true;
  7189. break;
  7190. }
  7191. }
  7192. else
  7193. // Already on previous page.
  7194. break;
  7195. }
  7196. if (bufferImageGranularityConflict)
  7197. {
  7198. resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
  7199. }
  7200. }
  7201. const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
  7202. suballocations2nd.back().offset : blockSize;
  7203. // There is enough free space at the end after alignment.
  7204. if (resultOffset + allocSize + debugMargin <= freeSpaceEnd)
  7205. {
  7206. // Check next suballocations for BufferImageGranularity conflicts.
  7207. // If conflict exists, allocation cannot be made here.
  7208. if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
  7209. {
  7210. for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
  7211. {
  7212. const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
  7213. if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
  7214. {
  7215. if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
  7216. {
  7217. return false;
  7218. }
  7219. }
  7220. else
  7221. {
  7222. // Already on previous page.
  7223. break;
  7224. }
  7225. }
  7226. }
  7227. // All tests passed: Success.
  7228. pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
  7229. // pAllocationRequest->item, customData unused.
  7230. pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
  7231. return true;
  7232. }
  7233. }
  7234. // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
  7235. // beginning of 1st vector as the end of free space.
  7236. if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
  7237. {
  7238. VMA_ASSERT(!suballocations1st.empty());
  7239. VkDeviceSize resultBaseOffset = 0;
  7240. if (!suballocations2nd.empty())
  7241. {
  7242. const VmaSuballocation& lastSuballoc = suballocations2nd.back();
  7243. resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
  7244. }
  7245. // Start from offset equal to beginning of free space.
  7246. VkDeviceSize resultOffset = resultBaseOffset;
  7247. // Apply alignment.
  7248. resultOffset = VmaAlignUp(resultOffset, allocAlignment);
  7249. // Check previous suballocations for BufferImageGranularity conflicts.
  7250. // Make bigger alignment if necessary.
  7251. if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
  7252. {
  7253. bool bufferImageGranularityConflict = false;
  7254. for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
  7255. {
  7256. const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
  7257. if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
  7258. {
  7259. if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
  7260. {
  7261. bufferImageGranularityConflict = true;
  7262. break;
  7263. }
  7264. }
  7265. else
  7266. // Already on previous page.
  7267. break;
  7268. }
  7269. if (bufferImageGranularityConflict)
  7270. {
  7271. resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
  7272. }
  7273. }
  7274. size_t index1st = m_1stNullItemsBeginCount;
  7275. // There is enough free space at the end after alignment.
  7276. if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) ||
  7277. (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset))
  7278. {
  7279. // Check next suballocations for BufferImageGranularity conflicts.
  7280. // If conflict exists, allocation cannot be made here.
  7281. if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
  7282. {
  7283. for (size_t nextSuballocIndex = index1st;
  7284. nextSuballocIndex < suballocations1st.size();
  7285. nextSuballocIndex++)
  7286. {
  7287. const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
  7288. if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
  7289. {
  7290. if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
  7291. {
  7292. return false;
  7293. }
  7294. }
  7295. else
  7296. {
  7297. // Already on next page.
  7298. break;
  7299. }
  7300. }
  7301. }
  7302. // All tests passed: Success.
  7303. pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
  7304. pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
  7305. // pAllocationRequest->item, customData unused.
  7306. return true;
  7307. }
  7308. }
  7309. return false;
  7310. }
// Tries to create an allocation growing downward from the top of the block
// (upper address), i.e. appending to the "2nd vector" of the double stack.
// On success fills *pAllocationRequest and returns true; otherwise returns false.
// NOTE(review): `strategy` is accepted but not used by this path — confirm intentional.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize blockSize = GetSize();
    const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // Upper-address (double stack) usage is incompatible with ring-buffer usage
    // of the same pool.
    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if (allocSize > blockSize)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = blockSize - allocSize;
    if (!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        resultBaseOffset = lastSuballoc.offset - allocSize;
        if (allocSize > lastSuballoc.offset)
        {
            // Not enough room below the last upper-address suballocation.
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    const VkDeviceSize debugMargin = GetDebugMargin();

    // Apply debugMargin at the end.
    if (debugMargin > 0)
    {
        if (resultOffset < debugMargin)
        {
            return false;
        }
        resultOffset -= debugMargin;
    }

    // Apply alignment (downward, since we grow toward lower addresses).
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if (bufferImageGranularityConflict)
        {
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if (endOf1st + debugMargin <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if (bufferImageGranularity > 1)
        {
            for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        // Handle encodes offset + 1 so that value 0 can serve as a null handle.
        pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
        // pAllocationRequest->item unused.
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }
    return false;
}
  7416. #endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
  7417. #endif // _VMA_BLOCK_METADATA_LINEAR
  7418. #ifndef _VMA_BLOCK_METADATA_TLSF
// To avoid searching the current larger region when the first allocation attempt
// fails, and skip directly to a smaller range, use
// VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as the strategy in CreateAllocationRequest().
// When fragmentation and reuse of previous blocks don't matter, use
// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for the fastest possible allocation time.
// Block metadata implementing the TLSF (Two-Level Segregated Fit) algorithm:
// free blocks are kept in segregated lists indexed by a (memory class,
// second-level index) pair, giving bounded-time allocation.
class VmaBlockMetadata_TLSF : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF)
public:
    VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata_TLSF();

    size_t GetAllocationCount() const override { return m_AllocCount; }
    // +1 accounts for the null block at the end, which is always free.
    size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
    VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
    // Empty iff the null block (trailing free space) still starts at offset 0.
    bool IsEmpty() const override { return m_NullBlock->offset == 0; }
    // An allocation handle is a pointer to the allocation's Block.
    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }

    void Init(VkDeviceSize size) override;
    bool Validate() const override;
    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
    void AddStatistics(VmaStatistics& inoutStats) const override;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const override;
#endif

    bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) override;

    VkResult CheckCorruption(const void* pBlockData) override;
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) override;

    void Free(VmaAllocHandle allocHandle) override;
    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
    VmaAllocHandle GetAllocationListBegin() const override;
    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
    void Clear() override;
    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
    void DebugLogAllAllocations() const override;

private:
    // According to original paper it should be preferable 4 or 5:
    // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
    // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
    static const uint8_t SECOND_LEVEL_INDEX = 5;
    static const uint16_t SMALL_BUFFER_SIZE = 256;
    static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
    static const uint8_t MEMORY_CLASS_SHIFT = 7;
    static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;

    // A physical region of the memory block. Doubly linked by address
    // (prevPhysical/nextPhysical) and, when free, also linked into a free list.
    class Block
    {
    public:
        VkDeviceSize offset;
        VkDeviceSize size;
        Block* prevPhysical;
        Block* nextPhysical;

        void MarkFree() { prevFree = VMA_NULL; }
        void MarkTaken() { prevFree = this; }
        bool IsFree() const { return prevFree != this; }
        void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
        Block*& PrevFree() { return prevFree; }
        Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }

    private:
        Block* prevFree; // Address of the same block here indicates that block is taken
        union
        {
            Block* nextFree; // Valid while the block is free.
            void* userData;  // Valid while the block is taken.
        };
    };

    size_t m_AllocCount;
    // Total number of free blocks besides null block
    size_t m_BlocksFreeCount;
    // Total size of free blocks excluding null block
    VkDeviceSize m_BlocksFreeSize;
    // First-level bitmap: bit per memory class that has any free block.
    uint32_t m_IsFreeBitmap;
    uint8_t m_MemoryClasses;
    // Second-level bitmap per memory class.
    uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
    uint32_t m_ListsCount;
    /*
    * 0: 0-3 lists for small buffers
    * 1+: 0-(2^SLI-1) lists for normal buffers
    */
    Block** m_FreeList;
    VmaPoolAllocator<Block> m_BlockAllocator;
    // Sentinel representing the (possibly empty) free space at the end.
    Block* m_NullBlock;
    VmaBlockBufferImageGranularity m_GranularityHandler;

    uint8_t SizeToMemoryClass(VkDeviceSize size) const;
    uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
    uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
    uint32_t GetListIndex(VkDeviceSize size) const;

    void RemoveFreeBlock(Block* block);
    void InsertFreeBlock(Block* block);
    void MergeBlock(Block* block, Block* prev);

    Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
    bool CheckBlock(
        Block& block,
        uint32_t listIndex,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaAllocationRequest* pAllocationRequest);
};
  7526. #ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
// Constructs empty TLSF metadata. Real setup (null block, free lists)
// happens later in Init().
VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
    m_AllocCount(0),
    m_BlocksFreeCount(0),
    m_BlocksFreeSize(0),
    m_IsFreeBitmap(0),
    m_MemoryClasses(0),
    m_ListsCount(0),
    m_FreeList(VMA_NULL),
    m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
    m_NullBlock(VMA_NULL),
    m_GranularityHandler(bufferImageGranularity) {}
  7540. VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
  7541. {
  7542. if (m_FreeList)
  7543. vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount);
  7544. m_GranularityHandler.Destroy(GetAllocationCallbacks());
  7545. }
// Initializes the metadata for a memory block of the given size: creates the
// null block covering the whole (still unused) space and allocates the
// segregated free-list array sized for this block.
void VmaBlockMetadata_TLSF::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    if (!IsVirtual())
        m_GranularityHandler.Init(GetAllocationCallbacks(), size);

    // The null block is a sentinel representing all free space at the end.
    m_NullBlock = m_BlockAllocator.Alloc();
    m_NullBlock->size = size;
    m_NullBlock->offset = 0;
    m_NullBlock->prevPhysical = VMA_NULL;
    m_NullBlock->nextPhysical = VMA_NULL;
    m_NullBlock->MarkFree();
    m_NullBlock->NextFree() = VMA_NULL;
    m_NullBlock->PrevFree() = VMA_NULL;

    // Number of free lists = lists for all (memoryClass, sli) pairs up to this
    // block's size, plus extra lists for small buffers.
    uint8_t memoryClass = SizeToMemoryClass(size);
    uint16_t sli = SizeToSecondIndex(size, memoryClass);
    m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
    if (IsVirtual())
        m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
    else
        m_ListsCount += 4; // Small-buffer lists (sizes below SMALL_BUFFER_SIZE).

    m_MemoryClasses = memoryClass + uint8_t(2);
    memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t));

    m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount);
    memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
}
// Validates internal consistency: free-list links, the physical block chain,
// granularity bookkeeping, and aggregate size/count accounting.
// Returns true when everything is consistent; VMA_VALIDATE returns false
// from this function on the first violated invariant.
bool VmaBlockMetadata_TLSF::Validate() const
{
    VMA_VALIDATE(GetSumFreeSize() <= GetSize());

    VkDeviceSize calculatedSize = m_NullBlock->size;
    VkDeviceSize calculatedFreeSize = m_NullBlock->size;
    size_t allocCount = 0;
    size_t freeCount = 0;

    // Check integrity of free lists
    for (uint32_t list = 0; list < m_ListsCount; ++list)
    {
        Block* block = m_FreeList[list];
        if (block != VMA_NULL)
        {
            VMA_VALIDATE(block->IsFree());
            VMA_VALIDATE(block->PrevFree() == VMA_NULL);
            while (block->NextFree())
            {
                VMA_VALIDATE(block->NextFree()->IsFree());
                VMA_VALIDATE(block->NextFree()->PrevFree() == block);
                block = block->NextFree();
            }
        }
    }

    VkDeviceSize nextOffset = m_NullBlock->offset;
    auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual());

    // The null block must be the physical tail of the chain.
    VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL);
    if (m_NullBlock->prevPhysical)
    {
        VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
    }

    // Check all blocks, walking the physical chain from the end backward.
    for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical)
    {
        // Blocks must tile the address range with no gaps or overlaps.
        VMA_VALIDATE(prev->offset + prev->size == nextOffset);
        nextOffset = prev->offset;
        calculatedSize += prev->size;

        uint32_t listIndex = GetListIndex(prev->size);
        if (prev->IsFree())
        {
            ++freeCount;
            // Check if free block belongs to free list
            Block* freeBlock = m_FreeList[listIndex];
            VMA_VALIDATE(freeBlock != VMA_NULL);

            bool found = false;
            do
            {
                if (freeBlock == prev)
                    found = true;

                freeBlock = freeBlock->NextFree();
            } while (!found && freeBlock != VMA_NULL);

            VMA_VALIDATE(found);
            calculatedFreeSize += prev->size;
        }
        else
        {
            ++allocCount;
            // Check if taken block is not on a free list
            Block* freeBlock = m_FreeList[listIndex];
            while (freeBlock)
            {
                VMA_VALIDATE(freeBlock != prev);
                freeBlock = freeBlock->NextFree();
            }

            if (!IsVirtual())
            {
                VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size));
            }
        }

        if (prev->prevPhysical)
        {
            VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
        }
    }

    if (!IsVirtual())
    {
        VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx));
    }

    // The walk must have reached offset 0, and the recomputed totals must
    // agree with the cached counters.
    VMA_VALIDATE(nextOffset == 0);
    VMA_VALIDATE(calculatedSize == GetSize());
    VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
    VMA_VALIDATE(allocCount == m_AllocCount);
    VMA_VALIDATE(freeCount == m_BlocksFreeCount);

    return true;
}
  7655. void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
  7656. {
  7657. inoutStats.statistics.blockCount++;
  7658. inoutStats.statistics.blockBytes += GetSize();
  7659. if (m_NullBlock->size > 0)
  7660. VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
  7661. for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
  7662. {
  7663. if (block->IsFree())
  7664. VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
  7665. else
  7666. VmaAddDetailedStatisticsAllocation(inoutStats, block->size);
  7667. }
  7668. }
  7669. void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
  7670. {
  7671. inoutStats.blockCount++;
  7672. inoutStats.allocationCount += (uint32_t)m_AllocCount;
  7673. inoutStats.blockBytes += GetSize();
  7674. inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
  7675. }
  7676. #if VMA_STATS_STRING_ENABLED
// Writes a detailed JSON map of all regions (allocations and unused ranges)
// in ascending address order.
void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
{
    size_t blockCount = m_AllocCount + m_BlocksFreeCount;
    VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
    VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);

    // The physical chain is walked back-to-front, so fill the vector from the
    // end to obtain address order.
    size_t i = blockCount;
    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
    {
        blockList[--i] = block;
    }
    VMA_ASSERT(i == 0);

    VmaDetailedStatistics stats;
    VmaClearDetailedStatistics(stats);
    AddDetailedStatistics(stats);

    PrintDetailedMap_Begin(json,
        stats.statistics.blockBytes - stats.statistics.allocationBytes,
        stats.statistics.allocationCount,
        stats.unusedRangeCount);

    // i is 0 here, so this loop covers every collected block in address order.
    for (; i < blockCount; ++i)
    {
        Block* block = blockList[i];
        if (block->IsFree())
            PrintDetailedMap_UnusedRange(json, block->offset, block->size);
        else
            PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
    }
    // Trailing free space, if any.
    if (m_NullBlock->size > 0)
        PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);

    PrintDetailedMap_End(json);
}
  7707. #endif
// Searches the segregated free lists for a region able to hold allocSize bytes
// at allocAlignment, visiting buckets in an order chosen by `strategy`.
// On success fills *pAllocationRequest (via CheckBlock) and returns true.
bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // For small granularity round up
    if (!IsVirtual())
        m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);

    allocSize += GetDebugMargin();
    // Quick check for too small pool
    if (allocSize > GetSumFreeSize())
        return false;

    // If no free blocks in pool then check only null block
    if (m_BlocksFreeCount == 0)
        return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);

    // Round up to the next block: compute a size that maps to the next larger
    // free-list bucket, so a block found there is guaranteed big enough.
    VkDeviceSize sizeForNextList = allocSize;
    VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4));
    if (allocSize > SMALL_BUFFER_SIZE)
    {
        sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
    }
    else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
        sizeForNextList = SMALL_BUFFER_SIZE + 1;
    else
        sizeForNextList += smallSizeStep;

    uint32_t nextListIndex = m_ListsCount;
    uint32_t prevListIndex = m_ListsCount;
    Block* nextListBlock = VMA_NULL;
    Block* prevListBlock = VMA_NULL;

    // Check blocks according to strategies
    if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
    {
        // Quick check for larger block first
        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
        if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
            return true;

        // If not fitted then null block
        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
            return true;

        // Null block failed, search larger bucket
        while (nextListBlock)
        {
            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            nextListBlock = nextListBlock->NextFree();
        }

        // Failed again, check best fit bucket
        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
        while (prevListBlock)
        {
            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            prevListBlock = prevListBlock->NextFree();
        }
    }
    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
    {
        // Check best fit bucket
        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
        while (prevListBlock)
        {
            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            prevListBlock = prevListBlock->NextFree();
        }

        // If failed check null block
        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
            return true;

        // Check larger bucket
        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
        while (nextListBlock)
        {
            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            nextListBlock = nextListBlock->NextFree();
        }
    }
    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )
    {
        // Perform search from the start: collect candidate free blocks in
        // address order and take the lowest-offset one that fits.
        VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
        VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);

        size_t i = m_BlocksFreeCount;
        for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
        {
            if (block->IsFree() && block->size >= allocSize)
                blockList[--i] = block;
        }

        for (; i < m_BlocksFreeCount; ++i)
        {
            Block& block = *blockList[i];
            if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
        }

        // If failed check null block
        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
            return true;

        // Whole range searched, no more memory
        return false;
    }
    else
    {
        // Default strategy: larger bucket, then null block, then best fit.
        // Check larger bucket
        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
        while (nextListBlock)
        {
            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            nextListBlock = nextListBlock->NextFree();
        }

        // If failed check null block
        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
            return true;

        // Check best fit bucket
        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
        while (prevListBlock)
        {
            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            prevListBlock = prevListBlock->NextFree();
        }
    }

    // Worst case, full search has to be done
    while (++nextListIndex < m_ListsCount)
    {
        nextListBlock = m_FreeList[nextListIndex];
        while (nextListBlock)
        {
            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            nextListBlock = nextListBlock->NextFree();
        }
    }

    // No more memory sadly
    return false;
}
  7850. VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
  7851. {
  7852. for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
  7853. {
  7854. if (!block->IsFree())
  7855. {
  7856. if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
  7857. {
  7858. VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
  7859. return VK_ERROR_UNKNOWN_COPY;
  7860. }
  7861. }
  7862. }
  7863. return VK_SUCCESS;
  7864. }
// Commits a previously created allocation request: pops the chosen block from
// its free list, splits off any alignment padding and leftover space, and
// marks the exact region as taken with the given userData.
void VmaBlockMetadata_TLSF::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    void* userData)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);

    // Get block and pop it from the free list
    Block* currentBlock = (Block*)request.allocHandle;
    VkDeviceSize offset = request.algorithmData; // Final aligned offset chosen by CheckBlock.
    VMA_ASSERT(currentBlock != VMA_NULL);
    VMA_ASSERT(currentBlock->offset <= offset);

    if (currentBlock != m_NullBlock)
        RemoveFreeBlock(currentBlock);

    VkDeviceSize debugMargin = GetDebugMargin();
    VkDeviceSize misssingAlignment = offset - currentBlock->offset;

    // Append missing alignment to prev block or create new one
    if (misssingAlignment)
    {
        Block* prevBlock = currentBlock->prevPhysical;
        VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");

        // Never grow a debug-margin block (identified by size == debugMargin).
        if (prevBlock->IsFree() && prevBlock->size != debugMargin)
        {
            uint32_t oldList = GetListIndex(prevBlock->size);
            prevBlock->size += misssingAlignment;
            // Check if new size crosses list bucket
            if (oldList != GetListIndex(prevBlock->size))
            {
                // Re-link into the correct bucket at the old size, then grow.
                prevBlock->size -= misssingAlignment;
                RemoveFreeBlock(prevBlock);
                prevBlock->size += misssingAlignment;
                InsertFreeBlock(prevBlock);
            }
            else
                m_BlocksFreeSize += misssingAlignment;
        }
        else
        {
            // Insert a new free block holding just the alignment padding.
            Block* newBlock = m_BlockAllocator.Alloc();
            currentBlock->prevPhysical = newBlock;
            prevBlock->nextPhysical = newBlock;
            newBlock->prevPhysical = prevBlock;
            newBlock->nextPhysical = currentBlock;
            newBlock->size = misssingAlignment;
            newBlock->offset = currentBlock->offset;
            newBlock->MarkTaken();

            InsertFreeBlock(newBlock);
        }
        currentBlock->size -= misssingAlignment;
        currentBlock->offset += misssingAlignment;
    }

    VkDeviceSize size = request.size + debugMargin;
    if (currentBlock->size == size)
    {
        if (currentBlock == m_NullBlock)
        {
            // Setup new null block: the old one is consumed entirely.
            m_NullBlock = m_BlockAllocator.Alloc();
            m_NullBlock->size = 0;
            m_NullBlock->offset = currentBlock->offset + size;
            m_NullBlock->prevPhysical = currentBlock;
            m_NullBlock->nextPhysical = VMA_NULL;
            m_NullBlock->MarkFree();
            m_NullBlock->PrevFree() = VMA_NULL;
            m_NullBlock->NextFree() = VMA_NULL;
            currentBlock->nextPhysical = m_NullBlock;
            currentBlock->MarkTaken();
        }
    }
    else
    {
        VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");

        // Create new free block for the leftover space after the allocation.
        Block* newBlock = m_BlockAllocator.Alloc();
        newBlock->size = currentBlock->size - size;
        newBlock->offset = currentBlock->offset + size;
        newBlock->prevPhysical = currentBlock;
        newBlock->nextPhysical = currentBlock->nextPhysical;
        currentBlock->nextPhysical = newBlock;
        currentBlock->size = size;

        if (currentBlock == m_NullBlock)
        {
            // The leftover becomes the new null block.
            m_NullBlock = newBlock;
            m_NullBlock->MarkFree();
            m_NullBlock->NextFree() = VMA_NULL;
            m_NullBlock->PrevFree() = VMA_NULL;
            currentBlock->MarkTaken();
        }
        else
        {
            newBlock->nextPhysical->prevPhysical = newBlock;
            newBlock->MarkTaken();
            InsertFreeBlock(newBlock);
        }
    }
    currentBlock->UserData() = userData;

    // Carve out a dedicated block for the debug margin after the allocation.
    if (debugMargin > 0)
    {
        currentBlock->size -= debugMargin;
        Block* newBlock = m_BlockAllocator.Alloc();
        newBlock->size = debugMargin;
        newBlock->offset = currentBlock->offset + currentBlock->size;
        newBlock->prevPhysical = currentBlock;
        newBlock->nextPhysical = currentBlock->nextPhysical;
        newBlock->MarkTaken();
        currentBlock->nextPhysical->prevPhysical = newBlock;
        currentBlock->nextPhysical = newBlock;
        InsertFreeBlock(newBlock);
    }

    if (!IsVirtual())
        m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
            currentBlock->offset, currentBlock->size);
    ++m_AllocCount;
}
// Frees the allocation identified by allocHandle and merges the resulting
// free region with adjacent free physical neighbors where possible.
void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle)
{
    Block* block = (Block*)allocHandle;
    Block* next = block->nextPhysical;
    VMA_ASSERT(!block->IsFree() && "Block is already free!");

    if (!IsVirtual())
        m_GranularityHandler.FreePages(block->offset, block->size);
    --m_AllocCount;

    VkDeviceSize debugMargin = GetDebugMargin();
    if (debugMargin > 0)
    {
        // The debug-margin block directly after this allocation is absorbed
        // into the freed region first.
        RemoveFreeBlock(next);
        MergeBlock(next, block);
        block = next;
        next = next->nextPhysical;
    }

    // Try merging with the previous physical block if it is free
    // (never with a debug-margin block, identified by size == debugMargin).
    Block* prev = block->prevPhysical;
    if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin)
    {
        RemoveFreeBlock(prev);
        MergeBlock(block, prev);
    }

    if (!next->IsFree())
        InsertFreeBlock(block);
    else if (next == m_NullBlock)
        // Merging into the null block extends the trailing free space.
        MergeBlock(m_NullBlock, block);
    else
    {
        RemoveFreeBlock(next);
        MergeBlock(next, block);
        InsertFreeBlock(next);
    }
}
  8012. void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
  8013. {
  8014. Block* block = (Block*)allocHandle;
  8015. VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
  8016. outInfo.offset = block->offset;
  8017. outInfo.size = block->size;
  8018. outInfo.pUserData = block->UserData();
  8019. }
  8020. void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const
  8021. {
  8022. Block* block = (Block*)allocHandle;
  8023. VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
  8024. return block->UserData();
  8025. }
  8026. VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const
  8027. {
  8028. if (m_AllocCount == 0)
  8029. return VK_NULL_HANDLE;
  8030. for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
  8031. {
  8032. if (!block->IsFree())
  8033. return (VmaAllocHandle)block;
  8034. }
  8035. VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!");
  8036. return VK_NULL_HANDLE;
  8037. }
  8038. VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const
  8039. {
  8040. Block* startBlock = (Block*)prevAlloc;
  8041. VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
  8042. for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
  8043. {
  8044. if (!block->IsFree())
  8045. return (VmaAllocHandle)block;
  8046. }
  8047. return VK_NULL_HANDLE;
  8048. }
  8049. VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
  8050. {
  8051. Block* block = (Block*)alloc;
  8052. VMA_ASSERT(!block->IsFree() && "Incorrect block!");
  8053. if (block->prevPhysical)
  8054. return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
  8055. return 0;
  8056. }
void VmaBlockMetadata_TLSF::Clear()
{
    // Reset all counters and availability bitmaps.
    m_AllocCount = 0;
    m_BlocksFreeCount = 0;
    m_BlocksFreeSize = 0;
    m_IsFreeBitmap = 0;
    // Turn the null block back into a single free region covering the
    // whole metadata size.
    m_NullBlock->offset = 0;
    m_NullBlock->size = GetSize();
    // Detach and return every other physical block to the block allocator.
    Block* block = m_NullBlock->prevPhysical;
    m_NullBlock->prevPhysical = VMA_NULL;
    while (block)
    {
        Block* prev = block->prevPhysical;
        m_BlockAllocator.Free(block);
        block = prev;
    }
    // Empty all segregated free lists and per-class second-level bitmaps.
    memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
    memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
    m_GranularityHandler.Clear();
}
  8077. void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
  8078. {
  8079. Block* block = (Block*)allocHandle;
  8080. VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
  8081. block->UserData() = userData;
  8082. }
  8083. void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
  8084. {
  8085. for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
  8086. if (!block->IsFree())
  8087. DebugLogAllocation(block->offset, block->size, block->UserData());
  8088. }
  8089. uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
  8090. {
  8091. if (size > SMALL_BUFFER_SIZE)
  8092. return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT);
  8093. return 0;
  8094. }
  8095. uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
  8096. {
  8097. if (memoryClass == 0)
  8098. {
  8099. if (IsVirtual())
  8100. return static_cast<uint16_t>((size - 1) / 8);
  8101. else
  8102. return static_cast<uint16_t>((size - 1) / 64);
  8103. }
  8104. return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
  8105. }
  8106. uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
  8107. {
  8108. if (memoryClass == 0)
  8109. return secondIndex;
  8110. const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
  8111. if (IsVirtual())
  8112. return index + (1 << SECOND_LEVEL_INDEX);
  8113. else
  8114. return index + 4;
  8115. }
  8116. uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
  8117. {
  8118. uint8_t memoryClass = SizeToMemoryClass(size);
  8119. return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
  8120. }
void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
{
    // Unlink a block from its segregated free list and mark it taken.
    VMA_ASSERT(block != m_NullBlock);
    VMA_ASSERT(block->IsFree());

    if (block->NextFree() != VMA_NULL)
        block->NextFree()->PrevFree() = block->PrevFree();
    if (block->PrevFree() != VMA_NULL)
        block->PrevFree()->NextFree() = block->NextFree();
    else
    {
        // Block was the list head: update m_FreeList and, if the list
        // became empty, clear the matching second-level bit, and the
        // first-level bit when the whole class has no free blocks left.
        uint8_t memClass = SizeToMemoryClass(block->size);
        uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
        uint32_t index = GetListIndex(memClass, secondIndex);
        VMA_ASSERT(m_FreeList[index] == block);
        m_FreeList[index] = block->NextFree();
        if (block->NextFree() == VMA_NULL)
        {
            m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
            if (m_InnerIsFreeBitmap[memClass] == 0)
                m_IsFreeBitmap &= ~(1UL << memClass);
        }
    }
    block->MarkTaken();
    block->UserData() = VMA_NULL;
    --m_BlocksFreeCount;
    m_BlocksFreeSize -= block->size;
}
void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
{
    // Link a previously taken block into the head of the segregated free
    // list chosen by its size.
    VMA_ASSERT(block != m_NullBlock);
    VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");

    uint8_t memClass = SizeToMemoryClass(block->size);
    uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
    uint32_t index = GetListIndex(memClass, secondIndex);
    VMA_ASSERT(index < m_ListsCount);
    block->PrevFree() = VMA_NULL;
    block->NextFree() = m_FreeList[index];
    m_FreeList[index] = block;
    if (block->NextFree() != VMA_NULL)
        block->NextFree()->PrevFree() = block;
    else
    {
        // The list was empty: set the availability bits for this
        // second-level slot and its memory class.
        m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
        m_IsFreeBitmap |= 1UL << memClass;
    }
    ++m_BlocksFreeCount;
    m_BlocksFreeSize += block->size;
}
void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
{
    // Absorb `prev` (the physically preceding block) into `block`, then
    // return `prev` to the block allocator. `prev` must already be
    // unlinked from any free list.
    VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
    VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");

    block->offset = prev->offset;
    block->size += prev->size;
    block->prevPhysical = prev->prevPhysical;
    if (block->prevPhysical)
        block->prevPhysical->nextPhysical = block;
    m_BlockAllocator.Free(prev);
}
VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
{
    // Locate the first free list that can satisfy `size`. Writes the list
    // index to `listIndex` and returns the list head, or VMA_NULL if no
    // suitable free block exists.
    uint8_t memoryClass = SizeToMemoryClass(size);
    // Mask off second-level slots that are too small for the request.
    uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
    if (!innerFreeMap)
    {
        // Check higher levels for available blocks
        uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
        if (!freeMap)
            return VMA_NULL; // No more memory available
        // Find lowest free region
        memoryClass = VMA_BITSCAN_LSB(freeMap);
        innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
        VMA_ASSERT(innerFreeMap != 0);
    }
    // Find lowest free subregion
    listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
    VMA_ASSERT(m_FreeList[listIndex]);
    return m_FreeList[listIndex];
}
bool VmaBlockMetadata_TLSF::CheckBlock(
    Block& block,
    uint32_t listIndex,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaAllocationRequest* pAllocationRequest)
{
    // Test whether `block` can hold an allocation of `allocSize` bytes
    // aligned to `allocAlignment`; on success fill *pAllocationRequest
    // and return true.
    VMA_ASSERT(block.IsFree() && "Block is already taken!");

    VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
    // Reject if the block cannot fit the size plus the alignment padding.
    if (block.size < allocSize + alignedOffset - block.offset)
        return false;

    // Check for granularity conflicts
    if (!IsVirtual() &&
        m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
        return false;

    // Alloc successful
    pAllocationRequest->type = VmaAllocationRequestType::TLSF;
    pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
    pAllocationRequest->size = allocSize - GetDebugMargin();
    pAllocationRequest->customData = (void*)allocType;
    // The aligned offset is carried to Alloc() through algorithmData.
    pAllocationRequest->algorithmData = alignedOffset;

    // Place block at the start of list if it's normal block
    if (listIndex != m_ListsCount && block.PrevFree())
    {
        // Move the block to the head of its free list so subsequent
        // searches find it first.
        block.PrevFree()->NextFree() = block.NextFree();
        if (block.NextFree())
            block.NextFree()->PrevFree() = block.PrevFree();
        block.PrevFree() = VMA_NULL;
        block.NextFree() = m_FreeList[listIndex];
        m_FreeList[listIndex] = &block;
        if (block.NextFree())
            block.NextFree()->PrevFree() = &block;
    }
    return true;
}
  8236. #endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
  8237. #endif // _VMA_BLOCK_METADATA_TLSF
  8238. #ifndef _VMA_BLOCK_VECTOR
  8239. /*
  8240. Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
  8241. Vulkan memory type.
  8242. Synchronized internally with a mutex.
  8243. */
class VmaBlockVector
{
    friend struct VmaDefragmentationContext_T;
    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        bool explicitBlockSize,
        uint32_t algorithm,
        float priority,
        VkDeviceSize minAllocationAlignment,
        void* pMemoryAllocateNext);
    ~VmaBlockVector();

    // Accessors for the immutable configuration captured at construction.
    VmaAllocator GetAllocator() const { return m_hAllocator; }
    VmaPool GetParentPool() const { return m_hParentPool; }
    // A null parent pool means this vector backs a default per-memory-type pool.
    bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }
    bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
    float GetPriority() const { return m_Priority; }
    const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
    // To be used only while the m_Mutex is locked. Used during defragmentation.
    size_t GetBlockCount() const { return m_Blocks.size(); }
    // To be used only while the m_Mutex is locked. Used during defragmentation.
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    VMA_RW_MUTEX &GetMutex() { return m_Mutex; }

    VkResult CreateMinBlocks();
    void AddStatistics(VmaStatistics& inoutStats);
    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
    bool IsEmpty();
    bool IsCorruptionDetectionEnabled() const;
    // Allocates `allocationCount` allocations, writing them to pAllocations.
    VkResult Allocate(
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);
    void Free(const VmaAllocation hAllocation);
#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif
    VkResult CheckCorruption();

private:
    const VmaAllocator m_hAllocator;
    const VmaPool m_hParentPool;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    const float m_Priority;
    const VkDeviceSize m_MinAllocationAlignment;
    // Extra pNext chain attached to VkMemoryAllocateInfo for blocks of this vector.
    void* const m_pMemoryAllocateNext;
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
    uint32_t m_NextBlockId;
    bool m_IncrementalSort = true;

    void SetIncrementalSort(bool val) { m_IncrementalSort = val; }

    VkDeviceSize CalcMaxBlockSize() const;
    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);
    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();
    void SortByFreeSize();

    VkResult AllocatePage(
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);
    VkResult CommitAllocationRequest(
        VmaAllocationRequest& allocRequest,
        VmaDeviceMemoryBlock* pBlock,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);
    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    bool HasEmptyBlock();
};
  8347. #endif // _VMA_BLOCK_VECTOR
  8348. #ifndef _VMA_DEFRAGMENTATION_CONTEXT
struct VmaDefragmentationContext_T
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        const VmaDefragmentationInfo& info);
    ~VmaDefragmentationContext_T();

    void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }

    VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
    VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);

private:
    // Max number of allocations to ignore due to size constraints before ending single pass
    static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
    enum class CounterStatus { Pass, Ignore, End };

    // A memory block paired with an algorithm-specific 32-bit tag.
    struct FragmentedBlock
    {
        uint32_t data;
        VmaDeviceMemoryBlock* block;
    };
    // Running averages used by the balanced algorithm.
    struct StateBalanced
    {
        VkDeviceSize avgFreeSize = 0;
        VkDeviceSize avgAllocSize = UINT64_MAX;
    };
    // Per-vector state machine for the extensive algorithm.
    struct StateExtensive
    {
        enum class Operation : uint8_t
        {
            FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
            MoveBuffers, MoveTextures, MoveAll,
            Cleanup, Done
        };

        Operation operation = Operation::FindFreeBlockTexture;
        size_t firstFreeBlock = SIZE_MAX;
    };
    // Snapshot of one allocation that is a candidate for moving.
    struct MoveAllocationData
    {
        VkDeviceSize size;
        VkDeviceSize alignment;
        VmaSuballocationType type;
        VmaAllocationCreateFlags flags;
        VmaDefragmentationMove move = {};
    };

    // Per-pass limits and break callback taken from VmaDefragmentationInfo.
    const VkDeviceSize m_MaxPassBytes;
    const uint32_t m_MaxPassAllocations;
    const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback;
    void* m_BreakCallbackUserData;

    VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
    VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;

    uint8_t m_IgnoredAllocs = 0;
    uint32_t m_Algorithm;
    uint32_t m_BlockVectorCount;
    // Targets: either a single custom-pool vector or an array of default
    // vectors (presumably mutually exclusive - see the constructor).
    VmaBlockVector* m_PoolBlockVector;
    VmaBlockVector** m_pBlockVectors;
    size_t m_ImmovableBlockCount = 0;
    VmaDefragmentationStats m_GlobalStats = { 0 };
    VmaDefragmentationStats m_PassStats = { 0 };
    // Algorithm-specific scratch state (e.g. StateBalanced/StateExtensive).
    void* m_AlgorithmState = VMA_NULL;

    static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
    CounterStatus CheckCounters(VkDeviceSize bytes);
    bool IncrementCounters(VkDeviceSize bytes);
    bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
    bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);
    bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
    bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
    bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
    bool ComputeDefragmentation_Full(VmaBlockVector& vector);
    bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index);
    void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state);
    bool MoveDataToFreeBlocks(VmaSuballocationType currentType,
        VmaBlockVector& vector, size_t firstFreeBlock,
        bool& texturePresent, bool& bufferPresent, bool& otherPresent);
};
  8423. #endif // _VMA_DEFRAGMENTATION_CONTEXT
  8424. #ifndef _VMA_POOL_T
struct VmaPool_T
{
    friend struct VmaPoolListItemTraits;
    VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T)
public:
    // Blocks of device memory owned by this custom pool.
    VmaBlockVector m_BlockVector;
    VmaDedicatedAllocationList m_DedicatedAllocations;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // The id may be assigned only once (asserts it is still 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

    const char* GetName() const { return m_Name; }
    void SetName(const char* pName);

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
    char* m_Name;
    // Intrusive list links, accessed through VmaPoolListItemTraits.
    VmaPool_T* m_PrevPool = VMA_NULL;
    VmaPool_T* m_NextPool = VMA_NULL;
};
// Accessor traits that let intrusive-list code link VmaPool_T objects
// through their m_PrevPool/m_NextPool members.
struct VmaPoolListItemTraits
{
    typedef VmaPool_T ItemType;

    static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
    static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
    static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
    static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
};
  8458. #endif // _VMA_POOL_T
  8459. #ifndef _VMA_CURRENT_BUDGET_DATA
struct VmaCurrentBudgetData
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData)
public:
    // Per-heap counters maintained by this library, stored as atomics.
    VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];

#if VMA_MEMORY_BUDGET
    // Number of Add/RemoveAllocation operations since the budget values
    // below were last refreshed.
    VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
    VMA_RW_MUTEX m_BudgetMutex;
    // Per-heap usage/budget numbers (presumably fetched via
    // VK_EXT_memory_budget - see the allocator's budget-fetch code), plus a
    // snapshot of m_BlockBytes taken at the same moment.
    uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
    uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
    uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
#endif // VMA_MEMORY_BUDGET

    VmaCurrentBudgetData();

    void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
    void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
};
  8479. #ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
  8480. VmaCurrentBudgetData::VmaCurrentBudgetData()
  8481. {
  8482. for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
  8483. {
  8484. m_BlockCount[heapIndex] = 0;
  8485. m_AllocationCount[heapIndex] = 0;
  8486. m_BlockBytes[heapIndex] = 0;
  8487. m_AllocationBytes[heapIndex] = 0;
  8488. #if VMA_MEMORY_BUDGET
  8489. m_VulkanUsage[heapIndex] = 0;
  8490. m_VulkanBudget[heapIndex] = 0;
  8491. m_BlockBytesAtBudgetFetch[heapIndex] = 0;
  8492. #endif
  8493. }
  8494. #if VMA_MEMORY_BUDGET
  8495. m_OperationsSinceBudgetFetch = 0;
  8496. #endif
  8497. }
void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
{
    // Account one new allocation of `allocationSize` bytes on the given heap.
    m_AllocationBytes[heapIndex] += allocationSize;
    ++m_AllocationCount[heapIndex];
#if VMA_MEMORY_BUDGET
    ++m_OperationsSinceBudgetFetch;
#endif
}
void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
{
    // Remove one allocation's accounting; asserts guard against underflow.
    VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
    m_AllocationBytes[heapIndex] -= allocationSize;
    VMA_ASSERT(m_AllocationCount[heapIndex] > 0);
    --m_AllocationCount[heapIndex];
#if VMA_MEMORY_BUDGET
    ++m_OperationsSinceBudgetFetch;
#endif
}
  8516. #endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
  8517. #endif // _VMA_CURRENT_BUDGET_DATA
  8518. #ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR
  8519. /*
  8520. Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
  8521. */
class VmaAllocationObjectAllocator
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator)
public:
    // 1024 is a capacity hint passed to VmaPoolAllocator
    // (presumably its first page size - confirm in VmaPoolAllocator).
    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
        : m_Allocator(pAllocationCallbacks, 1024) {}

    template<typename... Types> VmaAllocation Allocate(Types&&... args);
    void Free(VmaAllocation hAlloc);

private:
    // Guards m_Allocator; both Allocate() and Free() take it.
    VMA_MUTEX m_Mutex;
    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
};
template<typename... Types>
VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
{
    // Forward the constructor arguments to the pool allocator under the mutex.
    VmaMutexLock mutexLock(m_Mutex);
    return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
}
  8540. void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
  8541. {
  8542. VmaMutexLock mutexLock(m_Mutex);
  8543. m_Allocator.Free(hAlloc);
  8544. }
  8545. #endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
  8546. #ifndef _VMA_VIRTUAL_BLOCK_T
struct VmaVirtualBlock_T
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T)
public:
    const bool m_AllocationCallbacksSpecified;
    const VkAllocationCallbacks m_AllocationCallbacks;

    VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
    ~VmaVirtualBlock_T();

    // Nothing to do beyond the constructor; always succeeds.
    VkResult Init() { return VK_SUCCESS; }

    // Thin forwarding wrappers over the metadata object.
    bool IsEmpty() const { return m_Metadata->IsEmpty(); }
    void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
    void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
    void Clear() { m_Metadata->Clear(); }

    const VkAllocationCallbacks* GetAllocationCallbacks() const;
    void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
    VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
        VkDeviceSize* outOffset);
    void GetStatistics(VmaStatistics& outStats) const;
    void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
#if VMA_STATS_STRING_ENABLED
    void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
#endif

private:
    // Owned: created in the constructor, destroyed in the destructor.
    VmaBlockMetadata* m_Metadata;
};
  8572. #ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
    : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
{
    // Select the metadata algorithm from the creation flags:
    // default (0) is TLSF, the LINEAR bit selects the linear algorithm.
    const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
    switch (algorithm)
    {
    case 0:
        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
        break;
    case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
        break;
    default:
        // Unknown algorithm bits: assert in debug builds, fall back to TLSF.
        VMA_ASSERT(0);
        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
    }
    m_Metadata->Init(createInfo.size);
}
VmaVirtualBlock_T::~VmaVirtualBlock_T()
{
    // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT
    // to receive the list of the unfreed allocations.
    if (!m_Metadata->IsEmpty())
        m_Metadata->DebugLogAllAllocations();
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased virtual allocations.
    VMA_ASSERT_LEAK(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");
    // m_Metadata is owned by this object; release it with the same callbacks
    // that were used to create it.
    vma_delete(GetAllocationCallbacks(), m_Metadata);
}
  8603. const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
  8604. {
  8605. return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
  8606. }
void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo)
{
    // A virtual allocation handle maps 1:1 onto a metadata alloc handle.
    m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo);
}
VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
    VkDeviceSize* outOffset)
{
    // Ask the metadata for a placement; commit it on success.
    VmaAllocationRequest request = {};
    if (m_Metadata->CreateAllocationRequest(
        createInfo.size, // allocSize
        VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment
        (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress
        VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant
        createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy
        &request))
    {
        m_Metadata->Alloc(request,
            VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant
            createInfo.pUserData);
        outAllocation = (VmaVirtualAllocation)request.allocHandle;
        if(outOffset)
            *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle);
        return VK_SUCCESS;
    }
    // No space available: return a null handle and UINT64_MAX offset.
    outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE;
    if (outOffset)
        *outOffset = UINT64_MAX;
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const
{
    // Reset the output, then let the metadata accumulate its numbers.
    VmaClearStatistics(outStats);
    m_Metadata->AddStatistics(outStats);
}
void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const
{
    // Reset the output, then let the metadata accumulate detailed numbers.
    VmaClearDetailedStatistics(outStats);
    m_Metadata->AddDetailedStatistics(outStats);
}
  8646. #if VMA_STATS_STRING_ENABLED
  8647. void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const
  8648. {
  8649. VmaJsonWriter json(GetAllocationCallbacks(), sb);
  8650. json.BeginObject();
  8651. VmaDetailedStatistics stats;
  8652. CalculateDetailedStatistics(stats);
  8653. json.WriteString("Stats");
  8654. VmaPrintDetailedStatistics(json, stats);
  8655. if (detailedMap)
  8656. {
  8657. json.WriteString("Details");
  8658. json.BeginObject();
  8659. m_Metadata->PrintDetailedMap(json);
  8660. json.EndObject();
  8661. }
  8662. json.EndObject();
  8663. }
  8664. #endif // VMA_STATS_STRING_ENABLED
  8665. #endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
  8666. #endif // _VMA_VIRTUAL_BLOCK_T
  8667. // Main allocator object.
  8668. struct VmaAllocator_T
  8669. {
  8670. VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T)
  8671. public:
  8672. const bool m_UseMutex;
  8673. const uint32_t m_VulkanApiVersion;
  8674. bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
  8675. bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
  8676. bool m_UseExtMemoryBudget;
  8677. bool m_UseAmdDeviceCoherentMemory;
  8678. bool m_UseKhrBufferDeviceAddress;
  8679. bool m_UseExtMemoryPriority;
  8680. bool m_UseKhrMaintenance4;
  8681. bool m_UseKhrMaintenance5;
  8682. const VkDevice m_hDevice;
  8683. const VkInstance m_hInstance;
  8684. const bool m_AllocationCallbacksSpecified;
  8685. const VkAllocationCallbacks m_AllocationCallbacks;
  8686. VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
  8687. VmaAllocationObjectAllocator m_AllocationObjectAllocator;
  8688. // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
  8689. uint32_t m_HeapSizeLimitMask;
  8690. VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
  8691. VkPhysicalDeviceMemoryProperties m_MemProps;
  8692. // Default pools.
  8693. VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
  8694. VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
  8695. VmaCurrentBudgetData m_Budget;
  8696. VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
  8697. VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
  8698. VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
  8699. ~VmaAllocator_T();
  8700. const VkAllocationCallbacks* GetAllocationCallbacks() const
  8701. {
  8702. return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
  8703. }
  8704. const VmaVulkanFunctions& GetVulkanFunctions() const
  8705. {
  8706. return m_VulkanFunctions;
  8707. }
  8708. VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
  8709. VkDeviceSize GetBufferImageGranularity() const
  8710. {
  8711. return VMA_MAX(
  8712. static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
  8713. m_PhysicalDeviceProperties.limits.bufferImageGranularity);
  8714. }
  8715. uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
  8716. uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
  8717. uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
  8718. {
  8719. VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
  8720. return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
  8721. }
  8722. // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
  8723. bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
  8724. {
  8725. return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
  8726. VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  8727. }
  8728. // Minimum alignment for all allocations in specific memory type.
  8729. VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
  8730. {
  8731. return IsMemoryTypeNonCoherent(memTypeIndex) ?
  8732. VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
  8733. (VkDeviceSize)VMA_MIN_ALIGNMENT;
  8734. }
  8735. bool IsIntegratedGpu() const
  8736. {
  8737. return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
  8738. }
  8739. uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
// Queries memory requirements for a buffer, plus whether the implementation
// requires or prefers a dedicated allocation for it.
void GetBufferMemoryRequirements(
VkBuffer hBuffer,
VkMemoryRequirements& memReq,
bool& requiresDedicatedAllocation,
bool& prefersDedicatedAllocation) const;
// Image counterpart of GetBufferMemoryRequirements.
void GetImageMemoryRequirements(
VkImage hImage,
VkMemoryRequirements& memReq,
bool& requiresDedicatedAllocation,
bool& prefersDedicatedAllocation) const;
// Picks a memory type index compatible with memoryTypeBits and the requested
// allocation parameters; bufImgUsage may refine the choice.
VkResult FindMemoryTypeIndex(
uint32_t memoryTypeBits,
const VmaAllocationCreateInfo* pAllocationCreateInfo,
VmaBufferImageUsage bufImgUsage,
uint32_t* pMemoryTypeIndex) const;
// Main allocation function.
VkResult AllocateMemory(
const VkMemoryRequirements& vkMemReq,
bool requiresDedicatedAllocation,
bool prefersDedicatedAllocation,
VkBuffer dedicatedBuffer,
VkImage dedicatedImage,
VmaBufferImageUsage dedicatedBufferImageUsage,
const VmaAllocationCreateInfo& createInfo,
VmaSuballocationType suballocType,
size_t allocationCount,
VmaAllocation* pAllocations);
// Main deallocation function.
void FreeMemory(
size_t allocationCount,
const VmaAllocation* pAllocations);
void CalculateStatistics(VmaTotalStatistics* pStats);
// Fills heapCount budgets starting at firstHeap into outBudgets.
void GetHeapBudgets(
VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount);
#if VMA_STATS_STRING_ENABLED
void PrintDetailedMap(class VmaJsonWriter& json);
#endif
void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
void GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo);
// Custom pool management.
VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
void DestroyPool(VmaPool pool);
void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats);
void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats);
void SetCurrentFrameIndex(uint32_t frameIndex);
uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
VkResult CheckPoolCorruption(VmaPool hPool);
VkResult CheckCorruption(uint32_t memoryTypeBits);
// Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
// Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
// Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
VkResult BindVulkanBuffer(
VkDeviceMemory memory,
VkDeviceSize memoryOffset,
VkBuffer buffer,
const void* pNext);
// Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
VkResult BindVulkanImage(
VkDeviceMemory memory,
VkDeviceSize memoryOffset,
VkImage image,
const void* pNext);
// Map/unmap with reference counting on the underlying memory object.
VkResult Map(VmaAllocation hAllocation, void** ppData);
void Unmap(VmaAllocation hAllocation);
// allocationLocalOffset is relative to the allocation, not the memory block.
VkResult BindBufferMemory(
VmaAllocation hAllocation,
VkDeviceSize allocationLocalOffset,
VkBuffer hBuffer,
const void* pNext);
VkResult BindImageMemory(
VmaAllocation hAllocation,
VkDeviceSize allocationLocalOffset,
VkImage hImage,
const void* pNext);
// Flush or invalidate a (sub)range of an allocation, depending on op.
VkResult FlushOrInvalidateAllocation(
VmaAllocation hAllocation,
VkDeviceSize offset, VkDeviceSize size,
VMA_CACHE_OPERATION op);
// Batched variant: arrays of allocations with optional per-item offsets/sizes.
VkResult FlushOrInvalidateAllocations(
uint32_t allocationCount,
const VmaAllocation* allocations,
const VkDeviceSize* offsets, const VkDeviceSize* sizes,
VMA_CACHE_OPERATION op);
// Copies host memory into a (mappable) allocation, handling map and flush.
VkResult CopyMemoryToAllocation(
const void* pSrcHostPointer,
VmaAllocation dstAllocation,
VkDeviceSize dstAllocationLocalOffset,
VkDeviceSize size);
// Copies allocation contents out to host memory, handling map and invalidate.
VkResult CopyAllocationToMemory(
VmaAllocation srcAllocation,
VkDeviceSize srcAllocationLocalOffset,
void* pDstHostPointer,
VkDeviceSize size);
// Fills the allocation with the given byte pattern (debug feature).
void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
/*
Returns bit mask of memory types that can support defragmentation on GPU as
they support creation of required buffer for copy operations.
*/
uint32_t GetGpuDefragmentationMemoryTypeBits();
#if VMA_EXTERNAL_MEMORY
// External memory handle types configured for the given memory type.
VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
{
return m_TypeExternalMemoryHandleTypes[memTypeIndex];
}
#endif // #if VMA_EXTERNAL_MEMORY
private:
// Preferred size of a single VkDeviceMemory block in default pools.
VkDeviceSize m_PreferredLargeHeapBlockSize;
VkPhysicalDevice m_PhysicalDevice;
VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
#if VMA_EXTERNAL_MEMORY
VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
#endif // #if VMA_EXTERNAL_MEMORY
VMA_RW_MUTEX m_PoolsMutex;
typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
// Protected by m_PoolsMutex.
PoolList m_Pools;
uint32_t m_NextPoolId;
VmaVulkanFunctions m_VulkanFunctions;
// Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
uint32_t m_GlobalMemoryTypeBits;
// Fills m_VulkanFunctions from user-provided pointers and/or the strategies below.
void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
void ImportVulkanFunctions_Static();
#endif
void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
void ImportVulkanFunctions_Dynamic();
#endif
void ValidateVulkanFunctions();
// Computes the preferred block size for the heap that memTypeIndex lives in.
VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
// Allocates from a single, already chosen memory type: tries the block vector
// and/or a dedicated allocation as appropriate.
VkResult AllocateMemoryOfType(
VmaPool pool,
VkDeviceSize size,
VkDeviceSize alignment,
bool dedicatedPreferred,
VkBuffer dedicatedBuffer,
VkImage dedicatedImage,
VmaBufferImageUsage dedicatedBufferImageUsage,
const VmaAllocationCreateInfo& createInfo,
uint32_t memTypeIndex,
VmaSuballocationType suballocType,
VmaDedicatedAllocationList& dedicatedAllocations,
VmaBlockVector& blockVector,
size_t allocationCount,
VmaAllocation* pAllocations);
// Helper function only to be used inside AllocateDedicatedMemory.
VkResult AllocateDedicatedMemoryPage(
VmaPool pool,
VkDeviceSize size,
VmaSuballocationType suballocType,
uint32_t memTypeIndex,
const VkMemoryAllocateInfo& allocInfo,
bool map,
bool isUserDataString,
bool isMappingAllowed,
void* pUserData,
VmaAllocation* pAllocation);
// Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
VkResult AllocateDedicatedMemory(
VmaPool pool,
VkDeviceSize size,
VmaSuballocationType suballocType,
VmaDedicatedAllocationList& dedicatedAllocations,
uint32_t memTypeIndex,
bool map,
bool isUserDataString,
bool isMappingAllowed,
bool canAliasMemory,
void* pUserData,
float priority,
VkBuffer dedicatedBuffer,
VkImage dedicatedImage,
VmaBufferImageUsage dedicatedBufferImageUsage,
size_t allocationCount,
VmaAllocation* pAllocations,
const void* pNextChain = VMA_NULL);
void FreeDedicatedMemory(const VmaAllocation allocation);
// Adjusts create info for a concrete memory type (e.g. priority, budget checks).
VkResult CalcMemTypeParams(
VmaAllocationCreateInfo& outCreateInfo,
uint32_t memTypeIndex,
VkDeviceSize size,
size_t allocationCount);
// Normalizes user-requested create info before allocation begins.
VkResult CalcAllocationParams(
VmaAllocationCreateInfo& outCreateInfo,
bool dedicatedRequired,
bool dedicatedPreferred);
/*
Calculates and returns bit mask of memory types that can support defragmentation
on GPU as they support creation of required buffer for copy operations.
*/
uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
uint32_t CalculateGlobalMemoryTypeBits() const;
// Computes the VkMappedMemoryRange for a flush/invalidate; returns false when
// the range degenerates to empty and no Vulkan call is needed.
bool GetFlushOrInvalidateRange(
VmaAllocation allocation,
VkDeviceSize offset, VkDeviceSize size,
VkMappedMemoryRange& outRange) const;
#if VMA_MEMORY_BUDGET
void UpdateVulkanBudget();
#endif // #if VMA_MEMORY_BUDGET
};
  8942. #ifndef _VMA_MEMORY_FUNCTIONS
  8943. static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
  8944. {
  8945. return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
  8946. }
  8947. static void VmaFree(VmaAllocator hAllocator, void* ptr)
  8948. {
  8949. VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
  8950. }
  8951. template<typename T>
  8952. static T* VmaAllocate(VmaAllocator hAllocator)
  8953. {
  8954. return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
  8955. }
  8956. template<typename T>
  8957. static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
  8958. {
  8959. return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
  8960. }
  8961. template<typename T>
  8962. static void vma_delete(VmaAllocator hAllocator, T* ptr)
  8963. {
  8964. if(ptr != VMA_NULL)
  8965. {
  8966. ptr->~T();
  8967. VmaFree(hAllocator, ptr);
  8968. }
  8969. }
  8970. template<typename T>
  8971. static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
  8972. {
  8973. if(ptr != VMA_NULL)
  8974. {
  8975. for(size_t i = count; i--; )
  8976. ptr[i].~T();
  8977. VmaFree(hAllocator, ptr);
  8978. }
  8979. }
  8980. #endif // _VMA_MEMORY_FUNCTIONS
#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
// Constructs an empty block; the VkDeviceMemory and metadata are attached
// later in Init(). (hAllocator is unused here.)
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
: m_pMetadata(VMA_NULL),
m_MemoryTypeIndex(UINT32_MAX),
m_Id(0),
m_hMemory(VK_NULL_HANDLE),
m_MapCount(0),
m_pMappedData(VMA_NULL) {}
// Destructor only checks invariants: the block must have been unmapped and
// its memory released via Destroy() beforehand.
VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
{
VMA_ASSERT_LEAK(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
VMA_ASSERT_LEAK(m_hMemory == VK_NULL_HANDLE);
}
// Attaches a freshly allocated VkDeviceMemory to this block and creates the
// suballocation metadata object matching the requested algorithm.
void VmaDeviceMemoryBlock::Init(
VmaAllocator hAllocator,
VmaPool hParentPool,
uint32_t newMemoryTypeIndex,
VkDeviceMemory newMemory,
VkDeviceSize newSize,
uint32_t id,
uint32_t algorithm,
VkDeviceSize bufferImageGranularity)
{
// Init must only be called once, on a block that holds no memory yet.
VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
m_hParentPool = hParentPool;
m_MemoryTypeIndex = newMemoryTypeIndex;
m_Id = id;
m_hMemory = newMemory;
switch (algorithm)
{
case 0:
// Default algorithm: TLSF metadata.
m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
bufferImageGranularity, false); // isVirtual
break;
case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
bufferImageGranularity, false); // isVirtual
break;
default:
// Unknown algorithm: assert in debug builds, fall back to TLSF in release.
VMA_ASSERT(0);
m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
bufferImageGranularity, false); // isVirtual
}
m_pMetadata->Init(newSize);
}
// Returns the VkDeviceMemory to the allocator and destroys the metadata.
// Must be called before this object's destructor runs.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
// Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT
// to receive the list of the unfreed allocations.
if (!m_pMetadata->IsEmpty())
m_pMetadata->DebugLogAllAllocations();
// This is the most important assert in the entire library.
// Hitting it means you have some memory leak - unreleased VmaAllocation objects.
VMA_ASSERT_LEAK(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
VMA_ASSERT_LEAK(m_hMemory != VK_NULL_HANDLE);
allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
m_hMemory = VK_NULL_HANDLE;
vma_delete(allocator, m_pMetadata);
m_pMetadata = VMA_NULL;
}
// Notifies the mapping hysteresis that an allocation happened in this block.
void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator)
{
VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
m_MappingHysteresis.PostAlloc();
}
// Notifies the hysteresis that an allocation was freed. If the hysteresis
// decides to drop its extra mapping and no user mappings remain, the memory
// is actually unmapped here.
void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator)
{
VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
if(m_MappingHysteresis.PostFree())
{
VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0);
if (m_MapCount == 0)
{
m_pMappedData = VMA_NULL;
(*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
}
}
}
  9059. bool VmaDeviceMemoryBlock::Validate() const
  9060. {
  9061. VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
  9062. (m_pMetadata->GetSize() != 0));
  9063. return m_pMetadata->Validate();
  9064. }
  9065. VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
  9066. {
  9067. void* pData = VMA_NULL;
  9068. VkResult res = Map(hAllocator, 1, &pData);
  9069. if (res != VK_SUCCESS)
  9070. {
  9071. return res;
  9072. }
  9073. res = m_pMetadata->CheckCorruption(pData);
  9074. Unmap(hAllocator, 1);
  9075. return res;
  9076. }
// Maps the block's memory with reference counting, adding `count` references.
// If the memory is already mapped (by users or by the hysteresis's extra
// mapping) only the counter is bumped and the cached pointer is returned;
// otherwise the whole block is mapped with vkMapMemory.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
if (count == 0)
{
return VK_SUCCESS;
}
// Serializes map/bind calls on this VkDeviceMemory across threads.
VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
if (oldTotalMapCount != 0)
{
// Already mapped: reuse the cached pointer.
VMA_ASSERT(m_pMappedData != VMA_NULL);
m_MappingHysteresis.PostMap();
m_MapCount += count;
if (ppData != VMA_NULL)
{
*ppData = m_pMappedData;
}
return VK_SUCCESS;
}
else
{
// First mapping: map the entire block so suballocations can share it.
VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
hAllocator->m_hDevice,
m_hMemory,
0, // offset
VK_WHOLE_SIZE,
0, // flags
&m_pMappedData);
if (result == VK_SUCCESS)
{
VMA_ASSERT(m_pMappedData != VMA_NULL);
m_MappingHysteresis.PostMap();
m_MapCount = count;
if (ppData != VMA_NULL)
{
*ppData = m_pMappedData;
}
}
return result;
}
}
// Removes `count` map references; actually unmaps the memory only when the
// total reference count (user mappings + hysteresis extra mapping) hits zero.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
if (count == 0)
{
return;
}
VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
if (m_MapCount >= count)
{
m_MapCount -= count;
const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
if (totalMapCount == 0)
{
m_pMappedData = VMA_NULL;
(*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
}
m_MappingHysteresis.PostUnmap();
}
else
{
// More unmaps than maps - a usage error on the caller's side.
VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
}
}
  9141. VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
  9142. {
  9143. VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
  9144. void* pData;
  9145. VkResult res = Map(hAllocator, 1, &pData);
  9146. if (res != VK_SUCCESS)
  9147. {
  9148. return res;
  9149. }
  9150. VmaWriteMagicValue(pData, allocOffset + allocSize);
  9151. Unmap(hAllocator, 1);
  9152. return VK_SUCCESS;
  9153. }
  9154. VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
  9155. {
  9156. VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
  9157. void* pData;
  9158. VkResult res = Map(hAllocator, 1, &pData);
  9159. if (res != VK_SUCCESS)
  9160. {
  9161. return res;
  9162. }
  9163. if (!VmaValidateMagicValue(pData, allocOffset + allocSize))
  9164. {
  9165. VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
  9166. }
  9167. Unmap(hAllocator, 1);
  9168. return VK_SUCCESS;
  9169. }
// Binds a buffer to this block's memory at the allocation's position plus
// allocationLocalOffset (which is relative to the allocation, not the block).
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
const VmaAllocator hAllocator,
const VmaAllocation hAllocation,
VkDeviceSize allocationLocalOffset,
VkBuffer hBuffer,
const void* pNext)
{
VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
hAllocation->GetBlock() == this);
VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
"Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
// This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
}
// Image counterpart of BindBufferMemory.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
const VmaAllocator hAllocator,
const VmaAllocation hAllocation,
VkDeviceSize allocationLocalOffset,
VkImage hImage,
const void* pNext)
{
VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
hAllocation->GetBlock() == this);
VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
"Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
// This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
}
#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
#ifndef _VMA_ALLOCATION_T_FUNCTIONS
// Creates an empty allocation object; real setup happens later in
// InitBlockAllocation() or InitDedicatedAllocation().
VmaAllocation_T::VmaAllocation_T(bool mappingAllowed)
: m_Alignment{ 1 },
m_Size{ 0 },
m_pUserData{ VMA_NULL },
m_pName{ VMA_NULL },
m_MemoryTypeIndex{ 0 },
m_Type{ (uint8_t)ALLOCATION_TYPE_NONE },
m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN },
m_MapCount{ 0 },
m_Flags{ 0 }
{
if(mappingAllowed)
m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED;
}
VmaAllocation_T::~VmaAllocation_T()
{
VMA_ASSERT_LEAK(m_MapCount == 0 && "Allocation was not unmapped before destruction.");
// Check if owned string was freed.
VMA_ASSERT(m_pName == VMA_NULL);
}
// Turns this empty allocation into a suballocation inside a memory block.
// `mapped` marks the allocation as persistently mapped (requires that mapping
// was allowed at creation).
void VmaAllocation_T::InitBlockAllocation(
VmaDeviceMemoryBlock* block,
VmaAllocHandle allocHandle,
VkDeviceSize alignment,
VkDeviceSize size,
uint32_t memoryTypeIndex,
VmaSuballocationType suballocationType,
bool mapped)
{
// Init* may only be called once, on a fresh ALLOCATION_TYPE_NONE object.
VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
VMA_ASSERT(block != VMA_NULL);
m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
m_Alignment = alignment;
m_Size = size;
m_MemoryTypeIndex = memoryTypeIndex;
if(mapped)
{
VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
}
m_SuballocationType = (uint8_t)suballocationType;
m_BlockAllocation.m_Block = block;
m_BlockAllocation.m_AllocHandle = allocHandle;
}
// Turns this empty allocation into a dedicated allocation that owns its whole
// VkDeviceMemory. A non-null pMappedData marks it persistently mapped.
void VmaAllocation_T::InitDedicatedAllocation(
VmaPool hParentPool,
uint32_t memoryTypeIndex,
VkDeviceMemory hMemory,
VmaSuballocationType suballocationType,
void* pMappedData,
VkDeviceSize size)
{
// Init* may only be called once, on a fresh ALLOCATION_TYPE_NONE object.
VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
VMA_ASSERT(hMemory != VK_NULL_HANDLE);
m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
m_Alignment = 0;
m_Size = size;
m_MemoryTypeIndex = memoryTypeIndex;
m_SuballocationType = (uint8_t)suballocationType;
if(pMappedData != VMA_NULL)
{
VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
}
// Links for the intrusive dedicated-allocation list start out detached.
m_DedicatedAllocation.m_hParentPool = hParentPool;
m_DedicatedAllocation.m_hMemory = hMemory;
m_DedicatedAllocation.m_pMappedData = pMappedData;
m_DedicatedAllocation.m_Prev = VMA_NULL;
m_DedicatedAllocation.m_Next = VMA_NULL;
}
  9274. void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName)
  9275. {
  9276. VMA_ASSERT(pName == VMA_NULL || pName != m_pName);
  9277. FreeName(hAllocator);
  9278. if (pName != VMA_NULL)
  9279. m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName);
  9280. }
// Exchanges the block placement of this allocation with `allocation`.
// Unmaps this allocation's old block first, repoints the metadata's user-data
// back-references on both placements, and returns the previous map count
// (presumably so the caller can restore mappings at the new location -
// verify at the call site, e.g. defragmentation).
uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation)
{
VMA_ASSERT(allocation != VMA_NULL);
VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK);
if (m_MapCount != 0)
m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount);
// Point the old placement's metadata at the other allocation, swap, then
// point the newly owned placement back at this object. Order matters.
m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation);
std::swap(m_BlockAllocation, allocation->m_BlockAllocation);
m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this);
#if VMA_STATS_STRING_ENABLED
std::swap(m_BufferImageUsage, allocation->m_BufferImageUsage);
#endif
return m_MapCount;
}
  9296. VmaAllocHandle VmaAllocation_T::GetAllocHandle() const
  9297. {
  9298. switch (m_Type)
  9299. {
  9300. case ALLOCATION_TYPE_BLOCK:
  9301. return m_BlockAllocation.m_AllocHandle;
  9302. case ALLOCATION_TYPE_DEDICATED:
  9303. return VK_NULL_HANDLE;
  9304. default:
  9305. VMA_ASSERT(0);
  9306. return VK_NULL_HANDLE;
  9307. }
  9308. }
  9309. VkDeviceSize VmaAllocation_T::GetOffset() const
  9310. {
  9311. switch (m_Type)
  9312. {
  9313. case ALLOCATION_TYPE_BLOCK:
  9314. return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle);
  9315. case ALLOCATION_TYPE_DEDICATED:
  9316. return 0;
  9317. default:
  9318. VMA_ASSERT(0);
  9319. return 0;
  9320. }
  9321. }
  9322. VmaPool VmaAllocation_T::GetParentPool() const
  9323. {
  9324. switch (m_Type)
  9325. {
  9326. case ALLOCATION_TYPE_BLOCK:
  9327. return m_BlockAllocation.m_Block->GetParentPool();
  9328. case ALLOCATION_TYPE_DEDICATED:
  9329. return m_DedicatedAllocation.m_hParentPool;
  9330. default:
  9331. VMA_ASSERT(0);
  9332. return VK_NULL_HANDLE;
  9333. }
  9334. }
  9335. VkDeviceMemory VmaAllocation_T::GetMemory() const
  9336. {
  9337. switch (m_Type)
  9338. {
  9339. case ALLOCATION_TYPE_BLOCK:
  9340. return m_BlockAllocation.m_Block->GetDeviceMemory();
  9341. case ALLOCATION_TYPE_DEDICATED:
  9342. return m_DedicatedAllocation.m_hMemory;
  9343. default:
  9344. VMA_ASSERT(0);
  9345. return VK_NULL_HANDLE;
  9346. }
  9347. }
  9348. void* VmaAllocation_T::GetMappedData() const
  9349. {
  9350. switch (m_Type)
  9351. {
  9352. case ALLOCATION_TYPE_BLOCK:
  9353. if (m_MapCount != 0 || IsPersistentMap())
  9354. {
  9355. void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
  9356. VMA_ASSERT(pBlockData != VMA_NULL);
  9357. return (char*)pBlockData + GetOffset();
  9358. }
  9359. else
  9360. {
  9361. return VMA_NULL;
  9362. }
  9363. break;
  9364. case ALLOCATION_TYPE_DEDICATED:
  9365. VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap()));
  9366. return m_DedicatedAllocation.m_pMappedData;
  9367. default:
  9368. VMA_ASSERT(0);
  9369. return VMA_NULL;
  9370. }
  9371. }
  9372. void VmaAllocation_T::BlockAllocMap()
  9373. {
  9374. VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
  9375. VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
  9376. if (m_MapCount < 0xFF)
  9377. {
  9378. ++m_MapCount;
  9379. }
  9380. else
  9381. {
  9382. VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
  9383. }
  9384. }
  9385. void VmaAllocation_T::BlockAllocUnmap()
  9386. {
  9387. VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
  9388. if (m_MapCount > 0)
  9389. {
  9390. --m_MapCount;
  9391. }
  9392. else
  9393. {
  9394. VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
  9395. }
  9396. }
// Maps a dedicated allocation's memory with reference counting, mirroring
// VmaDeviceMemoryBlock::Map. Returns the cached pointer when already mapped
// (including the persistent mapping); the counter saturates at 0xFF.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
if (m_MapCount != 0 || IsPersistentMap())
{
if (m_MapCount < 0xFF)
{
VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
*ppData = m_DedicatedAllocation.m_pMappedData;
++m_MapCount;
return VK_SUCCESS;
}
else
{
VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
return VK_ERROR_MEMORY_MAP_FAILED;
}
}
else
{
// First mapping: map the entire dedicated memory object.
VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
hAllocator->m_hDevice,
m_DedicatedAllocation.m_hMemory,
0, // offset
VK_WHOLE_SIZE,
0, // flags
ppData);
if (result == VK_SUCCESS)
{
m_DedicatedAllocation.m_pMappedData = *ppData;
m_MapCount = 1;
}
return result;
}
}
// Unmaps a dedicated allocation. The memory is actually unmapped only when
// the counter drops to zero and there is no persistent mapping keeping it alive.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
if (m_MapCount > 0)
{
--m_MapCount;
if (m_MapCount == 0 && !IsPersistentMap())
{
m_DedicatedAllocation.m_pMappedData = VMA_NULL;
(*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
hAllocator->m_hDevice,
m_DedicatedAllocation.m_hMemory);
}
}
else
{
// Unbalanced unmap - a usage error on the caller's side.
VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
}
}
#if VMA_STATS_STRING_ENABLED
// Writes this allocation's parameters as key/value pairs into an already open
// JSON object (used by the detailed statistics dump).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
json.WriteString("Type");
json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
json.WriteString("Size");
json.WriteNumber(m_Size);
json.WriteString("Usage");
json.WriteNumber(m_BufferImageUsage.Value); // It may be uint32_t or uint64_t.
// Optional fields, emitted only when set.
if (m_pUserData != VMA_NULL)
{
json.WriteString("CustomData");
json.BeginString();
json.ContinueString_Pointer(m_pUserData);
json.EndString();
}
if (m_pName != VMA_NULL)
{
json.WriteString("Name");
json.WriteString(m_pName);
}
}
#endif // VMA_STATS_STRING_ENABLED
  9475. void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
  9476. {
  9477. if(m_pName)
  9478. {
  9479. VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
  9480. m_pName = VMA_NULL;
  9481. }
  9482. }
  9483. #endif // _VMA_ALLOCATION_T_FUNCTIONS
#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
// Stores the configuration for a sequence of memory blocks of one memory type;
// blocks themselves are created lazily (or up-front via CreateMinBlocks()).
VmaBlockVector::VmaBlockVector(
VmaAllocator hAllocator,
VmaPool hParentPool,
uint32_t memoryTypeIndex,
VkDeviceSize preferredBlockSize,
size_t minBlockCount,
size_t maxBlockCount,
VkDeviceSize bufferImageGranularity,
bool explicitBlockSize,
uint32_t algorithm,
float priority,
VkDeviceSize minAllocationAlignment,
void* pMemoryAllocateNext)
: m_hAllocator(hAllocator),
m_hParentPool(hParentPool),
m_MemoryTypeIndex(memoryTypeIndex),
m_PreferredBlockSize(preferredBlockSize),
m_MinBlockCount(minBlockCount),
m_MaxBlockCount(maxBlockCount),
m_BufferImageGranularity(bufferImageGranularity),
m_ExplicitBlockSize(explicitBlockSize),
m_Algorithm(algorithm),
m_Priority(priority),
m_MinAllocationAlignment(minAllocationAlignment),
m_pMemoryAllocateNext(pMemoryAllocateNext),
m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
m_NextBlockId(0) {}
  9512. VmaBlockVector::~VmaBlockVector()
  9513. {
  9514. for (size_t i = m_Blocks.size(); i--; )
  9515. {
  9516. m_Blocks[i]->Destroy(m_hAllocator);
  9517. vma_delete(m_hAllocator, m_Blocks[i]);
  9518. }
  9519. }
  9520. VkResult VmaBlockVector::CreateMinBlocks()
  9521. {
  9522. for (size_t i = 0; i < m_MinBlockCount; ++i)
  9523. {
  9524. VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
  9525. if (res != VK_SUCCESS)
  9526. {
  9527. return res;
  9528. }
  9529. }
  9530. return VK_SUCCESS;
  9531. }
  9532. void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
  9533. {
  9534. VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
  9535. const size_t blockCount = m_Blocks.size();
  9536. for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
  9537. {
  9538. const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
  9539. VMA_ASSERT(pBlock);
  9540. VMA_HEAVY_ASSERT(pBlock->Validate());
  9541. pBlock->m_pMetadata->AddStatistics(inoutStats);
  9542. }
  9543. }
  9544. void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
  9545. {
  9546. VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
  9547. const size_t blockCount = m_Blocks.size();
  9548. for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
  9549. {
  9550. const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
  9551. VMA_ASSERT(pBlock);
  9552. VMA_HEAVY_ASSERT(pBlock->Validate());
  9553. pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
  9554. }
  9555. }
  9556. bool VmaBlockVector::IsEmpty()
  9557. {
  9558. VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
  9559. return m_Blocks.empty();
  9560. }
  9561. bool VmaBlockVector::IsCorruptionDetectionEnabled() const
  9562. {
  9563. const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
  9564. return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
  9565. (VMA_DEBUG_MARGIN > 0) &&
  9566. (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
  9567. (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
  9568. }
// Allocates `allocationCount` allocations of identical size/alignment from
// this block vector, writing handles to pAllocations[0..allocationCount).
// All-or-nothing: if any page fails, every allocation already made by this
// call is freed and the output array is zeroed.
VkResult VmaBlockVector::Allocate(
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    size_t allocIndex;
    VkResult res = VK_SUCCESS;

    // Enforce the vector-wide minimum alignment (e.g. from pool creation).
    alignment = VMA_MAX(alignment, m_MinAllocationAlignment);

    if (IsCorruptionDetectionEnabled())
    {
        // Round up so a magic value can be written right after each
        // allocation and validated on free.
        size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
        alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    }

    {
        // Hold the write lock across the whole multi-page sequence so the
        // block set cannot change between pages.
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
        for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
        {
            res = AllocatePage(
                size,
                alignment,
                createInfo,
                suballocType,
                pAllocations + allocIndex);
            if (res != VK_SUCCESS)
            {
                break;
            }
        }
    }

    if (res != VK_SUCCESS)
    {
        // Free all already created allocations.
        // Runs after the scope above because Free() takes the lock itself.
        while (allocIndex--)
            Free(pAllocations[allocIndex]);
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
// Allocates a single allocation ("page") from this block vector.
// Strategy:
//   1. Try existing blocks; iteration order depends on the algorithm and
//      the VMA_ALLOCATION_CREATE_STRATEGY_* flags.
//   2. If permitted, create a new block (preferred size, falling back to
//      halved sizes) and allocate from it.
// Called with the write lock held (see Allocate()).
VkResult VmaBlockVector::AllocatePage(
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;

    // Remaining budget of this memory type's heap; used below to decide
    // whether creating a new block would exceed the budget.
    VkDeviceSize freeMemory;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
        freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
    }

    // Falling back to a dedicated allocation is only possible for vectors
    // without a user-fixed block size, and only when allocating new device
    // memory is allowed at all.
    const bool canFallbackToDedicated = !HasExplicitBlockSize() &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0;
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount) &&
        // Over budget: prefer failing here so the caller can try a
        // dedicated allocation instead, when that fallback exists.
        (freeMemory >= size || !canFallbackToDedicated);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // Upper address can only be used with linear allocator and within single memory block.
    if (isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger that maximum block size for this block vector.
    if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    // 1. Search existing allocations. Try to allocate.
    if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    {
        // Use only last block.
        if (!m_Blocks.empty())
        {
            VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
            VMA_ASSERT(pCurrBlock);
            VkResult res = AllocateFromBlock(
                pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
            if (res == VK_SUCCESS)
            {
                VMA_DEBUG_LOG_FORMAT(" Returned from last block #%" PRIu32, pCurrBlock->GetId());
                IncrementallySortBlocks();
                return VK_SUCCESS;
            }
        }
    }
    else
    {
        if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
        {
            const bool isHostVisible =
                (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
            if(isHostVisible)
            {
                const bool isMappingAllowed = (createInfo.flags &
                    (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
                /*
                For non-mappable allocations, check blocks that are not mapped first.
                For mappable allocations, check blocks that are already mapped first.
                This way, having many blocks, we will separate mappable and non-mappable allocations,
                hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc.
                */
                for(size_t mappingI = 0; mappingI < 2; ++mappingI)
                {
                    // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                    for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
                    {
                        VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                        VMA_ASSERT(pCurrBlock);
                        const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL;
                        // Pass 0 takes the preferred blocks (mapping state
                        // matches the request); pass 1 takes the rest.
                        if((mappingI == 0) == (isMappingAllowed == isBlockMapped))
                        {
                            VkResult res = AllocateFromBlock(
                                pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
                            if (res == VK_SUCCESS)
                            {
                                VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId());
                                IncrementallySortBlocks();
                                return VK_SUCCESS;
                            }
                        }
                    }
                }
            }
            else
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
                    if (res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId());
                        IncrementallySortBlocks();
                        return VK_SUCCESS;
                    }
                }
            }
        }
        else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
        {
            // Backward order in m_Blocks - prefer blocks with largest amount of free space.
            for (size_t blockIndex = m_Blocks.size(); blockIndex--; )
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
                if (res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId());
                    IncrementallySortBlocks();
                    return VK_SUCCESS;
                }
            }
        }
    }

    // 2. Try to create new block.
    if (canCreateNewBlock)
    {
        // Calculate optimal size for new block.
        VkDeviceSize newBlockSize = m_PreferredBlockSize;
        uint32_t newBlockSizeShift = 0;
        const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

        if (!m_ExplicitBlockSize)
        {
            // Allocate 1/8, 1/4, 1/2 as first blocks.
            const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
            for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
            {
                const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                // Shrink only while the candidate still exceeds every
                // existing block and leaves 2x headroom over the request.
                if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                {
                    newBlockSize = smallerNewBlockSize;
                    ++newBlockSizeShift;
                }
                else
                {
                    break;
                }
            }
        }

        size_t newBlockIndex = 0;
        VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
            CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
        // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
        if (!m_ExplicitBlockSize)
        {
            while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
            {
                const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                if (smallerNewBlockSize >= size)
                {
                    newBlockSize = smallerNewBlockSize;
                    ++newBlockSizeShift;
                    res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
                        CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
                else
                {
                    break;
                }
            }
        }

        if (res == VK_SUCCESS)
        {
            VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
            VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

            res = AllocateFromBlock(
                pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
            if (res == VK_SUCCESS)
            {
                VMA_DEBUG_LOG_FORMAT(" Created new block #%" PRIu32 " Size=%" PRIu64, pBlock->GetId(), newBlockSize);
                IncrementallySortBlocks();
                return VK_SUCCESS;
            }
            else
            {
                // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
// Frees a block suballocation: returns its range to the block's metadata,
// updates the budget, and opportunistically destroys at most one empty
// block (a hysteresis: one empty block is kept around to avoid repeatedly
// allocating and freeing whole VkDeviceMemory blocks).
void VmaBlockVector::Free(const VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    bool budgetExceeded = false;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
        // When over budget, release empty blocks more aggressively below.
        budgetExceeded = heapBudget.usage >= heapBudget.budget;
    }

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        if (IsCorruptionDetectionEnabled())
        {
            // Check the magic value written right after this allocation is
            // still intact before releasing the range.
            VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // Drop the map reference this persistently-mapped allocation held.
        if (hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
        pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
        pBlock->PostFree(m_hAllocator);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG_FORMAT(" Freed from MemoryTypeIndex=%" PRIu32, m_MemoryTypeIndex);

        // Never shrink below the configured minimum block count.
        const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
        // pBlock became empty after this deallocation.
        if (pBlock->m_pMetadata->IsEmpty())
        {
            // Already had empty block. We don't want to have two, so delete this one.
            if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth.
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if (hadEmptyBlockBeforeFree && canDeleteBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if (pLastBlock->m_pMetadata->IsEmpty())
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
            }
        }

        IncrementallySortBlocks();
    }

    // Destruction of a free block. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if (pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG_FORMAT(" Deleted empty block #%" PRIu32, pBlockToDelete->GetId());
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }

    m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize());
    m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation);
}
  9866. VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
  9867. {
  9868. VkDeviceSize result = 0;
  9869. for (size_t i = m_Blocks.size(); i--; )
  9870. {
  9871. result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
  9872. if (result >= m_PreferredBlockSize)
  9873. {
  9874. break;
  9875. }
  9876. }
  9877. return result;
  9878. }
  9879. void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
  9880. {
  9881. for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
  9882. {
  9883. if (m_Blocks[blockIndex] == pBlock)
  9884. {
  9885. VmaVectorRemove(m_Blocks, blockIndex);
  9886. return;
  9887. }
  9888. }
  9889. VMA_ASSERT(0);
  9890. }
  9891. void VmaBlockVector::IncrementallySortBlocks()
  9892. {
  9893. if (!m_IncrementalSort)
  9894. return;
  9895. if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
  9896. {
  9897. // Bubble sort only until first swap.
  9898. for (size_t i = 1; i < m_Blocks.size(); ++i)
  9899. {
  9900. if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
  9901. {
  9902. std::swap(m_Blocks[i - 1], m_Blocks[i]);
  9903. return;
  9904. }
  9905. }
  9906. }
  9907. }
  9908. void VmaBlockVector::SortByFreeSize()
  9909. {
  9910. VMA_SORT(m_Blocks.begin(), m_Blocks.end(),
  9911. [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool
  9912. {
  9913. return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
  9914. });
  9915. }
  9916. VkResult VmaBlockVector::AllocateFromBlock(
  9917. VmaDeviceMemoryBlock* pBlock,
  9918. VkDeviceSize size,
  9919. VkDeviceSize alignment,
  9920. VmaAllocationCreateFlags allocFlags,
  9921. void* pUserData,
  9922. VmaSuballocationType suballocType,
  9923. uint32_t strategy,
  9924. VmaAllocation* pAllocation)
  9925. {
  9926. const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
  9927. VmaAllocationRequest currRequest = {};
  9928. if (pBlock->m_pMetadata->CreateAllocationRequest(
  9929. size,
  9930. alignment,
  9931. isUpperAddress,
  9932. suballocType,
  9933. strategy,
  9934. &currRequest))
  9935. {
  9936. return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation);
  9937. }
  9938. return VK_ERROR_OUT_OF_DEVICE_MEMORY;
  9939. }
// Turns a successful VmaAllocationRequest into a live VmaAllocation:
// maps the block if requested, records the suballocation in the block's
// metadata, initializes the allocation object, and updates the budget.
// Ordering matters: the block is mapped BEFORE metadata->Alloc() so a map
// failure leaves the metadata untouched.
VkResult VmaBlockVector::CommitAllocationRequest(
    VmaAllocationRequest& allocRequest,
    VmaDeviceMemoryBlock* pBlock,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    const bool isMappingAllowed = (allocFlags &
        (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;

    pBlock->PostAlloc(m_hAllocator);
    // Allocate from pCurrBlock.
    if (mapped)
    {
        VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
        if (res != VK_SUCCESS)
        {
            return res;
        }
    }

    *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed);
    pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation);
    (*pAllocation)->InitBlockAllocation(
        pBlock,
        allocRequest.allocHandle,
        alignment,
        allocRequest.size, // Not size, as actual allocation size may be larger than requested!
        m_MemoryTypeIndex,
        suballocType,
        mapped);
    VMA_HEAVY_ASSERT(pBlock->Validate());
    // USER_DATA_COPY_STRING_BIT reinterprets pUserData as a C string to copy.
    if (isUserDataString)
        (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData);
    else
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    // Budget is tracked per heap; charge the actual (possibly enlarged) size.
    m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size);
    if (VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }
    if (IsCorruptionDetectionEnabled())
    {
        // Write the magic value after the allocation; validated on Free().
        VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size);
        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    }
    return VK_SUCCESS;
}
// Allocates a fresh VkDeviceMemory of `blockSize`, wraps it in a
// VmaDeviceMemoryBlock, and appends it to m_Blocks. Optionally returns the
// new block's index via pNewBlockIndex. The pNext chain is assembled from
// the user-supplied chain plus optional extension structs below.
// Called from CreateMinBlocks() (during init) or AllocatePage() (under the
// write lock).
VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.pNext = m_pMemoryAllocateNext;
    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    allocInfo.allocationSize = blockSize;

#if VMA_BUFFER_DEVICE_ADDRESS
    // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
    VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
    if (m_hAllocator->m_UseKhrBufferDeviceAddress)
    {
        allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
        VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
    }
#endif // VMA_BUFFER_DEVICE_ADDRESS

#if VMA_MEMORY_PRIORITY
    VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
    if (m_hAllocator->m_UseExtMemoryPriority)
    {
        VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f);
        priorityInfo.priority = m_Priority;
        VmaPnextChainPushFront(&allocInfo, &priorityInfo);
    }
#endif // VMA_MEMORY_PRIORITY

#if VMA_EXTERNAL_MEMORY
    // Attach VkExportMemoryAllocateInfoKHR if necessary.
    VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
    exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
    if (exportMemoryAllocInfo.handleTypes != 0)
    {
        VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
    }
#endif // VMA_EXTERNAL_MEMORY

    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if (res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created.

    // Create new Allocation for it.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        m_hAllocator,
        m_hParentPool,
        m_MemoryTypeIndex,
        mem,
        allocInfo.allocationSize,
        m_NextBlockId++,
        m_Algorithm,
        m_BufferImageGranularity);

    m_Blocks.push_back(pBlock);
    if (pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}
  10048. bool VmaBlockVector::HasEmptyBlock()
  10049. {
  10050. for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
  10051. {
  10052. VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
  10053. if (pBlock->m_pMetadata->IsEmpty())
  10054. {
  10055. return true;
  10056. }
  10057. }
  10058. return false;
  10059. }
  10060. #if VMA_STATS_STRING_ENABLED
  10061. void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
  10062. {
  10063. VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
  10064. json.BeginObject();
  10065. for (size_t i = 0; i < m_Blocks.size(); ++i)
  10066. {
  10067. json.BeginString();
  10068. json.ContinueString(m_Blocks[i]->GetId());
  10069. json.EndString();
  10070. json.BeginObject();
  10071. json.WriteString("MapRefCount");
  10072. json.WriteNumber(m_Blocks[i]->GetMapRefCount());
  10073. m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
  10074. json.EndObject();
  10075. }
  10076. json.EndObject();
  10077. }
  10078. #endif // VMA_STATS_STRING_ENABLED
  10079. VkResult VmaBlockVector::CheckCorruption()
  10080. {
  10081. if (!IsCorruptionDetectionEnabled())
  10082. {
  10083. return VK_ERROR_FEATURE_NOT_PRESENT;
  10084. }
  10085. VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
  10086. for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
  10087. {
  10088. VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
  10089. VMA_ASSERT(pBlock);
  10090. VkResult res = pBlock->CheckCorruption(m_hAllocator);
  10091. if (res != VK_SUCCESS)
  10092. {
  10093. return res;
  10094. }
  10095. }
  10096. return VK_SUCCESS;
  10097. }
  10098. #endif // _VMA_BLOCK_VECTOR_FUNCTIONS
  10099. #ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
// Sets up a defragmentation session over either a single custom pool's
// block vector (info.pool != null) or all of the allocator's default block
// vectors. Affected vectors get incremental sorting suspended (this context
// owns the ordering until destruction) and are fully sorted by free size.
// Per-vector algorithm state is allocated depending on the chosen algorithm.
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    const VmaDefragmentationInfo& info)
    : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
    m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
    m_BreakCallback(info.pfnBreakCallback),
    m_BreakCallbackUserData(info.pBreakCallbackUserData),
    m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
    m_Moves(m_MoveAllocator)
{
    m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;

    if (info.pool != VMA_NULL)
    {
        // Single-pool mode: operate on that pool's vector only.
        m_BlockVectorCount = 1;
        m_PoolBlockVector = &info.pool->m_BlockVector;
        m_pBlockVectors = &m_PoolBlockVector;
        m_PoolBlockVector->SetIncrementalSort(false);
        m_PoolBlockVector->SortByFreeSize();
    }
    else
    {
        // Whole-allocator mode: one vector per memory type (entries may be null).
        m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
        m_PoolBlockVector = VMA_NULL;
        m_pBlockVectors = hAllocator->m_pBlockVectors;
        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
        {
            VmaBlockVector* vector = m_pBlockVectors[i];
            if (vector != VMA_NULL)
            {
                vector->SetIncrementalSort(false);
                vector->SortByFreeSize();
            }
        }
    }

    switch (m_Algorithm)
    {
    case 0: // Default algorithm
        m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
        break;
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
        break;
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
        // Extensive state is only needed when bufferImageGranularity can
        // force conflicts between adjacent buffer/image allocations.
        if (hAllocator->GetBufferImageGranularity() > 1)
        {
            m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
        }
        break;
        // Other algorithms (e.g. FAST) keep m_AlgorithmState null.
    }
}
// Ends the defragmentation session: re-enables incremental sorting on every
// affected block vector and releases the per-vector algorithm state array
// (its element type depends on which algorithm was selected).
VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
{
    if (m_PoolBlockVector != VMA_NULL)
    {
        m_PoolBlockVector->SetIncrementalSort(true);
    }
    else
    {
        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
        {
            VmaBlockVector* vector = m_pBlockVectors[i];
            if (vector != VMA_NULL)
                vector->SetIncrementalSort(true);
        }
    }

    if (m_AlgorithmState)
    {
        // Cast back to the concrete state type chosen in the constructor.
        switch (m_Algorithm)
        {
        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
            break;
        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
            break;
        default:
            // Any other algorithm should never have allocated state.
            VMA_ASSERT(0);
        }
    }
}
// Computes the next batch of suggested moves and reports them via moveInfo.
// Returns VK_INCOMPLETE when there are moves for the caller to execute
// (followed by DefragmentPassEnd()), or VK_SUCCESS when nothing remains.
// The returned pMoves pointer references this context's internal m_Moves
// storage and stays valid until the pass ends.
VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
{
    if (m_PoolBlockVector != VMA_NULL)
    {
        // Single-pool mode.
        VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
        if (m_PoolBlockVector->GetBlockCount() > 1)
            ComputeDefragmentation(*m_PoolBlockVector, 0);
        else if (m_PoolBlockVector->GetBlockCount() == 1)
            // A single block can only be compacted within itself.
            ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
    }
    else
    {
        // Whole-allocator mode: visit each memory type's vector; stop once a
        // vector reports that this pass's move budget is consumed.
        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
        {
            if (m_pBlockVectors[i] != VMA_NULL)
            {
                VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
                if (m_pBlockVectors[i]->GetBlockCount() > 1)
                {
                    if (ComputeDefragmentation(*m_pBlockVectors[i], i))
                        break;
                }
                else if (m_pBlockVectors[i]->GetBlockCount() == 1)
                {
                    if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
                        break;
                }
            }
        }
    }

    moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
    if (moveInfo.moveCount > 0)
    {
        moveInfo.pMoves = m_Moves.data();
        return VK_INCOMPLETE;
    }

    moveInfo.pMoves = VMA_NULL;
    return VK_SUCCESS;
}
  10220. VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
  10221. {
  10222. VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);
  10223. VkResult result = VK_SUCCESS;
  10224. VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
  10225. VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
  10226. VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
  10227. VmaAllocator allocator = VMA_NULL;
  10228. for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
  10229. {
  10230. VmaDefragmentationMove& move = moveInfo.pMoves[i];
  10231. size_t prevCount = 0, currentCount = 0;
  10232. VkDeviceSize freedBlockSize = 0;
  10233. uint32_t vectorIndex;
  10234. VmaBlockVector* vector;
  10235. if (m_PoolBlockVector != VMA_NULL)
  10236. {
  10237. vectorIndex = 0;
  10238. vector = m_PoolBlockVector;
  10239. }
  10240. else
  10241. {
  10242. vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
  10243. vector = m_pBlockVectors[vectorIndex];
  10244. VMA_ASSERT(vector != VMA_NULL);
  10245. }
  10246. switch (move.operation)
  10247. {
  10248. case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
  10249. {
  10250. uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
  10251. if (mapCount > 0)
  10252. {
  10253. allocator = vector->m_hAllocator;
  10254. VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
  10255. bool notPresent = true;
  10256. for (FragmentedBlock& block : mappedBlocks)
  10257. {
  10258. if (block.block == newMapBlock)
  10259. {
  10260. notPresent = false;
  10261. block.data += mapCount;
  10262. break;
  10263. }
  10264. }
  10265. if (notPresent)
  10266. mappedBlocks.push_back({ mapCount, newMapBlock });
  10267. }
  10268. // Scope for locks, Free have it's own lock
  10269. {
  10270. VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
  10271. prevCount = vector->GetBlockCount();
  10272. freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
  10273. }
  10274. vector->Free(move.dstTmpAllocation);
  10275. {
  10276. VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
  10277. currentCount = vector->GetBlockCount();
  10278. }
  10279. result = VK_INCOMPLETE;
  10280. break;
  10281. }
  10282. case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
  10283. {
  10284. m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
  10285. --m_PassStats.allocationsMoved;
  10286. vector->Free(move.dstTmpAllocation);
  10287. VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
  10288. bool notPresent = true;
  10289. for (const FragmentedBlock& block : immovableBlocks)
  10290. {
  10291. if (block.block == newBlock)
  10292. {
  10293. notPresent = false;
  10294. break;
  10295. }
  10296. }
  10297. if (notPresent)
  10298. immovableBlocks.push_back({ vectorIndex, newBlock });
  10299. break;
  10300. }
  10301. case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
  10302. {
  10303. m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
  10304. --m_PassStats.allocationsMoved;
  10305. // Scope for locks, Free have it's own lock
  10306. {
  10307. VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
  10308. prevCount = vector->GetBlockCount();
  10309. freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
  10310. }
  10311. vector->Free(move.srcAllocation);
  10312. {
  10313. VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
  10314. currentCount = vector->GetBlockCount();
  10315. }
  10316. freedBlockSize *= prevCount - currentCount;
  10317. VkDeviceSize dstBlockSize;
  10318. {
  10319. VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
  10320. dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
  10321. }
  10322. vector->Free(move.dstTmpAllocation);
  10323. {
  10324. VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
  10325. freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
  10326. currentCount = vector->GetBlockCount();
  10327. }
  10328. result = VK_INCOMPLETE;
  10329. break;
  10330. }
  10331. default:
  10332. VMA_ASSERT(0);
  10333. }
  10334. if (prevCount > currentCount)
  10335. {
  10336. size_t freedBlocks = prevCount - currentCount;
  10337. m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
  10338. m_PassStats.bytesFreed += freedBlockSize;
  10339. }
  10340. if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT &&
  10341. m_AlgorithmState != VMA_NULL)
  10342. {
  10343. // Avoid unnecessary tries to allocate when new free block is available
  10344. StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
  10345. if (state.firstFreeBlock != SIZE_MAX)
  10346. {
  10347. const size_t diff = prevCount - currentCount;
  10348. if (state.firstFreeBlock >= diff)
  10349. {
  10350. state.firstFreeBlock -= diff;
  10351. if (state.firstFreeBlock != 0)
  10352. state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
  10353. }
  10354. else
  10355. state.firstFreeBlock = 0;
  10356. }
  10357. }
  10358. }
  10359. moveInfo.moveCount = 0;
  10360. moveInfo.pMoves = VMA_NULL;
  10361. m_Moves.clear();
  10362. // Update stats
  10363. m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
  10364. m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
  10365. m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
  10366. m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
  10367. m_PassStats = { 0 };
  10368. // Move blocks with immovable allocations according to algorithm
  10369. if (immovableBlocks.size() > 0)
  10370. {
  10371. do
  10372. {
  10373. if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT)
  10374. {
  10375. if (m_AlgorithmState != VMA_NULL)
  10376. {
  10377. bool swapped = false;
  10378. // Move to the start of free blocks range
  10379. for (const FragmentedBlock& block : immovableBlocks)
  10380. {
  10381. StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
  10382. if (state.operation != StateExtensive::Operation::Cleanup)
  10383. {
  10384. VmaBlockVector* vector = m_pBlockVectors[block.data];
  10385. VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
  10386. for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
  10387. {
  10388. if (vector->GetBlock(i) == block.block)
  10389. {
  10390. std::swap(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
  10391. if (state.firstFreeBlock != SIZE_MAX)
  10392. {
  10393. if (i + 1 < state.firstFreeBlock)
  10394. {
  10395. if (state.firstFreeBlock > 1)
  10396. std::swap(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
  10397. else
  10398. --state.firstFreeBlock;
  10399. }
  10400. }
  10401. swapped = true;
  10402. break;
  10403. }
  10404. }
  10405. }
  10406. }
  10407. if (swapped)
  10408. result = VK_INCOMPLETE;
  10409. break;
  10410. }
  10411. }
  10412. // Move to the beginning
  10413. for (const FragmentedBlock& block : immovableBlocks)
  10414. {
  10415. VmaBlockVector* vector = m_pBlockVectors[block.data];
  10416. VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
  10417. for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
  10418. {
  10419. if (vector->GetBlock(i) == block.block)
  10420. {
  10421. std::swap(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
  10422. break;
  10423. }
  10424. }
  10425. }
  10426. } while (false);
  10427. }
  10428. // Bulk-map destination blocks
  10429. for (const FragmentedBlock& block : mappedBlocks)
  10430. {
  10431. VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
  10432. VMA_ASSERT(res == VK_SUCCESS);
  10433. }
  10434. return result;
  10435. }
  10436. bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
  10437. {
  10438. switch (m_Algorithm)
  10439. {
  10440. case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
  10441. return ComputeDefragmentation_Fast(vector);
  10442. case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
  10443. return ComputeDefragmentation_Balanced(vector, index, true);
  10444. case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
  10445. return ComputeDefragmentation_Full(vector);
  10446. case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
  10447. return ComputeDefragmentation_Extensive(vector, index);
  10448. default:
  10449. VMA_ASSERT(0);
  10450. return ComputeDefragmentation_Balanced(vector, index, true);
  10451. }
  10452. }
  10453. VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData(
  10454. VmaAllocHandle handle, VmaBlockMetadata* metadata)
  10455. {
  10456. MoveAllocationData moveData;
  10457. moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle);
  10458. moveData.size = moveData.move.srcAllocation->GetSize();
  10459. moveData.alignment = moveData.move.srcAllocation->GetAlignment();
  10460. moveData.type = moveData.move.srcAllocation->GetSuballocationType();
  10461. moveData.flags = 0;
  10462. if (moveData.move.srcAllocation->IsPersistentMap())
  10463. moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
  10464. if (moveData.move.srcAllocation->IsMappingAllowed())
  10465. moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
  10466. return moveData;
  10467. }
  10468. VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
  10469. {
  10470. // Check custom criteria if exists
  10471. if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData))
  10472. return CounterStatus::End;
  10473. // Ignore allocation if will exceed max size for copy
  10474. if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
  10475. {
  10476. if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
  10477. return CounterStatus::Ignore;
  10478. else
  10479. return CounterStatus::End;
  10480. }
  10481. else
  10482. m_IgnoredAllocs = 0;
  10483. return CounterStatus::Pass;
  10484. }
  10485. bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes)
  10486. {
  10487. m_PassStats.bytesMoved += bytes;
  10488. // Early return when max found
  10489. if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes)
  10490. {
  10491. VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations ||
  10492. m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
  10493. return true;
  10494. }
  10495. return false;
  10496. }
// Tries to compact a single block by moving its allocations toward lower offsets.
// Returns true when pass limits were reached and defragmentation should stop.
bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block)
{
    VmaBlockMetadata* metadata = block->m_pMetadata;

    for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
        handle != VK_NULL_HANDLE;
        handle = metadata->GetNextAllocation(handle))
    {
        MoveAllocationData moveData = GetMoveData(handle, metadata);
        // Ignore newly created allocations by defragmentation algorithm
        if (moveData.move.srcAllocation->GetUserData() == this)
            continue;
        switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
        {
        case CounterStatus::Ignore:
            continue;
        case CounterStatus::End:
            return true;
        case CounterStatus::Pass:
            break;
        default:
            VMA_ASSERT(0);
        }

        // An allocation already at offset 0 cannot move any lower in this block.
        VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
        if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
        {
            VmaAllocationRequest request = {};
            if (metadata->CreateAllocationRequest(
                moveData.size,
                moveData.alignment,
                false, // NOTE(review): presumably "upperAddress" — confirm against CreateAllocationRequest's signature
                moveData.type,
                VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
                &request))
            {
                // Only commit when the candidate spot is strictly closer to the block start.
                if (metadata->GetAllocationOffset(request.allocHandle) < offset)
                {
                    if (vector.CommitAllocationRequest(
                        request,
                        block,
                        moveData.alignment,
                        moveData.flags,
                        this,
                        moveData.type,
                        &moveData.move.dstTmpAllocation) == VK_SUCCESS)
                    {
                        // Record the move; stop the pass if limits were just reached.
                        m_Moves.push_back(moveData.move);
                        if (IncrementCounters(moveData.size))
                            return true;
                    }
                }
            }
        }
    }
    return false;
}
  10552. bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector)
  10553. {
  10554. for (; start < end; ++start)
  10555. {
  10556. VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start);
  10557. if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
  10558. {
  10559. if (vector.AllocateFromBlock(dstBlock,
  10560. data.size,
  10561. data.alignment,
  10562. data.flags,
  10563. this,
  10564. data.type,
  10565. 0,
  10566. &data.move.dstTmpAllocation) == VK_SUCCESS)
  10567. {
  10568. m_Moves.push_back(data.move);
  10569. if (IncrementCounters(data.size))
  10570. return true;
  10571. break;
  10572. }
  10573. }
  10574. }
  10575. return false;
  10576. }
// Fast algorithm: only moves allocations between blocks, never within one.
// Returns true when pass limits were reached and the pass should end.
bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
{
    // Move only between blocks
    // Go through allocations in last blocks and try to fit them inside first ones
    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
    {
        VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore newly created allocations by defragmentation algorithm
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true; // Pass limits reached; stop.
            case CounterStatus::Pass:
                break;
            default:
                VMA_ASSERT(0);
            }

            // Check all previous blocks for free space
            if (AllocInOtherBlock(0, i, moveData, vector))
                return true;
        }
    }
    return false;
}
// Balanced algorithm: prefers cross-block moves; falls back to in-block
// compaction only when heuristics based on average alloc/free sizes suggest
// it is worthwhile. Returns true when pass limits were reached.
bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
{
    // Go over every allocation and try to fit it in previous blocks at lowest offsets,
    // if not possible: realloc within single block to minimize offset (exclude offset == 0),
    // but only if there are noticeable gaps between them (some heuristic, ex. average size of allocation in block)
    VMA_ASSERT(m_AlgorithmState != VMA_NULL);

    // Per-vector statistics are cached in m_AlgorithmState and refreshed lazily
    // (avgAllocSize == UINT64_MAX marks them as stale).
    StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
    if (update && vectorState.avgAllocSize == UINT64_MAX)
        UpdateVectorStatistics(vector, vectorState);

    const size_t startMoveCount = m_Moves.size();
    VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
    // Walk blocks from the back so data migrates toward the front blocks.
    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
    {
        VmaDeviceMemoryBlock* block = vector.GetBlock(i);
        VmaBlockMetadata* metadata = block->m_pMetadata;
        VkDeviceSize prevFreeRegionSize = 0;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore newly created allocations by defragmentation algorithm
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            case CounterStatus::Pass:
                break;
            default:
                VMA_ASSERT(0);
            }

            // Check all previous blocks for free space
            const size_t prevMoveCount = m_Moves.size();
            if (AllocInOtherBlock(0, i, moveData, vector))
                return true;

            VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
            // If no room found then realloc within block for lower offset
            VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
            if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
            {
                // Check if realloc will make sense
                if (prevFreeRegionSize >= minimalFreeRegion ||
                    nextFreeRegionSize >= minimalFreeRegion ||
                    moveData.size <= vectorState.avgFreeSize ||
                    moveData.size <= vectorState.avgAllocSize)
                {
                    VmaAllocationRequest request = {};
                    if (metadata->CreateAllocationRequest(
                        moveData.size,
                        moveData.alignment,
                        false,
                        moveData.type,
                        VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
                        &request))
                    {
                        // Commit only when the new spot is strictly lower than the old one.
                        if (metadata->GetAllocationOffset(request.allocHandle) < offset)
                        {
                            if (vector.CommitAllocationRequest(
                                request,
                                block,
                                moveData.alignment,
                                moveData.flags,
                                this,
                                moveData.type,
                                &moveData.move.dstTmpAllocation) == VK_SUCCESS)
                            {
                                m_Moves.push_back(moveData.move);
                                if (IncrementCounters(moveData.size))
                                    return true;
                            }
                        }
                    }
                }
            }
            prevFreeRegionSize = nextFreeRegionSize;
        }
    }

    // No moves performed, update statistics to current vector state
    // NOTE(review): this branch requires update == false, but the recursive call
    // also passes false, so the refresh at the top (gated on 'update') never runs
    // on the retry — verify the intended semantics of the 'update' flag.
    if (startMoveCount == m_Moves.size() && !update)
    {
        vectorState.avgAllocSize = UINT64_MAX;
        return ComputeDefragmentation_Balanced(vector, index, false);
    }
    return false;
}
// Full algorithm: like Balanced but always attempts the in-block realloc when
// a cross-block move fails, without any heuristic gate. Returns true when pass
// limits were reached.
bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)
{
    // Go over every allocation and try to fit it in previous blocks at lowest offsets,
    // if not possible: realloc within single block to minimize offset (exclude offset == 0)
    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
    {
        VmaDeviceMemoryBlock* block = vector.GetBlock(i);
        VmaBlockMetadata* metadata = block->m_pMetadata;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore newly created allocations by defragmentation algorithm
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            case CounterStatus::Pass:
                break;
            default:
                VMA_ASSERT(0);
            }

            // Check all previous blocks for free space
            const size_t prevMoveCount = m_Moves.size();
            if (AllocInOtherBlock(0, i, moveData, vector))
                return true;

            // If no room found then realloc within block for lower offset
            VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
            if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
            {
                VmaAllocationRequest request = {};
                if (metadata->CreateAllocationRequest(
                    moveData.size,
                    moveData.alignment,
                    false,
                    moveData.type,
                    VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
                    &request))
                {
                    // Commit only when the new spot is strictly lower than the old one.
                    if (metadata->GetAllocationOffset(request.allocHandle) < offset)
                    {
                        if (vector.CommitAllocationRequest(
                            request,
                            block,
                            moveData.alignment,
                            moveData.flags,
                            this,
                            moveData.type,
                            &moveData.move.dstTmpAllocation) == VK_SUCCESS)
                        {
                            m_Moves.push_back(moveData.move);
                            if (IncrementCounters(moveData.size))
                                return true;
                        }
                    }
                }
            }
        }
    }
    return false;
}
// Extensive algorithm: a per-vector state machine that repeatedly empties one
// block, then packs it full with one resource category at a time (textures,
// then buffers, then everything else) to avoid bufferImageGranularity waste.
// Returns true when pass limits were reached.
bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)
{
    // First free single block, then populate it to the brim, then free another block, and so on
    // Fallback to previous algorithm since without granularity conflicts it can achieve max packing
    if (vector.m_BufferImageGranularity == 1)
        return ComputeDefragmentation_Full(vector);
    VMA_ASSERT(m_AlgorithmState != VMA_NULL);

    // Per-vector state machine stored in m_AlgorithmState.
    StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];
    bool texturePresent = false, bufferPresent = false, otherPresent = false;
    switch (vectorState.operation)
    {
    case StateExtensive::Operation::Done: // Vector defragmented
        return false;
    case StateExtensive::Operation::FindFreeBlockBuffer:
    case StateExtensive::Operation::FindFreeBlockTexture:
    case StateExtensive::Operation::FindFreeBlockAll:
    {
        // No more blocks to free, just perform fast realloc and move to cleanup
        if (vectorState.firstFreeBlock == 0)
        {
            vectorState.operation = StateExtensive::Operation::Cleanup;
            return ComputeDefragmentation_Fast(vector);
        }

        // No free blocks, have to clear last one
        // firstFreeBlock == SIZE_MAX means no block has been freed yet; start from the end.
        size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;
        VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;

        const size_t prevMoveCount = m_Moves.size();
        for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = freeMetadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, freeMetadata);
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            case CounterStatus::Pass:
                break;
            default:
                VMA_ASSERT(0);
            }

            // Check all previous blocks for free space
            if (AllocInOtherBlock(0, last, moveData, vector))
            {
                // Full clear performed already
                if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
                    vectorState.firstFreeBlock = last;
                return true;
            }
        }

        if (prevMoveCount == m_Moves.size())
        {
            // Cannot perform full clear, have to move data in other blocks around
            if (last != 0)
            {
                // NOTE(review): loop stops before index 0, so the first block is
                // never compacted here — presumably intentional; confirm.
                for (size_t i = last - 1; i; --i)
                {
                    if (ReallocWithinBlock(vector, vector.GetBlock(i)))
                        return true;
                }
            }

            if (prevMoveCount == m_Moves.size())
            {
                // No possible reallocs within blocks, try to move them around fast
                return ComputeDefragmentation_Fast(vector);
            }
        }
        else
        {
            // Block fully scheduled for eviction; advance to the matching move phase.
            switch (vectorState.operation)
            {
            case StateExtensive::Operation::FindFreeBlockBuffer:
                vectorState.operation = StateExtensive::Operation::MoveBuffers;
                break;
            case StateExtensive::Operation::FindFreeBlockTexture:
                vectorState.operation = StateExtensive::Operation::MoveTextures;
                break;
            case StateExtensive::Operation::FindFreeBlockAll:
                vectorState.operation = StateExtensive::Operation::MoveAll;
                break;
            default:
                VMA_ASSERT(0);
                vectorState.operation = StateExtensive::Operation::MoveTextures;
            }
            vectorState.firstFreeBlock = last;
            // Nothing done, block found without reallocations, can perform another reallocs in same pass
            return ComputeDefragmentation_Extensive(vector, index);
        }
        break;
    }
    case StateExtensive::Operation::MoveTextures:
    {
        // Optimal-tiling images go first.
        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,
            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
        {
            if (texturePresent)
            {
                vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;
                return ComputeDefragmentation_Extensive(vector, index);
            }

            if (!bufferPresent && !otherPresent)
            {
                vectorState.operation = StateExtensive::Operation::Cleanup;
                break;
            }

            // No more textures to move, check buffers
            vectorState.operation = StateExtensive::Operation::MoveBuffers;
            bufferPresent = false;
            otherPresent = false;
        }
        else
            break;
        VMA_FALLTHROUGH; // Fallthrough
    }
    case StateExtensive::Operation::MoveBuffers:
    {
        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,
            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
        {
            if (bufferPresent)
            {
                vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
                return ComputeDefragmentation_Extensive(vector, index);
            }

            if (!otherPresent)
            {
                vectorState.operation = StateExtensive::Operation::Cleanup;
                break;
            }

            // No more buffers to move, check all others
            vectorState.operation = StateExtensive::Operation::MoveAll;
            otherPresent = false;
        }
        else
            break;
        VMA_FALLTHROUGH; // Fallthrough
    }
    case StateExtensive::Operation::MoveAll:
    {
        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector,
            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
        {
            if (otherPresent)
            {
                vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
                return ComputeDefragmentation_Extensive(vector, index);
            }
            // Everything moved
            vectorState.operation = StateExtensive::Operation::Cleanup;
        }
        break;
    }
    case StateExtensive::Operation::Cleanup:
        // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062).
        break;
    }

    if (vectorState.operation == StateExtensive::Operation::Cleanup)
    {
        // All other work done, pack data in blocks even tighter if possible
        const size_t prevMoveCount = m_Moves.size();
        for (size_t i = 0; i < vector.GetBlockCount(); ++i)
        {
            if (ReallocWithinBlock(vector, vector.GetBlock(i)))
                return true;
        }

        // Nothing left to compact — mark the vector as fully defragmented.
        if (prevMoveCount == m_Moves.size())
            vectorState.operation = StateExtensive::Operation::Done;
    }
    return false;
}
  10937. void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state)
  10938. {
  10939. size_t allocCount = 0;
  10940. size_t freeCount = 0;
  10941. state.avgFreeSize = 0;
  10942. state.avgAllocSize = 0;
  10943. for (size_t i = 0; i < vector.GetBlockCount(); ++i)
  10944. {
  10945. VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
  10946. allocCount += metadata->GetAllocationCount();
  10947. freeCount += metadata->GetFreeRegionsCount();
  10948. state.avgFreeSize += metadata->GetSumFreeSize();
  10949. state.avgAllocSize += metadata->GetSize();
  10950. }
  10951. state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
  10952. state.avgFreeSize /= freeCount;
  10953. }
// Moves allocations compatible with 'currentType' out of the blocks below
// 'firstFreeBlock' and into the free blocks at [firstFreeBlock, blockCount).
// The out-flags report which resource categories were seen remaining.
// Returns true when no move was recorded by this call (m_Moves unchanged),
// which also covers a pass aborted by CheckCounters before any move.
bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType,
    VmaBlockVector& vector, size_t firstFreeBlock,
    bool& texturePresent, bool& bufferPresent, bool& otherPresent)
{
    const size_t prevMoveCount = m_Moves.size();
    // Walk the non-free blocks from back to front.
    for (size_t i = firstFreeBlock ; i;)
    {
        VmaDeviceMemoryBlock* block = vector.GetBlock(--i);
        VmaBlockMetadata* metadata = block->m_pMetadata;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore newly created allocations by defragmentation algorithm
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            case CounterStatus::Pass:
                break;
            default:
                VMA_ASSERT(0);
            }

            // Move only single type of resources at once
            if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType))
            {
                // Try to fit allocation into free blocks
                if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector))
                    return false; // Pass limits reached mid-move; report "moves happened".
            }

            // Record which categories of allocations are still present in these blocks.
            if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL))
                texturePresent = true;
            else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER))
                bufferPresent = true;
            else
                otherPresent = true;
        }
    }
    return prevMoveCount == m_Moves.size();
}
  10999. #endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
  11000. #ifndef _VMA_POOL_T_FUNCTIONS
// Constructs a custom pool: all real work is forwarding createInfo into the
// embedded VmaBlockVector.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize)
    : m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        // Explicit block size wins; otherwise use the allocator's preferred size.
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 disables buffer/image granularity handling when the flag asks to ignore it.
        (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
        createInfo.priority,
        // Honor both the memory type's minimum alignment and the user's request.
        VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
        createInfo.pMemoryAllocateNext),
    m_Id(0),
    m_Name(VMA_NULL) {}
  11020. VmaPool_T::~VmaPool_T()
  11021. {
  11022. VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
  11023. const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
  11024. VmaFreeString(allocs, m_Name);
  11025. }
  11026. void VmaPool_T::SetName(const char* pName)
  11027. {
  11028. const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
  11029. VmaFreeString(allocs, m_Name);
  11030. if (pName != VMA_NULL)
  11031. {
  11032. m_Name = VmaCreateStringCopy(allocs, pName);
  11033. }
  11034. else
  11035. {
  11036. m_Name = VMA_NULL;
  11037. }
  11038. }
  11039. #endif // _VMA_POOL_T_FUNCTIONS
  11040. #ifndef _VMA_ALLOCATOR_T_FUNCTIONS
  11041. VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
  11042. m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
  11043. m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
  11044. m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
  11045. m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
  11046. m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
  11047. m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
  11048. m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
  11049. m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
  11050. m_UseKhrMaintenance4((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT) != 0),
  11051. m_UseKhrMaintenance5((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT) != 0),
  11052. m_hDevice(pCreateInfo->device),
  11053. m_hInstance(pCreateInfo->instance),
  11054. m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
  11055. m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
  11056. *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
  11057. m_AllocationObjectAllocator(&m_AllocationCallbacks),
  11058. m_HeapSizeLimitMask(0),
  11059. m_DeviceMemoryCount(0),
  11060. m_PreferredLargeHeapBlockSize(0),
  11061. m_PhysicalDevice(pCreateInfo->physicalDevice),
  11062. m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
  11063. m_NextPoolId(0),
  11064. m_GlobalMemoryTypeBits(UINT32_MAX)
  11065. {
  11066. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
  11067. {
  11068. m_UseKhrDedicatedAllocation = false;
  11069. m_UseKhrBindMemory2 = false;
  11070. }
  11071. if(VMA_DEBUG_DETECT_CORRUPTION)
  11072. {
  11073. // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
  11074. VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
  11075. }
  11076. VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
  11077. if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
  11078. {
  11079. #if !(VMA_DEDICATED_ALLOCATION)
  11080. if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
  11081. {
  11082. VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
  11083. }
  11084. #endif
  11085. #if !(VMA_BIND_MEMORY2)
  11086. if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
  11087. {
  11088. VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
  11089. }
  11090. #endif
  11091. }
  11092. #if !(VMA_MEMORY_BUDGET)
  11093. if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
  11094. {
  11095. VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
  11096. }
  11097. #endif
  11098. #if !(VMA_BUFFER_DEVICE_ADDRESS)
  11099. if(m_UseKhrBufferDeviceAddress)
  11100. {
  11101. VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
  11102. }
  11103. #endif
  11104. #if VMA_VULKAN_VERSION < 1003000
  11105. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
  11106. {
  11107. VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros.");
  11108. }
  11109. #endif
  11110. #if VMA_VULKAN_VERSION < 1002000
  11111. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
  11112. {
  11113. VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
  11114. }
  11115. #endif
  11116. #if VMA_VULKAN_VERSION < 1001000
  11117. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
  11118. {
  11119. VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
  11120. }
  11121. #endif
  11122. #if !(VMA_MEMORY_PRIORITY)
  11123. if(m_UseExtMemoryPriority)
  11124. {
  11125. VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
  11126. }
  11127. #endif
  11128. #if !(VMA_KHR_MAINTENANCE4)
  11129. if(m_UseKhrMaintenance4)
  11130. {
  11131. VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
  11132. }
  11133. #endif
  11134. #if !(VMA_KHR_MAINTENANCE5)
  11135. if(m_UseKhrMaintenance5)
  11136. {
  11137. VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
  11138. }
  11139. #endif
  11140. memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
  11141. memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
  11142. memset(&m_MemProps, 0, sizeof(m_MemProps));
  11143. memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
  11144. memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
  11145. #if VMA_EXTERNAL_MEMORY
  11146. memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
  11147. #endif // #if VMA_EXTERNAL_MEMORY
  11148. if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
  11149. {
  11150. m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
  11151. m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
  11152. m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
  11153. }
  11154. ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
  11155. (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
  11156. (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
  11157. VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
  11158. VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
  11159. VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
  11160. VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
  11161. m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
  11162. pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
  11163. m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
  11164. #if VMA_EXTERNAL_MEMORY
  11165. if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
  11166. {
  11167. memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
  11168. sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
  11169. }
  11170. #endif // #if VMA_EXTERNAL_MEMORY
  11171. if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
  11172. {
  11173. for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
  11174. {
  11175. const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
  11176. if(limit != VK_WHOLE_SIZE)
  11177. {
  11178. m_HeapSizeLimitMask |= 1u << heapIndex;
  11179. if(limit < m_MemProps.memoryHeaps[heapIndex].size)
  11180. {
  11181. m_MemProps.memoryHeaps[heapIndex].size = limit;
  11182. }
  11183. }
  11184. }
  11185. }
  11186. for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
  11187. {
  11188. // Create only supported types
  11189. if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
  11190. {
  11191. const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
  11192. m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
  11193. this,
  11194. VK_NULL_HANDLE, // hParentPool
  11195. memTypeIndex,
  11196. preferredBlockSize,
  11197. 0,
  11198. SIZE_MAX,
  11199. GetBufferImageGranularity(),
  11200. false, // explicitBlockSize
  11201. 0, // algorithm
  11202. 0.5f, // priority (0.5 is the default per Vulkan spec)
  11203. GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
  11204. VMA_NULL); // // pMemoryAllocateNext
  11205. // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
  11206. // because minBlockCount is 0.
  11207. }
  11208. }
  11209. }
  11210. VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
  11211. {
  11212. VkResult res = VK_SUCCESS;
  11213. #if VMA_MEMORY_BUDGET
  11214. if(m_UseExtMemoryBudget)
  11215. {
  11216. UpdateVulkanBudget();
  11217. }
  11218. #endif // #if VMA_MEMORY_BUDGET
  11219. return res;
  11220. }
  11221. VmaAllocator_T::~VmaAllocator_T()
  11222. {
  11223. VMA_ASSERT(m_Pools.IsEmpty());
  11224. for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
  11225. {
  11226. vma_delete(this, m_pBlockVectors[memTypeIndex]);
  11227. }
  11228. }
// Fills m_VulkanFunctions from up to three sources, applied in this order:
// 1. Statically linked Vulkan functions (when VMA_STATIC_VULKAN_FUNCTIONS == 1).
// 2. User-provided pointers from VmaAllocatorCreateInfo::pVulkanFunctions,
//    which override any statically imported entries (non-null members only).
// 3. Runtime vkGet*ProcAddr fetches for members still null
//    (when VMA_DYNAMIC_VULKAN_FUNCTIONS == 1).
// Finally asserts that every required function pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
ImportVulkanFunctions_Static();
#endif
if(pVulkanFunctions != VMA_NULL)
{
ImportVulkanFunctions_Custom(pVulkanFunctions);
}
#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
ImportVulkanFunctions_Dynamic();
#endif
ValidateVulkanFunctions();
}
  11243. #if VMA_STATIC_VULKAN_FUNCTIONS == 1
  11244. void VmaAllocator_T::ImportVulkanFunctions_Static()
  11245. {
  11246. // Vulkan 1.0
  11247. m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr;
  11248. m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr;
  11249. m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
  11250. m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
  11251. m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
  11252. m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
  11253. m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
  11254. m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
  11255. m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
  11256. m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
  11257. m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
  11258. m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
  11259. m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
  11260. m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
  11261. m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
  11262. m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
  11263. m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
  11264. m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
  11265. m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
  11266. // Vulkan 1.1
  11267. #if VMA_VULKAN_VERSION >= 1001000
  11268. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
  11269. {
  11270. m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
  11271. m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
  11272. m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
  11273. m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
  11274. }
  11275. #endif
  11276. #if VMA_VULKAN_VERSION >= 1001000
  11277. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
  11278. {
  11279. m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
  11280. }
  11281. #endif
  11282. #if VMA_VULKAN_VERSION >= 1003000
  11283. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
  11284. {
  11285. m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements;
  11286. m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements;
  11287. }
  11288. #endif
  11289. }
  11290. #endif // VMA_STATIC_VULKAN_FUNCTIONS == 1
// Overrides entries of m_VulkanFunctions with the user-provided pointers.
// A null member in pVulkanFunctions leaves the previously imported pointer
// untouched, so users may supply only a partial table.
void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
{
VMA_ASSERT(pVulkanFunctions != VMA_NULL);
// Copy a member only when the user supplied a non-null pointer for it.
#define VMA_COPY_IF_NOT_NULL(funcName) \
if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
// Vulkan 1.0 core functions.
VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr);
VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr);
VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
VMA_COPY_IF_NOT_NULL(vkFreeMemory);
VMA_COPY_IF_NOT_NULL(vkMapMemory);
VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
VMA_COPY_IF_NOT_NULL(vkCreateImage);
VMA_COPY_IF_NOT_NULL(vkDestroyImage);
VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
// Extension / promoted functions, compiled in only when available in the header.
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
#endif
#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000
VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements);
VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements);
#endif
#undef VMA_COPY_IF_NOT_NULL
}
  11332. #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
  11333. void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
  11334. {
  11335. VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr &&
  11336. "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass "
  11337. "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. "
  11338. "Other members can be null.");
  11339. #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
  11340. if(m_VulkanFunctions.memberName == VMA_NULL) \
  11341. m_VulkanFunctions.memberName = \
  11342. (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString);
  11343. #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
  11344. if(m_VulkanFunctions.memberName == VMA_NULL) \
  11345. m_VulkanFunctions.memberName = \
  11346. (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString);
  11347. VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
  11348. VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
  11349. VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
  11350. VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
  11351. VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
  11352. VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
  11353. VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
  11354. VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
  11355. VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
  11356. VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
  11357. VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
  11358. VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
  11359. VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
  11360. VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
  11361. VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
  11362. VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
  11363. VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
  11364. #if VMA_VULKAN_VERSION >= 1001000
  11365. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
  11366. {
  11367. VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
  11368. VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
  11369. VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
  11370. VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
  11371. }
  11372. #endif
  11373. #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
  11374. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
  11375. {
  11376. VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2");
  11377. }
  11378. else if(m_UseExtMemoryBudget)
  11379. {
  11380. VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
  11381. }
  11382. #endif
  11383. #if VMA_DEDICATED_ALLOCATION
  11384. if(m_UseKhrDedicatedAllocation)
  11385. {
  11386. VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
  11387. VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
  11388. }
  11389. #endif
  11390. #if VMA_BIND_MEMORY2
  11391. if(m_UseKhrBindMemory2)
  11392. {
  11393. VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
  11394. VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
  11395. }
  11396. #endif // #if VMA_BIND_MEMORY2
  11397. #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
  11398. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
  11399. {
  11400. VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2");
  11401. }
  11402. else if(m_UseExtMemoryBudget)
  11403. {
  11404. VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
  11405. }
  11406. #endif // #if VMA_MEMORY_BUDGET
  11407. #if VMA_VULKAN_VERSION >= 1003000
  11408. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
  11409. {
  11410. VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements");
  11411. VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements");
  11412. }
  11413. #endif
  11414. #if VMA_KHR_MAINTENANCE4
  11415. if(m_UseKhrMaintenance4)
  11416. {
  11417. VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirementsKHR, "vkGetDeviceBufferMemoryRequirementsKHR");
  11418. VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirementsKHR, "vkGetDeviceImageMemoryRequirementsKHR");
  11419. }
  11420. #endif
  11421. #undef VMA_FETCH_DEVICE_FUNC
  11422. #undef VMA_FETCH_INSTANCE_FUNC
  11423. }
  11424. #endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
  11425. void VmaAllocator_T::ValidateVulkanFunctions()
  11426. {
  11427. VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
  11428. VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
  11429. VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
  11430. VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
  11431. VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
  11432. VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
  11433. VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
  11434. VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
  11435. VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
  11436. VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
  11437. VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
  11438. VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
  11439. VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
  11440. VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
  11441. VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
  11442. VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
  11443. VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
  11444. #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
  11445. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
  11446. {
  11447. VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
  11448. VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
  11449. }
  11450. #endif
  11451. #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
  11452. if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
  11453. {
  11454. VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
  11455. VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
  11456. }
  11457. #endif
  11458. #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
  11459. if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
  11460. {
  11461. VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
  11462. }
  11463. #endif
  11464. // Not validating these due to suspected driver bugs with these function
  11465. // pointers being null despite correct extension or Vulkan version is enabled.
  11466. // See issue #397. Their usage in VMA is optional anyway.
  11467. //
  11468. // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL);
  11469. // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL);
  11470. }
  11471. VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
  11472. {
  11473. const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
  11474. const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
  11475. const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
  11476. return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
  11477. }
// Allocates allocationCount allocations from one specific memory type.
// Strategy:
// - VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT forces a dedicated allocation.
// - Otherwise a dedicated allocation may be preferred heuristically (large
//   request, or caller passed dedicatedPreferred=true), tried first, and the
//   block vector used as fallback — or vice versa when blocks are preferred.
// Returns VK_SUCCESS, or the last error when every path failed.
VkResult VmaAllocator_T::AllocateMemoryOfType(
VmaPool pool,
VkDeviceSize size,
VkDeviceSize alignment,
bool dedicatedPreferred,
VkBuffer dedicatedBuffer,
VkImage dedicatedImage,
VmaBufferImageUsage dedicatedBufferImageUsage,
const VmaAllocationCreateInfo& createInfo,
uint32_t memTypeIndex,
VmaSuballocationType suballocType,
VmaDedicatedAllocationList& dedicatedAllocations,
VmaBlockVector& blockVector,
size_t allocationCount,
VmaAllocation* pAllocations)
{
VMA_ASSERT(pAllocations != VMA_NULL);
VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%" PRIu32 ", AllocationCount=%zu, Size=%" PRIu64, memTypeIndex, allocationCount, size);
// Work on a copy: CalcMemTypeParams may adjust the flags for this memory type.
VmaAllocationCreateInfo finalCreateInfo = createInfo;
VkResult res = CalcMemTypeParams(
finalCreateInfo,
memTypeIndex,
size,
allocationCount);
if(res != VK_SUCCESS)
return res;
if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
{
// Caller explicitly requested a dedicated allocation — no block-vector path.
return AllocateDedicatedMemory(
pool,
size,
suballocType,
dedicatedAllocations,
memTypeIndex,
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
(finalCreateInfo.flags &
(VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
finalCreateInfo.pUserData,
finalCreateInfo.priority,
dedicatedBuffer,
dedicatedImage,
dedicatedBufferImageUsage,
allocationCount,
pAllocations,
blockVector.GetAllocationNextPtr());
}
else
{
// Dedicated memory is allowed unless NEVER_ALLOCATE is set, and is skipped
// for custom pools with an explicit block size.
const bool canAllocateDedicated =
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
(pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());
if(canAllocateDedicated)
{
// Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
if(size > blockVector.GetPreferredBlockSize() / 2)
{
dedicatedPreferred = true;
}
// Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
// which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above
// 3/4 of the maximum allocation count.
if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 &&
m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
{
dedicatedPreferred = false;
}
if(dedicatedPreferred)
{
// Try dedicated first; on failure fall through to the block vector below.
res = AllocateDedicatedMemory(
pool,
size,
suballocType,
dedicatedAllocations,
memTypeIndex,
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
(finalCreateInfo.flags &
(VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
finalCreateInfo.pUserData,
finalCreateInfo.priority,
dedicatedBuffer,
dedicatedImage,
dedicatedBufferImageUsage,
allocationCount,
pAllocations,
blockVector.GetAllocationNextPtr());
if(res == VK_SUCCESS)
{
// Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
return VK_SUCCESS;
}
}
}
// Sub-allocate out of the block vector.
res = blockVector.Allocate(
size,
alignment,
finalCreateInfo,
suballocType,
allocationCount,
pAllocations);
if(res == VK_SUCCESS)
return VK_SUCCESS;
// Try dedicated memory.
if(canAllocateDedicated && !dedicatedPreferred)
{
res = AllocateDedicatedMemory(
pool,
size,
suballocType,
dedicatedAllocations,
memTypeIndex,
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
(finalCreateInfo.flags &
(VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
finalCreateInfo.pUserData,
finalCreateInfo.priority,
dedicatedBuffer,
dedicatedImage,
dedicatedBufferImageUsage,
allocationCount,
pAllocations,
blockVector.GetAllocationNextPtr());
if(res == VK_SUCCESS)
{
// Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
return VK_SUCCESS;
}
}
// Everything failed: Return error code.
VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
return res;
}
}
// Makes allocationCount dedicated allocations (each backed by its own
// VkDeviceMemory) from the given memory type. Builds the VkMemoryAllocateInfo
// pNext chain from the enabled features, then allocates page by page.
// On any failure, all pages created so far are freed and pAllocations is zeroed.
// Note: the pNext-chain structs below are stack locals; they only need to stay
// alive for the AllocateDedicatedMemoryPage calls made within this function.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
VmaPool pool,
VkDeviceSize size,
VmaSuballocationType suballocType,
VmaDedicatedAllocationList& dedicatedAllocations,
uint32_t memTypeIndex,
bool map,
bool isUserDataString,
bool isMappingAllowed,
bool canAliasMemory,
void* pUserData,
float priority,
VkBuffer dedicatedBuffer,
VkImage dedicatedImage,
VmaBufferImageUsage dedicatedBufferImageUsage,
size_t allocationCount,
VmaAllocation* pAllocations,
const void* pNextChain)
{
VMA_ASSERT(allocationCount > 0 && pAllocations);
VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
allocInfo.memoryTypeIndex = memTypeIndex;
allocInfo.allocationSize = size;
allocInfo.pNext = pNextChain;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
// Attach VkMemoryDedicatedAllocateInfoKHR tying the memory to the buffer or
// image — skipped when the memory may alias multiple resources.
VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
if(!canAliasMemory)
{
if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
{
if(dedicatedBuffer != VK_NULL_HANDLE)
{
// A dedicated allocation targets either a buffer or an image, never both.
VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
dedicatedAllocInfo.buffer = dedicatedBuffer;
VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
}
else if(dedicatedImage != VK_NULL_HANDLE)
{
dedicatedAllocInfo.image = dedicatedImage;
VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
}
}
}
#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
#if VMA_BUFFER_DEVICE_ADDRESS
// Attach VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR when the memory could back
// a buffer using VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT (unknown usage
// counts as "could"); never for image-only memory.
VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
if(m_UseKhrBufferDeviceAddress)
{
bool canContainBufferWithDeviceAddress = true;
if(dedicatedBuffer != VK_NULL_HANDLE)
{
canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == VmaBufferImageUsage::UNKNOWN ||
dedicatedBufferImageUsage.Contains(VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT);
}
else if(dedicatedImage != VK_NULL_HANDLE)
{
canContainBufferWithDeviceAddress = false;
}
if(canContainBufferWithDeviceAddress)
{
allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
}
}
#endif // #if VMA_BUFFER_DEVICE_ADDRESS
#if VMA_MEMORY_PRIORITY
// Attach VkMemoryPriorityAllocateInfoEXT when VK_EXT_memory_priority is in use.
VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
if(m_UseExtMemoryPriority)
{
VMA_ASSERT(priority >= 0.f && priority <= 1.f);
priorityInfo.priority = priority;
VmaPnextChainPushFront(&allocInfo, &priorityInfo);
}
#endif // #if VMA_MEMORY_PRIORITY
#if VMA_EXTERNAL_MEMORY
// Attach VkExportMemoryAllocateInfoKHR if necessary.
VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
if(exportMemoryAllocInfo.handleTypes != 0)
{
VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
}
#endif // #if VMA_EXTERNAL_MEMORY
// Allocate the pages one by one, stopping at the first failure.
size_t allocIndex;
VkResult res = VK_SUCCESS;
for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
{
res = AllocateDedicatedMemoryPage(
pool,
size,
suballocType,
memTypeIndex,
allocInfo,
map,
isUserDataString,
isMappingAllowed,
pUserData,
pAllocations + allocIndex);
if(res != VK_SUCCESS)
{
break;
}
}
if(res == VK_SUCCESS)
{
// Register all new allocations in the dedicated-allocation list.
for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
{
dedicatedAllocations.Register(pAllocations[allocIndex]);
}
VMA_DEBUG_LOG_FORMAT("    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%" PRIu32, allocationCount, memTypeIndex);
}
else
{
// Free all already created allocations.
while(allocIndex--)
{
VmaAllocation currAlloc = pAllocations[allocIndex];
VkDeviceMemory hMemory = currAlloc->GetMemory();
/*
There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
before vkFreeMemory.
if(currAlloc->GetMappedData() != VMA_NULL)
{
(*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
}
*/
FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
m_AllocationObjectAllocator.Free(currAlloc);
}
// Leave no dangling handles in the caller's array.
memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
}
return res;
}
  11752. VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
  11753. VmaPool pool,
  11754. VkDeviceSize size,
  11755. VmaSuballocationType suballocType,
  11756. uint32_t memTypeIndex,
  11757. const VkMemoryAllocateInfo& allocInfo,
  11758. bool map,
  11759. bool isUserDataString,
  11760. bool isMappingAllowed,
  11761. void* pUserData,
  11762. VmaAllocation* pAllocation)
  11763. {
  11764. VkDeviceMemory hMemory = VK_NULL_HANDLE;
  11765. VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
  11766. if(res < 0)
  11767. {
  11768. VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
  11769. return res;
  11770. }
  11771. void* pMappedData = VMA_NULL;
  11772. if(map)
  11773. {
  11774. res = (*m_VulkanFunctions.vkMapMemory)(
  11775. m_hDevice,
  11776. hMemory,
  11777. 0,
  11778. VK_WHOLE_SIZE,
  11779. 0,
  11780. &pMappedData);
  11781. if(res < 0)
  11782. {
  11783. VMA_DEBUG_LOG(" vkMapMemory FAILED");
  11784. FreeVulkanMemory(memTypeIndex, size, hMemory);
  11785. return res;
  11786. }
  11787. }
  11788. *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed);
  11789. (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size);
  11790. if (isUserDataString)
  11791. (*pAllocation)->SetName(this, (const char*)pUserData);
  11792. else
  11793. (*pAllocation)->SetUserData(this, pUserData);
  11794. m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
  11795. if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
  11796. {
  11797. FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
  11798. }
  11799. return VK_SUCCESS;
  11800. }
  11801. void VmaAllocator_T::GetBufferMemoryRequirements(
  11802. VkBuffer hBuffer,
  11803. VkMemoryRequirements& memReq,
  11804. bool& requiresDedicatedAllocation,
  11805. bool& prefersDedicatedAllocation) const
  11806. {
  11807. #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
  11808. if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
  11809. {
  11810. VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
  11811. memReqInfo.buffer = hBuffer;
  11812. VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
  11813. VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
  11814. VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
  11815. (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
  11816. memReq = memReq2.memoryRequirements;
  11817. requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
  11818. prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
  11819. }
  11820. else
  11821. #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
  11822. {
  11823. (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
  11824. requiresDedicatedAllocation = false;
  11825. prefersDedicatedAllocation = false;
  11826. }
  11827. }
  11828. void VmaAllocator_T::GetImageMemoryRequirements(
  11829. VkImage hImage,
  11830. VkMemoryRequirements& memReq,
  11831. bool& requiresDedicatedAllocation,
  11832. bool& prefersDedicatedAllocation) const
  11833. {
  11834. #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
  11835. if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
  11836. {
  11837. VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
  11838. memReqInfo.image = hImage;
  11839. VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
  11840. VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
  11841. VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
  11842. (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
  11843. memReq = memReq2.memoryRequirements;
  11844. requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
  11845. prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
  11846. }
  11847. else
  11848. #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
  11849. {
  11850. (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
  11851. requiresDedicatedAllocation = false;
  11852. prefersDedicatedAllocation = false;
  11853. }
  11854. }
  11855. VkResult VmaAllocator_T::FindMemoryTypeIndex(
  11856. uint32_t memoryTypeBits,
  11857. const VmaAllocationCreateInfo* pAllocationCreateInfo,
  11858. VmaBufferImageUsage bufImgUsage,
  11859. uint32_t* pMemoryTypeIndex) const
  11860. {
  11861. memoryTypeBits &= GetGlobalMemoryTypeBits();
  11862. if(pAllocationCreateInfo->memoryTypeBits != 0)
  11863. {
  11864. memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
  11865. }
  11866. VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0;
  11867. if(!FindMemoryPreferences(
  11868. IsIntegratedGpu(),
  11869. *pAllocationCreateInfo,
  11870. bufImgUsage,
  11871. requiredFlags, preferredFlags, notPreferredFlags))
  11872. {
  11873. return VK_ERROR_FEATURE_NOT_PRESENT;
  11874. }
  11875. *pMemoryTypeIndex = UINT32_MAX;
  11876. uint32_t minCost = UINT32_MAX;
  11877. for(uint32_t memTypeIndex = 0, memTypeBit = 1;
  11878. memTypeIndex < GetMemoryTypeCount();
  11879. ++memTypeIndex, memTypeBit <<= 1)
  11880. {
  11881. // This memory type is acceptable according to memoryTypeBits bitmask.
  11882. if((memTypeBit & memoryTypeBits) != 0)
  11883. {
  11884. const VkMemoryPropertyFlags currFlags =
  11885. m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
  11886. // This memory type contains requiredFlags.
  11887. if((requiredFlags & ~currFlags) == 0)
  11888. {
  11889. // Calculate cost as number of bits from preferredFlags not present in this memory type.
  11890. uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) +
  11891. VMA_COUNT_BITS_SET(currFlags & notPreferredFlags);
  11892. // Remember memory type with lowest cost.
  11893. if(currCost < minCost)
  11894. {
  11895. *pMemoryTypeIndex = memTypeIndex;
  11896. if(currCost == 0)
  11897. {
  11898. return VK_SUCCESS;
  11899. }
  11900. minCost = currCost;
  11901. }
  11902. }
  11903. }
  11904. }
  11905. return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
  11906. }
  11907. VkResult VmaAllocator_T::CalcMemTypeParams(
  11908. VmaAllocationCreateInfo& inoutCreateInfo,
  11909. uint32_t memTypeIndex,
  11910. VkDeviceSize size,
  11911. size_t allocationCount)
  11912. {
  11913. // If memory type is not HOST_VISIBLE, disable MAPPED.
  11914. if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
  11915. (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
  11916. {
  11917. inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
  11918. }
  11919. if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
  11920. (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0)
  11921. {
  11922. const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
  11923. VmaBudget heapBudget = {};
  11924. GetHeapBudgets(&heapBudget, heapIndex, 1);
  11925. if(heapBudget.usage + size * allocationCount > heapBudget.budget)
  11926. {
  11927. return VK_ERROR_OUT_OF_DEVICE_MEMORY;
  11928. }
  11929. }
  11930. return VK_SUCCESS;
  11931. }
  11932. VkResult VmaAllocator_T::CalcAllocationParams(
  11933. VmaAllocationCreateInfo& inoutCreateInfo,
  11934. bool dedicatedRequired,
  11935. bool dedicatedPreferred)
  11936. {
  11937. VMA_ASSERT((inoutCreateInfo.flags &
  11938. (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) !=
  11939. (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) &&
  11940. "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect.");
  11941. VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 ||
  11942. (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) &&
  11943. "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
  11944. if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
  11945. {
  11946. if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0)
  11947. {
  11948. VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 &&
  11949. "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
  11950. }
  11951. }
  11952. // If memory is lazily allocated, it should be always dedicated.
  11953. if(dedicatedRequired ||
  11954. inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
  11955. {
  11956. inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
  11957. }
  11958. if(inoutCreateInfo.pool != VK_NULL_HANDLE)
  11959. {
  11960. if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() &&
  11961. (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
  11962. {
  11963. VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.");
  11964. return VK_ERROR_FEATURE_NOT_PRESENT;
  11965. }
  11966. inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority();
  11967. }
  11968. if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
  11969. (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
  11970. {
  11971. VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
  11972. return VK_ERROR_FEATURE_NOT_PRESENT;
  11973. }
  11974. if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY &&
  11975. (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
  11976. {
  11977. inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
  11978. }
  11979. // Non-auto USAGE values imply HOST_ACCESS flags.
  11980. // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools.
  11981. // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*.
  11982. // Otherwise they just protect from assert on mapping.
  11983. if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO &&
  11984. inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE &&
  11985. inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
  11986. {
  11987. if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0)
  11988. {
  11989. inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
  11990. }
  11991. }
  11992. return VK_SUCCESS;
  11993. }
// Central allocation entry point. Normalizes createInfo, then either delegates
// to the custom pool's block vector, or iterates over compatible memory types
// (best candidate first) until an allocation attempt succeeds.
// On failure, all elements of pAllocations are left as VK_NULL_HANDLE.
VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaBufferImageUsage dedicatedBufferImageUsage,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    // Pre-fill output with null handles so the caller sees a defined state on failure.
    memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);

    VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));

    if(vkMemReq.size == 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Work on a local copy - CalcAllocationParams may adjust flags and priority.
    VmaAllocationCreateInfo createInfoFinal = createInfo;
    VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation);
    if(res != VK_SUCCESS)
        return res;

    if(createInfoFinal.pool != VK_NULL_HANDLE)
    {
        // Custom pool: its block vector fixes the memory type; no retry loop.
        VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector;
        return AllocateMemoryOfType(
            createInfoFinal.pool,
            vkMemReq.size,
            vkMemReq.alignment,
            prefersDedicatedAllocation,
            dedicatedBuffer,
            dedicatedImage,
            dedicatedBufferImageUsage,
            createInfoFinal,
            blockVector.GetMemoryTypeIndex(),
            suballocType,
            createInfoFinal.pool->m_DedicatedAllocations,
            blockVector,
            allocationCount,
            pAllocations);
    }
    else
    {
        // Bit mask of memory Vulkan types acceptable for this allocation.
        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
        uint32_t memTypeIndex = UINT32_MAX;
        res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
        // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
        if(res != VK_SUCCESS)
            return res;
        do
        {
            // Attempt allocation from the best remaining memory type.
            VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
            VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
            res = AllocateMemoryOfType(
                VK_NULL_HANDLE,
                vkMemReq.size,
                vkMemReq.alignment,
                requiresDedicatedAllocation || prefersDedicatedAllocation,
                dedicatedBuffer,
                dedicatedImage,
                dedicatedBufferImageUsage,
                createInfoFinal,
                memTypeIndex,
                suballocType,
                m_DedicatedAllocations[memTypeIndex],
                *blockVector,
                allocationCount,
                pAllocations);
            // Allocation succeeded
            if(res == VK_SUCCESS)
                return VK_SUCCESS;

            // Remove old memTypeIndex from list of possibilities.
            memoryTypeBits &= ~(1u << memTypeIndex);
            // Find alternative memTypeIndex.
            res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
        } while(res == VK_SUCCESS);

        // No other matching memory type index could be found.
        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
}
  12076. void VmaAllocator_T::FreeMemory(
  12077. size_t allocationCount,
  12078. const VmaAllocation* pAllocations)
  12079. {
  12080. VMA_ASSERT(pAllocations);
  12081. for(size_t allocIndex = allocationCount; allocIndex--; )
  12082. {
  12083. VmaAllocation allocation = pAllocations[allocIndex];
  12084. if(allocation != VK_NULL_HANDLE)
  12085. {
  12086. if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
  12087. {
  12088. FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
  12089. }
  12090. allocation->FreeName(this);
  12091. switch(allocation->GetType())
  12092. {
  12093. case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
  12094. {
  12095. VmaBlockVector* pBlockVector = VMA_NULL;
  12096. VmaPool hPool = allocation->GetParentPool();
  12097. if(hPool != VK_NULL_HANDLE)
  12098. {
  12099. pBlockVector = &hPool->m_BlockVector;
  12100. }
  12101. else
  12102. {
  12103. const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
  12104. pBlockVector = m_pBlockVectors[memTypeIndex];
  12105. VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
  12106. }
  12107. pBlockVector->Free(allocation);
  12108. }
  12109. break;
  12110. case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
  12111. FreeDedicatedMemory(allocation);
  12112. break;
  12113. default:
  12114. VMA_ASSERT(0);
  12115. }
  12116. }
  12117. }
  12118. }
  12119. void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)
  12120. {
  12121. // Initialize.
  12122. VmaClearDetailedStatistics(pStats->total);
  12123. for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
  12124. VmaClearDetailedStatistics(pStats->memoryType[i]);
  12125. for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
  12126. VmaClearDetailedStatistics(pStats->memoryHeap[i]);
  12127. // Process default pools.
  12128. for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
  12129. {
  12130. VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
  12131. if (pBlockVector != VMA_NULL)
  12132. pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
  12133. }
  12134. // Process custom pools.
  12135. {
  12136. VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
  12137. for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
  12138. {
  12139. VmaBlockVector& blockVector = pool->m_BlockVector;
  12140. const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
  12141. blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
  12142. pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
  12143. }
  12144. }
  12145. // Process dedicated allocations.
  12146. for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
  12147. {
  12148. m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
  12149. }
  12150. // Sum from memory types to memory heaps.
  12151. for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
  12152. {
  12153. const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
  12154. VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]);
  12155. }
  12156. // Sum from memory heaps to total.
  12157. for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex)
  12158. VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]);
  12159. VMA_ASSERT(pStats->total.statistics.allocationCount == 0 ||
  12160. pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin);
  12161. VMA_ASSERT(pStats->total.unusedRangeCount == 0 ||
  12162. pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin);
  12163. }
// Fills outBudgets[0..heapCount) with statistics and budget estimates for
// heaps [firstHeap, firstHeap + heapCount).
// With VK_EXT_memory_budget: combines driver-reported numbers, cached and
// refreshed after every 30 allocation/free operations, with block bytes
// allocated since the last fetch. Without it: usage is the tracked block
// bytes and budget falls back to 80% of the heap size.
void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount)
{
#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        // Cached driver numbers are considered fresh for up to 30 operations.
        if(m_Budget.m_OperationsSinceBudgetFetch < 30)
        {
            VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
            for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
            {
                const uint32_t heapIndex = firstHeap + i;

                outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
                outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
                outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
                outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

                // Estimate current usage as the driver-reported usage at fetch
                // time plus bytes this allocator allocated since then; guard
                // against underflow if blocks were freed in the meantime.
                if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
                {
                    outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] +
                        outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
                }
                else
                {
                    outBudgets->usage = 0;
                }

                // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
                outBudgets->budget = VMA_MIN(
                    m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
            }
        }
        else
        {
            UpdateVulkanBudget(); // Outside of mutex lock
            GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion
        }
    }
    else
#endif
    {
        // Extension not in use: report only allocator-tracked numbers.
        for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
        {
            const uint32_t heapIndex = firstHeap + i;

            outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
            outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
            outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
            outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

            outBudgets->usage = outBudgets->statistics.blockBytes;
            outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
        }
    }
}
// Copies the allocation's current properties into *pAllocationInfo:
// memory location, placement, persistent mapping and user-attached data.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    pAllocationInfo->offset = hAllocation->GetOffset();
    pAllocationInfo->size = hAllocation->GetSize();
    pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    pAllocationInfo->pUserData = hAllocation->GetUserData();
    pAllocationInfo->pName = hAllocation->GetName();
}
  12224. void VmaAllocator_T::GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo)
  12225. {
  12226. GetAllocationInfo(hAllocation, &pAllocationInfo->allocationInfo);
  12227. switch (hAllocation->GetType())
  12228. {
  12229. case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
  12230. pAllocationInfo->blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
  12231. pAllocationInfo->dedicatedMemory = VK_FALSE;
  12232. break;
  12233. case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
  12234. pAllocationInfo->blockSize = pAllocationInfo->allocationInfo.size;
  12235. pAllocationInfo->dedicatedMemory = VK_TRUE;
  12236. break;
  12237. default:
  12238. VMA_ASSERT(0);
  12239. }
  12240. }
// Creates a custom pool: validates and normalizes the create info, allocates
// the VmaPool_T object, pre-creates its minimum block count, and registers it
// in m_Pools under the write lock.
// Returns VK_ERROR_INITIALIZATION_FAILED / VK_ERROR_FEATURE_NOT_PRESENT on
// invalid parameters, or the error from creating the minimum blocks.
VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
{
    VMA_DEBUG_LOG_FORMAT(" CreatePool: MemoryTypeIndex=%" PRIu32 ", flags=%" PRIu32, pCreateInfo->memoryTypeIndex, pCreateInfo->flags);

    VmaPoolCreateInfo newCreateInfo = *pCreateInfo;

    // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash.
    if(pCreateInfo->pMemoryAllocateNext)
    {
        VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
    }

    // maxBlockCount == 0 means "no explicit limit".
    if(newCreateInfo.maxBlockCount == 0)
    {
        newCreateInfo.maxBlockCount = SIZE_MAX;
    }
    if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    // Memory type index out of range or forbidden.
    if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
        ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }
    if(newCreateInfo.minAllocationAlignment > 0)
    {
        VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
    }

    const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);

    *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);

    // Pre-create minBlockCount blocks; destroy the pool again if that fails.
    VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    if(res != VK_SUCCESS)
    {
        vma_delete(this, *pPool);
        *pPool = VMA_NULL;
        return res;
    }

    // Add to m_Pools.
    {
        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
        (*pPool)->SetId(m_NextPoolId++);
        m_Pools.PushBack(*pPool);
    }

    return VK_SUCCESS;
}
// Unregisters the custom pool from the allocator's pool list and destroys it.
void VmaAllocator_T::DestroyPool(VmaPool pool)
{
    // Remove from m_Pools.
    {
        // The write lock is held only while unlinking from the list;
        // destruction happens outside the lock.
        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
        m_Pools.Remove(pool);
    }

    vma_delete(this, pool);
}
// Fills *pPoolStats with basic (non-detailed) statistics of the custom pool:
// its block vector plus its dedicated allocations.
void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats)
{
    VmaClearStatistics(*pPoolStats);
    pool->m_BlockVector.AddStatistics(*pPoolStats);
    pool->m_DedicatedAllocations.AddStatistics(*pPoolStats);
}
// Fills *pPoolStats with detailed statistics of the custom pool:
// its block vector plus its dedicated allocations.
void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats)
{
    VmaClearDetailedStatistics(*pPoolStats);
    pool->m_BlockVector.AddDetailedStatistics(*pPoolStats);
    pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats);
}
  12306. void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
  12307. {
  12308. m_CurrentFrameIndex.store(frameIndex);
  12309. #if VMA_MEMORY_BUDGET
  12310. if(m_UseExtMemoryBudget)
  12311. {
  12312. UpdateVulkanBudget();
  12313. }
  12314. #endif // #if VMA_MEMORY_BUDGET
  12315. }
// Runs corruption detection on all blocks of the given custom pool by
// delegating to its block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
  12320. VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
  12321. {
  12322. VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
  12323. // Process default pools.
  12324. for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
  12325. {
  12326. VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
  12327. if(pBlockVector != VMA_NULL)
  12328. {
  12329. VkResult localRes = pBlockVector->CheckCorruption();
  12330. switch(localRes)
  12331. {
  12332. case VK_ERROR_FEATURE_NOT_PRESENT:
  12333. break;
  12334. case VK_SUCCESS:
  12335. finalRes = VK_SUCCESS;
  12336. break;
  12337. default:
  12338. return localRes;
  12339. }
  12340. }
  12341. }
  12342. // Process custom pools.
  12343. {
  12344. VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
  12345. for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
  12346. {
  12347. if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
  12348. {
  12349. VkResult localRes = pool->m_BlockVector.CheckCorruption();
  12350. switch(localRes)
  12351. {
  12352. case VK_ERROR_FEATURE_NOT_PRESENT:
  12353. break;
  12354. case VK_SUCCESS:
  12355. finalRes = VK_SUCCESS;
  12356. break;
  12357. default:
  12358. return localRes;
  12359. }
  12360. }
  12361. }
  12362. }
  12363. return finalRes;
  12364. }
// Wraps vkAllocateMemory with the allocator's bookkeeping:
// - optionally enforces maxMemoryAllocationCount (debug feature),
// - enforces a user-configured HeapSizeLimit via a lock-free
//   compare-exchange reservation,
// - maintains per-heap block counters and invokes the informative
//   allocate callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    // Tentatively count this allocation; it is committed only on success and
    // rolled back automatically otherwise.
    AtomicTransactionalIncrement<VMA_ATOMIC_UINT32> deviceMemoryCountIncrement;
    const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
    if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
    {
        return VK_ERROR_TOO_MANY_OBJECTS;
    }
#endif

    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    // HeapSizeLimit is in effect for this heap.
    if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
    {
        const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
        VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
        for(;;)
        {
            // Reserve the bytes atomically; retry when another thread
            // modified the counter between the read and the exchange.
            const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
            if(blockBytesAfterAllocation > heapSize)
            {
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
            if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
            {
                break;
            }
        }
    }
    else
    {
        // No explicit limit - plain atomic add is sufficient.
        m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
    }
    ++m_Budget.m_BlockCount[heapIndex];

    // VULKAN CALL vkAllocateMemory.
    VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);

    if(res == VK_SUCCESS)
    {
#if VMA_MEMORY_BUDGET
        ++m_Budget.m_OperationsSinceBudgetFetch;
#endif

        // Informative callback.
        if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
        {
            (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
        }

        deviceMemoryCountIncrement.Commit();
    }
    else
    {
        // Allocation failed: roll back the budget bookkeeping done above.
        --m_Budget.m_BlockCount[heapIndex];
        m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
    }

    return res;
}
  12420. void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
  12421. {
  12422. // Informative callback.
  12423. if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
  12424. {
  12425. (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
  12426. }
  12427. // VULKAN CALL vkFreeMemory.
  12428. (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
  12429. const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
  12430. --m_Budget.m_BlockCount[heapIndex];
  12431. m_Budget.m_BlockBytes[heapIndex] -= size;
  12432. --m_DeviceMemoryCount;
  12433. }
  12434. VkResult VmaAllocator_T::BindVulkanBuffer(
  12435. VkDeviceMemory memory,
  12436. VkDeviceSize memoryOffset,
  12437. VkBuffer buffer,
  12438. const void* pNext)
  12439. {
  12440. if(pNext != VMA_NULL)
  12441. {
  12442. #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
  12443. if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
  12444. m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
  12445. {
  12446. VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
  12447. bindBufferMemoryInfo.pNext = pNext;
  12448. bindBufferMemoryInfo.buffer = buffer;
  12449. bindBufferMemoryInfo.memory = memory;
  12450. bindBufferMemoryInfo.memoryOffset = memoryOffset;
  12451. return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
  12452. }
  12453. else
  12454. #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
  12455. {
  12456. return VK_ERROR_EXTENSION_NOT_PRESENT;
  12457. }
  12458. }
  12459. else
  12460. {
  12461. return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
  12462. }
  12463. }
  12464. VkResult VmaAllocator_T::BindVulkanImage(
  12465. VkDeviceMemory memory,
  12466. VkDeviceSize memoryOffset,
  12467. VkImage image,
  12468. const void* pNext)
  12469. {
  12470. if(pNext != VMA_NULL)
  12471. {
  12472. #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
  12473. if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
  12474. m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
  12475. {
  12476. VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
  12477. bindBufferMemoryInfo.pNext = pNext;
  12478. bindBufferMemoryInfo.image = image;
  12479. bindBufferMemoryInfo.memory = memory;
  12480. bindBufferMemoryInfo.memoryOffset = memoryOffset;
  12481. return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
  12482. }
  12483. else
  12484. #endif // #if VMA_BIND_MEMORY2
  12485. {
  12486. return VK_ERROR_EXTENSION_NOT_PRESENT;
  12487. }
  12488. }
  12489. else
  12490. {
  12491. return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
  12492. }
  12493. }
  12494. VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
  12495. {
  12496. switch(hAllocation->GetType())
  12497. {
  12498. case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
  12499. {
  12500. VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
  12501. char *pBytes = VMA_NULL;
  12502. VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
  12503. if(res == VK_SUCCESS)
  12504. {
  12505. *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
  12506. hAllocation->BlockAllocMap();
  12507. }
  12508. return res;
  12509. }
  12510. VMA_FALLTHROUGH; // Fallthrough
  12511. case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
  12512. return hAllocation->DedicatedAllocMap(this, ppData);
  12513. default:
  12514. VMA_ASSERT(0);
  12515. return VK_ERROR_MEMORY_MAP_FAILED;
  12516. }
  12517. }
  12518. void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
  12519. {
  12520. switch(hAllocation->GetType())
  12521. {
  12522. case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
  12523. {
  12524. VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
  12525. hAllocation->BlockAllocUnmap();
  12526. pBlock->Unmap(this, 1);
  12527. }
  12528. break;
  12529. case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
  12530. hAllocation->DedicatedAllocUnmap(this);
  12531. break;
  12532. default:
  12533. VMA_ASSERT(0);
  12534. }
  12535. }
  12536. VkResult VmaAllocator_T::BindBufferMemory(
  12537. VmaAllocation hAllocation,
  12538. VkDeviceSize allocationLocalOffset,
  12539. VkBuffer hBuffer,
  12540. const void* pNext)
  12541. {
  12542. VkResult res = VK_ERROR_UNKNOWN_COPY;
  12543. switch(hAllocation->GetType())
  12544. {
  12545. case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
  12546. res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
  12547. break;
  12548. case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
  12549. {
  12550. VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
  12551. VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
  12552. res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
  12553. break;
  12554. }
  12555. default:
  12556. VMA_ASSERT(0);
  12557. }
  12558. return res;
  12559. }
  12560. VkResult VmaAllocator_T::BindImageMemory(
  12561. VmaAllocation hAllocation,
  12562. VkDeviceSize allocationLocalOffset,
  12563. VkImage hImage,
  12564. const void* pNext)
  12565. {
  12566. VkResult res = VK_ERROR_UNKNOWN_COPY;
  12567. switch(hAllocation->GetType())
  12568. {
  12569. case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
  12570. res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
  12571. break;
  12572. case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
  12573. {
  12574. VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
  12575. VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
  12576. res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
  12577. break;
  12578. }
  12579. default:
  12580. VMA_ASSERT(0);
  12581. }
  12582. return res;
  12583. }
  12584. VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
  12585. VmaAllocation hAllocation,
  12586. VkDeviceSize offset, VkDeviceSize size,
  12587. VMA_CACHE_OPERATION op)
  12588. {
  12589. VkResult res = VK_SUCCESS;
  12590. VkMappedMemoryRange memRange = {};
  12591. if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
  12592. {
  12593. switch(op)
  12594. {
  12595. case VMA_CACHE_FLUSH:
  12596. res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
  12597. break;
  12598. case VMA_CACHE_INVALIDATE:
  12599. res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
  12600. break;
  12601. default:
  12602. VMA_ASSERT(0);
  12603. }
  12604. }
  12605. // else: Just ignore this call.
  12606. return res;
  12607. }
  12608. VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
  12609. uint32_t allocationCount,
  12610. const VmaAllocation* allocations,
  12611. const VkDeviceSize* offsets, const VkDeviceSize* sizes,
  12612. VMA_CACHE_OPERATION op)
  12613. {
  12614. typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
  12615. typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
  12616. RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
  12617. for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
  12618. {
  12619. const VmaAllocation alloc = allocations[allocIndex];
  12620. const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
  12621. const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
  12622. VkMappedMemoryRange newRange;
  12623. if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
  12624. {
  12625. ranges.push_back(newRange);
  12626. }
  12627. }
  12628. VkResult res = VK_SUCCESS;
  12629. if(!ranges.empty())
  12630. {
  12631. switch(op)
  12632. {
  12633. case VMA_CACHE_FLUSH:
  12634. res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
  12635. break;
  12636. case VMA_CACHE_INVALIDATE:
  12637. res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
  12638. break;
  12639. default:
  12640. VMA_ASSERT(0);
  12641. }
  12642. }
  12643. // else: Just ignore this call.
  12644. return res;
  12645. }
  12646. VkResult VmaAllocator_T::CopyMemoryToAllocation(
  12647. const void* pSrcHostPointer,
  12648. VmaAllocation dstAllocation,
  12649. VkDeviceSize dstAllocationLocalOffset,
  12650. VkDeviceSize size)
  12651. {
  12652. void* dstMappedData = VMA_NULL;
  12653. VkResult res = Map(dstAllocation, &dstMappedData);
  12654. if(res == VK_SUCCESS)
  12655. {
  12656. memcpy((char*)dstMappedData + dstAllocationLocalOffset, pSrcHostPointer, (size_t)size);
  12657. Unmap(dstAllocation);
  12658. res = FlushOrInvalidateAllocation(dstAllocation, dstAllocationLocalOffset, size, VMA_CACHE_FLUSH);
  12659. }
  12660. return res;
  12661. }
  12662. VkResult VmaAllocator_T::CopyAllocationToMemory(
  12663. VmaAllocation srcAllocation,
  12664. VkDeviceSize srcAllocationLocalOffset,
  12665. void* pDstHostPointer,
  12666. VkDeviceSize size)
  12667. {
  12668. void* srcMappedData = VMA_NULL;
  12669. VkResult res = Map(srcAllocation, &srcMappedData);
  12670. if(res == VK_SUCCESS)
  12671. {
  12672. res = FlushOrInvalidateAllocation(srcAllocation, srcAllocationLocalOffset, size, VMA_CACHE_INVALIDATE);
  12673. if(res == VK_SUCCESS)
  12674. {
  12675. memcpy(pDstHostPointer, (const char*)srcMappedData + srcAllocationLocalOffset, (size_t)size);
  12676. Unmap(srcAllocation);
  12677. }
  12678. }
  12679. return res;
  12680. }
  12681. void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
  12682. {
  12683. VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
  12684. const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
  12685. VmaPool parentPool = allocation->GetParentPool();
  12686. if(parentPool == VK_NULL_HANDLE)
  12687. {
  12688. // Default pool
  12689. m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
  12690. }
  12691. else
  12692. {
  12693. // Custom pool
  12694. parentPool->m_DedicatedAllocations.Unregister(allocation);
  12695. }
  12696. VkDeviceMemory hMemory = allocation->GetMemory();
  12697. /*
  12698. There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
  12699. before vkFreeMemory.
  12700. if(allocation->GetMappedData() != VMA_NULL)
  12701. {
  12702. (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
  12703. }
  12704. */
  12705. FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
  12706. m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
  12707. m_AllocationObjectAllocator.Free(allocation);
  12708. VMA_DEBUG_LOG_FORMAT(" Freed DedicatedMemory MemoryTypeIndex=%" PRIu32, memTypeIndex);
  12709. }
  12710. uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
  12711. {
  12712. VkBufferCreateInfo dummyBufCreateInfo;
  12713. VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
  12714. uint32_t memoryTypeBits = 0;
  12715. // Create buffer.
  12716. VkBuffer buf = VK_NULL_HANDLE;
  12717. VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
  12718. m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
  12719. if(res == VK_SUCCESS)
  12720. {
  12721. // Query for supported memory types.
  12722. VkMemoryRequirements memReq;
  12723. (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
  12724. memoryTypeBits = memReq.memoryTypeBits;
  12725. // Destroy buffer.
  12726. (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
  12727. }
  12728. return memoryTypeBits;
  12729. }
  12730. uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
  12731. {
  12732. // Make sure memory information is already fetched.
  12733. VMA_ASSERT(GetMemoryTypeCount() > 0);
  12734. uint32_t memoryTypeBits = UINT32_MAX;
  12735. if(!m_UseAmdDeviceCoherentMemory)
  12736. {
  12737. // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
  12738. for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
  12739. {
  12740. if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
  12741. {
  12742. memoryTypeBits &= ~(1u << memTypeIndex);
  12743. }
  12744. }
  12745. }
  12746. return memoryTypeBits;
  12747. }
// Computes the VkMappedMemoryRange for flushing/invalidating a sub-range of
// `allocation`, expanding it to nonCoherentAtomSize alignment as the Vulkan
// spec requires. Returns false when no flush/invalidate call is needed
// (size == 0, or the memory type is coherent), in which case outRange is
// left untouched.
bool VmaAllocator_T::GetFlushOrInvalidateRange(
    VmaAllocation allocation,
    VkDeviceSize offset, VkDeviceSize size,
    VkMappedMemoryRange& outRange) const
{
    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        const VkDeviceSize allocationSize = allocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);
        outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        outRange.pNext = VMA_NULL;
        outRange.memory = allocation->GetMemory();
        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocation: offsets are already relative to the
            // VkDeviceMemory itself.
            outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                outRange.size = allocationSize - outRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Grow the size by however much the offset was aligned down,
                // align up, but never extend past the end of the allocation.
                outRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
                    allocationSize - outRange.offset);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
            // 2. Adjust to whole block.
            const VkDeviceSize allocationOffset = allocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
            // Rebase onto the block's VkDeviceMemory and clamp to its end.
            outRange.offset += allocationOffset;
            outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
            break;
        }
        default:
            VMA_ASSERT(0);
        }
        return true;
    }
    return false;
}
#if VMA_MEMORY_BUDGET
// Re-fetches per-heap usage/budget numbers via VK_EXT_memory_budget and
// caches them in m_Budget under the budget mutex, sanitizing values from
// buggy drivers along the way.
void VmaAllocator_T::UpdateVulkanBudget()
{
    VMA_ASSERT(m_UseExtMemoryBudget);
    VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
    VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
    VmaPnextChainPushFront(&memProps, &budgetProps);
    GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
    {
        VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
            m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
            // Snapshot our own block-byte counter as of this fetch.
            m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
            // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size.
            if(m_Budget.m_VulkanBudget[heapIndex] == 0)
            {
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
            }
            else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
            {
                // Clamp a budget reported larger than the heap itself.
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
            }
            // Driver reported zero usage although we know we hold blocks:
            // fall back to our own bookkeeping.
            if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
            {
                m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
            }
        }
        m_Budget.m_OperationsSinceBudgetFetch = 0;
    }
}
#endif // VMA_MEMORY_BUDGET
  12839. void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
  12840. {
  12841. if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
  12842. hAllocation->IsMappingAllowed() &&
  12843. (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
  12844. {
  12845. void* pData = VMA_NULL;
  12846. VkResult res = Map(hAllocation, &pData);
  12847. if(res == VK_SUCCESS)
  12848. {
  12849. memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
  12850. FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
  12851. Unmap(hAllocation);
  12852. }
  12853. else
  12854. {
  12855. VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
  12856. }
  12857. }
  12858. }
  12859. uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
  12860. {
  12861. uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
  12862. if(memoryTypeBits == UINT32_MAX)
  12863. {
  12864. memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
  12865. m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
  12866. }
  12867. return memoryTypeBits;
  12868. }
#if VMA_STATS_STRING_ENABLED
// Writes the "DefaultPools" and "CustomPools" JSON sections: for each memory
// type, the default block vector plus its dedicated allocations, then one
// JSON object per custom pool, grouped by memory type.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    json.WriteString("DefaultPools");
    json.BeginObject();
    {
        for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex];
            VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
            if (pBlockVector != VMA_NULL)
            {
                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();
                json.BeginObject();
                {
                    json.WriteString("PreferredBlockSize");
                    json.WriteNumber(pBlockVector->GetPreferredBlockSize());
                    json.WriteString("Blocks");
                    pBlockVector->PrintDetailedMap(json);
                    json.WriteString("DedicatedAllocations");
                    dedicatedAllocList.BuildStatsString(json);
                }
                json.EndObject();
            }
        }
    }
    json.EndObject();
    json.WriteString("CustomPools");
    json.BeginObject();
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        if (!m_Pools.IsEmpty())
        {
            for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
            {
                // Emit the "Type N" array lazily - only if some pool uses this type.
                bool displayType = true;
                size_t index = 0;
                for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
                {
                    VmaBlockVector& blockVector = pool->m_BlockVector;
                    if (blockVector.GetMemoryTypeIndex() == memTypeIndex)
                    {
                        if (displayType)
                        {
                            json.BeginString("Type ");
                            json.ContinueString(memTypeIndex);
                            json.EndString();
                            json.BeginArray();
                            displayType = false;
                        }
                        json.BeginObject();
                        {
                            // Pool name: running index, optionally followed by the user-set name.
                            json.WriteString("Name");
                            json.BeginString();
                            json.ContinueString((uint64_t)index++);
                            if (pool->GetName())
                            {
                                json.ContinueString(" - ");
                                json.ContinueString(pool->GetName());
                            }
                            json.EndString();
                            json.WriteString("PreferredBlockSize");
                            json.WriteNumber(blockVector.GetPreferredBlockSize());
                            json.WriteString("Blocks");
                            blockVector.PrintDetailedMap(json);
                            json.WriteString("DedicatedAllocations");
                            pool->m_DedicatedAllocations.BuildStatsString(json);
                        }
                        json.EndObject();
                    }
                }
                if (!displayType)
                    json.EndArray();
            }
        }
    }
    json.EndObject();
}
#endif // VMA_STATS_STRING_ENABLED
  12950. #endif // _VMA_ALLOCATOR_T_FUNCTIONS
  12951. #ifndef _VMA_PUBLIC_INTERFACE
  12952. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
  12953. const VmaAllocatorCreateInfo* pCreateInfo,
  12954. VmaAllocator* pAllocator)
  12955. {
  12956. VMA_ASSERT(pCreateInfo && pAllocator);
  12957. VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
  12958. (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3));
  12959. VMA_DEBUG_LOG("vmaCreateAllocator");
  12960. *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
  12961. VkResult result = (*pAllocator)->Init(pCreateInfo);
  12962. if(result < 0)
  12963. {
  12964. vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator);
  12965. *pAllocator = VK_NULL_HANDLE;
  12966. }
  12967. return result;
  12968. }
  12969. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
  12970. VmaAllocator allocator)
  12971. {
  12972. if(allocator != VK_NULL_HANDLE)
  12973. {
  12974. VMA_DEBUG_LOG("vmaDestroyAllocator");
  12975. VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
  12976. vma_delete(&allocationCallbacks, allocator);
  12977. }
  12978. }
  12979. VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
  12980. {
  12981. VMA_ASSERT(allocator && pAllocatorInfo);
  12982. pAllocatorInfo->instance = allocator->m_hInstance;
  12983. pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
  12984. pAllocatorInfo->device = allocator->m_hDevice;
  12985. }
  12986. VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
  12987. VmaAllocator allocator,
  12988. const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
  12989. {
  12990. VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
  12991. *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
  12992. }
  12993. VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
  12994. VmaAllocator allocator,
  12995. const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
  12996. {
  12997. VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
  12998. *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
  12999. }
  13000. VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
  13001. VmaAllocator allocator,
  13002. uint32_t memoryTypeIndex,
  13003. VkMemoryPropertyFlags* pFlags)
  13004. {
  13005. VMA_ASSERT(allocator && pFlags);
  13006. VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
  13007. *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
  13008. }
  13009. VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
  13010. VmaAllocator allocator,
  13011. uint32_t frameIndex)
  13012. {
  13013. VMA_ASSERT(allocator);
  13014. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13015. allocator->SetCurrentFrameIndex(frameIndex);
  13016. }
  13017. VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
  13018. VmaAllocator allocator,
  13019. VmaTotalStatistics* pStats)
  13020. {
  13021. VMA_ASSERT(allocator && pStats);
  13022. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13023. allocator->CalculateStatistics(pStats);
  13024. }
  13025. VMA_CALL_PRE uint64_t VMA_CALL_POST vmaCalculateLazilyAllocatedBytes(
  13026. VmaAllocator allocator)
  13027. {
  13028. VMA_ASSERT(allocator);
  13029. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13030. VmaTotalStatistics stats;
  13031. allocator->CalculateStatistics(&stats);
  13032. uint64_t total_lazilily_allocated_bytes = 0;
  13033. for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) {
  13034. for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) {
  13035. if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) {
  13036. VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
  13037. if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)
  13038. total_lazilily_allocated_bytes += stats.memoryType[typeIndex].statistics.allocationBytes;
  13039. }
  13040. }
  13041. }
  13042. return total_lazilily_allocated_bytes;
  13043. }
  13044. VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
  13045. VmaAllocator allocator,
  13046. VmaBudget* pBudgets)
  13047. {
  13048. VMA_ASSERT(allocator && pBudgets);
  13049. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13050. allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount());
  13051. }
#if VMA_STATS_STRING_ENABLED
// Builds a JSON document describing the allocator: general device info, total
// statistics, per-heap sections (flags, size, budget, stats, nested per-type
// sections), and - when detailedMap is VK_TRUE - the detailed pool map.
// The returned string must be released with vmaFreeStatsString().
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    VmaStringBuilder sb(allocator->GetAllocationCallbacks());
    {
        // Collect budgets and statistics up front.
        VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
        allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount());
        VmaTotalStatistics stats;
        allocator->CalculateStatistics(&stats);
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        {
            // "General": static device properties.
            json.WriteString("General");
            json.BeginObject();
            {
                const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
                const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;
                json.WriteString("API");
                json.WriteString("Vulkan");
                json.WriteString("apiVersion");
                json.BeginString();
                json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion));
                json.ContinueString(".");
                json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion));
                json.ContinueString(".");
                json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion));
                json.EndString();
                json.WriteString("GPU");
                json.WriteString(deviceProperties.deviceName);
                json.WriteString("deviceType");
                json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));
                json.WriteString("maxMemoryAllocationCount");
                json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
                json.WriteString("bufferImageGranularity");
                json.WriteNumber(deviceProperties.limits.bufferImageGranularity);
                json.WriteString("nonCoherentAtomSize");
                json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);
                json.WriteString("memoryHeapCount");
                json.WriteNumber(memoryProperties.memoryHeapCount);
                json.WriteString("memoryTypeCount");
                json.WriteNumber(memoryProperties.memoryTypeCount);
            }
            json.EndObject();
        }
        {
            // "Total": statistics aggregated over everything.
            json.WriteString("Total");
            VmaPrintDetailedStatistics(json, stats.total);
        }
        {
            // "MemoryInfo": one object per heap.
            json.WriteString("MemoryInfo");
            json.BeginObject();
            {
                for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
                {
                    json.BeginString("Heap ");
                    json.ContinueString(heapIndex);
                    json.EndString();
                    json.BeginObject();
                    {
                        const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
                        json.WriteString("Flags");
                        json.BeginArray(true);
                        {
                            if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
                                json.WriteString("DEVICE_LOCAL");
#if VMA_VULKAN_VERSION >= 1001000
                            if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)
                                json.WriteString("MULTI_INSTANCE");
#endif
                            // Any remaining (unrecognized) heap flags are written numerically.
                            VkMemoryHeapFlags flags = heapInfo.flags &
                                ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
#if VMA_VULKAN_VERSION >= 1001000
                                | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
#endif
                                );
                            if (flags != 0)
                                json.WriteNumber(flags);
                        }
                        json.EndArray();
                        json.WriteString("Size");
                        json.WriteNumber(heapInfo.size);
                        json.WriteString("Budget");
                        json.BeginObject();
                        {
                            json.WriteString("BudgetBytes");
                            json.WriteNumber(budgets[heapIndex].budget);
                            json.WriteString("UsageBytes");
                            json.WriteNumber(budgets[heapIndex].usage);
                        }
                        json.EndObject();
                        json.WriteString("Stats");
                        VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);
                        json.WriteString("MemoryPools");
                        json.BeginObject();
                        {
                            // Nested per-memory-type objects for the types in this heap.
                            for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
                            {
                                if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                                {
                                    json.BeginString("Type ");
                                    json.ContinueString(typeIndex);
                                    json.EndString();
                                    json.BeginObject();
                                    {
                                        json.WriteString("Flags");
                                        json.BeginArray(true);
                                        {
                                            VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
                                                json.WriteString("DEVICE_LOCAL");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
                                                json.WriteString("HOST_VISIBLE");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
                                                json.WriteString("HOST_COHERENT");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
                                                json.WriteString("HOST_CACHED");
                                            if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)
                                                json.WriteString("LAZILY_ALLOCATED");
#if VMA_VULKAN_VERSION >= 1001000
                                            if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
                                                json.WriteString("PROTECTED");
#endif
#if VK_AMD_device_coherent_memory
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY)
                                                json.WriteString("DEVICE_COHERENT_AMD");
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)
                                                json.WriteString("DEVICE_UNCACHED_AMD");
#endif
                                            // Remaining (unrecognized) property flags are written numerically.
                                            flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
#if VMA_VULKAN_VERSION >= 1001000
                                                | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
#endif
#if VK_AMD_device_coherent_memory
                                                | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY
                                                | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY
#endif
                                                | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
                                                | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
                                                | VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
                                            if (flags != 0)
                                                json.WriteNumber(flags);
                                        }
                                        json.EndArray();
                                        json.WriteString("Stats");
                                        VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]);
                                    }
                                    json.EndObject();
                                }
                            }
                        }
                        json.EndObject();
                    }
                    json.EndObject();
                }
            }
            json.EndObject();
        }
        if (detailedMap == VK_TRUE)
            allocator->PrintDetailedMap(json);
        json.EndObject();
    }
    *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength());
}
  13220. VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
  13221. VmaAllocator allocator,
  13222. char* pStatsString)
  13223. {
  13224. if(pStatsString != VMA_NULL)
  13225. {
  13226. VMA_ASSERT(allocator);
  13227. VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString);
  13228. }
  13229. }
  13230. #endif // VMA_STATS_STRING_ENABLED
  13231. /*
  13232. This function is not protected by any mutex because it just reads immutable data.
  13233. */
  13234. VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
  13235. VmaAllocator allocator,
  13236. uint32_t memoryTypeBits,
  13237. const VmaAllocationCreateInfo* pAllocationCreateInfo,
  13238. uint32_t* pMemoryTypeIndex)
  13239. {
  13240. VMA_ASSERT(allocator != VK_NULL_HANDLE);
  13241. VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
  13242. VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
  13243. return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, VmaBufferImageUsage::UNKNOWN, pMemoryTypeIndex);
  13244. }
// Finds a suitable memory type for a buffer described by pBufferCreateInfo.
// Prefers vkGetDeviceBufferMemoryRequirements (maintenance4 / Vulkan 1.3),
// which needs no buffer object; otherwise creates and destroys a temporary
// dummy buffer to obtain the requirements.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    const VkDevice hDev = allocator->m_hDevice;
    const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
    VkResult res;
#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000
    if(funcs->vkGetDeviceBufferMemoryRequirements)
    {
        // Can query straight from VkBufferCreateInfo :)
        VkDeviceBufferMemoryRequirementsKHR devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR};
        devBufMemReq.pCreateInfo = pBufferCreateInfo;
        VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
        (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq);
        res = allocator->FindMemoryTypeIndex(
            memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo,
            VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex);
    }
    else
#endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000
    {
        // Must create a dummy buffer to query :(
        VkBuffer hBuffer = VK_NULL_HANDLE;
        res = funcs->vkCreateBuffer(
            hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
        if(res == VK_SUCCESS)
        {
            VkMemoryRequirements memReq = {};
            funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq);
            res = allocator->FindMemoryTypeIndex(
                memReq.memoryTypeBits, pAllocationCreateInfo,
                VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex);
            funcs->vkDestroyBuffer(
                hDev, hBuffer, allocator->GetAllocationCallbacks());
        }
    }
    return res;
}
// Finds a memory type index suitable for an image described by pImageCreateInfo,
// without requiring the caller to create the image first.
// Uses vkGetDeviceImageMemoryRequirements (Vulkan 1.3 / VK_KHR_maintenance4) when
// available; otherwise creates and destroys a temporary VkImage to query requirements.
// Returns the result of FindMemoryTypeIndex, or the vkCreateImage error on the fallback path.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    const VkDevice hDev = allocator->m_hDevice;
    const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
    VkResult res;
#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000
    if(funcs->vkGetDeviceImageMemoryRequirements)
    {
        // Can query straight from VkImageCreateInfo :)
        VkDeviceImageMemoryRequirementsKHR devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR};
        devImgMemReq.pCreateInfo = pImageCreateInfo;
        // DRM-format-modifier and disjoint images would need a specific planeAspect,
        // which this entry point has no way to receive - reject them in debug builds.
        VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 &&
            "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect.");
        VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
        (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq);
        res = allocator->FindMemoryTypeIndex(
            memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo,
            VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex);
    }
    else
#endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000
    {
        // Must create a dummy image to query :(
        VkImage hImage = VK_NULL_HANDLE;
        res = funcs->vkCreateImage(
            hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
        if(res == VK_SUCCESS)
        {
            VkMemoryRequirements memReq = {};
            funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq);
            res = allocator->FindMemoryTypeIndex(
                memReq.memoryTypeBits, pAllocationCreateInfo,
                VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex);
            // The dummy image is only needed for the requirements query - destroy it now.
            funcs->vkDestroyImage(
                hDev, hImage, allocator->GetAllocationCallbacks());
        }
    }
    return res;
}
  13337. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
  13338. VmaAllocator allocator,
  13339. const VmaPoolCreateInfo* pCreateInfo,
  13340. VmaPool* pPool)
  13341. {
  13342. VMA_ASSERT(allocator && pCreateInfo && pPool);
  13343. VMA_DEBUG_LOG("vmaCreatePool");
  13344. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13345. return allocator->CreatePool(pCreateInfo, pPool);
  13346. }
  13347. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
  13348. VmaAllocator allocator,
  13349. VmaPool pool)
  13350. {
  13351. VMA_ASSERT(allocator);
  13352. if(pool == VK_NULL_HANDLE)
  13353. {
  13354. return;
  13355. }
  13356. VMA_DEBUG_LOG("vmaDestroyPool");
  13357. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13358. allocator->DestroyPool(pool);
  13359. }
  13360. VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
  13361. VmaAllocator allocator,
  13362. VmaPool pool,
  13363. VmaStatistics* pPoolStats)
  13364. {
  13365. VMA_ASSERT(allocator && pool && pPoolStats);
  13366. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13367. allocator->GetPoolStatistics(pool, pPoolStats);
  13368. }
  13369. VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
  13370. VmaAllocator allocator,
  13371. VmaPool pool,
  13372. VmaDetailedStatistics* pPoolStats)
  13373. {
  13374. VMA_ASSERT(allocator && pool && pPoolStats);
  13375. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13376. allocator->CalculatePoolStatistics(pool, pPoolStats);
  13377. }
  13378. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
  13379. {
  13380. VMA_ASSERT(allocator && pool);
  13381. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13382. VMA_DEBUG_LOG("vmaCheckPoolCorruption");
  13383. return allocator->CheckPoolCorruption(pool);
  13384. }
  13385. VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
  13386. VmaAllocator allocator,
  13387. VmaPool pool,
  13388. const char** ppName)
  13389. {
  13390. VMA_ASSERT(allocator && pool && ppName);
  13391. VMA_DEBUG_LOG("vmaGetPoolName");
  13392. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13393. *ppName = pool->GetName();
  13394. }
  13395. VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
  13396. VmaAllocator allocator,
  13397. VmaPool pool,
  13398. const char* pName)
  13399. {
  13400. VMA_ASSERT(allocator && pool);
  13401. VMA_DEBUG_LOG("vmaSetPoolName");
  13402. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13403. pool->SetName(pName);
  13404. }
  13405. VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
  13406. VmaAllocator allocator,
  13407. const VkMemoryRequirements* pVkMemoryRequirements,
  13408. const VmaAllocationCreateInfo* pCreateInfo,
  13409. VmaAllocation* pAllocation,
  13410. VmaAllocationInfo* pAllocationInfo)
  13411. {
  13412. VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
  13413. VMA_DEBUG_LOG("vmaAllocateMemory");
  13414. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13415. VkResult result = allocator->AllocateMemory(
  13416. *pVkMemoryRequirements,
  13417. false, // requiresDedicatedAllocation
  13418. false, // prefersDedicatedAllocation
  13419. VK_NULL_HANDLE, // dedicatedBuffer
  13420. VK_NULL_HANDLE, // dedicatedImage
  13421. VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage
  13422. *pCreateInfo,
  13423. VMA_SUBALLOCATION_TYPE_UNKNOWN,
  13424. 1, // allocationCount
  13425. pAllocation);
  13426. if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
  13427. {
  13428. allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
  13429. }
  13430. return result;
  13431. }
  13432. VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
  13433. VmaAllocator allocator,
  13434. const VkMemoryRequirements* pVkMemoryRequirements,
  13435. const VmaAllocationCreateInfo* pCreateInfo,
  13436. size_t allocationCount,
  13437. VmaAllocation* pAllocations,
  13438. VmaAllocationInfo* pAllocationInfo)
  13439. {
  13440. if(allocationCount == 0)
  13441. {
  13442. return VK_SUCCESS;
  13443. }
  13444. VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
  13445. VMA_DEBUG_LOG("vmaAllocateMemoryPages");
  13446. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13447. VkResult result = allocator->AllocateMemory(
  13448. *pVkMemoryRequirements,
  13449. false, // requiresDedicatedAllocation
  13450. false, // prefersDedicatedAllocation
  13451. VK_NULL_HANDLE, // dedicatedBuffer
  13452. VK_NULL_HANDLE, // dedicatedImage
  13453. VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage
  13454. *pCreateInfo,
  13455. VMA_SUBALLOCATION_TYPE_UNKNOWN,
  13456. allocationCount,
  13457. pAllocations);
  13458. if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
  13459. {
  13460. for(size_t i = 0; i < allocationCount; ++i)
  13461. {
  13462. allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
  13463. }
  13464. }
  13465. return result;
  13466. }
  13467. VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
  13468. VmaAllocator allocator,
  13469. VkBuffer buffer,
  13470. const VmaAllocationCreateInfo* pCreateInfo,
  13471. VmaAllocation* pAllocation,
  13472. VmaAllocationInfo* pAllocationInfo)
  13473. {
  13474. VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
  13475. VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
  13476. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13477. VkMemoryRequirements vkMemReq = {};
  13478. bool requiresDedicatedAllocation = false;
  13479. bool prefersDedicatedAllocation = false;
  13480. allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
  13481. requiresDedicatedAllocation,
  13482. prefersDedicatedAllocation);
  13483. VkResult result = allocator->AllocateMemory(
  13484. vkMemReq,
  13485. requiresDedicatedAllocation,
  13486. prefersDedicatedAllocation,
  13487. buffer, // dedicatedBuffer
  13488. VK_NULL_HANDLE, // dedicatedImage
  13489. VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage
  13490. *pCreateInfo,
  13491. VMA_SUBALLOCATION_TYPE_BUFFER,
  13492. 1, // allocationCount
  13493. pAllocation);
  13494. if(pAllocationInfo && result == VK_SUCCESS)
  13495. {
  13496. allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
  13497. }
  13498. return result;
  13499. }
  13500. VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
  13501. VmaAllocator allocator,
  13502. VkImage image,
  13503. const VmaAllocationCreateInfo* pCreateInfo,
  13504. VmaAllocation* pAllocation,
  13505. VmaAllocationInfo* pAllocationInfo)
  13506. {
  13507. VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
  13508. VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
  13509. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13510. VkMemoryRequirements vkMemReq = {};
  13511. bool requiresDedicatedAllocation = false;
  13512. bool prefersDedicatedAllocation = false;
  13513. allocator->GetImageMemoryRequirements(image, vkMemReq,
  13514. requiresDedicatedAllocation, prefersDedicatedAllocation);
  13515. VkResult result = allocator->AllocateMemory(
  13516. vkMemReq,
  13517. requiresDedicatedAllocation,
  13518. prefersDedicatedAllocation,
  13519. VK_NULL_HANDLE, // dedicatedBuffer
  13520. image, // dedicatedImage
  13521. VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage
  13522. *pCreateInfo,
  13523. VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
  13524. 1, // allocationCount
  13525. pAllocation);
  13526. if(pAllocationInfo && result == VK_SUCCESS)
  13527. {
  13528. allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
  13529. }
  13530. return result;
  13531. }
  13532. VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
  13533. VmaAllocator allocator,
  13534. VmaAllocation allocation)
  13535. {
  13536. VMA_ASSERT(allocator);
  13537. if(allocation == VK_NULL_HANDLE)
  13538. {
  13539. return;
  13540. }
  13541. VMA_DEBUG_LOG("vmaFreeMemory");
  13542. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13543. allocator->FreeMemory(
  13544. 1, // allocationCount
  13545. &allocation);
  13546. }
  13547. VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
  13548. VmaAllocator allocator,
  13549. size_t allocationCount,
  13550. const VmaAllocation* pAllocations)
  13551. {
  13552. if(allocationCount == 0)
  13553. {
  13554. return;
  13555. }
  13556. VMA_ASSERT(allocator);
  13557. VMA_DEBUG_LOG("vmaFreeMemoryPages");
  13558. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13559. allocator->FreeMemory(allocationCount, pAllocations);
  13560. }
  13561. VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
  13562. VmaAllocator allocator,
  13563. VmaAllocation allocation,
  13564. VmaAllocationInfo* pAllocationInfo)
  13565. {
  13566. VMA_ASSERT(allocator && allocation && pAllocationInfo);
  13567. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13568. allocator->GetAllocationInfo(allocation, pAllocationInfo);
  13569. }
  13570. VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2(
  13571. VmaAllocator allocator,
  13572. VmaAllocation allocation,
  13573. VmaAllocationInfo2* pAllocationInfo)
  13574. {
  13575. VMA_ASSERT(allocator && allocation && pAllocationInfo);
  13576. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13577. allocator->GetAllocationInfo2(allocation, pAllocationInfo);
  13578. }
  13579. VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
  13580. VmaAllocator allocator,
  13581. VmaAllocation allocation,
  13582. void* pUserData)
  13583. {
  13584. VMA_ASSERT(allocator && allocation);
  13585. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13586. allocation->SetUserData(allocator, pUserData);
  13587. }
  13588. VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
  13589. VmaAllocator VMA_NOT_NULL allocator,
  13590. VmaAllocation VMA_NOT_NULL allocation,
  13591. const char* VMA_NULLABLE pName)
  13592. {
  13593. allocation->SetName(allocator, pName);
  13594. }
  13595. VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
  13596. VmaAllocator VMA_NOT_NULL allocator,
  13597. VmaAllocation VMA_NOT_NULL allocation,
  13598. VkMemoryPropertyFlags* VMA_NOT_NULL pFlags)
  13599. {
  13600. VMA_ASSERT(allocator && allocation && pFlags);
  13601. const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
  13602. *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
  13603. }
  13604. VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
  13605. VmaAllocator allocator,
  13606. VmaAllocation allocation,
  13607. void** ppData)
  13608. {
  13609. VMA_ASSERT(allocator && allocation && ppData);
  13610. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13611. return allocator->Map(allocation, ppData);
  13612. }
  13613. VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
  13614. VmaAllocator allocator,
  13615. VmaAllocation allocation)
  13616. {
  13617. VMA_ASSERT(allocator && allocation);
  13618. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13619. allocator->Unmap(allocation);
  13620. }
  13621. VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
  13622. VmaAllocator allocator,
  13623. VmaAllocation allocation,
  13624. VkDeviceSize offset,
  13625. VkDeviceSize size)
  13626. {
  13627. VMA_ASSERT(allocator && allocation);
  13628. VMA_DEBUG_LOG("vmaFlushAllocation");
  13629. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13630. return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
  13631. }
  13632. VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
  13633. VmaAllocator allocator,
  13634. VmaAllocation allocation,
  13635. VkDeviceSize offset,
  13636. VkDeviceSize size)
  13637. {
  13638. VMA_ASSERT(allocator && allocation);
  13639. VMA_DEBUG_LOG("vmaInvalidateAllocation");
  13640. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13641. return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
  13642. }
  13643. VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
  13644. VmaAllocator allocator,
  13645. uint32_t allocationCount,
  13646. const VmaAllocation* allocations,
  13647. const VkDeviceSize* offsets,
  13648. const VkDeviceSize* sizes)
  13649. {
  13650. VMA_ASSERT(allocator);
  13651. if(allocationCount == 0)
  13652. {
  13653. return VK_SUCCESS;
  13654. }
  13655. VMA_ASSERT(allocations);
  13656. VMA_DEBUG_LOG("vmaFlushAllocations");
  13657. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13658. return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
  13659. }
  13660. VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
  13661. VmaAllocator allocator,
  13662. uint32_t allocationCount,
  13663. const VmaAllocation* allocations,
  13664. const VkDeviceSize* offsets,
  13665. const VkDeviceSize* sizes)
  13666. {
  13667. VMA_ASSERT(allocator);
  13668. if(allocationCount == 0)
  13669. {
  13670. return VK_SUCCESS;
  13671. }
  13672. VMA_ASSERT(allocations);
  13673. VMA_DEBUG_LOG("vmaInvalidateAllocations");
  13674. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13675. return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
  13676. }
  13677. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation(
  13678. VmaAllocator allocator,
  13679. const void* pSrcHostPointer,
  13680. VmaAllocation dstAllocation,
  13681. VkDeviceSize dstAllocationLocalOffset,
  13682. VkDeviceSize size)
  13683. {
  13684. VMA_ASSERT(allocator && pSrcHostPointer && dstAllocation);
  13685. if(size == 0)
  13686. {
  13687. return VK_SUCCESS;
  13688. }
  13689. VMA_DEBUG_LOG("vmaCopyMemoryToAllocation");
  13690. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13691. return allocator->CopyMemoryToAllocation(pSrcHostPointer, dstAllocation, dstAllocationLocalOffset, size);
  13692. }
  13693. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory(
  13694. VmaAllocator allocator,
  13695. VmaAllocation srcAllocation,
  13696. VkDeviceSize srcAllocationLocalOffset,
  13697. void* pDstHostPointer,
  13698. VkDeviceSize size)
  13699. {
  13700. VMA_ASSERT(allocator && srcAllocation && pDstHostPointer);
  13701. if(size == 0)
  13702. {
  13703. return VK_SUCCESS;
  13704. }
  13705. VMA_DEBUG_LOG("vmaCopyAllocationToMemory");
  13706. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13707. return allocator->CopyAllocationToMemory(srcAllocation, srcAllocationLocalOffset, pDstHostPointer, size);
  13708. }
  13709. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
  13710. VmaAllocator allocator,
  13711. uint32_t memoryTypeBits)
  13712. {
  13713. VMA_ASSERT(allocator);
  13714. VMA_DEBUG_LOG("vmaCheckCorruption");
  13715. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13716. return allocator->CheckCorruption(memoryTypeBits);
  13717. }
  13718. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
  13719. VmaAllocator allocator,
  13720. const VmaDefragmentationInfo* pInfo,
  13721. VmaDefragmentationContext* pContext)
  13722. {
  13723. VMA_ASSERT(allocator && pInfo && pContext);
  13724. VMA_DEBUG_LOG("vmaBeginDefragmentation");
  13725. if (pInfo->pool != VMA_NULL)
  13726. {
  13727. // Check if run on supported algorithms
  13728. if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
  13729. return VK_ERROR_FEATURE_NOT_PRESENT;
  13730. }
  13731. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13732. *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo);
  13733. return VK_SUCCESS;
  13734. }
  13735. VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
  13736. VmaAllocator allocator,
  13737. VmaDefragmentationContext context,
  13738. VmaDefragmentationStats* pStats)
  13739. {
  13740. VMA_ASSERT(allocator && context);
  13741. VMA_DEBUG_LOG("vmaEndDefragmentation");
  13742. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13743. if (pStats)
  13744. context->GetStats(*pStats);
  13745. vma_delete(allocator, context);
  13746. }
  13747. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
  13748. VmaAllocator VMA_NOT_NULL allocator,
  13749. VmaDefragmentationContext VMA_NOT_NULL context,
  13750. VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
  13751. {
  13752. VMA_ASSERT(context && pPassInfo);
  13753. VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
  13754. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13755. return context->DefragmentPassBegin(*pPassInfo);
  13756. }
  13757. VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
  13758. VmaAllocator VMA_NOT_NULL allocator,
  13759. VmaDefragmentationContext VMA_NOT_NULL context,
  13760. VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
  13761. {
  13762. VMA_ASSERT(context && pPassInfo);
  13763. VMA_DEBUG_LOG("vmaEndDefragmentationPass");
  13764. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13765. return context->DefragmentPassEnd(*pPassInfo);
  13766. }
  13767. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
  13768. VmaAllocator allocator,
  13769. VmaAllocation allocation,
  13770. VkBuffer buffer)
  13771. {
  13772. VMA_ASSERT(allocator && allocation && buffer);
  13773. VMA_DEBUG_LOG("vmaBindBufferMemory");
  13774. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13775. return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
  13776. }
  13777. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
  13778. VmaAllocator allocator,
  13779. VmaAllocation allocation,
  13780. VkDeviceSize allocationLocalOffset,
  13781. VkBuffer buffer,
  13782. const void* pNext)
  13783. {
  13784. VMA_ASSERT(allocator && allocation && buffer);
  13785. VMA_DEBUG_LOG("vmaBindBufferMemory2");
  13786. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13787. return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
  13788. }
  13789. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
  13790. VmaAllocator allocator,
  13791. VmaAllocation allocation,
  13792. VkImage image)
  13793. {
  13794. VMA_ASSERT(allocator && allocation && image);
  13795. VMA_DEBUG_LOG("vmaBindImageMemory");
  13796. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13797. return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
  13798. }
  13799. VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
  13800. VmaAllocator allocator,
  13801. VmaAllocation allocation,
  13802. VkDeviceSize allocationLocalOffset,
  13803. VkImage image,
  13804. const void* pNext)
  13805. {
  13806. VMA_ASSERT(allocator && allocation && image);
  13807. VMA_DEBUG_LOG("vmaBindImageMemory2");
  13808. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  13809. return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
  13810. }
// Creates a VkBuffer, allocates memory for it, and (unless DONT_BIND is requested)
// binds them together. On any failure, already-created objects are destroyed/freed
// and the out-handles are reset to VK_NULL_HANDLE.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    // Zero-size buffers are invalid in Vulkan - reject early.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    // Device-address usage requires the allocator to have been created with the
    // corresponding flag; otherwise the later allocation would be invalid.
    if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
        !allocator->m_UseKhrBufferDeviceAddress)
    {
        VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    VMA_DEBUG_LOG("vmaCreateBuffer");
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;
    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);
        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), // dedicatedBufferImageUsage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            1, // allocationCount
            pAllocation);
        if(res >= 0)
        {
            // 3. Bind buffer with memory.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation, then the buffer.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
// Same as vmaCreateBuffer, but additionally enforces minAlignment (must be a power
// of two) on the allocation by raising the queried alignment requirement.
// On any failure, already-created objects are destroyed/freed and the out-handles
// are reset to VK_NULL_HANDLE.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkDeviceSize minAlignment,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);
    // Zero-size buffers are invalid in Vulkan - reject early.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    // Device-address usage requires the allocator to have been created with the
    // corresponding flag; otherwise the later allocation would be invalid.
    if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
        !allocator->m_UseKhrBufferDeviceAddress)
    {
        VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    VMA_DEBUG_LOG("vmaCreateBufferWithAlignment");
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;
    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);
        // 2a. Include minAlignment
        vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);
        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), // dedicatedBufferImageUsage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            1, // allocationCount
            pAllocation);
        if(res >= 0)
        {
            // 3. Bind buffer with memory.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation, then the buffer.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
  13978. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
  13979. VmaAllocator VMA_NOT_NULL allocator,
  13980. VmaAllocation VMA_NOT_NULL allocation,
  13981. const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
  13982. VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
  13983. {
  13984. return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer);
  13985. }
  13986. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
  13987. VmaAllocator VMA_NOT_NULL allocator,
  13988. VmaAllocation VMA_NOT_NULL allocation,
  13989. VkDeviceSize allocationLocalOffset,
  13990. const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
  13991. VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
  13992. {
  13993. VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation);
  13994. VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize());
  13995. VMA_DEBUG_LOG("vmaCreateAliasingBuffer2");
  13996. *pBuffer = VK_NULL_HANDLE;
  13997. if (pBufferCreateInfo->size == 0)
  13998. {
  13999. return VK_ERROR_INITIALIZATION_FAILED;
  14000. }
  14001. if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
  14002. !allocator->m_UseKhrBufferDeviceAddress)
  14003. {
  14004. VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
  14005. return VK_ERROR_INITIALIZATION_FAILED;
  14006. }
  14007. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  14008. // 1. Create VkBuffer.
  14009. VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
  14010. allocator->m_hDevice,
  14011. pBufferCreateInfo,
  14012. allocator->GetAllocationCallbacks(),
  14013. pBuffer);
  14014. if (res >= 0)
  14015. {
  14016. // 2. Bind buffer with memory.
  14017. res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL);
  14018. if (res >= 0)
  14019. {
  14020. return VK_SUCCESS;
  14021. }
  14022. (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
  14023. }
  14024. return res;
  14025. }
  14026. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
  14027. VmaAllocator allocator,
  14028. VkBuffer buffer,
  14029. VmaAllocation allocation)
  14030. {
  14031. VMA_ASSERT(allocator);
  14032. if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
  14033. {
  14034. return;
  14035. }
  14036. VMA_DEBUG_LOG("vmaDestroyBuffer");
  14037. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  14038. if(buffer != VK_NULL_HANDLE)
  14039. {
  14040. (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
  14041. }
  14042. if(allocation != VK_NULL_HANDLE)
  14043. {
  14044. allocator->FreeMemory(
  14045. 1, // allocationCount
  14046. &allocation);
  14047. }
  14048. }
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    // Creates a VkImage, allocates device memory for it, and binds them
    // together in one call. On any failure, every intermediate object is
    // rolled back and *pImage / *pAllocation are left as VK_NULL_HANDLE.
    // pAllocationInfo is optional output, filled only on full success.
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    // Reject degenerate images (any zero dimension, mip, or layer count)
    // before touching Vulkan; vkCreateImage would be invalid usage for these.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    VMA_DEBUG_LOG("vmaCreateImage");
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;
    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Image tiling selects the suballocation type passed to the allocator
        // (optimal vs. linear images are tracked as distinct types).
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            VmaBufferImageUsage(*pImageCreateInfo), // dedicatedBufferImageUsage
            *pAllocationCreateInfo,
            suballocType,
            1, // allocationCount
            pAllocation);
        if(res >= 0)
        {
            // 3. Bind image with memory, unless the caller asked to skip it.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
#if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitImageUsage(*pImageCreateInfo);
#endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            // Bind failed: roll back both the allocation and the image.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: destroy the image created in step 1.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
{
    // Convenience overload: creates an image aliasing the existing allocation
    // at local offset 0. See vmaCreateAliasingImage2 for the full version.
    return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage);
}
  14139. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
  14140. VmaAllocator VMA_NOT_NULL allocator,
  14141. VmaAllocation VMA_NOT_NULL allocation,
  14142. VkDeviceSize allocationLocalOffset,
  14143. const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
  14144. VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
  14145. {
  14146. VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation);
  14147. *pImage = VK_NULL_HANDLE;
  14148. VMA_DEBUG_LOG("vmaCreateImage2");
  14149. if (pImageCreateInfo->extent.width == 0 ||
  14150. pImageCreateInfo->extent.height == 0 ||
  14151. pImageCreateInfo->extent.depth == 0 ||
  14152. pImageCreateInfo->mipLevels == 0 ||
  14153. pImageCreateInfo->arrayLayers == 0)
  14154. {
  14155. return VK_ERROR_INITIALIZATION_FAILED;
  14156. }
  14157. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  14158. // 1. Create VkImage.
  14159. VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
  14160. allocator->m_hDevice,
  14161. pImageCreateInfo,
  14162. allocator->GetAllocationCallbacks(),
  14163. pImage);
  14164. if (res >= 0)
  14165. {
  14166. // 2. Bind image with memory.
  14167. res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL);
  14168. if (res >= 0)
  14169. {
  14170. return VK_SUCCESS;
  14171. }
  14172. (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
  14173. }
  14174. return res;
  14175. }
  14176. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
  14177. VmaAllocator VMA_NOT_NULL allocator,
  14178. VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
  14179. VmaAllocation VMA_NULLABLE allocation)
  14180. {
  14181. VMA_ASSERT(allocator);
  14182. if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
  14183. {
  14184. return;
  14185. }
  14186. VMA_DEBUG_LOG("vmaDestroyImage");
  14187. VMA_DEBUG_GLOBAL_MUTEX_LOCK
  14188. if(image != VK_NULL_HANDLE)
  14189. {
  14190. (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
  14191. }
  14192. if(allocation != VK_NULL_HANDLE)
  14193. {
  14194. allocator->FreeMemory(
  14195. 1, // allocationCount
  14196. &allocation);
  14197. }
  14198. }
  14199. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
  14200. const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
  14201. VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock)
  14202. {
  14203. VMA_ASSERT(pCreateInfo && pVirtualBlock);
  14204. VMA_ASSERT(pCreateInfo->size > 0);
  14205. VMA_DEBUG_LOG("vmaCreateVirtualBlock");
  14206. VMA_DEBUG_GLOBAL_MUTEX_LOCK;
  14207. *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo);
  14208. VkResult res = (*pVirtualBlock)->Init();
  14209. if(res < 0)
  14210. {
  14211. vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock);
  14212. *pVirtualBlock = VK_NULL_HANDLE;
  14213. }
  14214. return res;
  14215. }
  14216. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock)
  14217. {
  14218. if(virtualBlock != VK_NULL_HANDLE)
  14219. {
  14220. VMA_DEBUG_LOG("vmaDestroyVirtualBlock");
  14221. VMA_DEBUG_GLOBAL_MUTEX_LOCK;
  14222. VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
  14223. vma_delete(&allocationCallbacks, virtualBlock);
  14224. }
  14225. }
  14226. VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
  14227. {
  14228. VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
  14229. VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty");
  14230. VMA_DEBUG_GLOBAL_MUTEX_LOCK;
  14231. return virtualBlock->IsEmpty() ? VK_TRUE : VK_FALSE;
  14232. }
  14233. VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  14234. VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo)
  14235. {
  14236. VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL);
  14237. VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo");
  14238. VMA_DEBUG_GLOBAL_MUTEX_LOCK;
  14239. virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo);
  14240. }
  14241. VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  14242. const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
  14243. VkDeviceSize* VMA_NULLABLE pOffset)
  14244. {
  14245. VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL);
  14246. VMA_DEBUG_LOG("vmaVirtualAllocate");
  14247. VMA_DEBUG_GLOBAL_MUTEX_LOCK;
  14248. return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset);
  14249. }
  14250. VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation)
  14251. {
  14252. if(allocation != VK_NULL_HANDLE)
  14253. {
  14254. VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
  14255. VMA_DEBUG_LOG("vmaVirtualFree");
  14256. VMA_DEBUG_GLOBAL_MUTEX_LOCK;
  14257. virtualBlock->Free(allocation);
  14258. }
  14259. }
VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
{
    // Frees all virtual allocations in the block at once.
    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
    VMA_DEBUG_LOG("vmaClearVirtualBlock");
    VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    virtualBlock->Clear();
}
VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData)
{
    // Attaches an opaque user pointer to the given virtual allocation.
    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
    VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData");
    VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    virtualBlock->SetAllocationUserData(allocation, pUserData);
}
  14275. VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  14276. VmaStatistics* VMA_NOT_NULL pStats)
  14277. {
  14278. VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
  14279. VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics");
  14280. VMA_DEBUG_GLOBAL_MUTEX_LOCK;
  14281. virtualBlock->GetStatistics(*pStats);
  14282. }
  14283. VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  14284. VmaDetailedStatistics* VMA_NOT_NULL pStats)
  14285. {
  14286. VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
  14287. VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics");
  14288. VMA_DEBUG_GLOBAL_MUTEX_LOCK;
  14289. virtualBlock->CalculateDetailedStatistics(*pStats);
  14290. }
  14291. #if VMA_STATS_STRING_ENABLED
  14292. VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  14293. char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)
  14294. {
  14295. VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL);
  14296. VMA_DEBUG_GLOBAL_MUTEX_LOCK;
  14297. const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks();
  14298. VmaStringBuilder sb(allocationCallbacks);
  14299. virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb);
  14300. *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength());
  14301. }
  14302. VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
  14303. char* VMA_NULLABLE pStatsString)
  14304. {
  14305. if(pStatsString != VMA_NULL)
  14306. {
  14307. VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
  14308. VMA_DEBUG_GLOBAL_MUTEX_LOCK;
  14309. VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
  14310. }
  14311. }
  14312. #endif // VMA_STATS_STRING_ENABLED
  14313. #endif // _VMA_PUBLIC_INTERFACE
  14314. #endif // VMA_IMPLEMENTATION
  14315. /**
  14316. \page quick_start Quick start
  14317. \section quick_start_project_setup Project setup
  14318. Vulkan Memory Allocator comes in form of a "stb-style" single header file.
While you can pull the entire repository e.g. as a Git module, there is also a CMake script provided,
so you don't need to build it as a separate library project.
  14321. You can add file "vk_mem_alloc.h" directly to your project and submit it to code repository next to your other source files.
  14322. "Single header" doesn't mean that everything is contained in C/C++ declarations,
  14323. like it tends to be in case of inline functions or C++ templates.
  14324. It means that implementation is bundled with interface in a single file and needs to be extracted using preprocessor macro.
  14325. If you don't do it properly, it will result in linker errors.
  14326. To do it properly:
  14327. -# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
  14328. This includes declarations of all members of the library.
  14329. -# In exactly one CPP file define following macro before this include.
  14330. It enables also internal definitions.
  14331. \code
  14332. #define VMA_IMPLEMENTATION
  14333. #include "vk_mem_alloc.h"
  14334. \endcode
  14335. It may be a good idea to create dedicated CPP file just for this purpose, e.g. "VmaUsage.cpp".
  14336. This library includes header `<vulkan/vulkan.h>`, which in turn
  14337. includes `<windows.h>` on Windows. If you need some specific macros defined
  14338. before including these headers (like `WIN32_LEAN_AND_MEAN` or
  14339. `WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
  14340. them before every `#include` of this library.
It may be a good idea to create a dedicated header file for this purpose, e.g. "VmaUsage.h",
  14342. that will be included in other source files instead of VMA header directly.
  14343. This library is written in C++, but has C-compatible interface.
  14344. Thus, you can include and use "vk_mem_alloc.h" in C or C++ code, but full
  14345. implementation with `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
  14346. Some features of C++14 are used and required. Features of C++20 are used optionally when available.
  14347. Some headers of standard C and C++ library are used, but STL containers, RTTI, or C++ exceptions are not used.
  14348. \section quick_start_initialization Initialization
  14349. VMA offers library interface in a style similar to Vulkan, with object handles like #VmaAllocation,
  14350. structures describing parameters of objects to be created like #VmaAllocationCreateInfo,
and error codes returned from functions using the `VkResult` type.
  14352. The first and the main object that needs to be created is #VmaAllocator.
  14353. It represents the initialization of the entire library.
  14354. Only one such object should be created per `VkDevice`.
You should create it at program startup, after `VkDevice` was created, and before any device memory allocation is made.
  14356. It must be destroyed before `VkDevice` is destroyed.
  14357. At program startup:
  14358. -# Initialize Vulkan to have `VkInstance`, `VkPhysicalDevice`, `VkDevice` object.
  14359. -# Fill VmaAllocatorCreateInfo structure and call vmaCreateAllocator() to create #VmaAllocator object.
  14360. Only members `physicalDevice`, `device`, `instance` are required.
However, you should inform the library which Vulkan version you use by setting
VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
by setting VmaAllocatorCreateInfo::flags.
  14364. Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions.
  14365. See below for details.
  14366. \subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version
  14367. VMA supports Vulkan version down to 1.0, for backward compatibility.
  14368. If you want to use higher version, you need to inform the library about it.
  14369. This is a two-step process.
  14370. <b>Step 1: Compile time.</b> By default, VMA compiles with code supporting the highest
  14371. Vulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library.
  14372. If this is OK, you don't need to do anything.
  14373. However, if you want to compile VMA as if only some lower Vulkan version was available,
  14374. define macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`.
  14375. It should have decimal numeric value in form of ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version.
  14376. For example, to compile against Vulkan 1.2:
  14377. \code
  14378. #define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2
  14379. #include "vk_mem_alloc.h"
  14380. \endcode
  14381. <b>Step 2: Runtime.</b> Even when compiled with higher Vulkan version available,
  14382. VMA can use only features of a lower version, which is configurable during creation of the #VmaAllocator object.
  14383. By default, only Vulkan 1.0 is used.
  14384. To initialize the allocator with support for higher Vulkan version, you need to set member
  14385. VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`.
  14386. See code sample below.
  14387. \subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions
  14388. You may need to configure importing Vulkan functions. There are 3 ways to do this:
  14389. -# **If you link with Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
  14390. - You don't need to do anything.
  14391. - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
  14392. -# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
  14393. `vkGetDeviceProcAddr` (this is the option presented in the example below):
  14394. - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
  14395. - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
  14396. VmaVulkanFunctions::vkGetDeviceProcAddr.
  14397. - The library will fetch pointers to all other functions it needs internally.
  14398. -# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like
  14399. [Volk](https://github.com/zeux/volk):
  14400. - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
  14401. - Pass these pointers via structure #VmaVulkanFunctions.
  14402. \subsection quick_start_initialization_enabling_extensions Enabling extensions
  14403. VMA can automatically use following Vulkan extensions.
  14404. If you found them available on the selected physical device and you enabled them
  14405. while creating `VkInstance` / `VkDevice` object, inform VMA about their availability
  14406. by setting appropriate flags in VmaAllocatorCreateInfo::flags.
  14407. Vulkan extension | VMA flag
  14408. ------------------------------|-----------------------------------------------------
  14409. VK_KHR_dedicated_allocation | #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
  14410. VK_KHR_bind_memory2 | #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
  14411. VK_KHR_maintenance4 | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT
  14412. VK_KHR_maintenance5 | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT
  14413. VK_EXT_memory_budget | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
  14414. VK_KHR_buffer_device_address | #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
  14415. VK_EXT_memory_priority | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
  14416. VK_AMD_device_coherent_memory | #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
  14417. Example with fetching pointers to Vulkan functions dynamically:
  14418. \code
  14419. #define VMA_STATIC_VULKAN_FUNCTIONS 0
  14420. #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
  14421. #include "vk_mem_alloc.h"
  14422. ...
  14423. VmaVulkanFunctions vulkanFunctions = {};
  14424. vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
  14425. vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
  14426. VmaAllocatorCreateInfo allocatorCreateInfo = {};
  14427. allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
  14428. allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
  14429. allocatorCreateInfo.physicalDevice = physicalDevice;
  14430. allocatorCreateInfo.device = device;
  14431. allocatorCreateInfo.instance = instance;
  14432. allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
  14433. VmaAllocator allocator;
  14434. vmaCreateAllocator(&allocatorCreateInfo, &allocator);
  14435. // Entire program...
  14436. // At the end, don't forget to:
  14437. vmaDestroyAllocator(allocator);
  14438. \endcode
  14439. \subsection quick_start_initialization_other_config Other configuration options
  14440. There are additional configuration options available through preprocessor macros that you can define
  14441. before including VMA header and through parameters passed in #VmaAllocatorCreateInfo.
  14442. They include a possibility to use your own callbacks for host memory allocations (`VkAllocationCallbacks`),
  14443. callbacks for device memory allocations (instead of `vkAllocateMemory`, `vkFreeMemory`),
  14444. or your custom `VMA_ASSERT` macro, among others.
  14445. For more information, see: @ref configuration.
  14446. \section quick_start_resource_allocation Resource allocation
  14447. When you want to create a buffer or image:
  14448. -# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
  14449. -# Fill VmaAllocationCreateInfo structure.
  14450. -# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory
  14451. already allocated and bound to it, plus #VmaAllocation objects that represents its underlying memory.
  14452. \code
  14453. VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  14454. bufferInfo.size = 65536;
  14455. bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  14456. VmaAllocationCreateInfo allocInfo = {};
  14457. allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
  14458. VkBuffer buffer;
  14459. VmaAllocation allocation;
  14460. vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
  14461. \endcode
  14462. Don't forget to destroy your buffer and allocation objects when no longer needed:
  14463. \code
  14464. vmaDestroyBuffer(allocator, buffer, allocation);
  14465. \endcode
  14466. If you need to map the buffer, you must set flag
  14467. #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
  14468. in VmaAllocationCreateInfo::flags.
  14469. There are many additional parameters that can control the choice of memory type to be used for the allocation
  14470. and other features.
  14471. For more information, see documentation chapters: @ref choosing_memory_type, @ref memory_mapping.
  14472. \page choosing_memory_type Choosing memory type
  14473. Physical devices in Vulkan support various combinations of memory heaps and
  14474. types. Help with choosing correct and optimal memory type for your specific
  14475. resource is one of the key features of this library. You can use it by filling
  14476. appropriate members of VmaAllocationCreateInfo structure, as described below.
  14477. You can also combine multiple methods.
  14478. -# If you just want to find memory type index that meets your requirements, you
  14479. can use function: vmaFindMemoryTypeIndexForBufferInfo(),
  14480. vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().
  14481. -# If you want to allocate a region of device memory without association with any
  14482. specific image or buffer, you can use function vmaAllocateMemory(). Usage of
  14483. this function is not recommended and usually not needed.
  14484. vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
  14485. which may be useful for sparse binding.
  14486. -# If you already have a buffer or an image created, you want to allocate memory
  14487. for it and then you will bind it yourself, you can use function
  14488. vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage().
  14489. For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory()
  14490. or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2().
  14491. -# If you want to create a buffer or an image, allocate memory for it, and bind
  14492. them together, all in one call, you can use function vmaCreateBuffer(),
  14493. vmaCreateImage().
  14494. <b>This is the easiest and recommended way to use this library!</b>
  14495. When using 3. or 4., the library internally queries Vulkan for memory types
  14496. supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
  14497. and uses only one of these types.
  14498. If no memory type can be found that meets all the requirements, these functions
  14499. return `VK_ERROR_FEATURE_NOT_PRESENT`.
  14500. You can leave VmaAllocationCreateInfo structure completely filled with zeros.
  14501. It means no requirements are specified for memory type.
  14502. It is valid, although not very useful.
  14503. \section choosing_memory_type_usage Usage
  14504. The easiest way to specify memory requirements is to fill member
  14505. VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
  14506. It defines high level, common usage types.
  14507. Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select best memory type for your resource automatically.
  14508. For example, if you want to create a uniform buffer that will be filled using
  14509. transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can
  14510. do it using following code. The buffer will most likely end up in a memory type with
  14511. `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device.
  14512. \code
  14513. VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  14514. bufferInfo.size = 65536;
  14515. bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  14516. VmaAllocationCreateInfo allocInfo = {};
  14517. allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
  14518. VkBuffer buffer;
  14519. VmaAllocation allocation;
  14520. vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
  14521. \endcode
  14522. If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory
  14523. on systems with discrete graphics card that have the memories separate, you can use
  14524. #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
  14525. When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory,
  14526. you also need to specify one of the host access flags:
  14527. #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
  14528. This will help the library decide about preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
  14529. so you can map it.
  14530. For example, a staging buffer that will be filled via mapped pointer and then
  14531. used as a source of transfer to the buffer described previously can be created like this.
  14532. It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
  14533. but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
  14534. \code
  14535. VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  14536. stagingBufferInfo.size = 65536;
  14537. stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
  14538. VmaAllocationCreateInfo stagingAllocInfo = {};
  14539. stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
  14540. stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
  14541. VkBuffer stagingBuffer;
  14542. VmaAllocation stagingAllocation;
  14543. vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
  14544. \endcode
  14545. For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
  14546. See also: @ref memory_mapping.
  14547. Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
  14548. about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,
  14549. so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.
  14550. If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting
  14551. memory type, as described below.
  14552. \note
  14553. Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
  14554. `VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
  14555. are still available and work same way as in previous versions of the library
  14556. for backward compatibility, but they are deprecated.
  14557. \section choosing_memory_type_required_preferred_flags Required and preferred flags
  14558. You can specify more detailed requirements by filling members
  14559. VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
  14560. with a combination of bits from enum `VkMemoryPropertyFlags`. For example,
  14561. if you want to create a buffer that will be persistently mapped on host (so it
  14562. must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
  14563. use following code:
  14564. \code
  14565. VmaAllocationCreateInfo allocInfo = {};
  14566. allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  14567. allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
  14568. allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
  14569. VkBuffer buffer;
  14570. VmaAllocation allocation;
  14571. vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
  14572. \endcode
  14573. A memory type is chosen that has all the required flags and as many preferred
  14574. flags set as possible.
  14575. Value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
  14576. plus some extra "magic" (heuristics).
  14577. \section choosing_memory_type_explicit_memory_types Explicit memory types
  14578. If you inspected memory types available on the physical device and <b>you have
  14579. a preference for memory types that you want to use</b>, you can fill member
  14580. VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
  14581. means that a memory type with that index is allowed to be used for the
  14582. allocation. Special value 0, just like `UINT32_MAX`, means there are no
  14583. restrictions to memory type index.
  14584. Please note that this member is NOT just a memory type index.
  14585. Still you can use it to choose just one, specific memory type.
  14586. For example, if you already determined that your buffer should be created in
  14587. memory type 2, use following code:
  14588. \code
  14589. uint32_t memoryTypeIndex = 2;
  14590. VmaAllocationCreateInfo allocInfo = {};
  14591. allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
  14592. VkBuffer buffer;
  14593. VmaAllocation allocation;
  14594. vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
  14595. \endcode
  14596. You can also use this parameter to <b>exclude some memory types</b>.
  14597. If you inspect memory heaps and types available on the current physical device and
  14598. you determine that for some reason you don't want to use a specific memory type for the allocation,
  14599. you can enable automatic memory type selection but exclude certain memory type or types
  14600. by setting all bits of `memoryTypeBits` to 1 except the ones you choose.
  14601. \code
  14602. // ...
  14603. uint32_t excludedMemoryTypeIndex = 2;
  14604. VmaAllocationCreateInfo allocInfo = {};
  14605. allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
  14606. allocInfo.memoryTypeBits = ~(1u << excludedMemoryTypeIndex);
  14607. // ...
  14608. \endcode
  14609. \section choosing_memory_type_custom_memory_pools Custom memory pools
  14610. If you allocate from custom memory pool, all the ways of specifying memory
  14611. requirements described above are not applicable and the aforementioned members
  14612. of VmaAllocationCreateInfo structure are ignored. Memory type is selected
  14613. explicitly when creating the pool and then used to make all the allocations from
  14614. that pool. For further details, see \ref custom_memory_pools.
  14615. \section choosing_memory_type_dedicated_allocations Dedicated allocations
  14616. Memory for allocations is reserved out of larger block of `VkDeviceMemory`
  14617. allocated from Vulkan internally. That is the main feature of this whole library.
  14618. You can still request a separate memory block to be created for an allocation,
  14619. just like you would do in a trivial solution without using any allocator.
  14620. In that case, a buffer or image is always bound to that memory at offset 0.
  14621. This is called a "dedicated allocation".
  14622. You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
  14623. The library can also internally decide to use dedicated allocation in some cases, e.g.:
  14624. - When the size of the allocation is large.
  14625. - When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
  14626. and it reports that dedicated allocation is required or recommended for the resource.
  14627. - When allocation of next big memory block fails due to not enough device memory,
  14628. but allocation with the exact requested size succeeds.
  14629. \page memory_mapping Memory mapping
  14630. To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
  14631. to be able to read from it or write to it in CPU code.
  14632. Mapping is possible only for memory allocated from a memory type that has the
  14633. `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
  14634. Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
  14635. You can use them directly with memory allocated by this library,
  14636. but it is not recommended because of following issue:
  14637. Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
  14638. This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
  14639. It is also not thread-safe.
  14640. Because of this, Vulkan Memory Allocator provides following facilities:
  14641. \note If you want to be able to map an allocation, you need to specify one of the flags
  14642. #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
  14643. in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
  14644. when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
  14645. For other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable,
  14646. but these flags can still be used for consistency.
  14647. \section memory_mapping_copy_functions Copy functions
  14648. The easiest way to copy data from a host pointer to an allocation is to use convenience function vmaCopyMemoryToAllocation().
  14649. It automatically maps the Vulkan memory temporarily (if not already mapped), performs `memcpy`,
  14650. and calls `vkFlushMappedMemoryRanges` (if required - if memory type is not `HOST_COHERENT`).
  14651. It is also the safest one, because using `memcpy` avoids a risk of accidentally introducing memory reads
  14652. (e.g. by doing `pMappedVectors[i] += v`), which may be very slow on memory types that are not `HOST_CACHED`.
  14653. \code
  14654. struct ConstantBuffer
  14655. {
  14656. ...
  14657. };
  14658. ConstantBuffer constantBufferData = ...
  14659. VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  14660. bufCreateInfo.size = sizeof(ConstantBuffer);
  14661. bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
  14662. VmaAllocationCreateInfo allocCreateInfo = {};
  14663. allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  14664. allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
  14665. VkBuffer buf;
  14666. VmaAllocation alloc;
  14667. vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
  14668. vmaCopyMemoryToAllocation(allocator, &constantBufferData, alloc, 0, sizeof(ConstantBuffer));
  14669. \endcode
  14670. Copy in the other direction - from an allocation to a host pointer can be performed the same way using function vmaCopyAllocationToMemory().
  14671. \section memory_mapping_mapping_functions Mapping functions
  14672. The library provides following functions for mapping of a specific allocation: vmaMapMemory(), vmaUnmapMemory().
  14673. They are safer and more convenient to use than standard Vulkan functions.
  14674. You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
  14675. You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
  14676. The way it is implemented is that the library always maps entire memory block, not just region of the allocation.
  14677. For further details, see description of vmaMapMemory() function.
  14678. Example:
  14679. \code
  14680. // Having these objects initialized:
  14681. struct ConstantBuffer
  14682. {
  14683. ...
  14684. };
  14685. ConstantBuffer constantBufferData = ...
  14686. VmaAllocator allocator = ...
  14687. VkBuffer constantBuffer = ...
  14688. VmaAllocation constantBufferAllocation = ...
  14689. // You can map and fill your buffer using following code:
  14690. void* mappedData;
  14691. vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
  14692. memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
  14693. vmaUnmapMemory(allocator, constantBufferAllocation);
  14694. \endcode
  14695. When mapping, you may see a warning from Vulkan validation layer similar to this one:
  14696. <i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>
  14697. It happens because the library maps entire `VkDeviceMemory` block, where different
  14698. types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
  14699. You can safely ignore it if you are sure you access only memory of the intended
  14700. object that you wanted to map.
  14701. \section memory_mapping_persistently_mapped_memory Persistently mapped memory
  14702. Keeping your memory persistently mapped is generally OK in Vulkan.
  14703. You don't need to unmap it before using its data on the GPU.
  14704. The library provides a special feature designed for that:
  14705. Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
  14706. VmaAllocationCreateInfo::flags stay mapped all the time,
  14707. so you can just access CPU pointer to it any time
  14708. without a need to call any "map" or "unmap" function.
  14709. Example:
  14710. \code
  14711. VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  14712. bufCreateInfo.size = sizeof(ConstantBuffer);
  14713. bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
  14714. VmaAllocationCreateInfo allocCreateInfo = {};
  14715. allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  14716. allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
  14717. VMA_ALLOCATION_CREATE_MAPPED_BIT;
  14718. VkBuffer buf;
  14719. VmaAllocation alloc;
  14720. VmaAllocationInfo allocInfo;
  14721. vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
  14722. // Buffer is already mapped. You can access its memory.
  14723. memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
  14724. \endcode
  14725. \note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
  14726. in a mappable memory type.
  14727. For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
  14728. #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
  14729. #VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
  14730. For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
  14731. \section memory_mapping_cache_control Cache flush and invalidate
  14732. Memory in Vulkan doesn't need to be unmapped before using it on GPU,
  14733. but unless a memory type has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
  14734. you need to manually **invalidate** cache before reading of mapped pointer
  14735. and **flush** cache after writing to mapped pointer.
  14736. Map/unmap operations don't do that automatically.
  14737. Vulkan provides following functions for this purpose: `vkFlushMappedMemoryRanges()`,
  14738. `vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
  14739. functions that refer to given allocation object: vmaFlushAllocation(),
  14740. vmaInvalidateAllocation(),
  14741. or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
  14742. Regions of memory specified for flush/invalidate must be aligned to
  14743. `VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
  14744. In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
  14745. within blocks are aligned to this value, so their offsets are always multiples of
  14746. `nonCoherentAtomSize` and two different allocations never share same "line" of this size.
  14747. Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
  14748. currently provide `HOST_COHERENT` flag on all memory types that are
  14749. `HOST_VISIBLE`, so on PC you may not need to bother.
  14750. \page staying_within_budget Staying within budget
  14751. When developing a graphics-intensive game or program, it is important to avoid allocating
  14752. more GPU memory than it is physically available. When the memory is over-committed,
  14753. various bad things can happen, depending on the specific GPU, graphics driver, and
  14754. operating system:
  14755. - It may just work without any problems.
  14756. - The application may slow down because some memory blocks are moved to system RAM
  14757. and the GPU has to access them through PCI Express bus.
  14758. - A new allocation may take a very long time to complete, even few seconds, and possibly
  14759. freeze entire system.
  14760. - The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
  14761. - It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
  14762. returned somewhere later.
  14763. \section staying_within_budget_querying_for_budget Querying for budget
  14764. To query for current memory usage and available budget, use function vmaGetHeapBudgets().
  14765. Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
  14766. Please note that this function returns different information and works faster than
  14767. vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
  14768. allocation, while vmaCalculateStatistics() is intended to be used rarely,
  14769. only to obtain statistical information, e.g. for debugging purposes.
  14770. It is recommended to use <b>VK_EXT_memory_budget</b> device extension to obtain information
  14771. about the budget from Vulkan device. VMA is able to use this extension automatically.
  14772. When not enabled, the allocator behaves the same way, but then it estimates current usage
  14773. and available budget based on its internal information and Vulkan memory heap sizes,
  14774. which may be less precise. In order to use this extension:
  14775. 1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
  14776. required by it are available and enable them. Please note that the first is a device
  14777. extension and the second is instance extension!
  14778. 2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.
  14779. 3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from
  14780. Vulkan inside of it to avoid overhead of querying it with every allocation.
  14781. \section staying_within_budget_controlling_memory_usage Controlling memory usage
  14782. There are many ways in which you can try to stay within the budget.
  14783. First, when making new allocation requires allocating a new memory block, the library
  14784. tries not to exceed the budget automatically. If a block with default recommended size
  14785. (e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
  14786. dedicated memory for just this resource.
  14787. If the size of the requested resource plus current memory usage is more than the
  14788. budget, by default the library still tries to create it, leaving it to the Vulkan
  14789. implementation whether the allocation succeeds or fails. You can change this behavior
  14790. by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
  14791. not made if it would exceed the budget or if the budget is already exceeded.
  14792. VMA then tries to make the allocation from the next eligible Vulkan memory type.
  14793. If all of them fail, the call then fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
  14794. Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
  14795. when creating resources that are not essential for the application (e.g. the texture
  14796. of a specific object) and not to pass it when creating critically important resources
  14797. (e.g. render targets).
  14798. On AMD graphics cards there is a custom vendor extension available: <b>VK_AMD_memory_overallocation_behavior</b>
  14799. that allows controlling the behavior of the Vulkan implementation in out-of-memory cases -
  14800. whether it should fail with an error code or still allow the allocation.
  14801. Usage of this extension involves only passing extra structure on Vulkan device creation,
  14802. so it is out of scope of this library.
  14803. Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
  14804. a new allocation is created only when it fits inside one of the existing memory blocks.
  14805. If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
  14806. This also ensures that the function call is very fast because it never goes to Vulkan
  14807. to obtain a new block.
  14808. \note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
  14809. set to more than 0 will currently try to allocate memory blocks without checking whether they
  14810. fit within budget.
  14811. \page resource_aliasing Resource aliasing (overlap)
  14812. New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
  14813. management, give an opportunity to alias (overlap) multiple resources in the
  14814. same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
  14815. It can be useful to save video memory, but it must be used with caution.
  14816. For example, if you know the flow of your whole render frame in advance, you
  14817. are going to use some intermediate textures or buffers only during a small range of render passes,
  14818. and you know these ranges don't overlap in time, you can bind these resources to
  14819. the same place in memory, even if they have completely different parameters (width, height, format etc.).
  14820. ![Resource aliasing (overlap)](../gfx/Aliasing.png)
  14821. Such scenario is possible using VMA, but you need to create your images manually.
  14822. Then you need to calculate parameters of an allocation to be made using formula:
  14823. - allocation size = max(size of each image)
  14824. - allocation alignment = max(alignment of each image)
  14825. - allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
  14826. Following example shows two different images bound to the same place in memory,
  14827. allocated to fit largest of them.
  14828. \code
  14829. // A 512x512 texture to be sampled.
  14830. VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
  14831. img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
  14832. img1CreateInfo.extent.width = 512;
  14833. img1CreateInfo.extent.height = 512;
  14834. img1CreateInfo.extent.depth = 1;
  14835. img1CreateInfo.mipLevels = 10;
  14836. img1CreateInfo.arrayLayers = 1;
  14837. img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
  14838. img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
  14839. img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
  14840. img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
  14841. img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
  14842. // A full screen texture to be used as color attachment.
  14843. VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
  14844. img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
  14845. img2CreateInfo.extent.width = 1920;
  14846. img2CreateInfo.extent.height = 1080;
  14847. img2CreateInfo.extent.depth = 1;
  14848. img2CreateInfo.mipLevels = 1;
  14849. img2CreateInfo.arrayLayers = 1;
  14850. img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
  14851. img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
  14852. img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
  14853. img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
  14854. img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
  14855. VkImage img1;
  14856. res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
  14857. VkImage img2;
  14858. res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
  14859. VkMemoryRequirements img1MemReq;
  14860. vkGetImageMemoryRequirements(device, img1, &img1MemReq);
  14861. VkMemoryRequirements img2MemReq;
  14862. vkGetImageMemoryRequirements(device, img2, &img2MemReq);
  14863. VkMemoryRequirements finalMemReq = {};
  14864. finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
  14865. finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
  14866. finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
  14867. // Validate if(finalMemReq.memoryTypeBits != 0)
  14868. VmaAllocationCreateInfo allocCreateInfo = {};
  14869. allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  14870. VmaAllocation alloc;
  14871. res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
  14872. res = vmaBindImageMemory(allocator, alloc, img1);
  14873. res = vmaBindImageMemory(allocator, alloc, img2);
  14874. // You can use img1, img2 here, but not at the same time!
  14875. vmaFreeMemory(allocator, alloc);
  14876. vkDestroyImage(device, img2, nullptr);
  14877. vkDestroyImage(device, img1, nullptr);
  14878. \endcode
  14879. VMA also provides convenience functions that create a buffer or image and bind it to memory
  14880. represented by an existing #VmaAllocation:
  14881. vmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(),
  14882. vmaCreateAliasingImage(), vmaCreateAliasingImage2().
  14883. Versions with "2" offer additional parameter `allocationLocalOffset`.
  14884. Remember that using resources that alias in memory requires proper synchronization.
  14885. You need to issue a memory barrier to make sure commands that use `img1` and `img2`
  14886. don't overlap on GPU timeline.
  14887. You also need to treat a resource after aliasing as uninitialized - containing garbage data.
  14888. For example, if you use `img1` and then want to use `img2`, you need to issue
  14889. an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
  14890. Additional considerations:
  14891. - Vulkan also allows to interpret contents of memory between aliasing resources consistently in some cases.
  14892. See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag.
  14893. - You can create more complex layout where different images and buffers are bound
  14894. at different offsets inside one large allocation. For example, one can imagine
  14895. a big texture used in some render passes, aliasing with a set of many small buffers
  14896. used in some further passes. To bind a resource at non-zero offset in an allocation,
  14897. use vmaBindBufferMemory2() / vmaBindImageMemory2().
  14898. - Before allocating memory for the resources you want to alias, check `memoryTypeBits`
  14899. returned in memory requirements of each resource to make sure the bits overlap.
  14900. Some GPUs may expose multiple memory types suitable e.g. only for buffers or
  14901. images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
  14902. resources may be disjoint. Aliasing them is not possible in that case.
  14903. \page custom_memory_pools Custom memory pools
  14904. A memory pool contains a number of `VkDeviceMemory` blocks.
  14905. The library automatically creates and manages default pool for each memory type available on the device.
  14906. Default memory pool automatically grows in size.
  14907. Size of allocated blocks is also variable and managed automatically.
  14908. You are using default pools whenever you leave VmaAllocationCreateInfo::pool = null.
  14909. You can create custom pool and allocate memory out of it.
  14910. It can be useful if you want to:
  14911. - Keep certain kind of allocations separate from others.
  14912. - Enforce particular, fixed size of Vulkan memory blocks.
  14913. - Limit maximum amount of Vulkan memory allocated for that pool.
  14914. - Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool.
  14915. - Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
  14916. #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.
  14917. - Perform defragmentation on a specific subset of your allocations.
  14918. To use custom memory pools:
  14919. -# Fill VmaPoolCreateInfo structure.
  14920. -# Call vmaCreatePool() to obtain #VmaPool handle.
  14921. -# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
  14922. You don't need to specify any other parameters of this structure, like `usage`.
  14923. Example:
  14924. \code
  14925. // Find memoryTypeIndex for the pool.
  14926. VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  14927. sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
  14928. sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  14929. VmaAllocationCreateInfo sampleAllocCreateInfo = {};
  14930. sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  14931. uint32_t memTypeIndex;
  14932. VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
  14933. &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
  14934. // Check res...
  14935. // Create a pool that can have at most 2 blocks, 128 MiB each.
  14936. VmaPoolCreateInfo poolCreateInfo = {};
  14937. poolCreateInfo.memoryTypeIndex = memTypeIndex;
  14938. poolCreateInfo.blockSize = 128ull * 1024 * 1024;
  14939. poolCreateInfo.maxBlockCount = 2;
  14940. VmaPool pool;
  14941. res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
  14942. // Check res...
  14943. // Allocate a buffer out of it.
  14944. VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  14945. bufCreateInfo.size = 1024;
  14946. bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  14947. VmaAllocationCreateInfo allocCreateInfo = {};
  14948. allocCreateInfo.pool = pool;
  14949. VkBuffer buf;
  14950. VmaAllocation alloc;
  14951. res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
  14952. // Check res...
  14953. \endcode
  14954. You have to free all allocations made from this pool before destroying it.
  14955. \code
  14956. vmaDestroyBuffer(allocator, buf, alloc);
  14957. vmaDestroyPool(allocator, pool);
  14958. \endcode
  14959. New versions of this library support creating dedicated allocations in custom pools.
  14960. It is supported only when VmaPoolCreateInfo::blockSize = 0.
  14961. To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and
  14962. VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
  14963. \section custom_memory_pools_MemTypeIndex Choosing memory type index
  14964. When creating a pool, you must explicitly specify memory type index.
  14965. To find the one suitable for your buffers or images, you can use helper functions
  14966. vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
  14967. You need to provide structures with example parameters of buffers or images
  14968. that you are going to create in that pool.
  14969. \code
  14970. VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  14971. exampleBufCreateInfo.size = 1024; // Doesn't matter
  14972. exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  14973. VmaAllocationCreateInfo allocCreateInfo = {};
  14974. allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  14975. uint32_t memTypeIndex;
  14976. vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
  14977. VmaPoolCreateInfo poolCreateInfo = {};
  14978. poolCreateInfo.memoryTypeIndex = memTypeIndex;
  14979. // ...
  14980. \endcode
  14981. When creating buffers/images allocated in that pool, provide following parameters:
  14982. - `VkBufferCreateInfo`: Prefer to pass same parameters as above.
  14983. Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
  14984. Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
  14985. or the other way around.
  14986. - VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member.
  14987. Other members are ignored anyway.
  14988. \section custom_memory_pools_when_not_use When not to use custom pools
  14989. Custom pools are commonly overused by VMA users.
  14990. While it may feel natural to keep some logical groups of resources separate in memory,
  14991. in most cases it does more harm than good.
  14992. Using custom pool shouldn't be your first choice.
  14993. Instead, please make all allocations from default pools first and only use custom pools
  14994. if you can prove and measure that it is beneficial in some way,
  14995. e.g. it results in lower memory usage, better performance, etc.
  14996. Using custom pools has disadvantages:
  14997. - Each pool has its own collection of `VkDeviceMemory` blocks.
  14998. Some of them may be partially or even completely empty.
  14999. Spreading allocations across multiple pools increases the amount of wasted (allocated but unbound) memory.
  15000. - You must manually choose specific memory type to be used by a custom pool (set as VmaPoolCreateInfo::memoryTypeIndex).
  15001. When using default pools, best memory type for each of your allocations can be selected automatically
  15002. using a carefully designed algorithm that works across all kinds of GPUs.
  15003. - If an allocation from a custom pool at specific memory type fails, entire allocation operation returns failure.
  15004. When using default pools, VMA tries another compatible memory type.
  15005. - If you set VmaPoolCreateInfo::blockSize != 0, each memory block has the same size,
  15006. while default pools start from small blocks and only allocate next blocks larger and larger
  15007. up to the preferred block size.
  15008. Many of the common concerns can be addressed in a different way than using custom pools:
  15009. - If you want to keep your allocations of certain size (small versus large) or certain lifetime (transient versus long lived)
  15010. separate, you likely don't need to.
  15011. VMA uses a high quality allocation algorithm that manages memory well in various cases.
  15012. Please measure and check if using custom pools provides a benefit.
  15013. - If you want to keep your images and buffers separate, you don't need to.
  15014. VMA respects `bufferImageGranularity` limit automatically.
  15015. - If you want to keep your mapped and not mapped allocations separate, you don't need to.
  15016. VMA respects `nonCoherentAtomSize` limit automatically.
  15017. It also maps only those `VkDeviceMemory` blocks that need to map any allocation.
  15018. It even tries to keep mappable and non-mappable allocations in separate blocks to minimize the amount of mapped memory.
  15019. - If you want to choose a custom size for the default memory block, you can set it globally instead
  15020. using VmaAllocatorCreateInfo::preferredLargeHeapBlockSize.
  15021. - If you want to select specific memory type for your allocation,
  15022. you can set VmaAllocationCreateInfo::memoryTypeBits to `(1u << myMemoryTypeIndex)` instead.
  15023. - If you need to create a buffer with certain minimum alignment, you can still do it
  15024. using default pools with dedicated function vmaCreateBufferWithAlignment().
  15025. \section linear_algorithm Linear allocation algorithm
  15026. Each Vulkan memory block managed by this library has accompanying metadata that
  15027. keeps track of used and unused regions. By default, the metadata structure and
  15028. algorithm tries to find best place for new allocations among free regions to
  15029. optimize memory usage. This way you can allocate and free objects in any order.
  15030. ![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
  15031. Sometimes there is a need to use simpler, linear allocation algorithm. You can
  15032. create custom pool that uses such algorithm by adding flag
  15033. #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
  15034. #VmaPool object. Then an alternative metadata management is used. It always
  15035. creates new allocations after last one and doesn't reuse free regions after
  15036. allocations freed in the middle. It results in better allocation performance and
  15037. less memory consumed by metadata.
  15038. ![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
  15039. With this one flag, you can create a custom pool that can be used in many ways:
  15040. free-at-once, stack, double stack, and ring buffer. See below for details.
  15041. You don't need to specify explicitly which of these options you are going to use - it is detected automatically.
  15042. \subsection linear_algorithm_free_at_once Free-at-once
  15043. In a pool that uses linear algorithm, you still need to free all the allocations
  15044. individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
  15045. them in any order. New allocations are always made after last one - free space
  15046. in the middle is not reused. However, when you release all the allocation and
  15047. the pool becomes empty, allocation starts from the beginning again. This way you
  15048. can use linear algorithm to speed up creation of allocations that you are going
  15049. to release all at once.
  15050. ![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
  15051. This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
  15052. value that allows multiple memory blocks.
  15053. \subsection linear_algorithm_stack Stack
  15054. When you free an allocation that was created last, its space can be reused.
  15055. Thanks to this, if you always release allocations in the order opposite to their
  15056. creation (LIFO - Last In First Out), you can achieve behavior of a stack.
  15057. ![Stack](../gfx/Linear_allocator_4_stack.png)
  15058. This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
  15059. value that allows multiple memory blocks.
  15060. \subsection linear_algorithm_double_stack Double stack
  15061. The space reserved by a custom pool with linear algorithm may be used by two
  15062. stacks:
  15063. - First, default one, growing up from offset 0.
  15064. - Second, "upper" one, growing down from the end towards lower offsets.
  15065. To make allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
  15066. to VmaAllocationCreateInfo::flags.
  15067. ![Double stack](../gfx/Linear_allocator_7_double_stack.png)
  15068. Double stack is available only in pools with one memory block -
  15069. VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
  15070. When the two stacks' ends meet so there is not enough space between them for a
  15071. new allocation, such allocation fails with usual
  15072. `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
  15073. \subsection linear_algorithm_ring_buffer Ring buffer
  15074. When you free some allocations from the beginning and there is not enough free space
  15075. for a new one at the end of a pool, allocator's "cursor" wraps around to the
  15076. beginning and starts allocation there. Thanks to this, if you always release
  15077. allocations in the same order as you created them (FIFO - First In First Out),
  15078. you can achieve behavior of a ring buffer / queue.
  15079. ![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
  15080. Ring buffer is available only in pools with one memory block -
  15081. VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
  15082. \note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
  15083. \page defragmentation Defragmentation
  15084. Interleaved allocations and deallocations of many objects of varying size can
  15085. cause fragmentation over time, which can lead to a situation where the library is unable
  15086. to find a continuous range of free memory for a new allocation even though there is
  15087. enough free space, just scattered across many small free ranges between existing
  15088. allocations.
  15089. To mitigate this problem, you can use defragmentation feature.
  15090. It doesn't happen automatically though and needs your cooperation,
  15091. because VMA is a low level library that only allocates memory.
  15092. It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
  15093. It cannot copy their contents as it doesn't record any commands to a command buffer.
  15094. Example:
  15095. \code
  15096. VmaDefragmentationInfo defragInfo = {};
  15097. defragInfo.pool = myPool;
  15098. defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
  15099. VmaDefragmentationContext defragCtx;
  15100. VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
  15101. // Check res...
  15102. for(;;)
  15103. {
  15104. VmaDefragmentationPassMoveInfo pass;
  15105. res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
  15106. if(res == VK_SUCCESS)
  15107. break;
  15108. else if(res != VK_INCOMPLETE)
  15109. // Handle error...
  15110. for(uint32_t i = 0; i < pass.moveCount; ++i)
  15111. {
  15112. // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
  15113. VmaAllocationInfo allocInfo;
  15114. vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
  15115. MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
  15116. // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
  15117. VkImageCreateInfo imgCreateInfo = ...
  15118. VkImage newImg;
  15119. res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
  15120. // Check res...
  15121. res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
  15122. // Check res...
  15123. // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
  15124. vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
  15125. }
  15126. // Make sure the copy commands finished executing.
  15127. vkWaitForFences(...);
  15128. // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
  15129. for(uint32_t i = 0; i < pass.moveCount; ++i)
  15130. {
  15131. // ...
  15132. vkDestroyImage(device, resData->img, nullptr);
  15133. }
  15134. // Update appropriate descriptors to point to the new places...
  15135. res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
  15136. if(res == VK_SUCCESS)
  15137. break;
  15138. else if(res != VK_INCOMPLETE)
  15139. // Handle error...
  15140. }
  15141. vmaEndDefragmentation(allocator, defragCtx, nullptr);
  15142. \endcode
  15143. Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
  15144. create/destroy an allocation and a buffer/image at once, these are just a shortcut for
  15145. creating the resource, allocating memory, and binding them together.
  15146. Defragmentation works on memory allocations only. You must handle the rest manually.
  15147. Defragmentation is an iterative process that should repeat "passes" as long as related functions
  15148. return `VK_INCOMPLETE` not `VK_SUCCESS`.
  15149. In each pass:
  15150. 1. vmaBeginDefragmentationPass() function call:
  15151. - Calculates and returns the list of allocations to be moved in this pass.
  15152. Note this can be a time-consuming process.
  15153. - Reserves destination memory for them by creating temporary destination allocations
  15154. that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
  15155. 2. Inside the pass, **you should**:
  15156. - Inspect the returned list of allocations to be moved.
  15157. - Create new buffers/images and bind them at the returned destination temporary allocations.
  15158. - Copy data from source to destination resources if necessary.
  15159. - Destroy the source buffers/images, but NOT their allocations.
  15160. 3. vmaEndDefragmentationPass() function call:
  15161. - Frees the source memory reserved for the allocations that are moved.
  15162. - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
  15163. - Frees `VkDeviceMemory` blocks that became empty.
  15164. Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
  15165. Defragmentation algorithm tries to move all suitable allocations.
  15166. You can, however, refuse to move some of them inside a defragmentation pass, by setting
  15167. `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
  15168. This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
  15169. If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
  15170. Inside a pass, for each allocation that should be moved:
  15171. - You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
  15172. - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
  15173. - If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
  15174. filled, and used temporarily in each rendering frame, you can just recreate this image
  15175. without copying its data.
  15176. - If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
  15177. using `memcpy()`.
  15178. - If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
  15179. This will cancel the move.
  15180. - vmaEndDefragmentationPass() will then free the destination memory
  15181. not the source memory of the allocation, leaving it unchanged.
  15182. - If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time),
  15183. you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
  15184. - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
  15185. You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
  15186. (like in the example above) or all the default pools by setting this member to null.
  15187. Defragmentation is always performed in each pool separately.
  15188. Allocations are never moved between different Vulkan memory types.
  15189. The size of the destination memory reserved for a moved allocation is the same as the original one.
  15190. Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.
  15191. Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
  15192. You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
  15193. in each pass, e.g. to call it in sync with render frames and not to experience too big hitches.
  15194. See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
  15195. It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
  15196. usage, possibly from multiple threads, with the exception that allocations
  15197. returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
  15198. <b>Mapping</b> is preserved on allocations that are moved during defragmentation.
  15199. Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
  15200. are mapped at their new place. Of course, pointer to the mapped data changes, so it needs to be queried
  15201. using VmaAllocationInfo::pMappedData.
  15202. \note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
  15203. \page statistics Statistics
  15204. This library contains several functions that return information about its internal state,
  15205. especially the amount of memory allocated from Vulkan.
  15206. \section statistics_numeric_statistics Numeric statistics
  15207. If you need to obtain basic statistics about memory usage per heap, together with current budget,
  15208. you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.
  15209. This is useful to keep track of memory usage and stay within budget
  15210. (see also \ref staying_within_budget).
  15211. Example:
  15212. \code
  15213. uint32_t heapIndex = ...
  15214. VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
  15215. vmaGetHeapBudgets(allocator, budgets);
  15216. printf("My heap currently has %u allocations taking %llu B,\n",
  15217. budgets[heapIndex].statistics.allocationCount,
  15218. budgets[heapIndex].statistics.allocationBytes);
  15219. printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
  15220. budgets[heapIndex].statistics.blockCount,
  15221. budgets[heapIndex].statistics.blockBytes);
  15222. printf("Vulkan reports total usage %llu B with budget %llu B.\n",
  15223. budgets[heapIndex].usage,
  15224. budgets[heapIndex].budget);
  15225. \endcode
  15226. You can query for more detailed statistics per memory heap, type, and totals,
  15227. including minimum and maximum allocation size and unused range size,
  15228. by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
  15229. This function is slower though, as it has to traverse all the internal data structures,
  15230. so it should be used only for debugging purposes.
  15231. You can query for statistics of a custom pool using function vmaGetPoolStatistics()
  15232. or vmaCalculatePoolStatistics().
  15233. You can query for information about a specific allocation using function vmaGetAllocationInfo().
  15234. It fills structure #VmaAllocationInfo.
  15235. \section statistics_json_dump JSON dump
  15236. You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
  15237. The result is guaranteed to be correct JSON.
  15238. It uses ANSI encoding.
  15239. Any strings provided by user (see [Allocation names](@ref allocation_names))
  15240. are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
  15241. this JSON string can be treated as using this encoding.
  15242. It must be freed using function vmaFreeStatsString().
  15243. The format of this JSON string is not part of official documentation of the library,
  15244. but it will not change in backward-incompatible way without increasing library major version number
  15245. and appropriate mention in changelog.
  15246. The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
  15247. It can also contain detailed map of allocated memory blocks and their regions -
  15248. free and occupied by allocations.
  15249. This allows e.g. to visualize the memory or assess fragmentation.
  15250. \page allocation_annotation Allocation names and user data
  15251. \section allocation_user_data Allocation user data
  15252. You can annotate allocations with your own information, e.g. for debugging purposes.
  15253. To do that, fill VmaAllocationCreateInfo::pUserData field when creating
  15254. an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
  15255. some handle, index, key, ordinal number or any other value that would associate
  15256. the allocation with your custom metadata.
  15257. It is useful to identify appropriate data structures in your engine given #VmaAllocation,
  15258. e.g. when doing \ref defragmentation.
  15259. \code
  15260. VkBufferCreateInfo bufCreateInfo = ...
  15261. MyBufferMetadata* pMetadata = CreateBufferMetadata();
  15262. VmaAllocationCreateInfo allocCreateInfo = {};
  15263. allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  15264. allocCreateInfo.pUserData = pMetadata;
  15265. VkBuffer buffer;
  15266. VmaAllocation allocation;
  15267. vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
  15268. \endcode
  15269. The pointer may be later retrieved as VmaAllocationInfo::pUserData:
  15270. \code
  15271. VmaAllocationInfo allocInfo;
  15272. vmaGetAllocationInfo(allocator, allocation, &allocInfo);
  15273. MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
  15274. \endcode
  15275. It can also be changed using function vmaSetAllocationUserData().
  15276. Values of (non-zero) allocations' `pUserData` are printed in JSON report created by
  15277. vmaBuildStatsString() in hexadecimal form.
  15278. \section allocation_names Allocation names
  15279. An allocation can also carry a null-terminated string, giving a name to the allocation.
  15280. To set it, call vmaSetAllocationName().
  15281. The library creates internal copy of the string, so the pointer you pass doesn't need
  15282. to be valid for whole lifetime of the allocation. You can free it after the call.
  15283. \code
  15284. std::string imageName = "Texture: ";
  15285. imageName += fileName;
  15286. vmaSetAllocationName(allocator, allocation, imageName.c_str());
  15287. \endcode
  15288. The string can be later retrieved by inspecting VmaAllocationInfo::pName.
  15289. It is also printed in JSON report created by vmaBuildStatsString().
  15290. \note Setting string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it.
  15291. You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
  15292. \page virtual_allocator Virtual allocator
  15293. As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator".
  15294. It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
  15295. You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
  15296. A common use case is sub-allocation of pieces of one large GPU buffer.
  15297. \section virtual_allocator_creating_virtual_block Creating virtual block
  15298. To use this functionality, there is no main "allocator" object.
  15299. You don't need to have #VmaAllocator object created.
  15300. All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
  15301. -# Fill in #VmaVirtualBlockCreateInfo structure.
  15302. -# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object.
  15303. Example:
  15304. \code
  15305. VmaVirtualBlockCreateInfo blockCreateInfo = {};
  15306. blockCreateInfo.size = 1048576; // 1 MB
  15307. VmaVirtualBlock block;
  15308. VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
  15309. \endcode
  15310. \section virtual_allocator_making_virtual_allocations Making virtual allocations
  15311. #VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions
  15312. using the same code as the main Vulkan memory allocator.
  15313. Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type
  15314. that represents an opaque handle to an allocation within the virtual block.
  15315. In order to make such allocation:
  15316. -# Fill in #VmaVirtualAllocationCreateInfo structure.
  15317. -# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation.
  15318. You can also receive `VkDeviceSize offset` that was assigned to the allocation.
  15319. Example:
  15320. \code
  15321. VmaVirtualAllocationCreateInfo allocCreateInfo = {};
  15322. allocCreateInfo.size = 4096; // 4 KB
  15323. VmaVirtualAllocation alloc;
  15324. VkDeviceSize offset;
  15325. res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
  15326. if(res == VK_SUCCESS)
  15327. {
  15328. // Use the 4 KB of your memory starting at offset.
  15329. }
  15330. else
  15331. {
  15332. // Allocation failed - no space for it could be found. Handle this error!
  15333. }
  15334. \endcode
  15335. \section virtual_allocator_deallocation Deallocation
  15336. When no longer needed, an allocation can be freed by calling vmaVirtualFree().
  15337. You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
  15338. called for the same #VmaVirtualBlock.
  15339. When whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
  15340. All allocations must be freed before the block is destroyed, which is checked internally by an assert.
  15341. However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
  15342. a feature not available in normal Vulkan memory allocator. Example:
  15343. \code
  15344. vmaVirtualFree(block, alloc);
  15345. vmaDestroyVirtualBlock(block);
  15346. \endcode
  15347. \section virtual_allocator_allocation_parameters Allocation parameters
  15348. You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
  15349. Its default value is null.
  15350. It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
  15351. larger data structure containing more information. Example:
  15352. \code
  15353. struct CustomAllocData
  15354. {
  15355. std::string m_AllocName;
  15356. };
  15357. CustomAllocData* allocData = new CustomAllocData();
  15358. allocData->m_AllocName = "My allocation 1";
  15359. vmaSetVirtualAllocationUserData(block, alloc, allocData);
  15360. \endcode
  15361. The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
  15362. vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo.
  15363. If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
  15364. Example:
  15365. \code
  15366. VmaVirtualAllocationInfo allocInfo;
  15367. vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
  15368. delete (CustomAllocData*)allocInfo.pUserData;
  15369. vmaVirtualFree(block, alloc);
  15370. \endcode
  15371. \section virtual_allocator_alignment_and_units Alignment and units
  15372. It feels natural to express sizes and offsets in bytes.
  15373. If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill optional member
  15374. VmaVirtualAllocationCreateInfo::alignment to request it. Example:
  15375. \code
  15376. VmaVirtualAllocationCreateInfo allocCreateInfo = {};
  15377. allocCreateInfo.size = 4096; // 4 KB
  15378. allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
  15379. VmaVirtualAllocation alloc;
  15380. res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
  15381. \endcode
  15382. Alignments of different allocations made from one block may vary.
  15383. However, if all alignments and sizes are always a multiple of some size e.g. 4 B or `sizeof(MyDataStruct)`,
  15384. you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
  15385. It might be more convenient, but you need to make sure to use this new unit consistently in all the places:
  15386. - VmaVirtualBlockCreateInfo::size
  15387. - VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
  15388. - Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
  15389. \section virtual_allocator_statistics Statistics
  15390. You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
  15391. (to get brief statistics that are fast to calculate)
  15392. or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
  15393. The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator.
  15394. Example:
  15395. \code
  15396. VmaStatistics stats;
  15397. vmaGetVirtualBlockStatistics(block, &stats);
  15398. printf("My virtual block has %llu bytes used by %u virtual allocations\n",
  15399. stats.allocationBytes, stats.allocationCount);
  15400. \endcode
  15401. You can also request a full list of allocations and free regions as a string in JSON format by calling
  15402. vmaBuildVirtualBlockStatsString().
  15403. Returned string must be later freed using vmaFreeVirtualBlockStatsString().
  15404. The format of this string differs from the one returned by the main Vulkan allocator, but it is similar.
  15405. \section virtual_allocator_additional_considerations Additional considerations
  15406. The "virtual allocator" functionality is implemented on a level of individual memory blocks.
  15407. Keeping track of a whole collection of blocks, allocating new ones when out of free space,
  15408. deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.
  15409. Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory.
  15410. See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
  15411. You can find their description in chapter \ref custom_memory_pools.
  15412. Allocation strategies are also supported.
  15413. See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).
  15414. Following features are supported only by the allocator of the real GPU memory and not by virtual allocations:
  15415. buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.
  15416. \page debugging_memory_usage Debugging incorrect memory usage
  15417. If you suspect a bug with memory usage, like usage of uninitialized memory or
  15418. memory being overwritten out of bounds of an allocation,
  15419. you can use debug features of this library to verify this.
  15420. \section debugging_memory_usage_initialization Memory initialization
  15421. If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,
  15422. you can enable automatic memory initialization to verify this.
  15423. To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
  15424. \code
  15425. #define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
  15426. #include "vk_mem_alloc.h"
  15427. \endcode
  15428. It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`.
  15429. Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
  15430. Memory is automatically mapped and unmapped if necessary.
  15431. If you find these values while debugging your program, good chances are that you incorrectly
  15432. read Vulkan memory that is allocated but not initialized, or already freed, respectively.
  15433. Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
  15434. It works also with dedicated allocations.
  15435. \section debugging_memory_usage_margins Margins
  15436. By default, allocations are laid out in memory blocks next to each other if possible
  15437. (considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
  15438. ![Allocations without margin](../gfx/Margins_1.png)
  15439. Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified
  15440. number of bytes as a margin after every allocation.
  15441. \code
  15442. #define VMA_DEBUG_MARGIN 16
  15443. #include "vk_mem_alloc.h"
  15444. \endcode
  15445. ![Allocations with margin](../gfx/Margins_2.png)
  15446. If your bug goes away after enabling margins, it means it may be caused by memory
  15447. being overwritten outside of allocation boundaries. It is not 100% certain though.
  15448. Change in application behavior may also be caused by different order and distribution
  15449. of allocations across memory blocks after margins are applied.
  15450. Margins work with all types of memory.
  15451. Margin is applied only to allocations made out of memory blocks and not to dedicated
  15452. allocations, which have their own memory block of specific size.
  15453. It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
  15454. or those automatically decided to put into dedicated allocations, e.g. due to its
  15455. large size or recommended by VK_KHR_dedicated_allocation extension.
  15456. Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
  15457. Note that enabling margins increases memory usage and fragmentation.
  15458. Margins do not apply to \ref virtual_allocator.
  15459. \section debugging_memory_usage_corruption_detection Corruption detection
  15460. You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
  15461. of contents of the margins.
  15462. \code
  15463. #define VMA_DEBUG_MARGIN 16
  15464. #define VMA_DEBUG_DETECT_CORRUPTION 1
  15465. #include "vk_mem_alloc.h"
  15466. \endcode
  15467. When this feature is enabled, number of bytes specified as `VMA_DEBUG_MARGIN`
  15468. (it must be a multiple of 4) after every allocation is filled with a magic number.
  15469. This idea is also known as "canary".
  15470. Memory is automatically mapped and unmapped if necessary.
  15471. This number is validated automatically when the allocation is destroyed.
  15472. If it is not equal to the expected value, `VMA_ASSERT()` is executed.
  15473. It clearly means that either the CPU or the GPU overwrote the memory outside of boundaries of the allocation,
  15474. which indicates a serious bug.
  15475. You can also explicitly request checking margins of all allocations in all memory blocks
  15476. that belong to specified memory types by using function vmaCheckCorruption(),
  15477. or in memory blocks that belong to specified custom pool, by using function
  15478. vmaCheckPoolCorruption().
  15479. Margin validation (corruption detection) works only for memory types that are
  15480. `HOST_VISIBLE` and `HOST_COHERENT`.
  15481. \section debugging_memory_usage_leak_detection Leak detection features
  15482. At allocation and allocator destruction time VMA checks for unfreed and unmapped blocks using
  15483. `VMA_ASSERT_LEAK()`. This macro defaults to an assertion, triggering a typically fatal error in Debug
  15484. builds, and doing nothing in Release builds. You can provide your own definition of `VMA_ASSERT_LEAK()`
  15485. to change this behavior.
  15486. At memory block destruction time VMA lists out all unfreed allocations using the `VMA_LEAK_LOG_FORMAT()`
  15487. macro, which defaults to `VMA_DEBUG_LOG_FORMAT`, which in turn defaults to a no-op.
  15488. If you're having trouble with leaks - for example, the aforementioned assertion triggers, but you don't
  15489. quite know \em why -, overriding this macro to print out the leaking blocks, combined with assigning
  15490. individual names to allocations using vmaSetAllocationName(), can greatly aid in fixing them.
  15491. \page other_api_interop Interop with other graphics APIs
  15492. VMA provides some features that help with interoperability with other graphics APIs, e.g. OpenGL.
  15493. \section opengl_interop_exporting_memory Exporting memory
  15494. If you want to attach `VkExportMemoryAllocateInfoKHR` or other structure to `pNext` chain of memory allocations made by the library:
  15495. You can create \ref custom_memory_pools for such allocations.
  15496. Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
  15497. while creating the custom pool.
  15498. Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
  15499. not only while creating it, as no copy of the structure is made,
  15500. but its original pointer is used for each allocation instead.
  15501. If you want to export all memory allocated by VMA from certain memory types,
  15502. also dedicated allocations or other allocations made from default pools,
  15503. an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
  15504. It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
  15505. through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
  15506. Please note that new versions of the library also support dedicated allocations created in custom pools.
  15507. You should not mix these two methods in a way that allows to apply both to the same memory type.
  15508. Otherwise, `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
  15509. \section opengl_interop_custom_alignment Custom alignment
  15510. Buffers or images exported to a different API like OpenGL may require a different alignment,
  15511. higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
  15512. To impose such alignment:
  15513. You can create \ref custom_memory_pools for such allocations.
  15514. Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
  15515. to be made out of this pool.
  15516. The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
  15517. from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
  15518. If you want to create a buffer with a specific minimum alignment out of default pools,
  15519. use special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`.
  15520. Note the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
  15521. allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block.
  15522. You can ensure that an allocation is created as dedicated by using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
  15523. Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
  15524. \section opengl_interop_extended_allocation_information Extended allocation information
  15525. If you want to rely on VMA to allocate your buffers and images inside larger memory blocks,
  15526. but you need to know the size of the entire block and whether the allocation was made
  15527. with its own dedicated memory, use function vmaGetAllocationInfo2() to retrieve
  15528. extended allocation information in structure #VmaAllocationInfo2.
  15529. \page usage_patterns Recommended usage patterns
  15530. Vulkan gives great flexibility in memory allocation.
  15531. This chapter shows the most common patterns.
  15532. See also slides from talk:
  15533. [Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
  15534. \section usage_patterns_gpu_only GPU-only resource
  15535. <b>When:</b>
  15536. Any resources that you frequently write and read on GPU,
  15537. e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
  15538. images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
  15539. <b>What to do:</b>
  15540. Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
  15541. \code
  15542. VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
  15543. imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
  15544. imgCreateInfo.extent.width = 3840;
  15545. imgCreateInfo.extent.height = 2160;
  15546. imgCreateInfo.extent.depth = 1;
  15547. imgCreateInfo.mipLevels = 1;
  15548. imgCreateInfo.arrayLayers = 1;
  15549. imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
  15550. imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
  15551. imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
  15552. imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
  15553. imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
  15554. VmaAllocationCreateInfo allocCreateInfo = {};
  15555. allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  15556. allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
  15557. allocCreateInfo.priority = 1.0f;
  15558. VkImage img;
  15559. VmaAllocation alloc;
  15560. vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
  15561. \endcode
  15562. <b>Also consider:</b>
  15563. Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
  15564. especially if they are large or if you plan to destroy and recreate them with different sizes
  15565. e.g. when display resolution changes.
  15566. Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
  15567. When VK_EXT_memory_priority extension is enabled, it is also worth setting high priority to such allocation
  15568. to decrease chances to be evicted to system memory by the operating system.
  15569. \section usage_patterns_staging_copy_upload Staging copy for upload
  15570. <b>When:</b>
  15571. A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer
  15572. to some GPU resource.
  15573. <b>What to do:</b>
  15574. Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
  15575. Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
  15576. \code
  15577. VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  15578. bufCreateInfo.size = 65536;
  15579. bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
  15580. VmaAllocationCreateInfo allocCreateInfo = {};
  15581. allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  15582. allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
  15583. VMA_ALLOCATION_CREATE_MAPPED_BIT;
  15584. VkBuffer buf;
  15585. VmaAllocation alloc;
  15586. VmaAllocationInfo allocInfo;
  15587. vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
  15588. ...
  15589. memcpy(allocInfo.pMappedData, myData, myDataSize);
  15590. \endcode
  15591. <b>Also consider:</b>
  15592. You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
  15593. using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
  15594. \section usage_patterns_readback Readback
  15595. <b>When:</b>
  15596. Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
  15597. e.g. results of some computations.
  15598. <b>What to do:</b>
  15599. Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
  15600. Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
  15601. and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
  15602. \code
  15603. VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  15604. bufCreateInfo.size = 65536;
  15605. bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  15606. VmaAllocationCreateInfo allocCreateInfo = {};
  15607. allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  15608. allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
  15609. VMA_ALLOCATION_CREATE_MAPPED_BIT;
  15610. VkBuffer buf;
  15611. VmaAllocation alloc;
  15612. VmaAllocationInfo allocInfo;
  15613. vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
  15614. ...
  15615. const float* downloadedData = (const float*)allocInfo.pMappedData;
  15616. \endcode
  15617. \section usage_patterns_advanced_data_uploading Advanced data uploading
  15618. For resources that you frequently write on CPU via mapped pointer and
  15619. frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
  15620. -# Easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
  15621. even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
  15622. and make the device reach out to that resource directly.
  15623. - Reads performed by the device will then go through PCI Express bus.
  15624. The performance of this access may be limited, but it may be fine depending on the size
  15625. of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
  15626. of access.
  15627. -# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
  15628. a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
  15629. (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
  15630. -# Systems with a discrete graphics card and separate video memory may or may not expose
  15631. a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
  15632. If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
  15633. that is available to CPU for mapping.
  15634. - Writes performed by the host to that memory go through PCI Express bus.
  15635. The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
  15636. as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
  15637. -# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
  15638. a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
  15639. Thankfully, VMA offers an aid to create and use such resources in the way optimal
  15640. for the current Vulkan device. To help the library make the best choice,
  15641. use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
  15642. #VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
  15643. It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
  15644. but if no such memory type is available or allocation from it fails
  15645. (PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
  15646. it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
  15647. It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
  15648. so you need to create another "staging" allocation and perform explicit transfers.
  15649. \code
  15650. VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  15651. bufCreateInfo.size = 65536;
  15652. bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  15653. VmaAllocationCreateInfo allocCreateInfo = {};
  15654. allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  15655. allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
  15656. VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
  15657. VMA_ALLOCATION_CREATE_MAPPED_BIT;
  15658. VkBuffer buf;
  15659. VmaAllocation alloc;
  15660. VmaAllocationInfo allocInfo;
  15661. vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
  15662. VkMemoryPropertyFlags memPropFlags;
  15663. vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
  15664. if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
  15665. {
  15666. // Allocation ended up in a mappable memory and is already mapped - write to it directly.
  15667. // [Executed in runtime]:
  15668. memcpy(allocInfo.pMappedData, myData, myDataSize);
  15669. }
  15670. else
  15671. {
  15672. // Allocation ended up in a non-mappable memory - need to transfer.
  15673. VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  15674. stagingBufCreateInfo.size = 65536;
  15675. stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
  15676. VmaAllocationCreateInfo stagingAllocCreateInfo = {};
  15677. stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  15678. stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
  15679. VMA_ALLOCATION_CREATE_MAPPED_BIT;
  15680. VkBuffer stagingBuf;
  15681. VmaAllocation stagingAlloc;
  15682. VmaAllocationInfo stagingAllocInfo;
  15683. vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
  15684. &stagingBuf, &stagingAlloc, &stagingAllocInfo);
  15685. // [Executed in runtime]:
  15686. memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
  15687. vmaFlushAllocation(allocator, stagingAlloc, 0, VK_WHOLE_SIZE);
  15688. //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
  15689. VkBufferCopy bufCopy = {
  15690. 0, // srcOffset
  15691. 0, // dstOffset,
  15692. myDataSize }; // size
  15693. vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
  15694. }
  15695. \endcode
  15696. \section usage_patterns_other_use_cases Other use cases
  15697. Here are some other, less obvious use cases and their recommended settings:
  15698. - An image that is used only as transfer source and destination, but it should stay on the device,
  15699. as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
  15700. for temporal antialiasing or other temporal effects.
  15701. - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
  15702. - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
  15703. - An image that is used only as transfer source and destination, but it should be placed
  15704. in the system RAM despite it doesn't need to be mapped, because it serves as a "swap" copy to evict
  15705. least recently used textures from VRAM.
  15706. - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
  15707. - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
  15708. as VMA needs a hint here to differentiate from the previous case.
  15709. - A buffer that you want to map and write from the CPU, directly read from the GPU
  15710. (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
  15711. host memory due to its large size.
  15712. - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
  15713. - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
  15714. - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
  15715. \page configuration Configuration
  15716. Please check "CONFIGURATION SECTION" in the code to find macros that you can define
  15717. before each include of this file or change directly in this file to provide
  15718. your own implementation of basic facilities like assert, `min()` and `max()` functions,
  15719. mutex, atomic etc.
  15720. The library uses its own implementation of containers by default, but you can switch to using
  15721. STL containers instead.
  15722. For example, define `VMA_ASSERT(expr)` before including the library to provide
  15723. custom implementation of the assertion, compatible with your project.
  15724. By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration
  15725. and empty otherwise.
  15726. \section config_Vulkan_functions Pointers to Vulkan functions
  15727. There are multiple ways to import pointers to Vulkan functions in the library.
  15728. In the simplest case you don't need to do anything.
  15729. If the compilation or linking of your program or the initialization of the #VmaAllocator
  15730. doesn't work for you, you can try to reconfigure it.
  15731. First, the allocator tries to fetch pointers to Vulkan functions linked statically,
  15732. like this:
  15733. \code
  15734. m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
  15735. \endcode
  15736. If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
  15737. Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
  15738. You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
  15739. by using a helper library like [volk](https://github.com/zeux/volk).
  15740. Third, VMA tries to fetch remaining pointers that are still null by calling
  15741. `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
  15742. You need to only fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
  15743. Other pointers will be fetched automatically.
  15744. If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
  15745. Finally, all the function pointers required by the library (considering selected
  15746. Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null.
  15747. \section custom_memory_allocator Custom host memory allocator
  15748. If you use custom allocator for CPU memory rather than default operator `new`
  15749. and `delete` from C++, you can make this library use your allocator as well
  15750. by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
  15751. functions will be passed to Vulkan, as well as used by the library itself to
  15752. make any CPU-side allocations.
  15753. \section allocation_callbacks Device memory allocation callbacks
  15754. The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
  15755. You can setup callbacks to be informed about these calls, e.g. for the purpose
  15756. of gathering some statistics. To do it, fill optional member
  15757. VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
  15758. \section heap_memory_limit Device heap memory limit
  15759. When device memory of certain heap runs out of free space, new allocations may
  15760. fail (returning error code) or they may succeed, silently pushing some existing
  15761. memory blocks from GPU VRAM to system RAM (which degrades performance). This
  15762. behavior is implementation-dependent - it depends on GPU vendor and graphics
  15763. driver.
  15764. On AMD cards it can be controlled while creating Vulkan device object by using
  15765. VK_AMD_memory_overallocation_behavior extension, if available.
  15766. Alternatively, if you want to test how your program behaves with limited amount of Vulkan device
  15767. memory available without switching your graphics card to one that really has
  15768. smaller VRAM, you can use a feature of this library intended for this purpose.
  15769. To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
  15770. \page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
  15771. VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
  15772. performance on some GPUs. It augments Vulkan API with possibility to query
  15773. driver whether it prefers particular buffer or image to have its own, dedicated
  15774. allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
  15775. to do some internal optimizations. The extension is supported by this library.
  15776. It will be used automatically when enabled.
  15777. It has been promoted to core Vulkan 1.1, so if you use eligible Vulkan version
  15778. and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
  15779. you are all set.
  15780. Otherwise, if you want to use it as an extension:
  15781. 1 . When creating Vulkan device, check if following 2 device extensions are
  15782. supported (call `vkEnumerateDeviceExtensionProperties()`).
  15783. If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
  15784. - VK_KHR_get_memory_requirements2
  15785. - VK_KHR_dedicated_allocation
  15786. If you enabled these extensions:
  15787. 2 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
  15788. your #VmaAllocator to inform the library that you enabled required extensions
  15789. and you want the library to use them.
  15790. \code
  15791. allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
  15792. vmaCreateAllocator(&allocatorInfo, &allocator);
  15793. \endcode
  15794. That is all. The extension will be automatically used whenever you create a
  15795. buffer using vmaCreateBuffer() or image using vmaCreateImage().
  15796. When using the extension together with Vulkan Validation Layer, you will receive
  15797. warnings like this:
  15798. _vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
  15799. It is OK, you should just ignore it. It happens because you use function
  15800. `vkGetBufferMemoryRequirements2KHR()` instead of standard
  15801. `vkGetBufferMemoryRequirements()`, while the validation layer seems to be
  15802. unaware of it.
  15803. To learn more about this extension, see:
  15804. - [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
  15805. - [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
  15806. \page vk_ext_memory_priority VK_EXT_memory_priority
  15807. VK_EXT_memory_priority is a device extension that allows to pass additional "priority"
  15808. value to Vulkan memory allocations that the implementation may use to prefer certain
  15809. buffers and images that are critical for performance to stay in device-local memory
  15810. in cases when the memory is over-subscribed, while some others may be moved to the system memory.
  15811. VMA offers convenient usage of this extension.
  15812. If you enable it, you can pass "priority" parameter when creating allocations or custom pools
  15813. and the library automatically passes the value to Vulkan using this extension.
  15814. If you want to use this extension in connection with VMA, follow these steps:
  15815. \section vk_ext_memory_priority_initialization Initialization
  15816. 1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
  15817. Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
  15818. 2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
  15819. Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
  15820. Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
  15821. 3) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
  15822. to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
  15823. 4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
  15824. Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
  15825. Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
  15826. `VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
  15827. 5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
  15828. have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
  15829. to VmaAllocatorCreateInfo::flags.
  15830. \section vk_ext_memory_priority_usage Usage
  15831. When using this extension, you should initialize following member:
  15832. - VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
  15833. - VmaPoolCreateInfo::priority when creating a custom pool.
  15834. It should be a floating-point value between `0.0f` and `1.0f`, where recommended default is `0.5f`.
  15835. Memory allocated with higher value can be treated by the Vulkan implementation as higher priority
  15836. and so it can have lower chances of being pushed out to system memory, experiencing degraded performance.
  15837. It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
  15838. as dedicated and set high priority to them. For example:
  15839. \code
  15840. VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
  15841. imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
  15842. imgCreateInfo.extent.width = 3840;
  15843. imgCreateInfo.extent.height = 2160;
  15844. imgCreateInfo.extent.depth = 1;
  15845. imgCreateInfo.mipLevels = 1;
  15846. imgCreateInfo.arrayLayers = 1;
  15847. imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
  15848. imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
  15849. imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
  15850. imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
  15851. imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
  15852. VmaAllocationCreateInfo allocCreateInfo = {};
  15853. allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  15854. allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
  15855. allocCreateInfo.priority = 1.0f;
  15856. VkImage img;
  15857. VmaAllocation alloc;
  15858. vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
  15859. \endcode
  15860. `priority` member is ignored in the following situations:
  15861. - Allocations created in custom pools: They inherit the priority, along with all other allocation parameters
  15862. from the parameters passed in #VmaPoolCreateInfo when the pool was created.
  15863. - Allocations created in default pools: They inherit the priority from the parameters
  15864. VMA used when creating default pools, which means `priority == 0.5f`.
  15865. \page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
  15866. VK_AMD_device_coherent_memory is a device extension that enables access to
  15867. additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
  15868. `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flag. It is useful mostly for
  15869. allocation of buffers intended for writing "breadcrumb markers" in between passes
  15870. or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
  15871. When the extension is available but has not been enabled, Vulkan physical device
  15872. still exposes those memory types, but their usage is forbidden. VMA automatically
  15873. takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
  15874. to allocate memory of such type is made.
  15875. If you want to use this extension in connection with VMA, follow these steps:
  15876. \section vk_amd_device_coherent_memory_initialization Initialization
  15877. 1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
  15878. Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
  15879. 2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
  15880. Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
  15881. Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
  15882. 3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
  15883. to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
  15884. 4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
  15885. Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
  15886. Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
  15887. `VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
  15888. 5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
  15889. have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
  15890. to VmaAllocatorCreateInfo::flags.
  15891. \section vk_amd_device_coherent_memory_usage Usage
  15892. After following steps described above, you can create VMA allocations and custom pools
  15893. out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
  15894. devices. There are multiple ways to do it, for example:
  15895. - You can request or prefer to allocate out of such memory types by adding
  15896. `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
  15897. or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
  15898. other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
  15899. - If you manually found memory type index to use for this purpose, force allocation
  15900. from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
  15901. \section vk_amd_device_coherent_memory_more_information More information
  15902. To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
  15903. Example use of this extension can be found in the code of the sample and test suite
  15904. accompanying this library.
  15905. \page enabling_buffer_device_address Enabling buffer device address
  15906. Device extension VK_KHR_buffer_device_address
  15907. allows fetching a raw GPU pointer to a buffer and passing it for usage in shader code.
  15908. It has been promoted to core Vulkan 1.2.
  15909. If you want to use this feature in connection with VMA, follow these steps:
  15910. \section enabling_buffer_device_address_initialization Initialization
  15911. 1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
  15912. Check if the extension is supported - if returned array of `VkExtensionProperties` contains
  15913. "VK_KHR_buffer_device_address".
  15914. 2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
  15915. Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
  15916. Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
  15917. 3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add
  15918. "VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
  15919. 4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
  15920. Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
  15921. Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
  15922. `VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.
  15923. 5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
  15924. have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
  15925. to VmaAllocatorCreateInfo::flags.
  15926. \section enabling_buffer_device_address_usage Usage
  15927. After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
  15928. The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
  15929. allocated memory blocks wherever it might be needed.
  15930. Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
  15931. The second part of this functionality related to "capture and replay" is not supported,
  15932. as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage.
  15933. \section enabling_buffer_device_address_more_information More information
  15934. To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)
  15935. Example use of this extension can be found in the code of the sample and test suite
  15936. accompanying this library.
  15937. \page general_considerations General considerations
  15938. \section general_considerations_thread_safety Thread safety
  15939. - The library has no global state, so separate #VmaAllocator objects can be used
  15940. independently.
  15941. There should be no need to create multiple such objects though - one per `VkDevice` is enough.
  15942. - By default, all calls to functions that take #VmaAllocator as first parameter
  15943. are safe to call from multiple threads simultaneously because they are
  15944. synchronized internally when needed.
  15945. This includes allocation and deallocation from default memory pool, as well as custom #VmaPool.
  15946. - When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
  15947. flag, calls to functions that take such #VmaAllocator object must be
  15948. synchronized externally.
  15949. - Access to a #VmaAllocation object must be externally synchronized. For example,
  15950. you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
  15951. threads at the same time if you pass the same #VmaAllocation object to these
  15952. functions.
  15953. - #VmaVirtualBlock is not safe to be used from multiple threads simultaneously.
  15954. \section general_considerations_versioning_and_compatibility Versioning and compatibility
  15955. The library uses [**Semantic Versioning**](https://semver.org/),
  15956. which means version numbers follow the convention: Major.Minor.Patch (e.g. 2.3.0), where:
  15957. - Incremented Patch version means a release is backward- and forward-compatible,
  15958. introducing only some internal improvements, bug fixes, optimizations etc.
  15959. or changes that are out of scope of the official API described in this documentation.
  15960. - Incremented Minor version means a release is backward-compatible,
  15961. so existing code that uses the library should continue to work, while some new
  15962. symbols could have been added: new structures, functions, new values in existing
  15963. enums and bit flags, new structure members, but not new function parameters.
  15964. - Incremented Major version means a release could break some backward compatibility.
  15965. All changes between official releases are documented in file "CHANGELOG.md".
  15966. \warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
  15967. Adding new members to existing structures is treated as backward compatible if initializing
  15968. the new members to binary zero results in the old behavior.
  15969. You should always fully initialize all library structures to zeros and not rely on their
  15970. exact binary size.
  15971. \section general_considerations_validation_layer_warnings Validation layer warnings
  15972. When using this library, you can encounter the following types of warnings issued by
  15973. the Vulkan validation layer. They don't necessarily indicate a bug, so you may need
  15974. to just ignore them.
  15975. - *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
  15976. - It happens when VK_KHR_dedicated_allocation extension is enabled.
  15977. `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.
  15978. - *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
  15979. - It happens when you map a buffer or image, because the library maps entire
  15980. `VkDeviceMemory` block, where different types of images and buffers may end
  15981. up together, especially on GPUs with unified memory like Intel.
  15982. - *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
  15983. - It may happen when you use [defragmentation](@ref defragmentation).
  15984. \section general_considerations_allocation_algorithm Allocation algorithm
  15985. The library uses the following algorithm for allocation, in order:
  15986. -# Try to find free range of memory in existing blocks.
  15987. -# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
  15988. -# If failed, try to create such a block with size / 2, size / 4, size / 8.
  15989. -# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
  15990. just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
  15991. -# If failed, choose another memory type that meets the requirements specified in
  15992. VmaAllocationCreateInfo and go to point 1.
  15993. -# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
  15994. \section general_considerations_features_not_supported Features not supported
  15995. Features deliberately excluded from the scope of this library:
  15996. -# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
  15997. between CPU and GPU memory and related synchronization is responsibility of the user.
  15998. Defining some "texture" object that would automatically stream its data from a
  15999. staging copy in CPU memory to GPU memory would rather be a feature of another,
  16000. higher-level library implemented on top of VMA.
  16001. VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
  16002. -# **Recreation of buffers and images.** Although the library has functions for
  16003. buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
  16004. recreate these objects yourself after defragmentation. That is because the big
  16005. structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in
  16006. #VmaAllocation object.
  16007. -# **Handling CPU memory allocation failures.** When dynamically creating small C++
  16008. objects in CPU memory (not Vulkan memory), allocation failures are not checked
  16009. and handled gracefully, because that would complicate code significantly and
  16010. is usually not needed in desktop PC applications anyway.
  16011. Success of an allocation is just checked with an assert.
  16012. -# **Code free of any compiler warnings.** Maintaining the library to compile and
  16013. work correctly on so many different platforms is hard enough. Being free of
  16014. any warnings, on any version of any compiler, is simply not feasible.
  16015. There are many preprocessor macros that make some variables unused, function parameters unreferenced,
  16016. or conditional expressions constant in some configurations.
  16017. The code of this library should not be bigger or more complicated just to silence these warnings.
  16018. It is recommended to disable such warnings instead.
  16019. -# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but
  16020. are not going to be included into this repository.
  16021. */