loader.c 385 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
7827882798280828182828283828482858286828782888289829082918292829382948295829682978298829983008301830283038304830583068307830883098310831183128313831483158316831783188319832083218322832383248325832683278328832983308331833283338334833583368337833883398340834183428343834483458346834783488349835083518352835383548355835683578358835983608361836283638364836583668367836883698370837183728373837483758376837783788379838083818382838383848385838683878388838983908391839283938394839583968397839883998400840184028403840484058406840784088409841084118412841384148415841684178418841984208421842284238424842584268427842884298430843184328433843484358436843784388439844084418442844384448445844684478448844984508451845284538454845584568457845884598460846184628463846484658466846784688469847084718472847384748475847684778478847984808481848284838484848584868487848884898490849184928493849484958496849784988499850085018502850385048505850685078508850985108511851285138514851585168517851885198520852185228523852485258526852785288529853085318532853385348535853685378538853985408541854285438544854585468547854885498550855185528553855485558556855785588559856085618562856385648565856685678568856985708571857285738574857585768577857885798580858185828583858485858586858785888589859085918592859385948595859685978598859986008601860286038604860586068607860886098610861186128613861486158616861786188619862086218622862386248625862686278628862986308631863286338634863586368637863886398640864186428643864486458646864786488649865086518652865386548655865686578658865986608661866286638664866586668667866886698670867186728673867486758676867786788679868086818682868386848685868686878688868986908691869286938694869586968697869886998700870187028703870487058706870787088709871087118712871387148715871687178718871987208721872287238724872587268727872887298730873187328733873487358736873787388739874087418742874387448745874687478748874987508751
  1. /*
  2. *
  3. * Copyright (c) 2014-2020 The Khronos Group Inc.
  4. * Copyright (c) 2014-2020 Valve Corporation
  5. * Copyright (c) 2014-2020 LunarG, Inc.
  6. * Copyright (C) 2015 Google Inc.
  7. *
  8. * Licensed under the Apache License, Version 2.0 (the "License");
  9. * you may not use this file except in compliance with the License.
  10. * You may obtain a copy of the License at
  11. *
  12. * http://www.apache.org/licenses/LICENSE-2.0
  13. *
  14. * Unless required by applicable law or agreed to in writing, software
  15. * distributed under the License is distributed on an "AS IS" BASIS,
  16. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  17. * See the License for the specific language governing permissions and
  18. * limitations under the License.
  19. *
  20. * Author: Jon Ashburn <[email protected]>
  21. * Author: Courtney Goeltzenleuchter <[email protected]>
  22. * Author: Mark Young <[email protected]>
  23. * Author: Lenny Komow <[email protected]>
  24. *
  25. */
  26. // This needs to be defined first, or else we'll get redefinitions on NTSTATUS values
  27. #ifdef _WIN32
  28. #define UMDF_USING_NTSTATUS
  29. #include <ntstatus.h>
  30. #endif
  31. #ifndef _GNU_SOURCE
  32. #define _GNU_SOURCE
  33. #endif
  34. #include <inttypes.h>
  35. #include <stdio.h>
  36. #include <stdlib.h>
  37. #include <stdarg.h>
  38. #include <stdbool.h>
  39. #include <string.h>
  40. #include <stddef.h>
  41. #if defined(__APPLE__)
  42. #include <CoreFoundation/CoreFoundation.h>
  43. #include <sys/param.h>
  44. #endif
  45. // Time related functions
  46. #include <time.h>
  47. #include <sys/types.h>
  48. #if defined(_WIN32)
  49. #include "dirent_on_windows.h"
  50. #else // _WIN32
  51. #include <dirent.h>
  52. #endif // _WIN32
  53. #include "vk_loader_platform.h"
  54. #include "loader.h"
  55. #include "gpa_helper.h"
  56. #include "debug_utils.h"
  57. #include "wsi.h"
  58. #include "vulkan/vk_icd.h"
  59. #include "cJSON.h"
  60. #include "murmurhash.h"
  61. #if defined(_WIN32)
  62. #include <cfgmgr32.h>
  63. #include <initguid.h>
  64. #include <devpkey.h>
  65. #include <winternl.h>
  66. #include <strsafe.h>
  67. #ifdef __MINGW32__
  68. #undef strcpy // fix error with redfined strcpy when building with MinGW-w64
  69. #endif
  70. #include <dxgi1_6.h>
  71. #include "adapters.h"
  72. typedef HRESULT (APIENTRY *PFN_CreateDXGIFactory1)(REFIID riid, void **ppFactory);
  73. static PFN_CreateDXGIFactory1 fpCreateDXGIFactory1;
  74. #endif
  75. // This is a CMake generated file with #defines for any functions/includes
  76. // that it found present. This is currently necessary to properly determine
  77. // if secure_getenv or __secure_getenv are present
  78. #if !defined(VULKAN_NON_CMAKE_BUILD)
  79. #include "loader_cmake_config.h"
  80. #endif // !defined(VULKAN_NON_CMAKE_BUILD)
  81. // Generated file containing all the extension data
  82. #include "vk_loader_extensions.c"
  83. // Environment Variable information
  84. #define VK_ICD_FILENAMES_ENV_VAR "VK_ICD_FILENAMES"
  85. #define VK_LAYER_PATH_ENV_VAR "VK_LAYER_PATH"
  86. // Override layer information
  87. #define VK_OVERRIDE_LAYER_NAME "VK_LAYER_LUNARG_override"
  88. struct loader_struct loader = {0};
  89. // TLS for instance for alloc/free callbacks
  90. THREAD_LOCAL_DECL struct loader_instance *tls_instance;
  91. static size_t loader_platform_combine_path(char *dest, size_t len, ...);
  92. struct loader_phys_dev_per_icd {
  93. uint32_t count;
  94. VkPhysicalDevice *phys_devs;
  95. struct loader_icd_term *this_icd_term;
  96. };
  97. enum loader_debug {
  98. LOADER_INFO_BIT = 0x01,
  99. LOADER_WARN_BIT = 0x02,
  100. LOADER_PERF_BIT = 0x04,
  101. LOADER_ERROR_BIT = 0x08,
  102. LOADER_DEBUG_BIT = 0x10,
  103. };
  104. uint32_t g_loader_debug = 0;
  105. uint32_t g_loader_log_msgs = 0;
  106. enum loader_data_files_type {
  107. LOADER_DATA_FILE_MANIFEST_ICD = 0,
  108. LOADER_DATA_FILE_MANIFEST_LAYER,
  109. LOADER_DATA_FILE_NUM_TYPES // Not a real field, used for possible loop terminator
  110. };
  111. // thread safety lock for accessing global data structures such as "loader"
  112. // all entrypoints on the instance chain need to be locked except GPA
  113. // additionally CreateDevice and DestroyDevice needs to be locked
  114. loader_platform_thread_mutex loader_lock;
  115. loader_platform_thread_mutex loader_json_lock;
  116. loader_platform_thread_mutex loader_preload_icd_lock;
  117. // A list of ICDs that gets initialized when the loader does its global initialization. This list should never be used by anything
  118. // other than EnumerateInstanceExtensionProperties(), vkDestroyInstance, and loader_release(). This list does not change
  119. // functionality, but the fact that the libraries already been loaded causes any call that needs to load ICD libraries to speed up
  120. // significantly. This can have a huge impact when making repeated calls to vkEnumerateInstanceExtensionProperties and
  121. // vkCreateInstance.
  122. static struct loader_icd_tramp_list scanned_icds;
  123. LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);
// Allocate "size" bytes on behalf of an instance.
// Routes through the application's pfnAllocation callback when one was
// registered at vkCreateInstance time; otherwise falls back to malloc().
// When DEBUG_DISABLE_APP_ALLOCATORS is set, the callback path is compiled
// out entirely and malloc() is always used.
// Returns NULL on allocation failure.
void *loader_instance_heap_alloc(const struct loader_instance *instance, size_t size, VkSystemAllocationScope alloc_scope) {
    void *pMemory = NULL;
#if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
    {
#else
    if (instance && instance->alloc_callbacks.pfnAllocation) {
        // These are internal structures, so it's best to align everything to
        // the largest unit size which is the size of a uint64_t.
        pMemory = instance->alloc_callbacks.pfnAllocation(instance->alloc_callbacks.pUserData, size, sizeof(uint64_t), alloc_scope);
    } else {
#endif
        pMemory = malloc(size);
    }
    return pMemory;
}
// Free memory previously obtained from loader_instance_heap_alloc()/realloc().
// Uses the application's pfnFree callback when one was registered, otherwise
// free(). A NULL pointer is silently ignored.
void loader_instance_heap_free(const struct loader_instance *instance, void *pMemory) {
    if (pMemory != NULL) {
#if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
        {
#else
        if (instance && instance->alloc_callbacks.pfnFree) {
            instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMemory);
        } else {
#endif
            free(pMemory);
        }
    }
}
// Resize an allocation made with loader_instance_heap_alloc().
// Mirrors realloc() semantics: NULL pointer (or zero original size) behaves
// like alloc; a zero new size frees and returns NULL.
// "orig_size" exists because the callback path has no equivalent of
// realloc's implicit size tracking (note: the pfnReallocation path below
// does not actually use it; the allocator is expected to track sizes).
void *loader_instance_heap_realloc(const struct loader_instance *instance, void *pMemory, size_t orig_size, size_t size,
                                   VkSystemAllocationScope alloc_scope) {
    void *pNewMem = NULL;
    if (pMemory == NULL || orig_size == 0) {
        pNewMem = loader_instance_heap_alloc(instance, size, alloc_scope);
    } else if (size == 0) {
        loader_instance_heap_free(instance, pMemory);
#if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
#else
    } else if (instance && instance->alloc_callbacks.pfnReallocation) {
        // These are internal structures, so it's best to align everything to
        // the largest unit size which is the size of a uint64_t.
        pNewMem = instance->alloc_callbacks.pfnReallocation(instance->alloc_callbacks.pUserData, pMemory, size, sizeof(uint64_t),
                                                            alloc_scope);
#endif
    } else {
        pNewMem = realloc(pMemory, size);
    }
    return pNewMem;
}
  172. void *loader_instance_tls_heap_alloc(size_t size) {
  173. return loader_instance_heap_alloc(tls_instance, size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
  174. }
  175. void loader_instance_tls_heap_free(void *pMemory) { loader_instance_heap_free(tls_instance, pMemory); }
// Allocate "size" bytes on behalf of a device.
// Device-level counterpart of loader_instance_heap_alloc(): routes through
// the device's pfnAllocation callback when present, else malloc().
// When DEBUG_DISABLE_APP_ALLOCATORS is set the callback path is compiled out.
// Returns NULL on allocation failure.
void *loader_device_heap_alloc(const struct loader_device *device, size_t size, VkSystemAllocationScope alloc_scope) {
    void *pMemory = NULL;
#if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
    {
#else
    if (device && device->alloc_callbacks.pfnAllocation) {
        // These are internal structures, so it's best to align everything to
        // the largest unit size which is the size of a uint64_t.
        pMemory = device->alloc_callbacks.pfnAllocation(device->alloc_callbacks.pUserData, size, sizeof(uint64_t), alloc_scope);
    } else {
#endif
        pMemory = malloc(size);
    }
    return pMemory;
}
// Free memory previously obtained from loader_device_heap_alloc()/realloc().
// Uses the device's pfnFree callback when present, otherwise free().
// A NULL pointer is silently ignored.
void loader_device_heap_free(const struct loader_device *device, void *pMemory) {
    if (pMemory != NULL) {
#if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
        {
#else
        if (device && device->alloc_callbacks.pfnFree) {
            device->alloc_callbacks.pfnFree(device->alloc_callbacks.pUserData, pMemory);
        } else {
#endif
            free(pMemory);
        }
    }
}
// Resize an allocation made with loader_device_heap_alloc().
// Mirrors realloc() semantics: NULL pointer (or zero original size) behaves
// like alloc; a zero new size frees and returns NULL.
// "orig_size" exists for parity with the instance variant; the callback
// path does not use it directly.
void *loader_device_heap_realloc(const struct loader_device *device, void *pMemory, size_t orig_size, size_t size,
                                 VkSystemAllocationScope alloc_scope) {
    void *pNewMem = NULL;
    if (pMemory == NULL || orig_size == 0) {
        pNewMem = loader_device_heap_alloc(device, size, alloc_scope);
    } else if (size == 0) {
        loader_device_heap_free(device, pMemory);
#if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
#else
    } else if (device && device->alloc_callbacks.pfnReallocation) {
        // These are internal structures, so it's best to align everything to
        // the largest unit size which is the size of a uint64_t.
        pNewMem = device->alloc_callbacks.pfnReallocation(device->alloc_callbacks.pUserData, pMemory, size, sizeof(uint64_t),
                                                          alloc_scope);
#endif
    } else {
        pNewMem = realloc(pMemory, size);
    }
    return pNewMem;
}
  224. // Environment variables
  225. #if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__)
  226. static inline bool IsHighIntegrity() {
  227. return geteuid() != getuid() || getegid() != getgid();
  228. }
  229. static inline char *loader_getenv(const char *name, const struct loader_instance *inst) {
  230. // No allocation of memory necessary for Linux, but we should at least touch
  231. // the inst pointer to get rid of compiler warnings.
  232. (void)inst;
  233. return getenv(name);
  234. }
// getenv() variant that refuses to read the environment when the process is
// running with elevated privileges, so a setuid/setgid process cannot be
// steered by attacker-controlled variables.
static inline char *loader_secure_getenv(const char *name, const struct loader_instance *inst) {
    char *out;
#if defined(__APPLE__)
    // Apple does not appear to have a secure getenv implementation.
    // The main difference between secure getenv and getenv is that secure getenv
    // returns NULL if the process is being run with elevated privileges by a normal user.
    // The idea is to prevent the reading of malicious environment variables by a process
    // that can do damage.
    // This algorithm is derived from glibc code that sets an internal
    // variable (__libc_enable_secure) if the process is running under setuid or setgid.
    return IsHighIntegrity() ? NULL : loader_getenv(name, inst);
#elif defined(__Fuchsia__)
    // Fuchsia: no secure variant is used here; plain lookup.
    return loader_getenv(name, inst);
#else
    // Linux
#if defined(HAVE_SECURE_GETENV) && !defined(USE_UNSAFE_FILE_SEARCH)
    // CMake detected secure_getenv; prefer it.
    (void)inst;
    out = secure_getenv(name);
#elif defined(HAVE___SECURE_GETENV) && !defined(USE_UNSAFE_FILE_SEARCH)
    // Older glibc spelling of the same function.
    (void)inst;
    out = __secure_getenv(name);
#else
    // No secure variant available: fall back to plain getenv and, unless the
    // build explicitly opted into unsafe search, log that the lookup is
    // non-secure.
    out = loader_getenv(name, inst);
#if !defined(USE_UNSAFE_FILE_SEARCH)
    loader_log(inst, LOADER_INFO_BIT, 0, "Loader is using non-secure environment variable lookup for %s", name);
#endif
#endif
    return out;
#endif
}
  265. static inline void loader_free_getenv(char *val, const struct loader_instance *inst) {
  266. // No freeing of memory necessary for Linux, but we should at least touch
  267. // the val and inst pointers to get rid of compiler warnings.
  268. (void)val;
  269. (void)inst;
  270. }
  271. #elif defined(WIN32)
// Windows: true when the current process runs above "medium" integrity
// (i.e. elevated). Reads the process token's TokenIntegrityLevel and
// compares the SID's last sub-authority against SECURITY_MANDATORY_MEDIUM_RID.
// Any API failure is conservatively treated as "not elevated" (returns false).
static inline bool IsHighIntegrity() {
    HANDLE process_token;
    if (OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY | TOKEN_QUERY_SOURCE, &process_token)) {
        // Maximum possible size of SID_AND_ATTRIBUTES is maximum size of a SID + size of attributes DWORD.
        uint8_t mandatory_label_buffer[SECURITY_MAX_SID_SIZE + sizeof(DWORD)];
        DWORD buffer_size;
        if (GetTokenInformation(process_token, TokenIntegrityLevel, mandatory_label_buffer, sizeof(mandatory_label_buffer),
                                &buffer_size) != 0) {
            const TOKEN_MANDATORY_LABEL *mandatory_label = (const TOKEN_MANDATORY_LABEL *)mandatory_label_buffer;
            // The integrity level is the last sub-authority of the label SID.
            const DWORD sub_authority_count = *GetSidSubAuthorityCount(mandatory_label->Label.Sid);
            const DWORD integrity_level = *GetSidSubAuthority(mandatory_label->Label.Sid, sub_authority_count - 1);
            CloseHandle(process_token);
            return integrity_level > SECURITY_MANDATORY_MEDIUM_RID;
        }
        CloseHandle(process_token);
    }
    return false;
}
  290. static inline char *loader_getenv(const char *name, const struct loader_instance *inst) {
  291. char *retVal;
  292. DWORD valSize;
  293. valSize = GetEnvironmentVariableA(name, NULL, 0);
  294. // valSize DOES include the null terminator, so for any set variable
  295. // will always be at least 1. If it's 0, the variable wasn't set.
  296. if (valSize == 0) return NULL;
  297. // Allocate the space necessary for the registry entry
  298. if (NULL != inst && NULL != inst->alloc_callbacks.pfnAllocation) {
  299. retVal = (char *)inst->alloc_callbacks.pfnAllocation(inst->alloc_callbacks.pUserData, valSize, sizeof(char *),
  300. VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
  301. } else {
  302. retVal = (char *)malloc(valSize);
  303. }
  304. if (NULL != retVal) {
  305. GetEnvironmentVariableA(name, retVal, valSize);
  306. }
  307. return retVal;
  308. }
  309. static inline char *loader_secure_getenv(const char *name, const struct loader_instance *inst) {
  310. #if !defined(USE_UNSAFE_FILE_SEARCH)
  311. if (IsHighIntegrity()) {
  312. loader_log(inst, LOADER_INFO_BIT, 0, "Loader is running with elevated permissions. Environment variable %s will be ignored",
  313. name);
  314. return NULL;
  315. }
  316. #endif
  317. return loader_getenv(name, inst);
  318. }
  319. static inline void loader_free_getenv(char *val, const struct loader_instance *inst) {
  320. if (NULL != inst && NULL != inst->alloc_callbacks.pfnFree) {
  321. inst->alloc_callbacks.pfnFree(inst->alloc_callbacks.pUserData, val);
  322. } else {
  323. free((void *)val);
  324. }
  325. }
  326. #else
  327. static inline char *loader_getenv(const char *name, const struct loader_instance *inst) {
  328. // stub func
  329. (void)inst;
  330. (void)name;
  331. return NULL;
  332. }
  333. static inline void loader_free_getenv(char *val, const struct loader_instance *inst) {
  334. // stub func
  335. (void)val;
  336. (void)inst;
  337. }
  338. #endif
// Central loader logging routine.
// The formatted message is (1) forwarded to the instance's debug-utils
// messengers when "inst" is non-NULL, and (2) written to stderr (plus the
// debugger output window on Windows) when the message class is enabled in
// g_loader_log_msgs. The stderr line is prefixed with the matching severity
// labels, e.g. "ERROR | DEBUG: ...". "msg_code" is accepted but unused here.
void loader_log(const struct loader_instance *inst, VkFlags msg_type, int32_t msg_code, const char *format, ...) {
    char msg[512];
    char cmd_line_msg[512];
    size_t cmd_line_size = sizeof(cmd_line_msg);
    va_list ap;
    int ret;
    // Format the caller's message. On truncation or encoding error just make
    // sure the buffer is NUL-terminated and carry on with what we have.
    va_start(ap, format);
    ret = vsnprintf(msg, sizeof(msg), format, ap);
    if ((ret >= (int)sizeof(msg)) || ret < 0) {
        msg[sizeof(msg) - 1] = '\0';
    }
    va_end(ap);
    if (inst) {
        // Translate the loader bits into debug-utils severity/type and submit
        // to any registered messengers. First matching severity bit wins.
        VkDebugUtilsMessageSeverityFlagBitsEXT severity = 0;
        VkDebugUtilsMessageTypeFlagsEXT type;
        VkDebugUtilsMessengerCallbackDataEXT callback_data;
        VkDebugUtilsObjectNameInfoEXT object_name;
        if ((msg_type & LOADER_INFO_BIT) != 0) {
            severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT;
        } else if ((msg_type & LOADER_WARN_BIT) != 0) {
            severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
        } else if ((msg_type & LOADER_ERROR_BIT) != 0) {
            severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
        } else if ((msg_type & LOADER_DEBUG_BIT) != 0) {
            severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT;
        }
        if ((msg_type & LOADER_PERF_BIT) != 0) {
            type = VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
        } else {
            type = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT;
        }
        // The loader instance itself is reported as the single object
        // associated with the message.
        callback_data.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT;
        callback_data.pNext = NULL;
        callback_data.flags = 0;
        callback_data.pMessageIdName = "Loader Message";
        callback_data.messageIdNumber = 0;
        callback_data.pMessage = msg;
        callback_data.queueLabelCount = 0;
        callback_data.pQueueLabels = NULL;
        callback_data.cmdBufLabelCount = 0;
        callback_data.pCmdBufLabels = NULL;
        callback_data.objectCount = 1;
        callback_data.pObjects = &object_name;
        object_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
        object_name.pNext = NULL;
        object_name.objectType = VK_OBJECT_TYPE_INSTANCE;
        object_name.objectHandle = (uint64_t)(uintptr_t)inst;
        object_name.pObjectName = NULL;
        util_SubmitDebugUtilsMessageEXT(inst, severity, type, &callback_data);
    }
    // Skip the stderr path when this message class is not enabled.
    if (!(msg_type & g_loader_log_msgs)) {
        return;
    }
    // Build the "LABEL | LABEL: message" prefix. "cmd_line_size" tracks the
    // space believed to remain in cmd_line_msg and is decremented by the
    // length of each label appended; comparing against "original_size" tells
    // whether any label has been written yet (to decide on the " | " joiner).
    // NOTE(review): this running-size arithmetic assumes every strncat
    // appends its full label; if truncation ever occurred the counter and
    // the real remaining space would diverge -- worth auditing.
    cmd_line_msg[0] = '\0';
    cmd_line_size -= 1;
    size_t original_size = cmd_line_size;
    if ((msg_type & LOADER_INFO_BIT) != 0) {
        strncat(cmd_line_msg, "INFO", cmd_line_size);
        cmd_line_size -= 4;
    }
    if ((msg_type & LOADER_WARN_BIT) != 0) {
        if (cmd_line_size != original_size) {
            strncat(cmd_line_msg, " | ", cmd_line_size);
            cmd_line_size -= 3;
        }
        strncat(cmd_line_msg, "WARNING", cmd_line_size);
        cmd_line_size -= 7;
    }
    if ((msg_type & LOADER_PERF_BIT) != 0) {
        if (cmd_line_size != original_size) {
            strncat(cmd_line_msg, " | ", cmd_line_size);
            cmd_line_size -= 3;
        }
        strncat(cmd_line_msg, "PERF", cmd_line_size);
        cmd_line_size -= 4;
    }
    if ((msg_type & LOADER_ERROR_BIT) != 0) {
        if (cmd_line_size != original_size) {
            strncat(cmd_line_msg, " | ", cmd_line_size);
            cmd_line_size -= 3;
        }
        strncat(cmd_line_msg, "ERROR", cmd_line_size);
        cmd_line_size -= 5;
    }
    if ((msg_type & LOADER_DEBUG_BIT) != 0) {
        if (cmd_line_size != original_size) {
            strncat(cmd_line_msg, " | ", cmd_line_size);
            cmd_line_size -= 3;
        }
        strncat(cmd_line_msg, "DEBUG", cmd_line_size);
        cmd_line_size -= 5;
    }
    // Separate the label block from the message body.
    if (cmd_line_size != original_size) {
        strncat(cmd_line_msg, ": ", cmd_line_size);
        cmd_line_size -= 2;
    }
    if (0 < cmd_line_size) {
        // If the message is too long, trim it down
        if (strlen(msg) > cmd_line_size) {
            msg[cmd_line_size - 1] = '\0';
        }
        strncat(cmd_line_msg, msg, cmd_line_size);
    } else {
        // Shouldn't get here, but check to make sure if we've already overrun
        // the string boundary
        assert(false);
    }
#if defined(WIN32)
    OutputDebugString(cmd_line_msg);
    OutputDebugString("\n");
#endif
    fputs(cmd_line_msg, stderr);
    fputc('\n', stderr);
}
  453. VKAPI_ATTR VkResult VKAPI_CALL vkSetInstanceDispatch(VkInstance instance, void *object) {
  454. struct loader_instance *inst = loader_get_instance(instance);
  455. if (!inst) {
  456. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  457. "vkSetInstanceDispatch: Can not retrieve Instance "
  458. "dispatch table.");
  459. return VK_ERROR_INITIALIZATION_FAILED;
  460. }
  461. loader_set_dispatch(object, inst->disp);
  462. return VK_SUCCESS;
  463. }
  464. VKAPI_ATTR VkResult VKAPI_CALL vkSetDeviceDispatch(VkDevice device, void *object) {
  465. struct loader_device *dev;
  466. struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
  467. if (NULL == icd_term) {
  468. return VK_ERROR_INITIALIZATION_FAILED;
  469. }
  470. loader_set_dispatch(object, &dev->loader_dispatch);
  471. return VK_SUCCESS;
  472. }
  473. #if defined(_WIN32)
  474. // Append the JSON path data to the list and allocate/grow the list if it's not large enough.
  475. // Function returns true if filename was appended to reg_data list.
  476. // Caller should free reg_data.
  477. static bool loaderAddJsonEntry(const struct loader_instance *inst,
  478. char **reg_data, // list of JSON files
  479. PDWORD total_size, // size of reg_data
  480. LPCSTR key_name, // key name - used for debug prints - i.e. VulkanDriverName
  481. DWORD key_type, // key data type
  482. LPSTR json_path, // JSON string to add to the list reg_data
  483. DWORD json_size, // size in bytes of json_path
  484. VkResult *result) {
  485. // Check for and ignore duplicates.
  486. if (*reg_data && strstr(*reg_data, json_path)) {
  487. // Success. The json_path is already in the list.
  488. return true;
  489. }
  490. if (NULL == *reg_data) {
  491. *reg_data = loader_instance_heap_alloc(inst, *total_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  492. if (NULL == *reg_data) {
  493. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  494. "loaderAddJsonEntry: Failed to allocate space for registry data for key %s", json_path);
  495. *result = VK_ERROR_OUT_OF_HOST_MEMORY;
  496. return false;
  497. }
  498. *reg_data[0] = '\0';
  499. } else if (strlen(*reg_data) + json_size + 1 > *total_size) {
  500. void *new_ptr =
  501. loader_instance_heap_realloc(inst, *reg_data, *total_size, *total_size * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  502. if (NULL == new_ptr) {
  503. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  504. "loaderAddJsonEntry: Failed to reallocate space for registry value of size %d for key %s", *total_size * 2,
  505. json_path);
  506. *result = VK_ERROR_OUT_OF_HOST_MEMORY;
  507. return false;
  508. }
  509. *reg_data = new_ptr;
  510. *total_size *= 2;
  511. }
  512. for (char *curr_filename = json_path; curr_filename[0] != '\0'; curr_filename += strlen(curr_filename) + 1) {
  513. if (strlen(*reg_data) == 0) {
  514. (void)snprintf(*reg_data, json_size + 1, "%s", curr_filename);
  515. } else {
  516. (void)snprintf(*reg_data + strlen(*reg_data), json_size + 2, "%c%s", PATH_SEPARATOR, curr_filename);
  517. }
  518. loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "%s: Located json file \"%s\" from PnP registry: %s", __FUNCTION__,
  519. curr_filename, key_name);
  520. if (key_type == REG_SZ) {
  521. break;
  522. }
  523. }
  524. return true;
  525. }
  526. // Find the list of registry files (names VulkanDriverName/VulkanDriverNameWow) in hkr.
  527. //
  528. // This function looks for filename in given device handle, filename is then added to return list
  529. // function return true if filename was appended to reg_data list
  530. // If error occurs result is updated with failure reason
  531. bool loaderGetDeviceRegistryEntry(const struct loader_instance *inst, char **reg_data, PDWORD total_size, DEVINST dev_id,
  532. LPCSTR value_name, VkResult *result) {
  533. HKEY hkrKey = INVALID_HANDLE_VALUE;
  534. DWORD requiredSize, data_type;
  535. char *manifest_path = NULL;
  536. bool found = false;
  537. if (NULL == total_size || NULL == reg_data) {
  538. *result = VK_ERROR_INITIALIZATION_FAILED;
  539. return false;
  540. }
  541. CONFIGRET status = CM_Open_DevNode_Key(dev_id, KEY_QUERY_VALUE, 0, RegDisposition_OpenExisting, &hkrKey, CM_REGISTRY_SOFTWARE);
  542. if (status != CR_SUCCESS) {
  543. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  544. "loaderGetDeviceRegistryEntry: Failed to open registry key for DeviceID(%d)", dev_id);
  545. *result = VK_ERROR_INITIALIZATION_FAILED;
  546. return false;
  547. }
  548. // query value
  549. LSTATUS ret = RegQueryValueEx(
  550. hkrKey,
  551. value_name,
  552. NULL,
  553. NULL,
  554. NULL,
  555. &requiredSize);
  556. if (ret != ERROR_SUCCESS) {
  557. if (ret == ERROR_FILE_NOT_FOUND) {
  558. loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  559. "loaderGetDeviceRegistryEntry: Device ID(%d) Does not contain a value for \"%s\"", dev_id, value_name);
  560. } else {
  561. loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  562. "loaderGetDeviceRegistryEntry: DeviceID(%d) Failed to obtain %s size", dev_id, value_name);
  563. }
  564. goto out;
  565. }
  566. manifest_path = loader_instance_heap_alloc(inst, requiredSize, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  567. if (manifest_path == NULL) {
  568. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  569. "loaderGetDeviceRegistryEntry: Failed to allocate space for DriverName.");
  570. *result = VK_ERROR_OUT_OF_HOST_MEMORY;
  571. goto out;
  572. }
  573. ret = RegQueryValueEx(
  574. hkrKey,
  575. value_name,
  576. NULL,
  577. &data_type,
  578. (BYTE *)manifest_path,
  579. &requiredSize
  580. );
  581. if (ret != ERROR_SUCCESS) {
  582. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  583. "loaderGetDeviceRegistryEntry: DeviceID(%d) Failed to obtain %s", value_name);
  584. *result = VK_ERROR_INITIALIZATION_FAILED;
  585. goto out;
  586. }
  587. if (data_type != REG_SZ && data_type != REG_MULTI_SZ) {
  588. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  589. "loaderGetDeviceRegistryEntry: Invalid %s data type. Expected REG_SZ or REG_MULTI_SZ.", value_name);
  590. *result = VK_ERROR_INITIALIZATION_FAILED;
  591. goto out;
  592. }
  593. found = loaderAddJsonEntry(inst, reg_data, total_size, value_name, data_type, manifest_path, requiredSize, result);
  594. out:
  595. if (manifest_path != NULL) {
  596. loader_instance_heap_free(inst, manifest_path);
  597. }
  598. RegCloseKey(hkrKey);
  599. return found;
  600. }
// Find the list of registry files (names VulkanDriverName/VulkanDriverNameWow) in hkr .
//
// This function looks for display devices and childish software components
// for a list of files which are added to a returned list (function return
// value).
// Function return is a string with a ';' separated list of filenames.
// Function return is NULL if no valid name/value pairs are found in the key,
// or the key is not found.
//
// *reg_data contains a string list of filenames as pointer.
// When done using the returned string list, the caller should free the pointer.
VkResult loaderGetDeviceRegistryFiles(const struct loader_instance *inst, char **reg_data, PDWORD reg_data_size,
                                      LPCSTR value_name) {
    // PnP class GUIDs: software components that may hang off a display
    // adapter, and the display-adapter class itself.
    static const wchar_t *softwareComponentGUID = L"{5c4c3332-344d-483c-8739-259e934c9cc8}";
    static const wchar_t *displayGUID = L"{4d36e968-e325-11ce-bfc1-08002be10318}";
#ifdef CM_GETIDLIST_FILTER_PRESENT
    const ULONG flags = CM_GETIDLIST_FILTER_CLASS | CM_GETIDLIST_FILTER_PRESENT;
#else
    // Older SDKs lack the named constants; 0x300 is the same bit combination.
    const ULONG flags = 0x300;
#endif
    wchar_t childGuid[MAX_GUID_STRING_LEN + 2];  // +2 for brackets {}
    ULONG childGuidSize = sizeof(childGuid);
    // NOTE(review): CM_Get_DevNode_Registry_PropertyW rewrites childGuidSize
    // with the actual property length, and it is never reset before the next
    // query -- later iterations pass the shrunken size. Worth confirming.
    DEVINST devID = 0, childID = 0;
    wchar_t *pDeviceNames = NULL;
    ULONG deviceNamesSize = 0;
    VkResult result = VK_SUCCESS;
    bool found = false;
    if (NULL == reg_data) {
        result = VK_ERROR_INITIALIZATION_FAILED;
        return result;
    }
    // if after obtaining the DeviceNameSize, new device is added start over
    do {
        // NOTE(review): the return of CM_Get_Device_ID_List_SizeW is not
        // checked; on failure deviceNamesSize may be stale/zero.
        CM_Get_Device_ID_List_SizeW(&deviceNamesSize, displayGUID, flags);
        if (pDeviceNames != NULL) {
            loader_instance_heap_free(inst, pDeviceNames);
        }
        pDeviceNames = loader_instance_heap_alloc(inst, deviceNamesSize * sizeof(wchar_t), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (pDeviceNames == NULL) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loaderGetDeviceRegistryFiles: Failed to allocate space for display device names.");
            result = VK_ERROR_OUT_OF_HOST_MEMORY;
            return result;
        }
    } while (CM_Get_Device_ID_ListW(displayGUID, pDeviceNames, deviceNamesSize, flags) == CR_BUFFER_SMALL);
    if (pDeviceNames) {
        // The list is a series of NUL-terminated device-instance IDs ending
        // with a double NUL; walk each one.
        for (wchar_t *deviceName = pDeviceNames; *deviceName; deviceName += wcslen(deviceName) + 1) {
            CONFIGRET status = CM_Locate_DevNodeW(&devID, deviceName, CM_LOCATE_DEVNODE_NORMAL);
            if (CR_SUCCESS != status) {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loaderGetDeviceRegistryFiles: failed to open DevNode %ls",
                           deviceName);
                continue;
            }
            // Skip devices that are present but pending a reboot.
            ULONG ulStatus, ulProblem;
            status = CM_Get_DevNode_Status(&ulStatus, &ulProblem, devID, 0);
            if (CR_SUCCESS != status)
            {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loaderGetDeviceRegistryFiles: failed to probe device status %ls",
                           deviceName);
                continue;
            }
            if ((ulStatus & DN_HAS_PROBLEM) && (ulProblem == CM_PROB_NEED_RESTART || ulProblem == DN_NEED_RESTART)) {
                loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
                           "loaderGetDeviceRegistryFiles: device %ls is pending reboot, skipping ...", deviceName);
                continue;
            }
            loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "loaderGetDeviceRegistryFiles: opening device %ls", deviceName);
            // Try the display device itself first.
            if (loaderGetDeviceRegistryEntry(inst, reg_data, reg_data_size, devID, value_name, &result)) {
                found = true;
                continue;
            }
            else if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
                break;
            }
            // No entry on the adapter: look through its child software
            // components (e.g. driver-store components carrying the manifest).
            status = CM_Get_Child(&childID, devID, 0);
            if (status != CR_SUCCESS) {
                loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
                           "loaderGetDeviceRegistryFiles: unable to open child-device error:%d", status);
                continue;
            }
            do {
                wchar_t buffer[MAX_DEVICE_ID_LEN];
                CM_Get_Device_IDW(childID, buffer, MAX_DEVICE_ID_LEN, 0);
                loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
                           "loaderGetDeviceRegistryFiles: Opening child device %d - %ls", childID, buffer);
                // Only children of the software-component class are relevant.
                status = CM_Get_DevNode_Registry_PropertyW(childID, CM_DRP_CLASSGUID, NULL, &childGuid, &childGuidSize, 0);
                if (status != CR_SUCCESS) {
                    loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                               "loaderGetDeviceRegistryFiles: unable to obtain GUID for:%d error:%d", childID, status);
                    result = VK_ERROR_INITIALIZATION_FAILED;
                    continue;
                }
                if (wcscmp(childGuid, softwareComponentGUID) != 0) {
                    loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
                               "loaderGetDeviceRegistryFiles: GUID for %d is not SoftwareComponent skipping", childID);
                    continue;
                }
                if (loaderGetDeviceRegistryEntry(inst, reg_data, reg_data_size, childID, value_name, &result)) {
                    found = true;
                    break;  // check next-display-device
                }
            } while (CM_Get_Sibling(&childID, childID, 0) == CR_SUCCESS);
        }
        loader_instance_heap_free(inst, pDeviceNames);
    }
    // Finding nothing (without running out of memory) is reported as an
    // initialization failure so callers can fall back to other search paths.
    if (!found && result != VK_ERROR_OUT_OF_HOST_MEMORY) {
        result = VK_ERROR_INITIALIZATION_FAILED;
    }
    return result;
}
  711. static char *loader_get_next_path(char *path);
  712. // Find the list of registry files (names within a key) in key "location".
  713. //
  714. // This function looks in the registry (hive = DEFAULT_VK_REGISTRY_HIVE) key as
  715. // given in "location"
// for a list of name/value pairs which are added to a returned list (function return
  717. // value).
  718. // The DWORD values within the key must be 0 or they are skipped.
  719. // Function return is a string with a ';' separated list of filenames.
  720. // Function return is NULL if no valid name/value pairs are found in the key,
  721. // or the key is not found.
  722. //
  723. // *reg_data contains a string list of filenames as pointer.
  724. // When done using the returned string list, the caller should free the pointer.
VkResult loaderGetRegistryFiles(const struct loader_instance *inst, char *location, bool use_secondary_hive, char **reg_data,
                                PDWORD reg_data_size) {
    // This list contains all of the allowed ICDs. This allows us to verify that a device is actually present from the vendor
    // specified. This does disallow other vendors, but any new driver should use the device-specific registries anyway.
    static const struct {
        const char *filename;
        int vendor_id;
    } known_drivers[] = {
#if defined(_WIN64)
        {
            .filename = "igvk64.json",
            .vendor_id = 0x8086,
        },
        {
            .filename = "nv-vk64.json",
            .vendor_id = 0x10de,
        },
        {
            .filename = "amd-vulkan64.json",
            .vendor_id = 0x1002,
        },
        {
            .filename = "amdvlk64.json",
            .vendor_id = 0x1002,
        },
#else
        {
            .filename = "igvk32.json",
            .vendor_id = 0x8086,
        },
        {
            .filename = "nv-vk32.json",
            .vendor_id = 0x10de,
        },
        {
            .filename = "amd-vulkan32.json",
            .vendor_id = 0x1002,
        },
        {
            .filename = "amdvlk32.json",
            .vendor_id = 0x1002,
        },
#endif
    };
    LONG rtn_value;
    HKEY hive = DEFAULT_VK_REGISTRY_HIVE, key;
    DWORD access_flags;
    char name[2048];
    char *loc = location;
    char *next;
    DWORD name_size = sizeof(name);
    DWORD value;
    DWORD value_size = sizeof(value);
    VkResult result = VK_SUCCESS;
    bool found = false;
    IDXGIFactory1 *dxgi_factory = NULL;
    // Only the legacy ICD (driver) registry location gets the extra DXGI adapter check below.
    bool is_driver = !strcmp(location, VK_DRIVERS_INFO_REGISTRY_LOC);
    if (NULL == reg_data) {
        result = VK_ERROR_INITIALIZATION_FAILED;
        goto out;
    }
    if (is_driver) {
        // A DXGI factory is needed to verify that an adapter whose vendor ID matches a
        // known driver's vendor ID is actually present on this system.
        HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory1, (void **)&dxgi_factory);
        if (hres != S_OK) {
            loader_log(
                inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                "loaderGetRegistryFiles: Failed to create dxgi factory for ICD registry verification. No ICDs will be added from "
                "legacy registry locations");
            goto out;
        }
    }
    // "location" is a NUL-separated list of registry sub-keys; walk each one,
    // optionally repeating the walk against the secondary hive.
    while (*loc) {
        next = loader_get_next_path(loc);
        access_flags = KEY_QUERY_VALUE;
        rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key);
        if (ERROR_SUCCESS == rtn_value) {
            // Enumerate every value under the key; only values whose DWORD data is 0 are accepted.
            for (DWORD idx = 0;
                 (rtn_value = RegEnumValue(key, idx++, name, &name_size, NULL, NULL, (LPBYTE)&value, &value_size)) == ERROR_SUCCESS;
                 name_size = sizeof(name), value_size = sizeof(value)) {
                if (value_size == sizeof(value) && value == 0) {
                    if (NULL == *reg_data) {
                        // First entry: allocate the output buffer lazily.
                        *reg_data = loader_instance_heap_alloc(inst, *reg_data_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
                        if (NULL == *reg_data) {
                            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                                       "loaderGetRegistryFiles: Failed to allocate space for registry data for key %s", name);
                            RegCloseKey(key);
                            result = VK_ERROR_OUT_OF_HOST_MEMORY;
                            goto out;
                        }
                        *reg_data[0] = '\0';
                    } else if (strlen(*reg_data) + name_size + 1 > *reg_data_size) {
                        // Grow (doubling) so the new entry plus a separator fits.
                        void *new_ptr = loader_instance_heap_realloc(inst, *reg_data, *reg_data_size, *reg_data_size * 2,
                                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
                        if (NULL == new_ptr) {
                            loader_log(
                                inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                                "loaderGetRegistryFiles: Failed to reallocate space for registry value of size %d for key %s",
                                *reg_data_size * 2, name);
                            RegCloseKey(key);
                            result = VK_ERROR_OUT_OF_HOST_MEMORY;
                            goto out;
                        }
                        *reg_data = new_ptr;
                        *reg_data_size *= 2;
                    }
                    // We've now found a json file. If this is an ICD, we still need to check if there is actually a device
                    // that matches this ICD
                    loader_log(
                        inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Located json file \"%s\" from registry \"%s\\%s\"", name,
                        hive == DEFAULT_VK_REGISTRY_HIVE ? DEFAULT_VK_REGISTRY_HIVE_STR : SECONDARY_VK_REGISTRY_HIVE_STR, location);
                    if (is_driver) {
                        int i;
                        // Check whether the registry value name ends with a known driver JSON file name.
                        // NOTE(review): if strlen(name) < strlen(filename), the pointer passed to strcmp
                        // points before the buffer (UB) -- assumes registry names are at least as long as
                        // the known driver file names; TODO confirm.
                        for (i = 0; i < sizeof(known_drivers) / sizeof(known_drivers[0]); ++i) {
                            if (!strcmp(name + strlen(name) - strlen(known_drivers[i].filename), known_drivers[i].filename)) {
                                break;
                            }
                        }
                        if (i == sizeof(known_drivers) / sizeof(known_drivers[0])) {
                            // Unknown drivers are given the benefit of the doubt and kept.
                            loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
                                       "Driver %s is not recognized as a known driver. It will be assumed to be active", name);
                        } else {
                            // Known driver: require a DXGI adapter with a matching vendor ID.
                            bool found_gpu = false;
                            for (int j = 0;; ++j) {
                                IDXGIAdapter1 *adapter;
                                HRESULT hres = dxgi_factory->lpVtbl->EnumAdapters1(dxgi_factory, j, &adapter);
                                if (hres == DXGI_ERROR_NOT_FOUND) {
                                    // No more adapters to enumerate.
                                    break;
                                } else if (hres != S_OK) {
                                    loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                                               "Failed to enumerate DXGI adapters at index %d. As a result, drivers may be skipped", j);
                                    continue;
                                }
                                DXGI_ADAPTER_DESC1 description;
                                hres = adapter->lpVtbl->GetDesc1(adapter, &description);
                                if (hres != S_OK) {
                                    loader_log(
                                        inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
                                        "Failed to get DXGI adapter information at index %d. As a result, drivers may be skipped", j);
                                    continue;
                                }
                                if (description.VendorId == known_drivers[i].vendor_id) {
                                    found_gpu = true;
                                    break;
                                }
                            }
                            if (!found_gpu) {
                                loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
                                           "Dropping driver %s as no corresponding DXGI adapter was found", name);
                                continue;
                            }
                        }
                    }
                    if (strlen(*reg_data) == 0) {
                        // The list is empty. Add the first entry.
                        (void)snprintf(*reg_data, name_size + 1, "%s", name);
                        found = true;
                    } else {
                        // At this point the reg_data variable contains other JSON paths, likely from the PNP/device section
                        // of the registry that we want to have precedence over this non-device specific section of the registry.
                        // To make sure we avoid enumerating old JSON files/drivers that might be present in the non-device specific
                        // area of the registry when a newer device specific JSON file is present, do a check before adding.
                        // Find the file name, without path, of the JSON file found in the non-device specific registry location.
                        // If the same JSON file name is already found in the list, don't add it again.
                        bool foundDuplicate = false;
                        char *pLastSlashName = strrchr(name, '\\');
                        if (pLastSlashName != NULL) {
                            char *foundMatch = strstr(*reg_data, pLastSlashName + 1);
                            if (foundMatch != NULL) {
                                foundDuplicate = true;
                            }
                        }
                        if (foundDuplicate == false) {
                            // Add the new entry to the list.
                            (void)snprintf(*reg_data + strlen(*reg_data), name_size + 2, "%c%s", PATH_SEPARATOR, name);
                            found = true;
                        } else {
                            loader_log(
                                inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
                                "Skipping adding of json file \"%s\" from registry \"%s\\%s\" to the list due to duplication", name,
                                hive == DEFAULT_VK_REGISTRY_HIVE ? DEFAULT_VK_REGISTRY_HIVE_STR : SECONDARY_VK_REGISTRY_HIVE_STR,
                                location);
                        }
                    }
                }
            }
            RegCloseKey(key);
        }
        // Advance the location - if the next location is in the secondary hive, then reset the locations and advance the hive
        if (use_secondary_hive && (hive == DEFAULT_VK_REGISTRY_HIVE) && (*next == '\0')) {
            loc = location;
            hive = SECONDARY_VK_REGISTRY_HIVE;
        } else {
            loc = next;
        }
    }
    if (!found && result != VK_ERROR_OUT_OF_HOST_MEMORY) {
        result = VK_ERROR_INITIALIZATION_FAILED;
    }
out:
    if (is_driver && dxgi_factory != NULL) {
        dxgi_factory->lpVtbl->Release(dxgi_factory);
    }
    return result;
}
  929. #endif // WIN32
  930. // Combine path elements, separating each element with the platform-specific
  931. // directory separator, and save the combined string to a destination buffer,
  932. // not exceeding the given length. Path elements are given as variable args,
  933. // with a NULL element terminating the list.
  934. //
  935. // \returns the total length of the combined string, not including an ASCII
  936. // NUL termination character. This length may exceed the available storage:
  937. // in this case, the written string will be truncated to avoid a buffer
  938. // overrun, and the return value will greater than or equal to the storage
  939. // size. A NULL argument may be provided as the destination buffer in order
  940. // to determine the required string length without actually writing a string.
  941. static size_t loader_platform_combine_path(char *dest, size_t len, ...) {
  942. size_t required_len = 0;
  943. va_list ap;
  944. const char *component;
  945. va_start(ap, len);
  946. while ((component = va_arg(ap, const char *))) {
  947. if (required_len > 0) {
  948. // This path element is not the first non-empty element; prepend
  949. // a directory separator if space allows
  950. if (dest && required_len + 1 < len) {
  951. (void)snprintf(dest + required_len, len - required_len, "%c", DIRECTORY_SYMBOL);
  952. }
  953. required_len++;
  954. }
  955. if (dest && required_len < len) {
  956. strncpy(dest + required_len, component, len - required_len);
  957. }
  958. required_len += strlen(component);
  959. }
  960. va_end(ap);
  961. // strncpy(3) won't add a NUL terminating byte in the event of truncation.
  962. if (dest && required_len >= len) {
  963. dest[len - 1] = '\0';
  964. }
  965. return required_len;
  966. }
  967. // Given string of three part form "maj.min.pat" convert to a vulkan version number.
  968. static uint32_t loader_make_version(char *vers_str) {
  969. uint32_t major = 0, minor = 0, patch = 0;
  970. char *vers_tok;
  971. if (!vers_str) {
  972. return 0;
  973. }
  974. vers_tok = strtok(vers_str, ".\"\n\r");
  975. if (NULL != vers_tok) {
  976. major = (uint16_t)atoi(vers_tok);
  977. vers_tok = strtok(NULL, ".\"\n\r");
  978. if (NULL != vers_tok) {
  979. minor = (uint16_t)atoi(vers_tok);
  980. vers_tok = strtok(NULL, ".\"\n\r");
  981. if (NULL != vers_tok) {
  982. patch = (uint16_t)atoi(vers_tok);
  983. }
  984. }
  985. }
  986. return VK_MAKE_VERSION(major, minor, patch);
  987. }
  988. bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) {
  989. return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false;
  990. }
  991. // Search the given ext_array for an extension matching the given vk_ext_prop
  992. bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count,
  993. const VkExtensionProperties *ext_array) {
  994. for (uint32_t i = 0; i < count; i++) {
  995. if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i])) return true;
  996. }
  997. return false;
  998. }
  999. // Search the given ext_list for an extension matching the given vk_ext_prop
  1000. bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list) {
  1001. for (uint32_t i = 0; i < ext_list->count; i++) {
  1002. if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop)) return true;
  1003. }
  1004. return false;
  1005. }
  1006. // Search the given ext_list for a device extension matching the given ext_prop
  1007. bool has_vk_dev_ext_property(const VkExtensionProperties *ext_prop, const struct loader_device_extension_list *ext_list) {
  1008. for (uint32_t i = 0; i < ext_list->count; i++) {
  1009. if (compare_vk_extension_properties(&ext_list->list[i].props, ext_prop)) return true;
  1010. }
  1011. return false;
  1012. }
  1013. // Get the next unused layer property in the list. Init the property to zero.
  1014. static struct loader_layer_properties *loaderGetNextLayerPropertySlot(const struct loader_instance *inst,
  1015. struct loader_layer_list *layer_list) {
  1016. if (layer_list->capacity == 0) {
  1017. layer_list->list =
  1018. loader_instance_heap_alloc(inst, sizeof(struct loader_layer_properties) * 64, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  1019. if (layer_list->list == NULL) {
  1020. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  1021. "loaderGetNextLayerPropertySlot: Out of memory can "
  1022. "not add any layer properties to list");
  1023. return NULL;
  1024. }
  1025. memset(layer_list->list, 0, sizeof(struct loader_layer_properties) * 64);
  1026. layer_list->capacity = sizeof(struct loader_layer_properties) * 64;
  1027. }
  1028. // Ensure enough room to add an entry
  1029. if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > layer_list->capacity) {
  1030. void *new_ptr = loader_instance_heap_realloc(inst, layer_list->list, layer_list->capacity, layer_list->capacity * 2,
  1031. VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  1032. if (NULL == new_ptr) {
  1033. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loaderGetNextLayerPropertySlot: realloc failed for layer list");
  1034. return NULL;
  1035. }
  1036. layer_list->list = new_ptr;
  1037. memset((uint8_t *)layer_list->list + layer_list->capacity, 0, layer_list->capacity);
  1038. layer_list->capacity *= 2;
  1039. }
  1040. layer_list->count++;
  1041. return &(layer_list->list[layer_list->count - 1]);
  1042. }
  1043. // Search the given layer list for a layer property matching the given layer name
  1044. static struct loader_layer_properties *loaderFindLayerProperty(const char *name, const struct loader_layer_list *layer_list) {
  1045. for (uint32_t i = 0; i < layer_list->count; i++) {
  1046. const VkLayerProperties *item = &layer_list->list[i].info;
  1047. if (strcmp(name, item->layerName) == 0) return &layer_list->list[i];
  1048. }
  1049. return NULL;
  1050. }
  1051. // Search the given layer list for a layer matching the given layer name
  1052. static bool loaderFindLayerNameInList(const char *name, const struct loader_layer_list *layer_list) {
  1053. if (NULL == layer_list) {
  1054. return false;
  1055. }
  1056. if (NULL != loaderFindLayerProperty(name, layer_list)) {
  1057. return true;
  1058. }
  1059. return false;
  1060. }
  1061. // Search the given meta-layer's component list for a layer matching the given layer name
  1062. static bool loaderFindLayerNameInMetaLayer(const struct loader_instance *inst, const char *layer_name,
  1063. struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) {
  1064. for (uint32_t comp_layer = 0; comp_layer < meta_layer_props->num_component_layers; comp_layer++) {
  1065. if (!strcmp(meta_layer_props->component_layer_names[comp_layer], layer_name)) {
  1066. return true;
  1067. }
  1068. struct loader_layer_properties *comp_layer_props =
  1069. loaderFindLayerProperty(meta_layer_props->component_layer_names[comp_layer], layer_list);
  1070. if (comp_layer_props->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
  1071. return loaderFindLayerNameInMetaLayer(inst, layer_name, layer_list, comp_layer_props);
  1072. }
  1073. }
  1074. return false;
  1075. }
  1076. // Search the override layer's blacklist for a layer matching the given layer name
  1077. static bool loaderFindLayerNameInBlacklist(const struct loader_instance *inst, const char *layer_name,
  1078. struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) {
  1079. for (uint32_t black_layer = 0; black_layer < meta_layer_props->num_blacklist_layers; ++black_layer) {
  1080. if (!strcmp(meta_layer_props->blacklist_layer_names[black_layer], layer_name)) {
  1081. return true;
  1082. }
  1083. }
  1084. return false;
  1085. }
  1086. // Remove all layer properties entries from the list
  1087. void loaderDeleteLayerListAndProperties(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
  1088. uint32_t i, j, k;
  1089. struct loader_device_extension_list *dev_ext_list;
  1090. struct loader_dev_ext_props *ext_props;
  1091. if (!layer_list) return;
  1092. for (i = 0; i < layer_list->count; i++) {
  1093. if (NULL != layer_list->list[i].blacklist_layer_names) {
  1094. loader_instance_heap_free(inst, layer_list->list[i].blacklist_layer_names);
  1095. layer_list->list[i].blacklist_layer_names = NULL;
  1096. }
  1097. if (NULL != layer_list->list[i].component_layer_names) {
  1098. loader_instance_heap_free(inst, layer_list->list[i].component_layer_names);
  1099. layer_list->list[i].component_layer_names = NULL;
  1100. }
  1101. if (NULL != layer_list->list[i].override_paths) {
  1102. loader_instance_heap_free(inst, layer_list->list[i].override_paths);
  1103. layer_list->list[i].override_paths = NULL;
  1104. }
  1105. if (NULL != layer_list->list[i].app_key_paths) {
  1106. loader_instance_heap_free(inst, layer_list->list[i].app_key_paths);
  1107. layer_list->list[i].app_key_paths = NULL;
  1108. }
  1109. loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_list->list[i].instance_extension_list);
  1110. dev_ext_list = &layer_list->list[i].device_extension_list;
  1111. if (dev_ext_list->capacity > 0 && NULL != dev_ext_list->list) {
  1112. for (j = 0; j < dev_ext_list->count; j++) {
  1113. ext_props = &dev_ext_list->list[j];
  1114. if (ext_props->entrypoint_count > 0) {
  1115. for (k = 0; k < ext_props->entrypoint_count; k++) {
  1116. loader_instance_heap_free(inst, ext_props->entrypoints[k]);
  1117. }
  1118. loader_instance_heap_free(inst, ext_props->entrypoints);
  1119. }
  1120. }
  1121. }
  1122. loader_destroy_generic_list(inst, (struct loader_generic_list *)dev_ext_list);
  1123. }
  1124. layer_list->count = 0;
  1125. if (layer_list->capacity > 0) {
  1126. layer_list->capacity = 0;
  1127. loader_instance_heap_free(inst, layer_list->list);
  1128. }
  1129. }
// Remove the layer at index layer_to_remove from layer_list: free the
// meta-layer allocations it owns, shift the remaining entries down one slot,
// and zero the vacated tail slot. No-op for a NULL list or out-of-range index.
void loaderRemoveLayerInList(const struct loader_instance *inst, struct loader_layer_list *layer_list, uint32_t layer_to_remove) {
    if (layer_list == NULL || layer_to_remove >= layer_list->count) {
        return;
    }
    if (layer_list->list[layer_to_remove].type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
        // Delete the component layers
        loader_instance_heap_free(inst, layer_list->list[layer_to_remove].component_layer_names);
        loader_instance_heap_free(inst, layer_list->list[layer_to_remove].override_paths);
        loader_instance_heap_free(inst, layer_list->list[layer_to_remove].blacklist_layer_names);
        loader_instance_heap_free(inst, layer_list->list[layer_to_remove].app_key_paths);
    }
    // Remove the current invalid meta-layer from the layer list. Use memmove since we are
    // overlapping the source and destination addresses.
    memmove(&layer_list->list[layer_to_remove], &layer_list->list[layer_to_remove + 1],
            sizeof(struct loader_layer_properties) * (layer_list->count - 1 - layer_to_remove));
    // Make sure to clear out the removed layer, in case new layers are added in the previous location
    memset(&layer_list->list[layer_list->count - 1], 0, sizeof(struct loader_layer_properties));
    // Decrement the count because the list now holds one fewer layer. (Callers
    // iterating over the list are responsible for re-checking this index.)
    layer_list->count--;
}
  1151. // Remove all layers in the layer list that are blacklisted by the override layer.
  1152. // NOTE: This should only be called if an override layer is found and not expired.
void loaderRemoveLayersInBlacklist(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    struct loader_layer_properties *override_prop = loaderFindLayerProperty(VK_OVERRIDE_LAYER_NAME, layer_list);
    if (NULL == override_prop) {
        return;
    }
    // Signed index: j is decremented after a removal so the entry that slid
    // into slot j gets re-checked on the next iteration.
    for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) {
        // NOTE: cur_layer_prop is a by-value snapshot of the entry; it is only
        // read (and the heap pointers inside it freed), never written back.
        struct loader_layer_properties cur_layer_prop = layer_list->list[j];
        const char *cur_layer_name = &cur_layer_prop.info.layerName[0];
        // Skip the override layer itself.
        if (!strcmp(VK_OVERRIDE_LAYER_NAME, cur_layer_name)) {
            continue;
        }
        // If found in the override layer's blacklist, remove it
        if (loaderFindLayerNameInBlacklist(inst, cur_layer_name, layer_list, override_prop)) {
            loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
                       "loaderRemoveLayersInBlacklist: Override layer is active and layer %s is in the blacklist"
                       " inside of it. Removing that layer from current layer list.",
                       cur_layer_name);
            if (cur_layer_prop.type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
                // Delete the component layers
                loader_instance_heap_free(inst, cur_layer_prop.component_layer_names);
                loader_instance_heap_free(inst, cur_layer_prop.override_paths);
                // Never need to free the blacklist, since it can only exist in the override layer
            }
            // Remove the current invalid meta-layer from the layer list. Use memmove since we are
            // overlapping the source and destination addresses.
            memmove(&layer_list->list[j], &layer_list->list[j + 1],
                    sizeof(struct loader_layer_properties) * (layer_list->count - 1 - j));
            // Decrement the count (because we now have one less) and decrement the loop index since we need to
            // re-check this index.
            layer_list->count--;
            j--;
            // Re-do the query for the override layer: the memmove above may have
            // shifted it, invalidating the old pointer.
            override_prop = loaderFindLayerProperty(VK_OVERRIDE_LAYER_NAME, layer_list);
        }
    }
}
  1190. // Remove all layers in the layer list that are not found inside any implicit meta-layers.
  1191. void loaderRemoveLayersNotInImplicitMetaLayers(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
  1192. int32_t i;
  1193. int32_t j;
  1194. int32_t layer_count = (int32_t)(layer_list->count);
  1195. for (i = 0; i < layer_count; i++) {
  1196. layer_list->list[i].keep = false;
  1197. }
  1198. for (i = 0; i < layer_count; i++) {
  1199. struct loader_layer_properties cur_layer_prop = layer_list->list[i];
  1200. if (0 == (cur_layer_prop.type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
  1201. cur_layer_prop.keep = true;
  1202. } else {
  1203. continue;
  1204. }
  1205. if (cur_layer_prop.type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
  1206. for (j = 0; j < layer_count; j++) {
  1207. struct loader_layer_properties layer_to_check = layer_list->list[j];
  1208. if (i == j) {
  1209. continue;
  1210. }
  1211. // For all layers found in this meta layer, we want to keep them as well.
  1212. if (loaderFindLayerNameInMetaLayer(inst, layer_to_check.info.layerName, layer_list, &cur_layer_prop)) {
  1213. cur_layer_prop.keep = true;
  1214. }
  1215. }
  1216. }
  1217. }
  1218. // Remove any layers we don't want to keep (Don't use layer_count here as we need it to be
  1219. // dynamically updated if we delete a layer property in the list).
  1220. for (i = 0; i < (int32_t)(layer_list->count); i++) {
  1221. struct loader_layer_properties cur_layer_prop = layer_list->list[i];
  1222. if (!cur_layer_prop.keep) {
  1223. loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
  1224. "loaderRemoveLayersNotInImplicitMetaLayers : Implicit meta-layers are active, and layer %s is not list"
  1225. " inside of any. So removing layer from current layer list.",
  1226. cur_layer_prop.info.layerName);
  1227. if (cur_layer_prop.type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
  1228. // Delete the component layers
  1229. loader_instance_heap_free(inst, cur_layer_prop.component_layer_names);
  1230. loader_instance_heap_free(inst, cur_layer_prop.override_paths);
  1231. }
  1232. // Remove the current invalid meta-layer from the layer list. Use memmove since we are
  1233. // overlapping the source and destination addresses.
  1234. memmove(&layer_list->list[i], &layer_list->list[i + 1],
  1235. sizeof(struct loader_layer_properties) * (layer_list->count - 1 - i));
  1236. // Decrement the count (because we now have one less) and decrement the loop index since we need to
  1237. // re-check this index.
  1238. layer_list->count--;
  1239. i--;
  1240. }
  1241. }
  1242. }
  1243. static VkResult loader_add_instance_extensions(const struct loader_instance *inst,
  1244. const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, const char *lib_name,
  1245. struct loader_extension_list *ext_list) {
  1246. uint32_t i, count = 0;
  1247. VkExtensionProperties *ext_props;
  1248. VkResult res = VK_SUCCESS;
  1249. if (!fp_get_props) {
  1250. // No EnumerateInstanceExtensionProperties defined
  1251. goto out;
  1252. }
  1253. res = fp_get_props(NULL, &count, NULL);
  1254. if (res != VK_SUCCESS) {
  1255. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  1256. "loader_add_instance_extensions: Error getting Instance "
  1257. "extension count from %s",
  1258. lib_name);
  1259. goto out;
  1260. }
  1261. if (count == 0) {
  1262. // No ExtensionProperties to report
  1263. goto out;
  1264. }
  1265. ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
  1266. if (NULL == ext_props) {
  1267. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  1268. goto out;
  1269. }
  1270. res = fp_get_props(NULL, &count, ext_props);
  1271. if (res != VK_SUCCESS) {
  1272. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  1273. "loader_add_instance_extensions: Error getting Instance "
  1274. "extensions from %s",
  1275. lib_name);
  1276. goto out;
  1277. }
  1278. for (i = 0; i < count; i++) {
  1279. char spec_version[64];
  1280. bool ext_unsupported = wsi_unsupported_instance_extension(&ext_props[i]);
  1281. if (!ext_unsupported) {
  1282. (void)snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", VK_VERSION_MAJOR(ext_props[i].specVersion),
  1283. VK_VERSION_MINOR(ext_props[i].specVersion), VK_VERSION_PATCH(ext_props[i].specVersion));
  1284. loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Instance Extension: %s (%s) version %s", ext_props[i].extensionName,
  1285. lib_name, spec_version);
  1286. res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
  1287. if (res != VK_SUCCESS) {
  1288. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  1289. "loader_add_instance_extensions: Failed to add %s "
  1290. "to Instance extension list",
  1291. lib_name);
  1292. goto out;
  1293. }
  1294. }
  1295. }
  1296. out:
  1297. return res;
  1298. }
  1299. // Initialize ext_list with the physical device extensions.
  1300. // The extension properties are passed as inputs in count and ext_props.
  1301. static VkResult loader_init_device_extensions(const struct loader_instance *inst, struct loader_physical_device_term *phys_dev_term,
  1302. uint32_t count, VkExtensionProperties *ext_props,
  1303. struct loader_extension_list *ext_list) {
  1304. VkResult res;
  1305. uint32_t i;
  1306. res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties));
  1307. if (VK_SUCCESS != res) {
  1308. return res;
  1309. }
  1310. for (i = 0; i < count; i++) {
  1311. char spec_version[64];
  1312. (void)snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", VK_VERSION_MAJOR(ext_props[i].specVersion),
  1313. VK_VERSION_MINOR(ext_props[i].specVersion), VK_VERSION_PATCH(ext_props[i].specVersion));
  1314. loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Device Extension: %s (%s) version %s", ext_props[i].extensionName,
  1315. phys_dev_term->this_icd_term->scanned_icd->lib_name, spec_version);
  1316. res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
  1317. if (res != VK_SUCCESS) return res;
  1318. }
  1319. return VK_SUCCESS;
  1320. }
  1321. VkResult loader_add_device_extensions(const struct loader_instance *inst,
  1322. PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties,
  1323. VkPhysicalDevice physical_device, const char *lib_name,
  1324. struct loader_extension_list *ext_list) {
  1325. uint32_t i, count;
  1326. VkResult res;
  1327. VkExtensionProperties *ext_props;
  1328. res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL);
  1329. if (res == VK_SUCCESS && count > 0) {
  1330. ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
  1331. if (!ext_props) {
  1332. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  1333. "loader_add_device_extensions: Failed to allocate space"
  1334. " for device extension properties.");
  1335. return VK_ERROR_OUT_OF_HOST_MEMORY;
  1336. }
  1337. res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props);
  1338. if (res != VK_SUCCESS) {
  1339. return res;
  1340. }
  1341. for (i = 0; i < count; i++) {
  1342. char spec_version[64];
  1343. (void)snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", VK_VERSION_MAJOR(ext_props[i].specVersion),
  1344. VK_VERSION_MINOR(ext_props[i].specVersion), VK_VERSION_PATCH(ext_props[i].specVersion));
  1345. loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Device Extension: %s (%s) version %s", ext_props[i].extensionName,
  1346. lib_name, spec_version);
  1347. res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
  1348. if (res != VK_SUCCESS) {
  1349. return res;
  1350. }
  1351. }
  1352. } else {
  1353. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  1354. "loader_add_device_extensions: Error getting physical "
  1355. "device extension info count from library %s",
  1356. lib_name);
  1357. return res;
  1358. }
  1359. return VK_SUCCESS;
  1360. }
  1361. VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size) {
  1362. size_t capacity = 32 * element_size;
  1363. list_info->count = 0;
  1364. list_info->capacity = 0;
  1365. list_info->list = loader_instance_heap_alloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  1366. if (list_info->list == NULL) {
  1367. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  1368. "loader_init_generic_list: Failed to allocate space "
  1369. "for generic list");
  1370. return VK_ERROR_OUT_OF_HOST_MEMORY;
  1371. }
  1372. memset(list_info->list, 0, capacity);
  1373. list_info->capacity = capacity;
  1374. return VK_SUCCESS;
  1375. }
  1376. void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list) {
  1377. loader_instance_heap_free(inst, list->list);
  1378. list->count = 0;
  1379. list->capacity = 0;
  1380. }
  1381. // Append non-duplicate extension properties defined in props to the given ext_list.
  1382. // Return - Vk_SUCCESS on success
  1383. VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list,
  1384. uint32_t prop_list_count, const VkExtensionProperties *props) {
  1385. uint32_t i;
  1386. const VkExtensionProperties *cur_ext;
  1387. if (ext_list->list == NULL || ext_list->capacity == 0) {
  1388. VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties));
  1389. if (VK_SUCCESS != res) {
  1390. return res;
  1391. }
  1392. }
  1393. for (i = 0; i < prop_list_count; i++) {
  1394. cur_ext = &props[i];
  1395. // look for duplicates
  1396. if (has_vk_extension_property(cur_ext, ext_list)) {
  1397. continue;
  1398. }
  1399. // add to list at end
  1400. // check for enough capacity
  1401. if (ext_list->count * sizeof(VkExtensionProperties) >= ext_list->capacity) {
  1402. void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
  1403. VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  1404. if (new_ptr == NULL) {
  1405. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  1406. "loader_add_to_ext_list: Failed to reallocate "
  1407. "space for extension list");
  1408. return VK_ERROR_OUT_OF_HOST_MEMORY;
  1409. }
  1410. ext_list->list = new_ptr;
  1411. // double capacity
  1412. ext_list->capacity *= 2;
  1413. }
  1414. memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties));
  1415. ext_list->count++;
  1416. }
  1417. return VK_SUCCESS;
  1418. }
// Append one extension property defined in props with entrypoints defined in entries to the given
// ext_list. Do not append if a duplicate.
// The entrypoint names in entrys are deep-copied, so the caller retains
// ownership of its strings; on a partial allocation failure every copy made so
// far is freed and the entry is not added (count is not incremented).
// Return - Vk_SUCCESS on success
VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list,
                                    const VkExtensionProperties *props, uint32_t entry_count, char **entrys) {
    uint32_t idx;
    // Lazily allocate the backing storage the first time an entry is added.
    if (ext_list->list == NULL || ext_list->capacity == 0) {
        VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(struct loader_dev_ext_props));
        if (VK_SUCCESS != res) {
            return res;
        }
    }
    // look for duplicates
    if (has_vk_dev_ext_property(props, ext_list)) {
        return VK_SUCCESS;
    }
    idx = ext_list->count;
    // add to list at end
    // check for enough capacity
    if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) {
        // Doubling once is always sufficient since only one entry is added per call.
        void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_add_to_dev_ext_list: Failed to reallocate space for device extension list");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        ext_list->list = new_ptr;
        // double capacity
        ext_list->capacity *= 2;
    }
    memcpy(&ext_list->list[idx].props, props, sizeof(*props));
    ext_list->list[idx].entrypoint_count = entry_count;
    if (entry_count == 0) {
        ext_list->list[idx].entrypoints = NULL;
    } else {
        // Allocate the array of entrypoint-name pointers.
        ext_list->list[idx].entrypoints =
            loader_instance_heap_alloc(inst, sizeof(char *) * entry_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (ext_list->list[idx].entrypoints == NULL) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_add_to_dev_ext_list: Failed to allocate space "
                       "for device extension entrypoint list in list %d",
                       idx);
            ext_list->list[idx].entrypoint_count = 0;
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        // Deep-copy each entrypoint name.
        for (uint32_t i = 0; i < entry_count; i++) {
            ext_list->list[idx].entrypoints[i] =
                loader_instance_heap_alloc(inst, strlen(entrys[i]) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            if (ext_list->list[idx].entrypoints[i] == NULL) {
                // Unwind: free the names copied so far plus the pointer array itself.
                for (uint32_t j = 0; j < i; j++) {
                    loader_instance_heap_free(inst, ext_list->list[idx].entrypoints[j]);
                }
                loader_instance_heap_free(inst, ext_list->list[idx].entrypoints);
                ext_list->list[idx].entrypoint_count = 0;
                ext_list->list[idx].entrypoints = NULL;
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "loader_add_to_dev_ext_list: Failed to allocate space "
                           "for device extension entrypoint %d name",
                           i);
                return VK_ERROR_OUT_OF_HOST_MEMORY;
            }
            strcpy(ext_list->list[idx].entrypoints[i], entrys[i]);
        }
    }
    // The entry is fully constructed; make it visible.
    ext_list->count++;
    return VK_SUCCESS;
}
  1487. // Prototypes needed.
  1488. bool loaderAddMetaLayer(const struct loader_instance *inst, const struct loader_layer_properties *prop,
  1489. struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list,
  1490. const struct loader_layer_list *source_list);
  1491. // Manage lists of VkLayerProperties
  1492. static bool loaderInitLayerList(const struct loader_instance *inst, struct loader_layer_list *list) {
  1493. list->capacity = 32 * sizeof(struct loader_layer_properties);
  1494. list->list = loader_instance_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  1495. if (list->list == NULL) {
  1496. return false;
  1497. }
  1498. memset(list->list, 0, list->capacity);
  1499. list->count = 0;
  1500. return true;
  1501. }
  1502. // Search the given layer list for a list matching the given VkLayerProperties
  1503. bool loaderListHasLayerProperty(const VkLayerProperties *vk_layer_prop, const struct loader_layer_list *list) {
  1504. for (uint32_t i = 0; i < list->count; i++) {
  1505. if (strcmp(vk_layer_prop->layerName, list->list[i].info.layerName) == 0) return true;
  1506. }
  1507. return false;
  1508. }
  1509. void loaderDestroyLayerList(const struct loader_instance *inst, struct loader_device *device,
  1510. struct loader_layer_list *layer_list) {
  1511. if (device) {
  1512. loader_device_heap_free(device, layer_list->list);
  1513. } else {
  1514. loader_instance_heap_free(inst, layer_list->list);
  1515. }
  1516. layer_list->count = 0;
  1517. layer_list->capacity = 0;
  1518. }
  1519. // Append non-duplicate layer properties defined in prop_list to the given layer_info list
  1520. VkResult loaderAddLayerPropertiesToList(const struct loader_instance *inst, struct loader_layer_list *list,
  1521. uint32_t prop_list_count, const struct loader_layer_properties *props) {
  1522. uint32_t i;
  1523. struct loader_layer_properties *layer;
  1524. if (list->list == NULL || list->capacity == 0) {
  1525. if (!loaderInitLayerList(inst, list)) {
  1526. return VK_ERROR_OUT_OF_HOST_MEMORY;
  1527. }
  1528. }
  1529. if (list->list == NULL) return VK_SUCCESS;
  1530. for (i = 0; i < prop_list_count; i++) {
  1531. layer = (struct loader_layer_properties *)&props[i];
  1532. // Look for duplicates, and skip
  1533. if (loaderListHasLayerProperty(&layer->info, list)) {
  1534. continue;
  1535. }
  1536. // Check for enough capacity
  1537. if (((list->count + 1) * sizeof(struct loader_layer_properties)) >= list->capacity) {
  1538. size_t new_capacity = list->capacity * 2;
  1539. void *new_ptr =
  1540. loader_instance_heap_realloc(inst, list->list, list->capacity, new_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  1541. if (NULL == new_ptr) {
  1542. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  1543. "loaderAddLayerPropertiesToList: Realloc failed for when attempting to add new layer");
  1544. return VK_ERROR_OUT_OF_HOST_MEMORY;
  1545. }
  1546. list->list = new_ptr;
  1547. list->capacity = new_capacity;
  1548. }
  1549. memcpy(&list->list[list->count], layer, sizeof(struct loader_layer_properties));
  1550. list->count++;
  1551. }
  1552. return VK_SUCCESS;
  1553. }
// Search the given search_list for any layers in the props list. Add these to the
// output layer_list. Don't add duplicates to the output layer_list.
// Returns VK_ERROR_LAYER_NOT_PRESENT if any requested name could not be found
// (processing continues for the remaining names); VK_SUCCESS otherwise.
static VkResult loaderAddLayerNamesToList(const struct loader_instance *inst, struct loader_layer_list *output_list,
                                          struct loader_layer_list *expanded_output_list, uint32_t name_count,
                                          const char *const *names, const struct loader_layer_list *source_list) {
    struct loader_layer_properties *layer_prop;
    VkResult err = VK_SUCCESS;
    for (uint32_t i = 0; i < name_count; i++) {
        const char *source_name = names[i];
        layer_prop = loaderFindLayerProperty(source_name, source_list);
        if (NULL == layer_prop) {
            // Remember the failure but keep processing the remaining names.
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loaderAddLayerNamesToList: Unable to find layer %s", source_name);
            err = VK_ERROR_LAYER_NOT_PRESENT;
            continue;
        }
        // If not a meta-layer, simply add it.
        if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
            // NOTE(review): the VkResult of these adds is discarded, so an OOM here
            // is silently dropped -- TODO confirm whether that is intentional.
            if (!loaderListHasLayerProperty(&layer_prop->info, output_list)) {
                loaderAddLayerPropertiesToList(inst, output_list, 1, layer_prop);
            }
            if (!loaderListHasLayerProperty(&layer_prop->info, expanded_output_list)) {
                loaderAddLayerPropertiesToList(inst, expanded_output_list, 1, layer_prop);
            }
        } else {
            // Meta-layer: expand it into its component layers as well.
            if (!loaderListHasLayerProperty(&layer_prop->info, output_list) ||
                !loaderListHasLayerProperty(&layer_prop->info, expanded_output_list)) {
                loaderAddMetaLayer(inst, layer_prop, output_list, expanded_output_list, source_list);
            }
        }
    }
    return err;
}
// Return true if the current local time is still before the layer's expiration
// time, i.e. the layer has NOT yet expired.
static bool checkExpiration(const struct loader_instance *inst, const struct loader_layer_properties *prop) {
    time_t current = time(NULL);
    // NOTE(review): localtime() returns a pointer to static storage and is not
    // thread-safe -- confirm callers serialize access (e.g. under the loader lock).
    struct tm tm_current = *localtime(&current);
    struct tm tm_expiration = {
        .tm_sec = 0,
        .tm_min = prop->expiration.minute,
        .tm_hour = prop->expiration.hour,
        .tm_mday = prop->expiration.day,
        .tm_mon = prop->expiration.month - 1,     // struct tm months are 0-based
        .tm_year = prop->expiration.year - 1900,  // struct tm years are offsets from 1900
        // Reuse the current DST flag so mktime does not shift the expiration time.
        .tm_isdst = tm_current.tm_isdst,
        // wday and yday are ignored by mktime
    };
    time_t expiration = mktime(&tm_expiration);
    return current < expiration;
}
// Determine if the provided implicit layer should be enabled by querying the appropriate environmental variables.
// For an implicit layer, at least a disable environment variable is required.
// Checks are applied in order, with later checks overriding earlier ones:
//   1. enable_environment (enabled by default when none is specified)
//   2. disable_environment (set means disabled, regardless of the enable variable)
//   3. expiration date, when the layer declares one
//   4. membership in the active override meta-layer forces the layer on
bool loaderImplicitLayerIsEnabled(const struct loader_instance *inst, const struct loader_layer_properties *prop) {
    bool enable = false;
    char *env_value = NULL;
    // If no enable_environment variable is specified, this implicit layer is always enabled by default.
    if (prop->enable_env_var.name[0] == 0) {
        enable = true;
    } else {
        // Otherwise, only enable this layer if the enable environment variable is defined
        // and its value matches the expected one exactly.
        env_value = loader_getenv(prop->enable_env_var.name, inst);
        if (env_value && !strcmp(prop->enable_env_var.value, env_value)) {
            enable = true;
        }
        loader_free_getenv(env_value, inst);
    }
    // The disable_environment has priority over everything else. If it is defined, the layer is always
    // disabled.
    env_value = loader_getenv(prop->disable_env_var.name, inst);
    if (env_value) {
        enable = false;
    }
    loader_free_getenv(env_value, inst);
    // If this layer has an expiration, check it to determine if this layer has expired.
    if (prop->has_expiration) {
        enable = checkExpiration(inst, prop);
    }
    // Enable this layer if it is included in the override layer
    if (inst != NULL && inst->override_layer_present) {
        struct loader_layer_properties *override = NULL;
        // Locate the override meta-layer in the instance layer list.
        for (uint32_t i = 0; i < inst->instance_layer_list.count; ++i) {
            if (strcmp(inst->instance_layer_list.list[i].info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
                override = &inst->instance_layer_list.list[i];
                break;
            }
        }
        if (override != NULL) {
            // Any component layer of the override layer is force-enabled.
            for (uint32_t i = 0; i < override->num_component_layers; ++i) {
                if (strcmp(override->component_layer_names[i], prop->info.layerName) == 0) {
                    enable = true;
                    break;
                }
            }
        }
    }
    return enable;
}
  1649. // Check the individual implicit layer for the enable/disable environment variable settings. Only add it after
  1650. // every check has passed indicating it should be used.
  1651. static void loaderAddImplicitLayer(const struct loader_instance *inst, const struct loader_layer_properties *prop,
  1652. struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list,
  1653. const struct loader_layer_list *source_list) {
  1654. bool enable = loaderImplicitLayerIsEnabled(inst, prop);
  1655. // If the implicit layer is supposed to be enable, make sure the layer supports at least the same API version
  1656. // that the application is asking (i.e. layer's API >= app's API). If it's not, disable this layer.
  1657. if (enable) {
  1658. uint16_t layer_api_major_version = VK_VERSION_MAJOR(prop->info.specVersion);
  1659. uint16_t layer_api_minor_version = VK_VERSION_MINOR(prop->info.specVersion);
  1660. if (inst->app_api_major_version > layer_api_major_version ||
  1661. (inst->app_api_major_version == layer_api_major_version && inst->app_api_minor_version > layer_api_minor_version)) {
  1662. loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  1663. "loader_add_implicit_layer: Disabling implicit layer %s for using an old API version %d.%d versus "
  1664. "application requested %d.%d",
  1665. prop->info.layerName, layer_api_major_version, layer_api_minor_version, inst->app_api_major_version,
  1666. inst->app_api_minor_version);
  1667. enable = false;
  1668. }
  1669. }
  1670. if (enable) {
  1671. if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
  1672. if (!loaderListHasLayerProperty(&prop->info, target_list)) {
  1673. loaderAddLayerPropertiesToList(inst, target_list, 1, prop);
  1674. }
  1675. if (NULL != expanded_target_list && !loaderListHasLayerProperty(&prop->info, expanded_target_list)) {
  1676. loaderAddLayerPropertiesToList(inst, expanded_target_list, 1, prop);
  1677. }
  1678. } else {
  1679. if (!loaderListHasLayerProperty(&prop->info, target_list) ||
  1680. (NULL != expanded_target_list && !loaderListHasLayerProperty(&prop->info, expanded_target_list))) {
  1681. loaderAddMetaLayer(inst, prop, target_list, expanded_target_list, source_list);
  1682. }
  1683. }
  1684. }
  1685. }
  1686. // Add the component layers of a meta-layer to the active list of layers
  1687. bool loaderAddMetaLayer(const struct loader_instance *inst, const struct loader_layer_properties *prop,
  1688. struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list,
  1689. const struct loader_layer_list *source_list) {
  1690. bool found = true;
  1691. // If the meta-layer isn't present in the unexpanded list, add it.
  1692. if (!loaderListHasLayerProperty(&prop->info, target_list)) {
  1693. loaderAddLayerPropertiesToList(inst, target_list, 1, prop);
  1694. }
  1695. // We need to add all the individual component layers
  1696. for (uint32_t comp_layer = 0; comp_layer < prop->num_component_layers; comp_layer++) {
  1697. bool found_comp = false;
  1698. const struct loader_layer_properties *search_prop =
  1699. loaderFindLayerProperty(prop->component_layer_names[comp_layer], source_list);
  1700. if (search_prop != NULL) {
  1701. found_comp = true;
  1702. // If the component layer is itself an implicit layer, we need to do the implicit layer enable
  1703. // checks
  1704. if (0 == (search_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
  1705. loaderAddImplicitLayer(inst, search_prop, target_list, expanded_target_list, source_list);
  1706. } else {
  1707. if (0 != (search_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
  1708. found = loaderAddMetaLayer(inst, search_prop, target_list, expanded_target_list, source_list);
  1709. } else {
  1710. // Otherwise, just make sure it hasn't already been added to either list before we add it
  1711. if (!loaderListHasLayerProperty(&search_prop->info, target_list)) {
  1712. loaderAddLayerPropertiesToList(inst, target_list, 1, search_prop);
  1713. }
  1714. if (NULL != expanded_target_list && !loaderListHasLayerProperty(&search_prop->info, expanded_target_list)) {
  1715. loaderAddLayerPropertiesToList(inst, expanded_target_list, 1, search_prop);
  1716. }
  1717. }
  1718. }
  1719. }
  1720. if (!found_comp) {
  1721. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  1722. "loaderAddMetaLayer: Failed to find layer name %s component layer "
  1723. "%s to activate",
  1724. search_prop->info.layerName, prop->component_layer_names[comp_layer]);
  1725. found = false;
  1726. }
  1727. }
  1728. // Add this layer to the overall target list (not the expanded one)
  1729. if (found && !loaderListHasLayerProperty(&prop->info, target_list)) {
  1730. loaderAddLayerPropertiesToList(inst, target_list, 1, prop);
  1731. }
  1732. return found;
  1733. }
// Search the source_list for any layer with a name that matches the given name and a type
// that matches the given type. Add all matching layers to the target_list.
// Do not add if found loader_layer_properties is already on the target_list.
// Returns VK_ERROR_LAYER_NOT_PRESENT only for the renamed standard_validation
// layer; other failed lookups emit a warning and still return VK_SUCCESS.
VkResult loaderAddLayerNameToList(const struct loader_instance *inst, const char *name, const enum layer_type_flags type_flags,
                                  const struct loader_layer_list *source_list, struct loader_layer_list *target_list,
                                  struct loader_layer_list *expanded_target_list) {
    VkResult res = VK_SUCCESS;
    bool found = false;
    for (uint32_t i = 0; i < source_list->count; i++) {
        struct loader_layer_properties *source_prop = &source_list->list[i];
        // Match on both the layer name and all of the requested type flags.
        if (0 == strcmp(source_prop->info.layerName, name) && (source_prop->type_flags & type_flags) == type_flags) {
            // If not a meta-layer, simply add it.
            if (0 == (source_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
                if (!loaderListHasLayerProperty(&source_prop->info, target_list) &&
                    VK_SUCCESS == loaderAddLayerPropertiesToList(inst, target_list, 1, source_prop)) {
                    found = true;
                }
                if (!loaderListHasLayerProperty(&source_prop->info, expanded_target_list) &&
                    VK_SUCCESS == loaderAddLayerPropertiesToList(inst, expanded_target_list, 1, source_prop)) {
                    found = true;
                }
            } else {
                // Meta-layer: expand it into its component layers.
                found = loaderAddMetaLayer(inst, source_prop, target_list, expanded_target_list, source_list);
            }
        }
    }
    if (!found) {
        // Non-zero strcmp means it is some other layer name that was simply not found.
        if (strcmp(name, "VK_LAYER_LUNARG_standard_validation")) {
            loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                       "loaderAddLayerNameToList: Failed to find layer name %s to activate", name);
        } else {
            // The old standard_validation meta-layer was renamed; report a hard error.
            res = VK_ERROR_LAYER_NOT_PRESENT;
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "Layer VK_LAYER_LUNARG_standard_validation has been changed to VK_LAYER_KHRONOS_validation. Please use the "
                       "new version of the layer.");
        }
    }
    return res;
}
  1773. static VkExtensionProperties *get_extension_property(const char *name, const struct loader_extension_list *list) {
  1774. for (uint32_t i = 0; i < list->count; i++) {
  1775. if (strcmp(name, list->list[i].extensionName) == 0) return &list->list[i];
  1776. }
  1777. return NULL;
  1778. }
  1779. static VkExtensionProperties *get_dev_extension_property(const char *name, const struct loader_device_extension_list *list) {
  1780. for (uint32_t i = 0; i < list->count; i++) {
  1781. if (strcmp(name, list->list[i].props.extensionName) == 0) return &list->list[i].props;
  1782. }
  1783. return NULL;
  1784. }
// For Instance extensions implemented within the loader (i.e. DEBUG_REPORT),
  1786. // the extension must provide two entry points for the loader to use:
  1787. // - "trampoline" entry point - this is the address returned by GetProcAddr
  1788. // and will always do what's necessary to support a
  1789. // global call.
  1790. // - "terminator" function - this function will be put at the end of the
  1791. // instance chain and will contain the necessary logic
  1792. // to call / process the extension for the appropriate
  1793. // ICDs that are available.
  1794. // There is no generic mechanism for including these functions, the references
  1795. // must be placed into the appropriate loader entry points.
  1796. // GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr
  1797. // requests
  1798. // loader_coalesce_extensions(void) - add extension records to the list of global
  1799. // extension available to the app.
  1800. // instance_disp - add function pointer for terminator function
  1801. // to this array.
  1802. // The extension itself should be in a separate file that will be linked directly
  1803. // with the loader.
  1804. VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
  1805. struct loader_extension_list *inst_exts) {
  1806. struct loader_extension_list icd_exts;
  1807. VkResult res = VK_SUCCESS;
  1808. char *env_value;
  1809. bool filter_extensions = true;
  1810. loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Build ICD instance extension list");
  1811. // Check if a user wants to disable the instance extension filtering behavior
  1812. env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
  1813. if (NULL != env_value && atoi(env_value) != 0) {
  1814. filter_extensions = false;
  1815. }
  1816. loader_free_getenv(env_value, inst);
  1817. // traverse scanned icd list adding non-duplicate extensions to the list
  1818. for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
  1819. res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
  1820. if (VK_SUCCESS != res) {
  1821. goto out;
  1822. }
  1823. res = loader_add_instance_extensions(inst, icd_tramp_list->scanned_list[i].EnumerateInstanceExtensionProperties,
  1824. icd_tramp_list->scanned_list[i].lib_name, &icd_exts);
  1825. if (VK_SUCCESS == res) {
  1826. if (filter_extensions) {
  1827. // Remove any extensions not recognized by the loader
  1828. for (int32_t j = 0; j < (int32_t)icd_exts.count; j++) {
  1829. // See if the extension is in the list of supported extensions
  1830. bool found = false;
  1831. for (uint32_t k = 0; LOADER_INSTANCE_EXTENSIONS[k] != NULL; k++) {
  1832. if (strcmp(icd_exts.list[j].extensionName, LOADER_INSTANCE_EXTENSIONS[k]) == 0) {
  1833. found = true;
  1834. break;
  1835. }
  1836. }
  1837. // If it isn't in the list, remove it
  1838. if (!found) {
  1839. for (uint32_t k = j + 1; k < icd_exts.count; k++) {
  1840. icd_exts.list[k - 1] = icd_exts.list[k];
  1841. }
  1842. --icd_exts.count;
  1843. --j;
  1844. }
  1845. }
  1846. }
  1847. res = loader_add_to_ext_list(inst, inst_exts, icd_exts.count, icd_exts.list);
  1848. }
  1849. loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
  1850. if (VK_SUCCESS != res) {
  1851. goto out;
  1852. }
  1853. };
  1854. // Traverse loader's extensions, adding non-duplicate extensions to the list
  1855. debug_utils_AddInstanceExtensions(inst, inst_exts);
  1856. out:
  1857. return res;
  1858. }
  1859. struct loader_icd_term *loader_get_icd_and_device(const void *device, struct loader_device **found_dev, uint32_t *icd_index) {
  1860. *found_dev = NULL;
  1861. for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
  1862. uint32_t index = 0;
  1863. for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) {
  1864. for (struct loader_device *dev = icd_term->logical_device_list; dev; dev = dev->next)
  1865. // Value comparison of device prevents object wrapping by layers
  1866. if (loader_get_dispatch(dev->icd_device) == loader_get_dispatch(device) ||
  1867. (dev->chain_device != VK_NULL_HANDLE &&
  1868. loader_get_dispatch(dev->chain_device) == loader_get_dispatch(device))) {
  1869. *found_dev = dev;
  1870. if (NULL != icd_index) {
  1871. *icd_index = index;
  1872. }
  1873. return icd_term;
  1874. }
  1875. index++;
  1876. }
  1877. }
  1878. return NULL;
  1879. }
  1880. void loader_destroy_logical_device(const struct loader_instance *inst, struct loader_device *dev,
  1881. const VkAllocationCallbacks *pAllocator) {
  1882. if (pAllocator) {
  1883. dev->alloc_callbacks = *pAllocator;
  1884. }
  1885. if (NULL != dev->expanded_activated_layer_list.list) {
  1886. loaderDeactivateLayers(inst, dev, &dev->expanded_activated_layer_list);
  1887. }
  1888. if (NULL != dev->app_activated_layer_list.list) {
  1889. loaderDestroyLayerList(inst, dev, &dev->app_activated_layer_list);
  1890. }
  1891. loader_device_heap_free(dev, dev);
  1892. }
// Allocate and zero-initialize a loader_device, using the application-provided
// allocation callbacks when given. When DEBUG_DISABLE_APP_ALLOCATORS is set,
// the preprocessor collapses the branch so plain malloc is always used.
// Returns NULL on allocation failure.
struct loader_device *loader_create_logical_device(const struct loader_instance *inst, const VkAllocationCallbacks *pAllocator) {
    struct loader_device *new_dev;
    // NOTE: the #if/#else below intentionally splits the brace structure; both
    // configurations close with the shared '}' after the malloc call.
#if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
    {
#else
    if (pAllocator) {
        new_dev = (struct loader_device *)pAllocator->pfnAllocation(pAllocator->pUserData, sizeof(struct loader_device),
                                                                    sizeof(int *), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
    } else {
#endif
        // Fallback path: plain malloc when no callbacks were provided (or when
        // app allocators are compiled out).
        new_dev = (struct loader_device *)malloc(sizeof(struct loader_device));
    }
    if (!new_dev) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "loader_create_logical_device: Failed to alloc struct "
                   "loader_device");
        return NULL;
    }
    memset(new_dev, 0, sizeof(struct loader_device));
    // Remember the callbacks so the matching free path uses the same allocator.
    if (pAllocator) {
        new_dev->alloc_callbacks = *pAllocator;
    }
    return new_dev;
}
  1917. void loader_add_logical_device(const struct loader_instance *inst, struct loader_icd_term *icd_term, struct loader_device *dev) {
  1918. dev->next = icd_term->logical_device_list;
  1919. icd_term->logical_device_list = dev;
  1920. }
  1921. void loader_remove_logical_device(const struct loader_instance *inst, struct loader_icd_term *icd_term,
  1922. struct loader_device *found_dev, const VkAllocationCallbacks *pAllocator) {
  1923. struct loader_device *dev, *prev_dev;
  1924. if (!icd_term || !found_dev) return;
  1925. prev_dev = NULL;
  1926. dev = icd_term->logical_device_list;
  1927. while (dev && dev != found_dev) {
  1928. prev_dev = dev;
  1929. dev = dev->next;
  1930. }
  1931. if (prev_dev)
  1932. prev_dev->next = found_dev->next;
  1933. else
  1934. icd_term->logical_device_list = found_dev->next;
  1935. loader_destroy_logical_device(inst, found_dev, pAllocator);
  1936. }
  1937. static void loader_icd_destroy(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term,
  1938. const VkAllocationCallbacks *pAllocator) {
  1939. ptr_inst->total_icd_count--;
  1940. for (struct loader_device *dev = icd_term->logical_device_list; dev;) {
  1941. struct loader_device *next_dev = dev->next;
  1942. loader_destroy_logical_device(ptr_inst, dev, pAllocator);
  1943. dev = next_dev;
  1944. }
  1945. loader_instance_heap_free(ptr_inst, icd_term);
  1946. }
  1947. static struct loader_icd_term *loader_icd_create(const struct loader_instance *inst) {
  1948. struct loader_icd_term *icd_term;
  1949. icd_term = loader_instance_heap_alloc(inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  1950. if (!icd_term) {
  1951. return NULL;
  1952. }
  1953. memset(icd_term, 0, sizeof(struct loader_icd_term));
  1954. return icd_term;
  1955. }
  1956. static struct loader_icd_term *loader_icd_add(struct loader_instance *ptr_inst, const struct loader_scanned_icd *scanned_icd) {
  1957. struct loader_icd_term *icd_term;
  1958. icd_term = loader_icd_create(ptr_inst);
  1959. if (!icd_term) {
  1960. return NULL;
  1961. }
  1962. icd_term->scanned_icd = scanned_icd;
  1963. icd_term->this_instance = ptr_inst;
  1964. // Prepend to the list
  1965. icd_term->next = ptr_inst->icd_terms;
  1966. ptr_inst->icd_terms = icd_term;
  1967. ptr_inst->total_icd_count++;
  1968. return icd_term;
  1969. }
// Determine the ICD interface version to use.
// @param fp_negotiate_icd_version The ICD's negotiation entry point, or NULL
//            when the ICD does not export one
// @param pVersion Output parameter indicating which version to use or 0 if
//            the negotiation API is not supported by the ICD
// @return bool indicating true if the selected interface version is supported
//            by the loader, false indicates the version is not supported
bool loader_get_icd_interface_version(PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version, uint32_t *pVersion) {
    if (fp_negotiate_icd_version == NULL) {
        // ICD does not support the negotiation API, it supports version 0 or 1
        // calling code must determine if it is version 0 or 1
        *pVersion = 0;
    } else {
        // ICD supports the negotiation API, so call it with the loader's
        // latest version supported; the ICD may lower *pVersion in place.
        *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION;
        VkResult result = fp_negotiate_icd_version(pVersion);
        if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
            // ICD no longer supports the loader's latest interface version so
            // fail loading the ICD
            return false;
        }
    }
#if MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION > 0
    if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION) {
        // Loader no longer supports the ICD's latest interface version so fail
        // loading the ICD
        return false;
    }
#endif
    return true;
}
  2001. void loader_scanned_icd_clear(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
  2002. if (0 != icd_tramp_list->capacity) {
  2003. for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
  2004. loader_platform_close_library(icd_tramp_list->scanned_list[i].handle);
  2005. loader_instance_heap_free(inst, icd_tramp_list->scanned_list[i].lib_name);
  2006. }
  2007. loader_instance_heap_free(inst, icd_tramp_list->scanned_list);
  2008. icd_tramp_list->capacity = 0;
  2009. icd_tramp_list->count = 0;
  2010. icd_tramp_list->scanned_list = NULL;
  2011. }
  2012. }
  2013. static VkResult loader_scanned_icd_init(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
  2014. VkResult err = VK_SUCCESS;
  2015. loader_scanned_icd_clear(inst, icd_tramp_list);
  2016. icd_tramp_list->capacity = 8 * sizeof(struct loader_scanned_icd);
  2017. icd_tramp_list->scanned_list = loader_instance_heap_alloc(inst, icd_tramp_list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  2018. if (NULL == icd_tramp_list->scanned_list) {
  2019. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  2020. "loader_scanned_icd_init: Realloc failed for layer list when "
  2021. "attempting to add new layer");
  2022. err = VK_ERROR_OUT_OF_HOST_MEMORY;
  2023. }
  2024. return err;
  2025. }
// Open the ICD library at "filename", negotiate a loader/ICD interface
// version with it, resolve the global entry points the loader requires, and
// append a new record to "icd_tramp_list" (growing the list if needed).
//
// Returns VK_ERROR_OUT_OF_HOST_MEMORY only for allocation failures.  An ICD
// that fails to open or is missing required entry points is logged and
// skipped, and the function still returns VK_SUCCESS (res is left untouched
// on those paths), so one bad driver does not abort the overall scan.
static VkResult loader_scanned_icd_add(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
                                       const char *filename, uint32_t api_version) {
    loader_platform_dl_handle handle;
    PFN_vkCreateInstance fp_create_inst;
    PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props;
    PFN_vkGetInstanceProcAddr fp_get_proc_addr;
    PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
    PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version;
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
#endif
    struct loader_scanned_icd *new_scanned_icd;
    uint32_t interface_vers;
    VkResult res = VK_SUCCESS;

    // TODO implement smarter opening/closing of libraries. For now this
    // function leaves libraries open and the scanned_icd_clear closes them
#if defined(__Fuchsia__)
    handle = loader_platform_open_driver(filename);
#else
    handle = loader_platform_open_library(filename);
#endif
    if (NULL == handle) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, loader_platform_open_library_error(filename));
        goto out;
    }

    // Get and settle on an ICD interface version
    fp_negotiate_icd_version = loader_platform_get_proc_address(handle, "vk_icdNegotiateLoaderICDInterfaceVersion");

    if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_vers)) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "loader_scanned_icd_add: ICD %s doesn't support interface"
                   " version compatible with loader, skip this ICD.",
                   filename);
        goto out;
    }

    fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
    if (NULL == fp_get_proc_addr) {
        // vk_icdGetInstanceProcAddr is mandatory from interface version 1
        // onward, so its absence implies a version-0 ICD.
        assert(interface_vers == 0);
        // Use deprecated interface from version 0
        fp_get_proc_addr = loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr");
        if (NULL == fp_get_proc_addr) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_scanned_icd_add: Attempt to retrieve either "
                       "\'vkGetInstanceProcAddr\' or "
                       "\'vk_icdGetInstanceProcAddr\' from ICD %s failed.",
                       filename);
            goto out;
        } else {
            loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                       "loader_scanned_icd_add: Using deprecated ICD "
                       "interface of \'vkGetInstanceProcAddr\' instead of "
                       "\'vk_icdGetInstanceProcAddr\' for ICD %s",
                       filename);
        }
        // Version 0: resolve the global commands directly from the library
        // via the platform's symbol lookup rather than through the ICD's GIPA.
        fp_create_inst = loader_platform_get_proc_address(handle, "vkCreateInstance");
        if (NULL == fp_create_inst) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_scanned_icd_add: Failed querying "
                       "\'vkCreateInstance\' via dlsym/loadlibrary for "
                       "ICD %s",
                       filename);
            goto out;
        }
        fp_get_inst_ext_props = loader_platform_get_proc_address(handle, "vkEnumerateInstanceExtensionProperties");
        if (NULL == fp_get_inst_ext_props) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_scanned_icd_add: Could not get \'vkEnumerate"
                       "InstanceExtensionProperties\' via dlsym/loadlibrary "
                       "for ICD %s",
                       filename);
            goto out;
        }
    } else {
        // Use newer interface version 1 or later
        if (interface_vers == 0) {
            interface_vers = 1;
        }

        // Version >= 1: resolve the global commands through the ICD's own
        // vk_icdGetInstanceProcAddr with a NULL instance.
        fp_create_inst = (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance");
        if (NULL == fp_create_inst) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_scanned_icd_add: Could not get "
                       "\'vkCreateInstance\' via \'vk_icdGetInstanceProcAddr\'"
                       " for ICD %s",
                       filename);
            goto out;
        }
        fp_get_inst_ext_props =
            (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr(NULL, "vkEnumerateInstanceExtensionProperties");
        if (NULL == fp_get_inst_ext_props) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_scanned_icd_add: Could not get \'vkEnumerate"
                       "InstanceExtensionProperties\' via "
                       "\'vk_icdGetInstanceProcAddr\' for ICD %s",
                       filename);
            goto out;
        }
        // Optional entry points: absence is not an error.
        fp_get_phys_dev_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetPhysicalDeviceProcAddr");
#if defined(VK_USE_PLATFORM_WIN32_KHR)
        // DXGI adapter enumeration was introduced with interface version 6.
        if (interface_vers >= 6) {
            fp_enum_dxgi_adapter_phys_devs = loader_platform_get_proc_address(handle, "vk_icdEnumerateAdapterPhysicalDevices");
        }
#endif
    }

    // check for enough capacity
    // NOTE: capacity is tracked in bytes (see loader_scanned_icd_init), hence
    // the multiplication by the entry size on the left-hand side.
    if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
                                                     icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_scanned_icd_add: Realloc failed on icd library list for ICD %s", filename);
            goto out;
        }
        icd_tramp_list->scanned_list = new_ptr;

        // double capacity
        icd_tramp_list->capacity *= 2;
    }

    // Fill in the new entry; count is only bumped after everything (including
    // the lib_name copy below) has succeeded.
    new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
    new_scanned_icd->handle = handle;
    new_scanned_icd->api_version = api_version;
    new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
    new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
    new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
    new_scanned_icd->CreateInstance = fp_create_inst;
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
#endif
    new_scanned_icd->interface_version = interface_vers;

    new_scanned_icd->lib_name = (char *)loader_instance_heap_alloc(inst, strlen(filename) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == new_scanned_icd->lib_name) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_scanned_icd_add: Out of memory can't add ICD %s", filename);
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    strcpy(new_scanned_icd->lib_name, filename);
    icd_tramp_list->count++;

out:
    return res;
}
  2164. static void loader_debug_init(void) {
  2165. char *env, *orig;
  2166. if (g_loader_debug > 0) return;
  2167. g_loader_debug = 0;
  2168. // Parse comma-separated debug options
  2169. orig = env = loader_getenv("VK_LOADER_DEBUG", NULL);
  2170. while (env) {
  2171. char *p = strchr(env, ',');
  2172. size_t len;
  2173. if (p)
  2174. len = p - env;
  2175. else
  2176. len = strlen(env);
  2177. if (len > 0) {
  2178. if (strncmp(env, "all", len) == 0) {
  2179. g_loader_debug = ~0u;
  2180. g_loader_log_msgs = ~0u;
  2181. } else if (strncmp(env, "warn", len) == 0) {
  2182. g_loader_debug |= LOADER_WARN_BIT;
  2183. g_loader_log_msgs |= VK_DEBUG_REPORT_WARNING_BIT_EXT;
  2184. } else if (strncmp(env, "info", len) == 0) {
  2185. g_loader_debug |= LOADER_INFO_BIT;
  2186. g_loader_log_msgs |= VK_DEBUG_REPORT_INFORMATION_BIT_EXT;
  2187. } else if (strncmp(env, "perf", len) == 0) {
  2188. g_loader_debug |= LOADER_PERF_BIT;
  2189. g_loader_log_msgs |= VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
  2190. } else if (strncmp(env, "error", len) == 0) {
  2191. g_loader_debug |= LOADER_ERROR_BIT;
  2192. g_loader_log_msgs |= VK_DEBUG_REPORT_ERROR_BIT_EXT;
  2193. } else if (strncmp(env, "debug", len) == 0) {
  2194. g_loader_debug |= LOADER_DEBUG_BIT;
  2195. g_loader_log_msgs |= VK_DEBUG_REPORT_DEBUG_BIT_EXT;
  2196. }
  2197. }
  2198. if (!p) break;
  2199. env = p + 1;
  2200. }
  2201. loader_free_getenv(orig, NULL);
  2202. }
// One-time process initialization for the loader: create the global mutexes,
// configure logging from the environment, route cJSON through the loader's
// allocator, and (Windows only) pre-resolve the DXGI factory entry point.
void loader_initialize(void) {
    // initialize mutexes
    loader_platform_thread_create_mutex(&loader_lock);
    loader_platform_thread_create_mutex(&loader_json_lock);
    loader_platform_thread_create_mutex(&loader_preload_icd_lock);

    // initialize logging
    loader_debug_init();

    // initial cJSON to use alloc callbacks
    cJSON_Hooks alloc_fns = {
        .malloc_fn = loader_instance_tls_heap_alloc, .free_fn = loader_instance_tls_heap_free,
    };
    cJSON_InitHooks(&alloc_fns);

#if defined(_WIN32)
    // This is needed to ensure that newer APIs are available right away
    // and not after the first call that has been statically linked
    LoadLibrary("gdi32.dll");

    // Load dxgi.dll by its full system-directory path and resolve
    // CreateDXGIFactory1; fpCreateDXGIFactory1 stays NULL if either step
    // fails, so callers must check it before use.
    TCHAR systemPath[MAX_PATH] = "";
    GetSystemDirectory(systemPath, MAX_PATH);
    StringCchCat(systemPath, MAX_PATH, TEXT("\\dxgi.dll"));
    HMODULE dxgi_module = LoadLibrary(systemPath);
    fpCreateDXGIFactory1 = dxgi_module == NULL ? NULL :
        (PFN_CreateDXGIFactory1)GetProcAddress(dxgi_module, "CreateDXGIFactory1");
#endif
}
// Scratch list of data (manifest) file names collected during scanning.
struct loader_data_files {
    uint32_t count;        // number of valid entries in filename_list
    uint32_t alloc_count;  // presumably slots allocated in filename_list -- usage is outside this chunk, verify
    char **filename_list;  // array of file-name strings
};
// Process-teardown counterpart of loader_initialize: unload any preloaded
// ICD libraries, then destroy the loader's global mutexes.
void loader_release() {
    // Guarantee release of the preloaded ICD libraries. This may have already been called in vkDestroyInstance.
    loader_unload_preloaded_icds();

    // release mutexes
    loader_platform_thread_delete_mutex(&loader_lock);
    loader_platform_thread_delete_mutex(&loader_json_lock);
    loader_platform_thread_delete_mutex(&loader_preload_icd_lock);
}
  2240. // Preload the ICD libraries that are likely to be needed so we don't repeatedly load/unload them later
void loader_preload_icds(void) {
    loader_platform_thread_lock_mutex(&loader_preload_icd_lock);

    // Already preloaded, skip loading again.
    if (scanned_icds.scanned_list != NULL) {
        loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
        return;
    }

    memset(&scanned_icds, 0, sizeof(scanned_icds));
    VkResult result = loader_icd_scan(NULL, &scanned_icds);
    // On scan failure, drop any partial results; clearing resets scanned_list
    // to NULL so a later call will attempt the scan again.
    if (result != VK_SUCCESS) {
        loader_scanned_icd_clear(NULL, &scanned_icds);
    }
    loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
}
  2255. // Release the ICD libraries that were preloaded
void loader_unload_preloaded_icds(void) {
    loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
    // Close each preloaded library and reset scanned_icds to empty; this is a
    // no-op when nothing was preloaded (the clear guards on zero capacity).
    loader_scanned_icd_clear(NULL, &scanned_icds);
    loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
}
  2261. // Get next file or dirname given a string list or registry key path
  2262. //
// \returns
// A pointer to the first char of the next path in the list, or a pointer to
// the string's terminating NUL when no separator remains; NULL only when the
// input path is NULL.
// Note: input string is modified in some cases. PASS IN A COPY!
  2267. static char *loader_get_next_path(char *path) {
  2268. uint32_t len;
  2269. char *next;
  2270. if (path == NULL) return NULL;
  2271. next = strchr(path, PATH_SEPARATOR);
  2272. if (next == NULL) {
  2273. len = (uint32_t)strlen(path);
  2274. next = path + len;
  2275. } else {
  2276. *next = '\0';
  2277. next++;
  2278. }
  2279. return next;
  2280. }
  2281. // Given a path which is absolute or relative, expand the path if relative or
  2282. // leave the path unmodified if absolute. The base path to prepend to relative
  2283. // paths is given in rel_base.
  2284. //
  2285. // @return - A string in out_fullpath of the full absolute path
  2286. static void loader_expand_path(const char *path, const char *rel_base, size_t out_size, char *out_fullpath) {
  2287. if (loader_platform_is_path_absolute(path)) {
  2288. // do not prepend a base to an absolute path
  2289. rel_base = "";
  2290. }
  2291. loader_platform_combine_path(out_fullpath, out_size, rel_base, path, NULL);
  2292. }
  2293. // Given a filename (file) and a list of paths (dir), try to find an existing
  2294. // file in the paths. If filename already is a path then no searching in the given paths.
  2295. //
  2296. // @return - A string in out_fullpath of either the full path or file.
  2297. static void loader_get_fullpath(const char *file, const char *dirs, size_t out_size, char *out_fullpath) {
  2298. if (!loader_platform_is_path(file) && *dirs) {
  2299. char *dirs_copy, *dir, *next_dir;
  2300. dirs_copy = loader_stack_alloc(strlen(dirs) + 1);
  2301. strcpy(dirs_copy, dirs);
  2302. // find if file exists after prepending paths in given list
  2303. for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir)); dir = next_dir) {
  2304. loader_platform_combine_path(out_fullpath, out_size, dir, file, NULL);
  2305. if (loader_platform_file_exists(out_fullpath)) {
  2306. return;
  2307. }
  2308. }
  2309. }
  2310. (void)snprintf(out_fullpath, out_size, "%s", file);
  2311. }
  2312. // Read a JSON file into a buffer.
  2313. //
  2314. // @return - A pointer to a cJSON object representing the JSON parse tree.
  2315. // This returned buffer should be freed by caller.
  2316. static VkResult loader_get_json(const struct loader_instance *inst, const char *filename, cJSON **json) {
  2317. FILE *file = NULL;
  2318. char *json_buf;
  2319. size_t len;
  2320. VkResult res = VK_SUCCESS;
  2321. if (NULL == json) {
  2322. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_get_json: Received invalid JSON file");
  2323. res = VK_ERROR_INITIALIZATION_FAILED;
  2324. goto out;
  2325. }
  2326. *json = NULL;
  2327. file = fopen(filename, "rb");
  2328. if (!file) {
  2329. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_get_json: Failed to open JSON file %s", filename);
  2330. res = VK_ERROR_INITIALIZATION_FAILED;
  2331. goto out;
  2332. }
  2333. // NOTE: We can't just use fseek(file, 0, SEEK_END) because that isn't guaranteed to be supported on all systems
  2334. do {
  2335. // We're just seeking the end of the file, so this buffer is never used
  2336. char buffer[256];
  2337. fread(buffer, 1, sizeof(buffer), file);
  2338. } while (!feof(file));
  2339. len = ftell(file);
  2340. fseek(file, 0, SEEK_SET);
  2341. json_buf = (char *)loader_stack_alloc(len + 1);
  2342. if (json_buf == NULL) {
  2343. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  2344. "loader_get_json: Failed to allocate space for "
  2345. "JSON file %s buffer of length %d",
  2346. filename, len);
  2347. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  2348. goto out;
  2349. }
  2350. if (fread(json_buf, sizeof(char), len, file) != len) {
  2351. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_get_json: Failed to read JSON file %s.", filename);
  2352. res = VK_ERROR_INITIALIZATION_FAILED;
  2353. goto out;
  2354. }
  2355. json_buf[len] = '\0';
  2356. // Can't be a valid json if the string is of length zero
  2357. if (len == 0) {
  2358. res = VK_ERROR_INITIALIZATION_FAILED;
  2359. goto out;
  2360. }
  2361. // Parse text from file
  2362. *json = cJSON_Parse(json_buf);
  2363. if (*json == NULL) {
  2364. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  2365. "loader_get_json: Failed to parse JSON file %s, "
  2366. "this is usually because something ran out of "
  2367. "memory.",
  2368. filename);
  2369. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  2370. goto out;
  2371. }
  2372. out:
  2373. if (NULL != file) {
  2374. fclose(file);
  2375. }
  2376. return res;
  2377. }
  2378. // Verify that all component layers in a meta-layer are valid.
// Verify every component layer named by the meta-layer "prop": each must
// exist in "instance_layers", must not be the meta-layer itself, and must
// match the meta-layer's MAJOR.MINOR API version.  Components that are
// themselves meta-layers are verified recursively.  As a side effect, each
// valid component's instance and device extensions are merged into "prop"'s
// own extension lists.  Returns true when every component checks out; the
// first failure logs (when inst != NULL), sets success=false, and stops.
static bool verifyMetaLayerComponentLayers(const struct loader_instance *inst, struct loader_layer_properties *prop,
                                           struct loader_layer_list *instance_layers) {
    bool success = true;
    const uint32_t expected_major = VK_VERSION_MAJOR(prop->info.specVersion);
    const uint32_t expected_minor = VK_VERSION_MINOR(prop->info.specVersion);

    for (uint32_t comp_layer = 0; comp_layer < prop->num_component_layers; comp_layer++) {
        if (!loaderFindLayerNameInList(prop->component_layer_names[comp_layer], instance_layers)) {
            if (NULL != inst) {
                loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                           "verifyMetaLayerComponentLayers: Meta-layer %s can't find component layer %s at index %d."
                           " Skipping this layer.",
                           prop->info.layerName, prop->component_layer_names[comp_layer], comp_layer);
            }
            success = false;
            break;
        } else {
            struct loader_layer_properties *comp_prop =
                loaderFindLayerProperty(prop->component_layer_names[comp_layer], instance_layers);
            if (comp_prop == NULL) {
                if (NULL != inst) {
                    loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                               "verifyMetaLayerComponentLayers: Meta-layer %s can't find property for component layer "
                               "%s at index %d. Skipping this layer.",
                               prop->info.layerName, prop->component_layer_names[comp_layer], comp_layer);
                }
                success = false;
                break;
            }

            // Check the version of each layer, they need to at least match MAJOR and MINOR
            uint32_t cur_major = VK_VERSION_MAJOR(comp_prop->info.specVersion);
            uint32_t cur_minor = VK_VERSION_MINOR(comp_prop->info.specVersion);
            if (cur_major != expected_major || cur_minor != expected_minor) {
                if (NULL != inst) {
                    loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                               "verifyMetaLayerComponentLayers: Meta-layer uses API version %d.%d, but component "
                               "layer %d uses API version %d.%d. Skipping this layer.",
                               expected_major, expected_minor, comp_layer, cur_major, cur_minor);
                }
                success = false;
                break;
            }

            // Make sure the layer isn't using it's own name
            if (!strcmp(prop->info.layerName, prop->component_layer_names[comp_layer])) {
                if (NULL != inst) {
                    loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                               "verifyMetaLayerComponentLayers: Meta-layer %s lists itself in its component layer "
                               "list at index %d. Skipping this layer.",
                               prop->info.layerName, comp_layer);
                }
                success = false;
                break;
            }
            if (comp_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
                if (NULL != inst) {
                    loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
                               "verifyMetaLayerComponentLayers: Adding meta-layer %s which also contains meta-layer %s",
                               prop->info.layerName, comp_prop->info.layerName);
                }

                // Make sure if the layer is using a meta-layer in its component list that we also verify that.
                if (!verifyMetaLayerComponentLayers(inst, comp_prop, instance_layers)) {
                    if (NULL != inst) {
                        loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                                   "Meta-layer %s component layer %s can not find all component layers."
                                   " Skipping this layer.",
                                   prop->info.layerName, prop->component_layer_names[comp_layer]);
                    }
                    success = false;
                    break;
                }
            }

            // Add any instance and device extensions from component layers to this layer
            // list, so that anyone querying extensions will only need to look at the meta-layer
            for (uint32_t ext = 0; ext < comp_prop->instance_extension_list.count; ext++) {
                if (NULL != inst) {
                    loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
                               "Meta-layer %s component layer %s adding instance extension %s", prop->info.layerName,
                               prop->component_layer_names[comp_layer], comp_prop->instance_extension_list.list[ext].extensionName);
                }

                // De-duplicate: only copy the extension if it isn't present yet.
                if (!has_vk_extension_property(&comp_prop->instance_extension_list.list[ext], &prop->instance_extension_list)) {
                    loader_add_to_ext_list(inst, &prop->instance_extension_list, 1, &comp_prop->instance_extension_list.list[ext]);
                }
            }

            for (uint32_t ext = 0; ext < comp_prop->device_extension_list.count; ext++) {
                if (NULL != inst) {
                    loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
                               "Meta-layer %s component layer %s adding device extension %s", prop->info.layerName,
                               prop->component_layer_names[comp_layer],
                               comp_prop->device_extension_list.list[ext].props.extensionName);
                }

                // De-duplicate: only copy the extension if it isn't present yet.
                if (!has_vk_dev_ext_property(&comp_prop->device_extension_list.list[ext].props, &prop->device_extension_list)) {
                    loader_add_to_dev_ext_list(inst, &prop->device_extension_list,
                                               &comp_prop->device_extension_list.list[ext].props, 0, NULL);
                }
            }
        }
    }
    if (success) {
        loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Meta-layer %s all %d component layers appear to be valid.",
                   prop->info.layerName, prop->num_component_layers);
    }
    return success;
}
  2481. // Verify that all meta-layers in a layer list are valid.
// Walk "instance_layers", dropping every meta-layer whose component layers do
// not validate, and report (via override_layer_present) whether an enabled
// implicit override layer remains in the list.
static void VerifyAllMetaLayers(struct loader_instance *inst, struct loader_layer_list *instance_layers,
                                bool *override_layer_present) {
    *override_layer_present = false;
    // Signed loop index: entries are removed in place below, which requires
    // stepping the index back one slot after each removal.
    for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) {
        struct loader_layer_properties *prop = &instance_layers->list[i];

        // If this is a meta-layer, make sure it is valid
        if ((prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) && !verifyMetaLayerComponentLayers(inst, prop, instance_layers)) {
            if (NULL != inst) {
                loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
                           "Removing meta-layer %s from instance layer list since it appears invalid.", prop->info.layerName);
            }

            // Delete the component layers
            loader_instance_heap_free(inst, prop->component_layer_names);
            if (prop->blacklist_layer_names != NULL) {
                loader_instance_heap_free(inst, prop->blacklist_layer_names);
            }
            if (prop->override_paths != NULL) {
                loader_instance_heap_free(inst, prop->override_paths);
            }

            // Remove the current invalid meta-layer from the layer list.  Use memmove since we are
            // overlapping the source and destination addresses.
            memmove(&instance_layers->list[i], &instance_layers->list[i + 1],
                    sizeof(struct loader_layer_properties) * (instance_layers->count - 1 - i));

            // Decrement the count (because we now have one less) and decrement the loop index since we need to
            // re-check this index.
            instance_layers->count--;
            i--;
        } else if (prop->is_override && loaderImplicitLayerIsEnabled(inst, prop)) {
            *override_layer_present = true;
        }
    }
}
  2514. // If the current working directory matches any app_key_path of the layers, remove all other override layers.
  2515. // Otherwise if no matching app_key was found, remove all but the global override layer, which has no app_key_path.
static void RemoveAllNonValidOverrideLayers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
    if (instance_layers == NULL) {
        return;
    }

    // cur_path is the path of the current executable, used as the app_key.
    char cur_path[MAX_STRING_SIZE];
    char *ret = loader_platform_executable_path(cur_path, sizeof(cur_path));
    if (ret == NULL) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "RemoveAllNonValidOverrideLayers: Failed to get executable path and name");
        return;
    }

    // Find out if there is an override layer with same the app_key_path as the path to the current executable.
    // If more than one is found, remove it and use the first layer
    // Remove any layers which aren't global and do not have the same app_key_path as the path to the current executable.
    bool found_active_override_layer = false;
    int global_layer_index = -1;
    for (uint32_t i = 0; i < instance_layers->count; i++) {
        struct loader_layer_properties *props = &instance_layers->list[i];
        if (strcmp(props->info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
            if (props->num_app_key_paths > 0) {  // not the global layer
                for (uint32_t j = 0; j < props->num_app_key_paths; j++) {
                    if (strcmp(props->app_key_paths[j], cur_path) == 0) {
                        if (!found_active_override_layer) {
                            found_active_override_layer = true;
                        } else {
                            loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                                       "RemoveAllNonValidOverrideLayers: Multiple override layers where the same"
                                       "path in app_keys was found. Using the first layer found");

                            // Remove duplicate active override layers that have the same app_key_path
                            // NOTE(review): after this removal "props" still points at slot i, whose
                            // contents have shifted, yet the j loop keeps reading props->app_key_paths --
                            // looks like a stale-pointer hazard; confirm against upstream fixes.
                            loaderRemoveLayerInList(inst, instance_layers, i);
                            i--;
                        }
                    }
                }
                if (!found_active_override_layer) {
                    // Remove non-global override layers that don't have an app_key that matches cur_path
                    loaderRemoveLayerInList(inst, instance_layers, i);
                    i--;
                }
            } else {
                // Global override layer (no app_key_paths): keep only the first one seen.
                if (global_layer_index == -1) {
                    global_layer_index = i;
                } else {
                    loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                               "RemoveAllNonValidOverrideLayers: Multiple global override layers "
                               "found. Using the first global layer found");
                    loaderRemoveLayerInList(inst, instance_layers, i);
                    i--;
                }
            }
        }
    }

    // Remove global layer if layer with same the app_key_path as the path to the current executable is found
    if (found_active_override_layer && global_layer_index >= 0) {
        loaderRemoveLayerInList(inst, instance_layers, global_layer_index);
    }

    // Should be at most 1 override layer in the list now.
    if (found_active_override_layer) {
        loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Using the override layer for app key %s", cur_path);
    } else if (global_layer_index >= 0) {
        loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Using the global override layer");
    }
}
  2579. // This structure is used to store the json file version
  2580. // in a more manageable way.
  2581. typedef struct {
  2582. uint16_t major;
  2583. uint16_t minor;
  2584. uint16_t patch;
  2585. } layer_json_version;
  2586. static inline bool layer_json_supports_pre_instance_tag(const layer_json_version *layer_json) {
  2587. // Supported versions started in 1.1.2, so anything newer
  2588. return layer_json->major > 1 || layer_json->minor > 1 || (layer_json->minor == 1 && layer_json->patch > 1);
  2589. }
  2590. static VkResult loaderReadLayerJson(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list,
  2591. cJSON *layer_node, layer_json_version version, cJSON *item, cJSON *disable_environment,
  2592. bool is_implicit, char *filename) {
  2593. char *temp;
  2594. char *name, *type, *library_path_str, *api_version;
  2595. char *implementation_version, *description;
  2596. cJSON *ext_item;
  2597. cJSON *library_path;
  2598. cJSON *component_layers;
  2599. cJSON *override_paths;
  2600. cJSON *blacklisted_layers;
  2601. VkExtensionProperties ext_prop;
  2602. VkResult result = VK_ERROR_INITIALIZATION_FAILED;
  2603. struct loader_layer_properties *props = NULL;
  2604. int i, j;
  2605. // The following are required in the "layer" object:
  2606. // (required) "name"
  2607. // (required) "type"
  2608. // (required) "library_path"
  2609. // (required) "api_version"
  2610. // (required) "implementation_version"
  2611. // (required) "description"
  2612. // (required for implicit layers) "disable_environment"
  2613. #define GET_JSON_OBJECT(node, var) \
  2614. { \
  2615. var = cJSON_GetObjectItem(node, #var); \
  2616. if (var == NULL) { \
  2617. layer_node = layer_node->next; \
  2618. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \
  2619. "Didn't find required layer object %s in manifest " \
  2620. "JSON file, skipping this layer", \
  2621. #var); \
  2622. goto out; \
  2623. } \
  2624. }
  2625. #define GET_JSON_ITEM(node, var) \
  2626. { \
  2627. item = cJSON_GetObjectItem(node, #var); \
  2628. if (item == NULL) { \
  2629. layer_node = layer_node->next; \
  2630. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \
  2631. "Didn't find required layer value %s in manifest JSON " \
  2632. "file, skipping this layer", \
  2633. #var); \
  2634. goto out; \
  2635. } \
  2636. temp = cJSON_Print(item); \
  2637. if (temp == NULL) { \
  2638. layer_node = layer_node->next; \
  2639. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \
  2640. "Problem accessing layer value %s in manifest JSON " \
  2641. "file, skipping this layer", \
  2642. #var); \
  2643. result = VK_ERROR_OUT_OF_HOST_MEMORY; \
  2644. goto out; \
  2645. } \
  2646. temp[strlen(temp) - 1] = '\0'; \
  2647. var = loader_stack_alloc(strlen(temp) + 1); \
  2648. strcpy(var, &temp[1]); \
  2649. cJSON_Free(temp); \
  2650. }
  2651. GET_JSON_ITEM(layer_node, name)
  2652. GET_JSON_ITEM(layer_node, type)
  2653. GET_JSON_ITEM(layer_node, api_version)
  2654. GET_JSON_ITEM(layer_node, implementation_version)
  2655. GET_JSON_ITEM(layer_node, description)
  2656. // Add list entry
  2657. if (!strcmp(type, "DEVICE")) {
  2658. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "Device layers are deprecated skipping this layer");
  2659. layer_node = layer_node->next;
  2660. goto out;
  2661. }
  2662. // Allow either GLOBAL or INSTANCE type interchangeably to handle
  2663. // layers that must work with older loaders
  2664. if (!strcmp(type, "INSTANCE") || !strcmp(type, "GLOBAL")) {
  2665. if (layer_instance_list == NULL) {
  2666. layer_node = layer_node->next;
  2667. goto out;
  2668. }
  2669. props = loaderGetNextLayerPropertySlot(inst, layer_instance_list);
  2670. if (NULL == props) {
  2671. // Error already triggered in loaderGetNextLayerPropertySlot.
  2672. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  2673. goto out;
  2674. }
  2675. props->type_flags = VK_LAYER_TYPE_FLAG_INSTANCE_LAYER;
  2676. if (!is_implicit) {
  2677. props->type_flags |= VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER;
  2678. }
  2679. } else {
  2680. layer_node = layer_node->next;
  2681. goto out;
  2682. }
  2683. // Expiration date for override layer. Field starte with JSON file 1.1.2 and
  2684. // is completely optional. So, no check put in place.
  2685. if (!strcmp(name, VK_OVERRIDE_LAYER_NAME)) {
  2686. cJSON *expiration;
  2687. if (version.major < 1 && version.minor < 1 && version.patch < 2) {
  2688. loader_log(
  2689. inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  2690. "Override layer expiration date not added until version 1.1.2. Please update JSON file version appropriately.");
  2691. }
  2692. props->is_override = true;
  2693. expiration = cJSON_GetObjectItem(layer_node, "expiration_date");
  2694. if (NULL != expiration) {
  2695. char date_copy[32];
  2696. uint8_t cur_item = 0;
  2697. // Get the string for the current item
  2698. temp = cJSON_Print(expiration);
  2699. if (temp == NULL) {
  2700. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  2701. "Problem accessing layer value 'expiration_date' in manifest JSON file, skipping this layer");
  2702. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  2703. goto out;
  2704. }
  2705. temp[strlen(temp) - 1] = '\0';
  2706. strcpy(date_copy, &temp[1]);
  2707. cJSON_Free(temp);
  2708. if (strlen(date_copy) == 16) {
  2709. char *cur_start = &date_copy[0];
  2710. char *next_dash = strchr(date_copy, '-');
  2711. if (NULL != next_dash) {
  2712. while (cur_item < 5 && strlen(cur_start)) {
  2713. if (next_dash != NULL) {
  2714. *next_dash = '\0';
  2715. }
  2716. switch (cur_item) {
  2717. case 0: // Year
  2718. props->expiration.year = atoi(cur_start);
  2719. break;
  2720. case 1: // Month
  2721. props->expiration.month = atoi(cur_start);
  2722. break;
  2723. case 2: // Day
  2724. props->expiration.day = atoi(cur_start);
  2725. break;
  2726. case 3: // Hour
  2727. props->expiration.hour = atoi(cur_start);
  2728. break;
  2729. case 4: // Minute
  2730. props->expiration.minute = atoi(cur_start);
  2731. props->has_expiration = true;
  2732. break;
  2733. default: // Ignore
  2734. break;
  2735. }
  2736. if (next_dash != NULL) {
  2737. cur_start = next_dash + 1;
  2738. next_dash = strchr(cur_start, '-');
  2739. }
  2740. cur_item++;
  2741. }
  2742. }
  2743. }
  2744. }
  2745. }
  2746. // Library path no longer required unless component_layers is also not defined
  2747. library_path = cJSON_GetObjectItem(layer_node, "library_path");
  2748. component_layers = cJSON_GetObjectItem(layer_node, "component_layers");
  2749. if (NULL != library_path) {
  2750. if (NULL != component_layers) {
  2751. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  2752. "Indicating meta-layer-specific component_layers, but also "
  2753. "defining layer library path. Both are not compatible, so "
  2754. "skipping this layer");
  2755. goto out;
  2756. }
  2757. props->num_component_layers = 0;
  2758. props->component_layer_names = NULL;
  2759. temp = cJSON_Print(library_path);
  2760. if (NULL == temp) {
  2761. layer_node = layer_node->next;
  2762. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  2763. "Problem accessing layer value library_path in manifest JSON "
  2764. "file, skipping this layer");
  2765. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  2766. goto out;
  2767. }
  2768. temp[strlen(temp) - 1] = '\0';
  2769. library_path_str = loader_stack_alloc(strlen(temp) + 1);
  2770. strcpy(library_path_str, &temp[1]);
  2771. cJSON_Free(temp);
  2772. char *fullpath = props->lib_name;
  2773. char *rel_base;
  2774. if (NULL != library_path_str) {
  2775. if (loader_platform_is_path(library_path_str)) {
  2776. // A relative or absolute path
  2777. char *name_copy = loader_stack_alloc(strlen(filename) + 1);
  2778. strcpy(name_copy, filename);
  2779. rel_base = loader_platform_dirname(name_copy);
  2780. loader_expand_path(library_path_str, rel_base, MAX_STRING_SIZE, fullpath);
  2781. } else {
  2782. // A filename which is assumed in a system directory
  2783. #if defined(DEFAULT_VK_LAYERS_PATH)
  2784. loader_get_fullpath(library_path_str, DEFAULT_VK_LAYERS_PATH, MAX_STRING_SIZE, fullpath);
  2785. #else
  2786. loader_get_fullpath(library_path_str, "", MAX_STRING_SIZE, fullpath);
  2787. #endif
  2788. }
  2789. }
  2790. } else if (NULL != component_layers) {
  2791. if (version.major == 1 && (version.minor < 1 || version.patch < 1)) {
  2792. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  2793. "Indicating meta-layer-specific component_layers, but using older "
  2794. "JSON file version.");
  2795. }
  2796. int count = cJSON_GetArraySize(component_layers);
  2797. props->num_component_layers = count;
  2798. // Allocate buffer for layer names
  2799. props->component_layer_names =
  2800. loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  2801. if (NULL == props->component_layer_names) {
  2802. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  2803. goto out;
  2804. }
  2805. // Copy the component layers into the array
  2806. for (i = 0; i < count; i++) {
  2807. cJSON *comp_layer = cJSON_GetArrayItem(component_layers, i);
  2808. if (NULL != comp_layer) {
  2809. temp = cJSON_Print(comp_layer);
  2810. if (NULL == temp) {
  2811. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  2812. goto out;
  2813. }
  2814. temp[strlen(temp) - 1] = '\0';
  2815. strncpy(props->component_layer_names[i], temp + 1, MAX_STRING_SIZE - 1);
  2816. props->component_layer_names[i][MAX_STRING_SIZE - 1] = '\0';
  2817. cJSON_Free(temp);
  2818. }
  2819. }
  2820. // This is now, officially, a meta-layer
  2821. props->type_flags |= VK_LAYER_TYPE_FLAG_META_LAYER;
  2822. loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Encountered meta-layer %s", name);
  2823. // Make sure we set up other things so we head down the correct branches below
  2824. library_path_str = NULL;
  2825. } else {
  2826. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  2827. "Layer missing both library_path and component_layers fields. One or the "
  2828. "other MUST be defined. Skipping this layer");
  2829. goto out;
  2830. }
  2831. props->num_blacklist_layers = 0;
  2832. props->blacklist_layer_names = NULL;
  2833. blacklisted_layers = cJSON_GetObjectItem(layer_node, "blacklisted_layers");
  2834. if (blacklisted_layers != NULL) {
  2835. if (strcmp(name, VK_OVERRIDE_LAYER_NAME)) {
  2836. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  2837. "Layer %s contains a blacklist, but a blacklist can only be provided by the override metalayer. "
  2838. "This blacklist will be ignored.",
  2839. name);
  2840. } else {
  2841. props->num_blacklist_layers = cJSON_GetArraySize(blacklisted_layers);
  2842. if (props->num_blacklist_layers > 0) {
  2843. // Allocate the blacklist array
  2844. props->blacklist_layer_names = loader_instance_heap_alloc(
  2845. inst, sizeof(char[MAX_STRING_SIZE]) * props->num_blacklist_layers, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  2846. if (props->blacklist_layer_names == NULL) {
  2847. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  2848. goto out;
  2849. }
  2850. // Copy the blacklisted layers into the array
  2851. for (i = 0; i < (int)props->num_blacklist_layers; ++i) {
  2852. cJSON *black_layer = cJSON_GetArrayItem(blacklisted_layers, i);
  2853. if (black_layer == NULL) {
  2854. continue;
  2855. }
  2856. temp = cJSON_Print(black_layer);
  2857. if (temp == NULL) {
  2858. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  2859. goto out;
  2860. }
  2861. temp[strlen(temp) - 1] = '\0';
  2862. strncpy(props->blacklist_layer_names[i], temp + 1, MAX_STRING_SIZE - 1);
  2863. props->blacklist_layer_names[i][MAX_STRING_SIZE - 1] = '\0';
  2864. cJSON_Free(temp);
  2865. }
  2866. }
  2867. }
  2868. }
  2869. override_paths = cJSON_GetObjectItem(layer_node, "override_paths");
  2870. if (NULL != override_paths) {
  2871. if (version.major == 1 && (version.minor < 1 || version.patch < 1)) {
  2872. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  2873. "Indicating meta-layer-specific override paths, but using older "
  2874. "JSON file version.");
  2875. }
  2876. int count = cJSON_GetArraySize(override_paths);
  2877. props->num_override_paths = count;
  2878. if (count > 0) {
  2879. // Allocate buffer for override paths
  2880. props->override_paths =
  2881. loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  2882. if (NULL == props->override_paths) {
  2883. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  2884. goto out;
  2885. }
  2886. // Copy the override paths into the array
  2887. for (i = 0; i < count; i++) {
  2888. cJSON *override_path = cJSON_GetArrayItem(override_paths, i);
  2889. if (NULL != override_path) {
  2890. temp = cJSON_Print(override_path);
  2891. if (NULL == temp) {
  2892. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  2893. goto out;
  2894. }
  2895. temp[strlen(temp) - 1] = '\0';
  2896. strncpy(props->override_paths[i], temp + 1, MAX_STRING_SIZE - 1);
  2897. props->override_paths[i][MAX_STRING_SIZE - 1] = '\0';
  2898. cJSON_Free(temp);
  2899. }
  2900. }
  2901. }
  2902. }
  2903. if (is_implicit) {
  2904. GET_JSON_OBJECT(layer_node, disable_environment)
  2905. }
  2906. #undef GET_JSON_ITEM
  2907. #undef GET_JSON_OBJECT
  2908. strncpy(props->info.layerName, name, sizeof(props->info.layerName));
  2909. props->info.layerName[sizeof(props->info.layerName) - 1] = '\0';
  2910. props->info.specVersion = loader_make_version(api_version);
  2911. props->info.implementationVersion = atoi(implementation_version);
  2912. strncpy((char *)props->info.description, description, sizeof(props->info.description));
  2913. props->info.description[sizeof(props->info.description) - 1] = '\0';
  2914. if (is_implicit) {
  2915. if (!disable_environment || !disable_environment->child) {
  2916. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  2917. "Didn't find required layer child value disable_environment"
  2918. "in manifest JSON file, skipping this layer");
  2919. layer_node = layer_node->next;
  2920. goto out;
  2921. }
  2922. strncpy(props->disable_env_var.name, disable_environment->child->string, sizeof(props->disable_env_var.name));
  2923. props->disable_env_var.name[sizeof(props->disable_env_var.name) - 1] = '\0';
  2924. strncpy(props->disable_env_var.value, disable_environment->child->valuestring, sizeof(props->disable_env_var.value));
  2925. props->disable_env_var.value[sizeof(props->disable_env_var.value) - 1] = '\0';
  2926. }
  2927. // Now get all optional items and objects and put in list:
  2928. // functions
  2929. // instance_extensions
  2930. // device_extensions
  2931. // enable_environment (implicit layers only)
  2932. #define GET_JSON_OBJECT(node, var) \
  2933. { var = cJSON_GetObjectItem(node, #var); }
  2934. #define GET_JSON_ITEM(node, var) \
  2935. { \
  2936. item = cJSON_GetObjectItem(node, #var); \
  2937. if (item != NULL) { \
  2938. temp = cJSON_Print(item); \
  2939. if (temp != NULL) { \
  2940. temp[strlen(temp) - 1] = '\0'; \
  2941. var = loader_stack_alloc(strlen(temp) + 1); \
  2942. strcpy(var, &temp[1]); \
  2943. cJSON_Free(temp); \
  2944. } else { \
  2945. result = VK_ERROR_OUT_OF_HOST_MEMORY; \
  2946. goto out; \
  2947. } \
  2948. } \
  2949. }
  2950. cJSON *instance_extensions, *device_extensions, *functions, *enable_environment;
  2951. cJSON *entrypoints = NULL;
  2952. char *vkGetInstanceProcAddr = NULL;
  2953. char *vkGetDeviceProcAddr = NULL;
  2954. char *vkNegotiateLoaderLayerInterfaceVersion = NULL;
  2955. char *spec_version = NULL;
  2956. char **entry_array = NULL;
  2957. cJSON *app_keys = NULL;
  2958. // Layer interface functions
  2959. // vkGetInstanceProcAddr
  2960. // vkGetDeviceProcAddr
  2961. // vkNegotiateLoaderLayerInterfaceVersion (starting with JSON file 1.1.0)
  2962. GET_JSON_OBJECT(layer_node, functions)
  2963. if (functions != NULL) {
  2964. if (version.major > 1 || version.minor >= 1) {
  2965. GET_JSON_ITEM(functions, vkNegotiateLoaderLayerInterfaceVersion)
  2966. if (vkNegotiateLoaderLayerInterfaceVersion != NULL)
  2967. strncpy(props->functions.str_negotiate_interface, vkNegotiateLoaderLayerInterfaceVersion,
  2968. sizeof(props->functions.str_negotiate_interface));
  2969. props->functions.str_negotiate_interface[sizeof(props->functions.str_negotiate_interface) - 1] = '\0';
  2970. } else {
  2971. props->functions.str_negotiate_interface[0] = '\0';
  2972. }
  2973. GET_JSON_ITEM(functions, vkGetInstanceProcAddr)
  2974. GET_JSON_ITEM(functions, vkGetDeviceProcAddr)
  2975. if (vkGetInstanceProcAddr != NULL) {
  2976. strncpy(props->functions.str_gipa, vkGetInstanceProcAddr, sizeof(props->functions.str_gipa));
  2977. if (version.major > 1 || version.minor >= 1) {
  2978. loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  2979. "Layer \"%s\" using deprecated \'vkGetInstanceProcAddr\' tag which was deprecated starting with JSON "
  2980. "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
  2981. "compatibility reasons it may be desirable to continue using the deprecated tag.",
  2982. name);
  2983. }
  2984. }
  2985. props->functions.str_gipa[sizeof(props->functions.str_gipa) - 1] = '\0';
  2986. if (vkGetDeviceProcAddr != NULL) {
  2987. strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr, sizeof(props->functions.str_gdpa));
  2988. if (version.major > 1 || version.minor >= 1) {
  2989. loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  2990. "Layer \"%s\" using deprecated \'vkGetDeviceProcAddr\' tag which was deprecated starting with JSON "
  2991. "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
  2992. "compatibility reasons it may be desirable to continue using the deprecated tag.",
  2993. name);
  2994. }
  2995. }
  2996. props->functions.str_gdpa[sizeof(props->functions.str_gdpa) - 1] = '\0';
  2997. }
  2998. // instance_extensions
  2999. // array of {
  3000. // name
  3001. // spec_version
  3002. // }
  3003. GET_JSON_OBJECT(layer_node, instance_extensions)
  3004. if (instance_extensions != NULL) {
  3005. int count = cJSON_GetArraySize(instance_extensions);
  3006. for (i = 0; i < count; i++) {
  3007. ext_item = cJSON_GetArrayItem(instance_extensions, i);
  3008. GET_JSON_ITEM(ext_item, name)
  3009. if (name != NULL) {
  3010. strncpy(ext_prop.extensionName, name, sizeof(ext_prop.extensionName));
  3011. ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] = '\0';
  3012. }
  3013. GET_JSON_ITEM(ext_item, spec_version)
  3014. if (NULL != spec_version) {
  3015. ext_prop.specVersion = atoi(spec_version);
  3016. } else {
  3017. ext_prop.specVersion = 0;
  3018. }
  3019. bool ext_unsupported = wsi_unsupported_instance_extension(&ext_prop);
  3020. if (!ext_unsupported) {
  3021. loader_add_to_ext_list(inst, &props->instance_extension_list, 1, &ext_prop);
  3022. }
  3023. }
  3024. }
  3025. // device_extensions
  3026. // array of {
  3027. // name
  3028. // spec_version
  3029. // entrypoints
  3030. // }
  3031. GET_JSON_OBJECT(layer_node, device_extensions)
  3032. if (device_extensions != NULL) {
  3033. int count = cJSON_GetArraySize(device_extensions);
  3034. for (i = 0; i < count; i++) {
  3035. ext_item = cJSON_GetArrayItem(device_extensions, i);
  3036. GET_JSON_ITEM(ext_item, name)
  3037. GET_JSON_ITEM(ext_item, spec_version)
  3038. if (name != NULL) {
  3039. strncpy(ext_prop.extensionName, name, sizeof(ext_prop.extensionName));
  3040. ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] = '\0';
  3041. }
  3042. if (NULL != spec_version) {
  3043. ext_prop.specVersion = atoi(spec_version);
  3044. } else {
  3045. ext_prop.specVersion = 0;
  3046. }
  3047. // entrypoints = cJSON_GetObjectItem(ext_item, "entrypoints");
  3048. GET_JSON_OBJECT(ext_item, entrypoints)
  3049. int entry_count;
  3050. if (entrypoints == NULL) {
  3051. loader_add_to_dev_ext_list(inst, &props->device_extension_list, &ext_prop, 0, NULL);
  3052. continue;
  3053. }
  3054. entry_count = cJSON_GetArraySize(entrypoints);
  3055. if (entry_count) {
  3056. entry_array = (char **)loader_stack_alloc(sizeof(char *) * entry_count);
  3057. }
  3058. for (j = 0; j < entry_count; j++) {
  3059. ext_item = cJSON_GetArrayItem(entrypoints, j);
  3060. if (ext_item != NULL) {
  3061. temp = cJSON_Print(ext_item);
  3062. if (NULL == temp) {
  3063. entry_array[j] = NULL;
  3064. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  3065. goto out;
  3066. }
  3067. temp[strlen(temp) - 1] = '\0';
  3068. entry_array[j] = loader_stack_alloc(strlen(temp) + 1);
  3069. strcpy(entry_array[j], &temp[1]);
  3070. cJSON_Free(temp);
  3071. }
  3072. }
  3073. loader_add_to_dev_ext_list(inst, &props->device_extension_list, &ext_prop, entry_count, entry_array);
  3074. }
  3075. }
  3076. if (is_implicit) {
  3077. GET_JSON_OBJECT(layer_node, enable_environment)
  3078. // enable_environment is optional
  3079. if (enable_environment) {
  3080. strncpy(props->enable_env_var.name, enable_environment->child->string, sizeof(props->enable_env_var.name));
  3081. props->enable_env_var.name[sizeof(props->enable_env_var.name) - 1] = '\0';
  3082. strncpy(props->enable_env_var.value, enable_environment->child->valuestring, sizeof(props->enable_env_var.value));
  3083. props->enable_env_var.value[sizeof(props->enable_env_var.value) - 1] = '\0';
  3084. }
  3085. }
  3086. // Read in the pre-instance stuff
  3087. cJSON *pre_instance = cJSON_GetObjectItem(layer_node, "pre_instance_functions");
  3088. if (pre_instance) {
  3089. if (!layer_json_supports_pre_instance_tag(&version)) {
  3090. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  3091. "Found pre_instance_functions section in layer from \"%s\". "
  3092. "This section is only valid in manifest version 1.1.2 or later. The section will be ignored",
  3093. filename);
  3094. } else if (!is_implicit) {
  3095. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  3096. "Found pre_instance_functions section in explicit layer from "
  3097. "\"%s\". This section is only valid in implicit layers. The section will be ignored",
  3098. filename);
  3099. } else {
  3100. cJSON *inst_ext_json = cJSON_GetObjectItem(pre_instance, "vkEnumerateInstanceExtensionProperties");
  3101. if (inst_ext_json) {
  3102. char *inst_ext_name = cJSON_Print(inst_ext_json);
  3103. if (inst_ext_name == NULL) {
  3104. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  3105. goto out;
  3106. }
  3107. size_t len = strlen(inst_ext_name) >= MAX_STRING_SIZE ? MAX_STRING_SIZE - 3 : strlen(inst_ext_name) - 2;
  3108. strncpy(props->pre_instance_functions.enumerate_instance_extension_properties, inst_ext_name + 1, len);
  3109. props->pre_instance_functions.enumerate_instance_extension_properties[len] = '\0';
  3110. cJSON_Free(inst_ext_name);
  3111. }
  3112. cJSON *inst_layer_json = cJSON_GetObjectItem(pre_instance, "vkEnumerateInstanceLayerProperties");
  3113. if (inst_layer_json) {
  3114. char *inst_layer_name = cJSON_Print(inst_layer_json);
  3115. if (inst_layer_name == NULL) {
  3116. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  3117. goto out;
  3118. }
  3119. size_t len = strlen(inst_layer_name) >= MAX_STRING_SIZE ? MAX_STRING_SIZE - 3 : strlen(inst_layer_name) - 2;
  3120. strncpy(props->pre_instance_functions.enumerate_instance_layer_properties, inst_layer_name + 1, len);
  3121. props->pre_instance_functions.enumerate_instance_layer_properties[len] = '\0';
  3122. cJSON_Free(inst_layer_name);
  3123. }
  3124. cJSON *inst_version_json = cJSON_GetObjectItem(pre_instance, "vkEnumerateInstanceVersion");
  3125. if (inst_version_json) {
  3126. char *inst_version_name = cJSON_Print(inst_version_json);
  3127. if (inst_version_json) {
  3128. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  3129. goto out;
  3130. }
  3131. size_t len = strlen(inst_version_name) >= MAX_STRING_SIZE ? MAX_STRING_SIZE - 3 : strlen(inst_version_name) - 2;
  3132. strncpy(props->pre_instance_functions.enumerate_instance_version, inst_version_name + 1, len);
  3133. props->pre_instance_functions.enumerate_instance_version[len] = '\0';
  3134. cJSON_Free(inst_version_name);
  3135. }
  3136. }
  3137. }
  3138. props->num_app_key_paths = 0;
  3139. props->app_key_paths = NULL;
  3140. app_keys = cJSON_GetObjectItem(layer_node, "app_keys");
  3141. if (app_keys != NULL) {
  3142. if (strcmp(name, VK_OVERRIDE_LAYER_NAME)) {
  3143. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  3144. "Layer %s contains app_keys, but any app_keys can only be provided by the override metalayer. "
  3145. "These will be ignored.",
  3146. name);
  3147. } else {
  3148. props->num_app_key_paths = cJSON_GetArraySize(app_keys);
  3149. // Allocate the blacklist array
  3150. props->app_key_paths = loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * props->num_app_key_paths,
  3151. VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  3152. if (props->app_key_paths == NULL) {
  3153. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  3154. goto out;
  3155. }
  3156. // Copy the app_key_paths into the array
  3157. for (i = 0; i < (int)props->num_app_key_paths; ++i) {
  3158. cJSON *app_key_path = cJSON_GetArrayItem(app_keys, i);
  3159. if (app_key_path == NULL) {
  3160. continue;
  3161. }
  3162. temp = cJSON_Print(app_key_path);
  3163. if (temp == NULL) {
  3164. result = VK_ERROR_OUT_OF_HOST_MEMORY;
  3165. goto out;
  3166. }
  3167. temp[strlen(temp) - 1] = '\0';
  3168. strncpy(props->app_key_paths[i], temp + 1, MAX_STRING_SIZE - 1);
  3169. props->app_key_paths[i][MAX_STRING_SIZE - 1] = '\0';
  3170. cJSON_Free(temp);
  3171. }
  3172. }
  3173. }
  3174. result = VK_SUCCESS;
  3175. out:
  3176. #undef GET_JSON_ITEM
  3177. #undef GET_JSON_OBJECT
  3178. if (VK_SUCCESS != result && NULL != props) {
  3179. if (NULL != props->blacklist_layer_names) {
  3180. loader_instance_heap_free(inst, props->blacklist_layer_names);
  3181. }
  3182. if (NULL != props->component_layer_names) {
  3183. loader_instance_heap_free(inst, props->component_layer_names);
  3184. }
  3185. if (NULL != props->override_paths) {
  3186. loader_instance_heap_free(inst, props->override_paths);
  3187. }
  3188. if (NULL != props->app_key_paths) {
  3189. loader_instance_heap_free(inst, props->app_key_paths);
  3190. }
  3191. props->num_blacklist_layers = 0;
  3192. props->blacklist_layer_names = NULL;
  3193. props->num_component_layers = 0;
  3194. props->component_layer_names = NULL;
  3195. props->num_override_paths = 0;
  3196. props->override_paths = NULL;
  3197. props->num_app_key_paths = 0;
  3198. props->app_key_paths = NULL;
  3199. }
  3200. return result;
  3201. }
  3202. static inline bool isValidLayerJsonVersion(const layer_json_version *layer_json) {
  3203. // Supported versions are: 1.0.0, 1.0.1, and 1.1.0 - 1.1.2.
  3204. if ((layer_json->major == 1 && layer_json->minor == 1 && layer_json->patch < 3) ||
  3205. (layer_json->major == 1 && layer_json->minor == 0 && layer_json->patch < 2)) {
  3206. return true;
  3207. }
  3208. return false;
  3209. }
  3210. static inline bool layerJsonSupportsMultipleLayers(const layer_json_version *layer_json) {
  3211. // Supported versions started in 1.0.1, so anything newer
  3212. if ((layer_json->major > 1 || layer_json->minor > 0 || layer_json->patch > 1)) {
  3213. return true;
  3214. }
  3215. return false;
  3216. }
  3217. // Given a cJSON struct (json) of the top level JSON object from layer manifest
  3218. // file, add entry to the layer_list. Fill out the layer_properties in this list
  3219. // entry from the input cJSON object.
  3220. //
  3221. // \returns
  3222. // void
  3223. // layer_list has a new entry and initialized accordingly.
  3224. // If the json input object does not have all the required fields no entry
  3225. // is added to the list.
  3226. static VkResult loaderAddLayerProperties(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list,
  3227. cJSON *json, bool is_implicit, char *filename) {
  3228. // The following Fields in layer manifest file that are required:
  3229. // - "file_format_version"
  3230. // - If more than one "layer" object are used, then the "layers" array is
  3231. // required
  3232. VkResult result = VK_ERROR_INITIALIZATION_FAILED;
  3233. cJSON *item, *layers_node, *layer_node;
  3234. layer_json_version json_version = {0, 0, 0};
  3235. char *vers_tok;
  3236. cJSON *disable_environment = NULL;
  3237. // Make sure sure the top level json value is an object
  3238. if (!json || json->type != 6) {
  3239. goto out;
  3240. }
  3241. item = cJSON_GetObjectItem(json, "file_format_version");
  3242. if (item == NULL) {
  3243. goto out;
  3244. }
  3245. char *file_vers = cJSON_PrintUnformatted(item);
  3246. if (NULL == file_vers) {
  3247. goto out;
  3248. }
  3249. loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Found manifest file %s, version %s", filename, file_vers);
  3250. // Get the major/minor/and patch as integers for easier comparison
  3251. vers_tok = strtok(file_vers, ".\"\n\r");
  3252. if (NULL != vers_tok) {
  3253. json_version.major = (uint16_t)atoi(vers_tok);
  3254. vers_tok = strtok(NULL, ".\"\n\r");
  3255. if (NULL != vers_tok) {
  3256. json_version.minor = (uint16_t)atoi(vers_tok);
  3257. vers_tok = strtok(NULL, ".\"\n\r");
  3258. if (NULL != vers_tok) {
  3259. json_version.patch = (uint16_t)atoi(vers_tok);
  3260. }
  3261. }
  3262. }
  3263. if (!isValidLayerJsonVersion(&json_version)) {
  3264. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  3265. "loaderAddLayerProperties: %s invalid layer manifest file version %d.%d.%d. May cause errors.", filename,
  3266. json_version.major, json_version.minor, json_version.patch);
  3267. }
  3268. cJSON_Free(file_vers);
  3269. // If "layers" is present, read in the array of layer objects
  3270. layers_node = cJSON_GetObjectItem(json, "layers");
  3271. if (layers_node != NULL) {
  3272. int numItems = cJSON_GetArraySize(layers_node);
  3273. if (!layerJsonSupportsMultipleLayers(&json_version)) {
  3274. loader_log(
  3275. inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  3276. "loaderAddLayerProperties: \'layers\' tag not supported until file version 1.0.1, but %s is reporting version %s",
  3277. filename, file_vers);
  3278. }
  3279. for (int curLayer = 0; curLayer < numItems; curLayer++) {
  3280. layer_node = cJSON_GetArrayItem(layers_node, curLayer);
  3281. if (layer_node == NULL) {
  3282. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  3283. "loaderAddLayerProperties: Can not find 'layers' array element %d object in manifest JSON file %s. "
  3284. "Skipping this file",
  3285. curLayer, filename);
  3286. goto out;
  3287. }
  3288. result = loaderReadLayerJson(inst, layer_instance_list, layer_node, json_version, item, disable_environment,
  3289. is_implicit, filename);
  3290. }
  3291. } else {
  3292. // Otherwise, try to read in individual layers
  3293. layer_node = cJSON_GetObjectItem(json, "layer");
  3294. if (layer_node == NULL) {
  3295. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  3296. "loaderAddLayerProperties: Can not find 'layer' object in manifest JSON file %s. Skipping this file.",
  3297. filename);
  3298. goto out;
  3299. }
  3300. // Loop through all "layer" objects in the file to get a count of them
  3301. // first.
  3302. uint16_t layer_count = 0;
  3303. cJSON *tempNode = layer_node;
  3304. do {
  3305. tempNode = tempNode->next;
  3306. layer_count++;
  3307. } while (tempNode != NULL);
  3308. // Throw a warning if we encounter multiple "layer" objects in file
  3309. // versions newer than 1.0.0. Having multiple objects with the same
  3310. // name at the same level is actually a JSON standard violation.
  3311. if (layer_count > 1 && layerJsonSupportsMultipleLayers(&json_version)) {
  3312. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  3313. "loaderAddLayerProperties: Multiple 'layer' nodes are deprecated starting in file version \"1.0.1\". "
  3314. "Please use 'layers' : [] array instead in %s.",
  3315. filename);
  3316. } else {
  3317. do {
  3318. result = loaderReadLayerJson(inst, layer_instance_list, layer_node, json_version, item, disable_environment,
  3319. is_implicit, filename);
  3320. layer_node = layer_node->next;
  3321. } while (layer_node != NULL);
  3322. }
  3323. }
  3324. out:
  3325. return result;
  3326. }
  3327. static inline size_t DetermineDataFilePathSize(const char *cur_path, size_t relative_path_size) {
  3328. size_t path_size = 0;
  3329. if (NULL != cur_path) {
  3330. // For each folder in cur_path, (detected by finding additional
  3331. // path separators in the string) we need to add the relative path on
  3332. // the end. Plus, leave an additional two slots on the end to add an
  3333. // additional directory slash and path separator if needed
  3334. path_size += strlen(cur_path) + relative_path_size + 2;
  3335. for (const char *x = cur_path; *x; ++x) {
  3336. if (*x == PATH_SEPARATOR) {
  3337. path_size += relative_path_size + 2;
  3338. }
  3339. }
  3340. }
  3341. return path_size;
  3342. }
// Append generated search paths into the buffer at '*output_path'.
// For every non-empty, PATH_SEPARATOR-delimited entry in 'cur_path' this
// writes: <entry>[<DIRECTORY_SYMBOL>]<relative_path><PATH_SEPARATOR>,
// advancing the write cursor as it goes.  On return '*output_path' points one
// past the last byte written, so the caller can chain multiple calls into one
// buffer.  No bounds checking is done here -- the buffer is assumed to have
// been sized with DetermineDataFilePathSize (TODO confirm against callers).
static inline void CopyDataFilePath(const char *cur_path, const char *relative_path, size_t relative_path_size,
                                    char **output_path) {
    if (NULL != cur_path) {
        uint32_t start = 0;
        uint32_t stop = 0;
        char *cur_write = *output_path;
        while (cur_path[start] != '\0') {
            // Skip any run of separators (also tolerates empty entries).
            while (cur_path[start] == PATH_SEPARATOR) {
                start++;
            }
            // Scan to the end of the current entry.
            stop = start;
            while (cur_path[stop] != PATH_SEPARATOR && cur_path[stop] != '\0') {
                stop++;
            }
            const size_t s = stop - start;
            if (s) {
                // Copy the entry itself.
                memcpy(cur_write, &cur_path[start], s);
                cur_write += s;
                // If last symbol written was not a directory symbol, add it.
                if (*(cur_write - 1) != DIRECTORY_SYMBOL) {
                    *cur_write++ = DIRECTORY_SYMBOL;
                }
                // Append the relative suffix (may be empty) and terminate the
                // entry with a separator so entries stay distinguishable.
                if (relative_path_size > 0) {
                    memcpy(cur_write, relative_path, relative_path_size);
                    cur_write += relative_path_size;
                }
                *cur_write++ = PATH_SEPARATOR;
                start = stop;
            }
        }
        // Hand the advanced cursor back to the caller for chaining.
        *output_path = cur_write;
    }
}
  3376. // Check to see if there's enough space in the data file list. If not, add some.
  3377. static inline VkResult CheckAndAdjustDataFileList(const struct loader_instance *inst, struct loader_data_files *out_files) {
  3378. if (out_files->count == 0) {
  3379. out_files->filename_list = loader_instance_heap_alloc(inst, 64 * sizeof(char *), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
  3380. if (NULL == out_files->filename_list) {
  3381. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  3382. "CheckAndAdjustDataFileList: Failed to allocate space for manifest file name list");
  3383. return VK_ERROR_OUT_OF_HOST_MEMORY;
  3384. }
  3385. out_files->alloc_count = 64;
  3386. } else if (out_files->count == out_files->alloc_count) {
  3387. size_t new_size = out_files->alloc_count * sizeof(char *) * 2;
  3388. void *new_ptr = loader_instance_heap_realloc(inst, out_files->filename_list, out_files->alloc_count * sizeof(char *),
  3389. new_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
  3390. if (NULL == new_ptr) {
  3391. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  3392. "CheckAndAdjustDataFileList: Failed to reallocate space for manifest file name list");
  3393. return VK_ERROR_OUT_OF_HOST_MEMORY;
  3394. }
  3395. out_files->filename_list = new_ptr;
  3396. out_files->alloc_count *= 2;
  3397. }
  3398. return VK_SUCCESS;
  3399. }
  3400. // If the file found is a manifest file name, add it to the out_files manifest list.
  3401. static VkResult AddIfManifestFile(const struct loader_instance *inst, const char *file_name, struct loader_data_files *out_files) {
  3402. VkResult vk_result = VK_SUCCESS;
  3403. if (NULL == file_name || NULL == out_files) {
  3404. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "AddIfManfistFile: Received NULL pointer");
  3405. vk_result = VK_ERROR_INITIALIZATION_FAILED;
  3406. goto out;
  3407. }
  3408. // Look for files ending with ".json" suffix
  3409. size_t name_len = strlen(file_name);
  3410. const char *name_suffix = file_name + name_len - 5;
  3411. if ((name_len < 5) || 0 != strncmp(name_suffix, ".json", 5)) {
  3412. // Use incomplete to indicate invalid name, but to keep going.
  3413. vk_result = VK_INCOMPLETE;
  3414. goto out;
  3415. }
  3416. // Check and allocate space in the manifest list if necessary
  3417. vk_result = CheckAndAdjustDataFileList(inst, out_files);
  3418. if (VK_SUCCESS != vk_result) {
  3419. goto out;
  3420. }
  3421. out_files->filename_list[out_files->count] =
  3422. loader_instance_heap_alloc(inst, strlen(file_name) + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
  3423. if (out_files->filename_list[out_files->count] == NULL) {
  3424. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "AddIfManfistFile: Failed to allocate space for manifest file %d list",
  3425. out_files->count);
  3426. vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
  3427. goto out;
  3428. }
  3429. strcpy(out_files->filename_list[out_files->count++], file_name);
  3430. out:
  3431. return vk_result;
  3432. }
// Walk a PATH_SEPARATOR-delimited list of entries in search_path and collect
// manifest files into out_files.
//
// When is_directory_list is true, each entry is treated as a directory and
// every ".json" file inside it is considered; otherwise each entry is treated
// as a file name itself. If use_first_found_manifest is set, scanning stops
// after the first entry that yielded at least one manifest.
//
// NOTE(review): loader_get_next_path appears to split search_path in place
// (cur_file becomes NUL-terminated) — confirm against its definition; the
// strcpy into temp_path below relies on that termination.
static VkResult AddDataFilesInPath(const struct loader_instance *inst, char *search_path, bool is_directory_list,
                                   struct loader_data_files *out_files, bool use_first_found_manifest) {
    VkResult vk_result = VK_SUCCESS;
    DIR *dir_stream = NULL;
    struct dirent *dir_entry;
    char *cur_file;
    char *next_file;
    char *name;
    char full_path[2048];
#ifndef _WIN32
    char temp_path[2048];
#endif
    // Now, parse the paths
    next_file = search_path;
    while (NULL != next_file && *next_file != '\0') {
        name = NULL;
        cur_file = next_file;
        next_file = loader_get_next_path(cur_file);
        // Get the next name in the list and verify it's valid
        if (is_directory_list) {
            dir_stream = opendir(cur_file);
            if (NULL == dir_stream) {
                // Unreadable / nonexistent directory: silently skip it.
                continue;
            }
            while (1) {
                dir_entry = readdir(dir_stream);
                if (NULL == dir_entry) {
                    break;
                }
                name = &(dir_entry->d_name[0]);
                // Build "<cur_file>/<entry>" into full_path.
                loader_get_fullpath(name, cur_file, sizeof(full_path), full_path);
                name = full_path;
                VkResult local_res;
                local_res = AddIfManifestFile(inst, name, out_files);
                // Incomplete means this was not a valid data file.
                if (local_res == VK_INCOMPLETE) {
                    continue;
                } else if (local_res != VK_SUCCESS) {
                    // Hard error: remember it, but still close the stream below.
                    vk_result = local_res;
                    break;
                }
            }
            closedir(dir_stream);
            if (vk_result != VK_SUCCESS) {
                goto out;
            }
        } else {
#ifdef _WIN32
            name = cur_file;
#else
            // Only Linux has relative paths, make a copy of location so it isn't modified
            size_t str_len;
            if (NULL != next_file) {
                str_len = next_file - cur_file + 1;
            } else {
                str_len = strlen(cur_file) + 1;
            }
            if (str_len > sizeof(temp_path)) {
                loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "AddDataFilesInPath: Path to %s too long\n", cur_file);
                continue;
            }
            strcpy(temp_path, cur_file);
            name = temp_path;
#endif
            // Resolve the single file entry to an absolute path.
            loader_get_fullpath(cur_file, name, sizeof(full_path), full_path);
            name = full_path;
            VkResult local_res;
            local_res = AddIfManifestFile(inst, name, out_files);
            // Incomplete means this was not a valid data file.
            if (local_res == VK_INCOMPLETE) {
                continue;
            } else if (local_res != VK_SUCCESS) {
                vk_result = local_res;
                break;
            }
        }
        // Early-out once something was found, when requested (e.g. macOS bundle search).
        if (use_first_found_manifest && out_files->count > 0) {
            break;
        }
    }
out:
    return vk_result;
}
// Look for data files in the provided paths, but first check the environment override to determine if we should use that
// instead.
//
// Builds one PATH_SEPARATOR-joined search string from either:
//   - path_override (takes precedence), or
//   - the environment variable named by env_override (ignored for setuid apps), or
//   - the XDG/system directories each suffixed with relative_location (non-Windows),
// deduplicates it, then hands it to AddDataFilesInPath. *override_active reports
// whether an override path was used (callers skip the registry in that case).
static VkResult ReadDataFilesInSearchPaths(const struct loader_instance *inst, enum loader_data_files_type data_file_type,
                                           const char *env_override, const char *path_override, const char *relative_location,
                                           bool *override_active, struct loader_data_files *out_files) {
    VkResult vk_result = VK_SUCCESS;
    bool is_directory_list = true;
    bool is_icd = (data_file_type == LOADER_DATA_FILE_MANIFEST_ICD);
    char *override_env = NULL;
    const char *override_path = NULL;
    size_t search_path_size = 0;
    char *search_path = NULL;
    char *cur_path_ptr = NULL;
    size_t rel_size = 0;
    bool use_first_found_manifest = false;
#ifndef _WIN32
    // Track whether the XDG strings came from getenv (and thus must be released
    // with loader_free_getenv) or were replaced with the FALLBACK_* literals.
    bool xdgconfig_alloc = true;
    bool xdgdata_alloc = true;
#endif
#ifndef _WIN32
    // Determine how much space is needed to generate the full search path
    // for the current manifest files.
    char *xdgconfdirs = loader_secure_getenv("XDG_CONFIG_DIRS", inst);
    char *xdgdatadirs = loader_secure_getenv("XDG_DATA_DIRS", inst);
    char *xdgdatahome = loader_secure_getenv("XDG_DATA_HOME", inst);
    char *home = NULL;
    char *home_root = NULL;
    if (xdgconfdirs == NULL) {
        xdgconfig_alloc = false;
    }
    if (xdgdatadirs == NULL) {
        xdgdata_alloc = false;
    }
#if !defined(__Fuchsia__)
    if (xdgconfdirs == NULL || xdgconfdirs[0] == '\0') {
        xdgconfdirs = FALLBACK_CONFIG_DIRS;
    }
    if (xdgdatadirs == NULL || xdgdatadirs[0] == '\0') {
        xdgdatadirs = FALLBACK_DATA_DIRS;
    }
#endif
    // Only use HOME if XDG_DATA_HOME is not present on the system
    if (NULL == xdgdatahome) {
        home = loader_secure_getenv("HOME", inst);
        if (home != NULL) {
            // strlen("/.local/share") == 13, +1 for the terminator.
            home_root = loader_instance_heap_alloc(inst, strlen(home) + 14, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
            if (home_root == NULL) {
                vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto out;
            }
            strcpy(home_root, home);
            strcat(home_root, "/.local/share");
        }
    }
#endif
    if (path_override != NULL) {
        override_path = path_override;
    } else if (env_override != NULL) {
#ifndef _WIN32
        if (geteuid() != getuid() || getegid() != getgid()) {
            // Don't allow setuid apps to use the env var:
            env_override = NULL;
        } else
#endif
        {
            override_env = loader_secure_getenv(env_override, inst);
            // The ICD override is actually a specific list of filenames, not directories
            if (is_icd && NULL != override_env) {
                is_directory_list = false;
            }
            override_path = override_env;
        }
    }
    // Add two by default for NULL terminator and one path separator on end (just in case)
    search_path_size = 2;
    // If there's an override, use that (and the local folder if required) and nothing else
    if (NULL != override_path) {
        // Local folder and null terminator
        search_path_size += strlen(override_path) + 1;
    } else if (NULL == relative_location) {
        // If there's no override, and no relative location, bail out. This is usually
        // the case when we're on Windows and the default path is to use the registry.
        goto out;
    } else {
        // Add the general search folders (with the appropriate relative folder added)
        rel_size = strlen(relative_location);
        if (rel_size == 0) {
            goto out;
        } else {
#if defined(__APPLE__)
            search_path_size += MAXPATHLEN;
#endif
#ifndef _WIN32
            search_path_size += DetermineDataFilePathSize(xdgconfdirs, rel_size);
            search_path_size += DetermineDataFilePathSize(xdgdatadirs, rel_size);
            search_path_size += DetermineDataFilePathSize(SYSCONFDIR, rel_size);
#if defined(EXTRASYSCONFDIR)
            search_path_size += DetermineDataFilePathSize(EXTRASYSCONFDIR, rel_size);
#endif
            if (is_directory_list) {
                if (!IsHighIntegrity()) {
                    search_path_size += DetermineDataFilePathSize(xdgdatahome, rel_size);
                    search_path_size += DetermineDataFilePathSize(home_root, rel_size);
                }
            }
#endif
        }
    }
    // Allocate the required space
    search_path = loader_instance_heap_alloc(inst, search_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
    if (NULL == search_path) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "ReadDataFilesInSearchPaths: Failed to allocate space for search path of length %d", (uint32_t)search_path_size);
        vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    cur_path_ptr = search_path;
    // Add the remaining paths to the list
    if (NULL != override_path) {
        strcpy(cur_path_ptr, override_path);
    } else {
#ifndef _WIN32
        if (rel_size > 0) {
#if defined(__APPLE__)
            // Add the bundle's Resources dir to the beginning of the search path.
            // Looks for manifests in the bundle first, before any system directories.
            CFBundleRef main_bundle = CFBundleGetMainBundle();
            if (NULL != main_bundle) {
                CFURLRef ref = CFBundleCopyResourcesDirectoryURL(main_bundle);
                if (NULL != ref) {
                    if (CFURLGetFileSystemRepresentation(ref, TRUE, (UInt8 *)cur_path_ptr, search_path_size)) {
                        cur_path_ptr += strlen(cur_path_ptr);
                        *cur_path_ptr++ = DIRECTORY_SYMBOL;
                        memcpy(cur_path_ptr, relative_location, rel_size);
                        cur_path_ptr += rel_size;
                        *cur_path_ptr++ = PATH_SEPARATOR;
                        // only for ICD manifests
                        if (env_override != NULL && strcmp(VK_ICD_FILENAMES_ENV_VAR, env_override) == 0) {
                            use_first_found_manifest = true;
                        }
                    }
                    CFRelease(ref);
                }
            }
#endif
            // Each CopyDataFilePath call appends "<dir>/<relative_location><PATH_SEPARATOR>"
            // and advances cur_path_ptr past what it wrote.
            CopyDataFilePath(xdgconfdirs, relative_location, rel_size, &cur_path_ptr);
            CopyDataFilePath(SYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
#if defined(EXTRASYSCONFDIR)
            CopyDataFilePath(EXTRASYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
#endif
            CopyDataFilePath(xdgdatadirs, relative_location, rel_size, &cur_path_ptr);
            if (is_directory_list) {
                CopyDataFilePath(xdgdatahome, relative_location, rel_size, &cur_path_ptr);
                CopyDataFilePath(home_root, relative_location, rel_size, &cur_path_ptr);
            }
        }
        // Remove the last path separator
        --cur_path_ptr;
        assert(cur_path_ptr - search_path < (ptrdiff_t)search_path_size);
        *cur_path_ptr = '\0';
#endif
    }
    // Remove duplicate paths, or it would result in duplicate extensions, duplicate devices, etc.
    // This uses minimal memory, but is O(N^2) on the number of paths. Expect only a few paths.
    char path_sep_str[2] = {PATH_SEPARATOR, '\0'};
    size_t search_path_updated_size = strlen(search_path);
    for (size_t first = 0; first < search_path_updated_size;) {
        // If this is an empty path, erase it
        if (search_path[first] == PATH_SEPARATOR) {
            memmove(&search_path[first], &search_path[first + 1], search_path_updated_size - first + 1);
            search_path_updated_size -= 1;
            continue;
        }
        size_t first_end = first + 1;
        first_end += strcspn(&search_path[first_end], path_sep_str);
        for (size_t second = first_end + 1; second < search_path_updated_size;) {
            size_t second_end = second + 1;
            second_end += strcspn(&search_path[second_end], path_sep_str);
            if (first_end - first == second_end - second &&
                !strncmp(&search_path[first], &search_path[second], second_end - second)) {
                // Found duplicate. Include PATH_SEPARATOR in second_end, then erase it from search_path.
                if (search_path[second_end] == PATH_SEPARATOR) {
                    second_end++;
                }
                memmove(&search_path[second], &search_path[second_end], search_path_updated_size - second_end + 1);
                search_path_updated_size -= second_end - second;
            } else {
                second = second_end + 1;
            }
        }
        first = first_end + 1;
    }
    search_path_size = search_path_updated_size;
    // Print out the paths being searched if debugging is enabled
    if (search_path_size > 0) {
        loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
                   "ReadDataFilesInSearchPaths: Searching the following paths for manifest files: %s\n", search_path);
    }
    // Now, parse the paths and add any manifest files found in them.
    vk_result = AddDataFilesInPath(inst, search_path, is_directory_list, out_files, use_first_found_manifest);
    if (NULL != override_path) {
        *override_active = true;
    } else {
        *override_active = false;
    }
out:
    // Release everything this function acquired; getenv results use
    // loader_free_getenv, heap allocations use loader_instance_heap_free.
    if (NULL != override_env) {
        loader_free_getenv(override_env, inst);
    }
#ifndef _WIN32
    if (xdgconfig_alloc) {
        loader_free_getenv(xdgconfdirs, inst);
    }
    if (xdgdata_alloc) {
        loader_free_getenv(xdgdatadirs, inst);
    }
    if (NULL != xdgdatahome) {
        loader_free_getenv(xdgdatahome, inst);
    }
    if (NULL != home) {
        loader_free_getenv(home, inst);
    }
    if (NULL != home_root) {
        loader_instance_heap_free(inst, home_root);
    }
#endif
    if (NULL != search_path) {
        loader_instance_heap_free(inst, search_path);
    }
    return vk_result;
}
  3747. #ifdef _WIN32
// Read manifest JSON files using the Windows driver interface
//
// Enumerates display adapters through the D3DKMT kernel-thunk entry points in
// gdi32.dll and, for each adapter, queries the registry value named by
// value_name (first as REG_MULTI_SZ, then as REG_SZ) to find driver manifest
// paths, appending each one to *reg_data via loaderAddJsonEntry.
// Returns VK_INCOMPLETE when nothing was found (callers fall back to the
// older device-registry scan), VK_SUCCESS once at least one entry was added,
// or VK_ERROR_OUT_OF_HOST_MEMORY on allocation/procedure-lookup failure.
static VkResult ReadManifestsFromD3DAdapters(const struct loader_instance *inst, char **reg_data, PDWORD reg_data_size,
                                             const wchar_t *value_name) {
    VkResult result = VK_INCOMPLETE;
    LoaderEnumAdapters2 adapters = {.adapter_count = 0, .adapters = NULL};
    LoaderQueryRegistryInfo *full_info = NULL;
    size_t full_info_size = 0;
    char *json_path = NULL;
    size_t json_path_size = 0;
    PFN_LoaderEnumAdapters2 fpLoaderEnumAdapters2 =
        (PFN_LoaderEnumAdapters2)GetProcAddress(GetModuleHandle("gdi32.dll"), "D3DKMTEnumAdapters2");
    PFN_LoaderQueryAdapterInfo fpLoaderQueryAdapterInfo =
        (PFN_LoaderQueryAdapterInfo)GetProcAddress(GetModuleHandle("gdi32.dll"), "D3DKMTQueryAdapterInfo");
    if (fpLoaderEnumAdapters2 == NULL || fpLoaderQueryAdapterInfo == NULL) {
        result = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    // Get all of the adapters: first call sizes the array, second fills it.
    NTSTATUS status = fpLoaderEnumAdapters2(&adapters);
    if (status == STATUS_SUCCESS && adapters.adapter_count > 0) {
        adapters.adapters = loader_instance_heap_alloc(inst, sizeof(*adapters.adapters) * adapters.adapter_count,
                                                       VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
        if (adapters.adapters == NULL) {
            goto out;
        }
        status = fpLoaderEnumAdapters2(&adapters);
    }
    if (status != STATUS_SUCCESS) {
        goto out;
    }
    // If that worked, we need to get the manifest file(s) for each adapter
    for (ULONG i = 0; i < adapters.adapter_count; ++i) {
        // The first query should just check if the field exists and how big it is
        LoaderQueryRegistryInfo filename_info = {
            .query_type = LOADER_QUERY_REGISTRY_ADAPTER_KEY,
            .query_flags =
                {
                    .translate_path = true,
                },
            .value_type = REG_MULTI_SZ,
            .physical_adapter_index = 0,
        };
        wcsncpy(filename_info.value_name, value_name, sizeof(filename_info.value_name) / sizeof(WCHAR));
        LoaderQueryAdapterInfo query_info = {
            .handle = adapters.adapters[i].handle,
            .type = LOADER_QUERY_TYPE_REGISTRY,
            .private_data = &filename_info,
            .private_data_size = sizeof(filename_info),
        };
        status = fpLoaderQueryAdapterInfo(&query_info);
        // This error indicates that the type didn't match, so we'll try a REG_SZ
        if (status != STATUS_SUCCESS) {
            filename_info.value_type = REG_SZ;
            status = fpLoaderQueryAdapterInfo(&query_info);
        }
        if (status != STATUS_SUCCESS || filename_info.status != LOADER_QUERY_REGISTRY_STATUS_BUFFER_OVERFLOW) {
            continue;
        }
        // Grow the query buffer until the value fits (the driver reports the
        // needed size through output_value_size on each overflow).
        while (status == STATUS_SUCCESS &&
               ((LoaderQueryRegistryInfo *)query_info.private_data)->status == LOADER_QUERY_REGISTRY_STATUS_BUFFER_OVERFLOW) {
            bool needs_copy = (full_info == NULL);
            size_t full_size = sizeof(LoaderQueryRegistryInfo) + filename_info.output_value_size;
            void *buffer =
                loader_instance_heap_realloc(inst, full_info, full_info_size, full_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
            if (buffer == NULL) {
                result = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto out;
            }
            full_info = buffer;
            full_info_size = full_size;
            if (needs_copy) {
                // Seed the grown buffer with the original query header.
                memcpy(full_info, &filename_info, sizeof(LoaderQueryRegistryInfo));
            }
            query_info.private_data = full_info;
            query_info.private_data_size = (UINT)full_info_size;
            status = fpLoaderQueryAdapterInfo(&query_info);
        }
        if (status != STATUS_SUCCESS || full_info->status != LOADER_QUERY_REGISTRY_STATUS_SUCCESS) {
            goto out;
        }
        // Convert the wide string to a narrow string
        void *buffer = loader_instance_heap_realloc(inst, json_path, json_path_size, full_info->output_value_size,
                                                    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
        if (buffer == NULL) {
            result = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        json_path = buffer;
        json_path_size = full_info->output_value_size;
        // Iterate over each component string (REG_MULTI_SZ is a sequence of
        // NUL-terminated strings ending with an empty string).
        for (const wchar_t *curr_path = full_info->output_string; curr_path[0] != '\0'; curr_path += wcslen(curr_path) + 1) {
            WideCharToMultiByte(CP_UTF8, 0, curr_path, -1, json_path, (int)json_path_size, NULL, NULL);
            // Add the string to the output list
            result = VK_SUCCESS;
            loaderAddJsonEntry(inst, reg_data, reg_data_size, (LPCTSTR)L"EnumAdapters", REG_SZ, json_path,
                               (DWORD)strlen(json_path) + 1, &result);
            if (result != VK_SUCCESS) {
                goto out;
            }
            // If this is a string and not a multi-string, we don't want to go through the loop more than once
            if (full_info->value_type == REG_SZ) {
                break;
            }
        }
    }
out:
    if (json_path != NULL) {
        loader_instance_heap_free(inst, json_path);
    }
    if (full_info != NULL) {
        loader_instance_heap_free(inst, full_info);
    }
    if (adapters.adapters != NULL) {
        loader_instance_heap_free(inst, adapters.adapters);
    }
    return result;
}
// Look for data files in the registry.
//
// Tries the PNP/device section first (D3D adapter enumeration, falling back to
// the device registry when that reports VK_INCOMPLETE), then the Khronos
// registry location. Search succeeds if EITHER source produced paths; only
// ICD lookups treat total failure as an error, layer/data lookups downgrade
// it to a warning and return VK_SUCCESS.
static VkResult ReadDataFilesInRegistry(const struct loader_instance *inst, enum loader_data_files_type data_file_type,
                                        bool warn_if_not_present, char *registry_location, struct loader_data_files *out_files) {
    VkResult vk_result = VK_SUCCESS;
    char *search_path = NULL;
    // These calls look at the PNP/Device section of the registry.
    VkResult regHKR_result = VK_SUCCESS;
    DWORD reg_size = 4096;
    if (!strncmp(registry_location, VK_DRIVERS_INFO_REGISTRY_LOC, sizeof(VK_DRIVERS_INFO_REGISTRY_LOC))) {
        // If we're looking for drivers we need to try enumerating adapters
        regHKR_result = ReadManifestsFromD3DAdapters(inst, &search_path, &reg_size, LoaderPnpDriverRegistryWide());
        if (regHKR_result == VK_INCOMPLETE) {
            regHKR_result = loaderGetDeviceRegistryFiles(inst, &search_path, &reg_size, LoaderPnpDriverRegistry());
        }
    } else if (!strncmp(registry_location, VK_ELAYERS_INFO_REGISTRY_LOC, sizeof(VK_ELAYERS_INFO_REGISTRY_LOC))) {
        regHKR_result = ReadManifestsFromD3DAdapters(inst, &search_path, &reg_size, LoaderPnpELayerRegistryWide());
        if (regHKR_result == VK_INCOMPLETE) {
            regHKR_result = loaderGetDeviceRegistryFiles(inst, &search_path, &reg_size, LoaderPnpELayerRegistry());
        }
    } else if (!strncmp(registry_location, VK_ILAYERS_INFO_REGISTRY_LOC, sizeof(VK_ILAYERS_INFO_REGISTRY_LOC))) {
        regHKR_result = ReadManifestsFromD3DAdapters(inst, &search_path, &reg_size, LoaderPnpILayerRegistryWide());
        if (regHKR_result == VK_INCOMPLETE) {
            regHKR_result = loaderGetDeviceRegistryFiles(inst, &search_path, &reg_size, LoaderPnpILayerRegistry());
        }
    }
    // This call looks into the Khronos non-device specific section of the registry.
    bool use_secondary_hive = (data_file_type == LOADER_DATA_FILE_MANIFEST_LAYER) && (!IsHighIntegrity());
    VkResult reg_result = loaderGetRegistryFiles(inst, registry_location, use_secondary_hive, &search_path, &reg_size);
    // Both sources failed (or nothing was collected) -> decide severity by type.
    if ((VK_SUCCESS != reg_result && VK_SUCCESS != regHKR_result) || NULL == search_path) {
        if (data_file_type == LOADER_DATA_FILE_MANIFEST_ICD) {
            loader_log(
                inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                "ReadDataFilesInRegistry: Registry lookup failed to get ICD manifest files. Possibly missing Vulkan driver?");
            // Prefer the Khronos-registry result unless the HKR path failed harder.
            if (VK_SUCCESS == reg_result || VK_ERROR_OUT_OF_HOST_MEMORY == reg_result) {
                vk_result = reg_result;
            } else {
                vk_result = regHKR_result;
            }
        } else {
            if (warn_if_not_present) {
                if (data_file_type == LOADER_DATA_FILE_MANIFEST_LAYER) {
                    // This is only a warning for layers
                    loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                               "ReadDataFilesInRegistry: Registry lookup failed to get layer manifest files.");
                } else {
                    // This is only a warning for general data files
                    loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                               "ReadDataFilesInRegistry: Registry lookup failed to get data files.");
                }
            }
            if (reg_result == VK_ERROR_OUT_OF_HOST_MEMORY) {
                vk_result = reg_result;
            } else {
                // Return success for now since it's not critical for layers
                vk_result = VK_SUCCESS;
            }
        }
        goto out;
    }
    // Now, parse the paths and add any manifest files found in them.
    // Registry entries are explicit file names, so is_directory_list == false.
    vk_result = AddDataFilesInPath(inst, search_path, false, out_files, false);
out:
    if (NULL != search_path) {
        loader_instance_heap_free(inst, search_path);
    }
    return vk_result;
}
  3932. #endif // _WIN32
  3933. // Find the Vulkan library manifest files.
  3934. //
  3935. // This function scans the "location" or "env_override" directories/files
  3936. // for a list of JSON manifest files. If env_override is non-NULL
  3937. // and has a valid value. Then the location is ignored. Otherwise
  3938. // location is used to look for manifest files. The location
  3939. // is interpreted as Registry path on Windows and a directory path(s)
  3940. // on Linux. "home_location" is an additional directory in the users home
  3941. // directory to look at. It is expanded into the dir path
  3942. // $XDG_DATA_HOME/home_location or $HOME/.local/share/home_location depending
  3943. // on environment variables. This "home_location" is only used on Linux.
  3944. //
  3945. // \returns
  3946. // VKResult
  3947. // A string list of manifest files to be opened in out_files param.
  3948. // List has a pointer to string for each manifest filename.
  3949. // When done using the list in out_files, pointers should be freed.
  3950. // Location or override string lists can be either files or directories as
  3951. // follows:
  3952. // | location | override
  3953. // --------------------------------
  3954. // Win ICD | files | files
  3955. // Win Layer | files | dirs
  3956. // Linux ICD | dirs | files
  3957. // Linux Layer| dirs | dirs
  3958. static VkResult loaderGetDataFiles(const struct loader_instance *inst, enum loader_data_files_type data_file_type,
  3959. bool warn_if_not_present, const char *env_override, const char *path_override,
  3960. char *registry_location, const char *relative_location, struct loader_data_files *out_files) {
  3961. VkResult res = VK_SUCCESS;
  3962. bool override_active = false;
  3963. // Free and init the out_files information so there's no false data left from uninitialized variables.
  3964. if (out_files->filename_list != NULL) {
  3965. for (uint32_t i = 0; i < out_files->count; i++) {
  3966. if (NULL != out_files->filename_list[i]) {
  3967. loader_instance_heap_free(inst, out_files->filename_list[i]);
  3968. out_files->filename_list[i] = NULL;
  3969. }
  3970. }
  3971. loader_instance_heap_free(inst, out_files->filename_list);
  3972. }
  3973. out_files->count = 0;
  3974. out_files->alloc_count = 0;
  3975. out_files->filename_list = NULL;
  3976. res = ReadDataFilesInSearchPaths(inst, data_file_type, env_override, path_override, relative_location, &override_active,
  3977. out_files);
  3978. if (VK_SUCCESS != res) {
  3979. goto out;
  3980. }
  3981. #ifdef _WIN32
  3982. // Read the registry if the override wasn't active.
  3983. if (!override_active) {
  3984. res = ReadDataFilesInRegistry(inst, data_file_type, warn_if_not_present, registry_location, out_files);
  3985. if (VK_SUCCESS != res) {
  3986. goto out;
  3987. }
  3988. }
  3989. #endif
  3990. out:
  3991. if (VK_SUCCESS != res && NULL != out_files->filename_list) {
  3992. for (uint32_t remove = 0; remove < out_files->count; remove++) {
  3993. loader_instance_heap_free(inst, out_files->filename_list[remove]);
  3994. }
  3995. loader_instance_heap_free(inst, out_files->filename_list);
  3996. out_files->count = 0;
  3997. out_files->alloc_count = 0;
  3998. out_files->filename_list = NULL;
  3999. }
  4000. return res;
  4001. }
// Intentionally empty stub: this build keeps no global ICD library-list state to initialize.
void loader_init_icd_lib_list() {}
// Intentionally empty stub: counterpart to loader_init_icd_lib_list(); nothing to tear down.
void loader_destroy_icd_lib_list() {}
// Try to find the Vulkan ICD driver(s).
//
// This function scans the default system loader path(s) or path
// specified by the \c VK_ICD_FILENAMES environment variable in
// order to find loadable VK ICDs manifest files. From these
// manifest files it finds the ICD libraries.
//
// Holds loader_json_lock while parsing manifests. Per-manifest failures are
// skipped (and only set the returned result while no ICD has succeeded yet);
// VK_ERROR_OUT_OF_HOST_MEMORY aborts the whole scan.
//
// \returns
// Vulkan result
// (on result == VK_SUCCESS) a list of icds that were discovered
VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
    char *file_str;
    uint16_t file_major_vers = 0;
    uint16_t file_minor_vers = 0;
    uint16_t file_patch_vers = 0;
    char *vers_tok;
    struct loader_data_files manifest_files;
    VkResult res = VK_SUCCESS;
    bool lockedMutex = false;
    cJSON *json = NULL;
    uint32_t num_good_icds = 0;
    memset(&manifest_files, 0, sizeof(struct loader_data_files));
    res = loader_scanned_icd_init(inst, icd_tramp_list);
    if (VK_SUCCESS != res) {
        goto out;
    }
    // Get a list of manifest files for ICDs
    res = loaderGetDataFiles(inst, LOADER_DATA_FILE_MANIFEST_ICD, true, VK_ICD_FILENAMES_ENV_VAR, NULL,
                             VK_DRIVERS_INFO_REGISTRY_LOC, VK_DRIVERS_INFO_RELATIVE_DIR, &manifest_files);
    if (VK_SUCCESS != res || manifest_files.count == 0) {
        goto out;
    }
    loader_platform_thread_lock_mutex(&loader_json_lock);
    lockedMutex = true;
    for (uint32_t i = 0; i < manifest_files.count; i++) {
        file_str = manifest_files.filename_list[i];
        if (file_str == NULL) {
            continue;
        }
        VkResult temp_res = loader_get_json(inst, file_str, &json);
        if (NULL == json || temp_res != VK_SUCCESS) {
            if (NULL != json) {
                cJSON_Delete(json);
                json = NULL;
            }
            // If we haven't already found an ICD, copy this result to
            // the returned result.
            if (num_good_icds == 0) {
                res = temp_res;
            }
            if (temp_res == VK_ERROR_OUT_OF_HOST_MEMORY) {
                // Out of memory is fatal: stop scanning entirely.
                res = VK_ERROR_OUT_OF_HOST_MEMORY;
                break;
            } else {
                continue;
            }
        }
        res = temp_res;
        cJSON *item, *itemICD;
        item = cJSON_GetObjectItem(json, "file_format_version");
        if (item == NULL) {
            if (num_good_icds == 0) {
                res = VK_ERROR_INITIALIZATION_FAILED;
            }
            loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                       "loader_icd_scan: ICD JSON %s does not have a"
                       " \'file_format_version\' field. Skipping ICD JSON.",
                       file_str);
            cJSON_Delete(json);
            json = NULL;
            continue;
        }
        char *file_vers = cJSON_Print(item);
        if (NULL == file_vers) {
            // Only reason the print can fail is if there was an allocation issue
            if (num_good_icds == 0) {
                res = VK_ERROR_OUT_OF_HOST_MEMORY;
            }
            loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                       "loader_icd_scan: Failed retrieving ICD JSON %s"
                       " \'file_format_version\' field. Skipping ICD JSON",
                       file_str);
            cJSON_Delete(json);
            json = NULL;
            continue;
        }
        loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Found ICD manifest file %s, version %s", file_str, file_vers);
        // Get the major/minor/and patch as integers for easier comparison.
        // The delimiter set also strips the quotes cJSON_Print adds.
        vers_tok = strtok(file_vers, ".\"\n\r");
        if (NULL != vers_tok) {
            file_major_vers = (uint16_t)atoi(vers_tok);
            vers_tok = strtok(NULL, ".\"\n\r");
            if (NULL != vers_tok) {
                file_minor_vers = (uint16_t)atoi(vers_tok);
                vers_tok = strtok(NULL, ".\"\n\r");
                if (NULL != vers_tok) {
                    file_patch_vers = (uint16_t)atoi(vers_tok);
                }
            }
        }
        if (file_major_vers != 1 || file_minor_vers != 0 || file_patch_vers > 1) {
            loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                       "loader_icd_scan: Unexpected manifest file version "
                       "(expected 1.0.0 or 1.0.1), may cause errors");
        }
        cJSON_Free(file_vers);
        itemICD = cJSON_GetObjectItem(json, "ICD");
        if (itemICD != NULL) {
            item = cJSON_GetObjectItem(itemICD, "library_path");
            if (item != NULL) {
                char *temp = cJSON_Print(item);
                if (!temp || strlen(temp) == 0) {
                    if (num_good_icds == 0) {
                        res = VK_ERROR_OUT_OF_HOST_MEMORY;
                    }
                    loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                               "loader_icd_scan: Failed retrieving ICD JSON %s"
                               " \'library_path\' field. Skipping ICD JSON.",
                               file_str);
                    cJSON_Free(temp);
                    cJSON_Delete(json);
                    json = NULL;
                    continue;
                }
                // strip out extra quotes (cJSON_Print wraps strings in '"')
                temp[strlen(temp) - 1] = '\0';
                char *library_path = loader_stack_alloc(strlen(temp) + 1);
                if (NULL == library_path) {
                    loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                               "loader_icd_scan: Failed to allocate space for "
                               "ICD JSON %s \'library_path\' value. Skipping "
                               "ICD JSON.",
                               file_str);
                    res = VK_ERROR_OUT_OF_HOST_MEMORY;
                    cJSON_Free(temp);
                    cJSON_Delete(json);
                    json = NULL;
                    goto out;
                }
                // Copy from just past the leading quote.
                strcpy(library_path, &temp[1]);
                cJSON_Free(temp);
                if (strlen(library_path) == 0) {
                    loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                               "loader_icd_scan: ICD JSON %s \'library_path\'"
                               " field is empty. Skipping ICD JSON.",
                               file_str);
                    cJSON_Delete(json);
                    json = NULL;
                    continue;
                }
                char fullpath[MAX_STRING_SIZE];
                // Print out the paths being searched if debugging is enabled
                loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Searching for ICD drivers named %s", library_path);
                if (loader_platform_is_path(library_path)) {
                    // a relative or absolute path: resolve it relative to the
                    // directory containing the manifest file.
                    char *name_copy = loader_stack_alloc(strlen(file_str) + 1);
                    char *rel_base;
                    strcpy(name_copy, file_str);
                    rel_base = loader_platform_dirname(name_copy);
                    loader_expand_path(library_path, rel_base, sizeof(fullpath), fullpath);
                } else {
                    // a filename which is assumed in a system directory
#if defined(DEFAULT_VK_DRIVERS_PATH)
                    loader_get_fullpath(library_path, DEFAULT_VK_DRIVERS_PATH, sizeof(fullpath), fullpath);
#else
                    loader_get_fullpath(library_path, "", sizeof(fullpath), fullpath);
#endif
                }
                uint32_t vers = 0;
                item = cJSON_GetObjectItem(itemICD, "api_version");
                if (item != NULL) {
                    temp = cJSON_Print(item);
                    if (NULL == temp) {
                        loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                                   "loader_icd_scan: Failed retrieving ICD JSON %s"
                                   " \'api_version\' field. Skipping ICD JSON.",
                                   file_str);
                        // Only reason the print can fail is if there was an
                        // allocation issue
                        if (num_good_icds == 0) {
                            res = VK_ERROR_OUT_OF_HOST_MEMORY;
                        }
                        cJSON_Free(temp);
                        cJSON_Delete(json);
                        json = NULL;
                        continue;
                    }
                    vers = loader_make_version(temp);
                    cJSON_Free(temp);
                } else {
                    // Missing api_version is tolerated; vers stays 0.
                    loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                               "loader_icd_scan: ICD JSON %s does not have an"
                               " \'api_version\' field.",
                               file_str);
                }
                res = loader_scanned_icd_add(inst, icd_tramp_list, fullpath, vers);
                if (VK_SUCCESS != res) {
                    loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                               "loader_icd_scan: Failed to add ICD JSON %s. "
                               " Skipping ICD JSON.",
                               fullpath);
                    cJSON_Delete(json);
                    json = NULL;
                    continue;
                }
                num_good_icds++;
            } else {
                loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                           "loader_icd_scan: Failed to find \'library_path\' "
                           "object in ICD JSON file %s. Skipping ICD JSON.",
                           file_str);
            }
        } else {
            loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                       "loader_icd_scan: Can not find \'ICD\' object in ICD JSON "
                       "file %s. Skipping ICD JSON",
                       file_str);
        }
        cJSON_Delete(json);
        json = NULL;
    }
out:
    // Release the parsed JSON, the manifest filename list, and the lock
    // regardless of which path brought us here.
    if (NULL != json) {
        cJSON_Delete(json);
    }
    if (NULL != manifest_files.filename_list) {
        for (uint32_t i = 0; i < manifest_files.count; i++) {
            if (NULL != manifest_files.filename_list[i]) {
                loader_instance_heap_free(inst, manifest_files.filename_list[i]);
            }
        }
        loader_instance_heap_free(inst, manifest_files.filename_list);
    }
    if (lockedMutex) {
        loader_platform_thread_unlock_mutex(&loader_json_lock);
    }
    return res;
}
// Scan for layer manifest (JSON) files and populate 'instance_layers' with
// every layer found.  Implicit layers are gathered first, then explicit
// layers.  If a valid, enabled override (meta) layer supplies override
// paths, those paths replace the normal explicit-layer search locations.
// Out-of-memory aborts the scan; any other per-manifest error skips only
// the offending file so the remaining layers can still load.
void loaderScanForLayers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
    char *file_str;
    struct loader_data_files manifest_files;
    cJSON *json;
    bool override_layer_valid = false;
    char *override_paths = NULL;
    uint32_t total_count = 0;

    // Zero this first so the cleanup at 'out' is safe even on early failure.
    memset(&manifest_files, 0, sizeof(struct loader_data_files));

    // Cleanup any previously scanned libraries
    loaderDeleteLayerListAndProperties(inst, instance_layers);

    loader_platform_thread_lock_mutex(&loader_json_lock);

    // Get a list of manifest files for any implicit layers
    // Pass NULL for environment variable override - implicit layers are not overridden by LAYERS_PATH_ENV
    if (VK_SUCCESS != loaderGetDataFiles(inst, LOADER_DATA_FILE_MANIFEST_LAYER, false, NULL, NULL, VK_ILAYERS_INFO_REGISTRY_LOC,
                                         VK_ILAYERS_INFO_RELATIVE_DIR, &manifest_files)) {
        goto out;
    }

    if (manifest_files.count != 0) {
        total_count += manifest_files.count;
        for (uint32_t i = 0; i < manifest_files.count; i++) {
            file_str = manifest_files.filename_list[i];
            if (file_str == NULL) {
                continue;
            }

            // Parse file into JSON struct
            VkResult res = loader_get_json(inst, file_str, &json);
            if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
                goto out;
            } else if (VK_SUCCESS != res || NULL == json) {
                continue;
            }

            VkResult local_res = loaderAddLayerProperties(inst, instance_layers, json, true, file_str);
            cJSON_Delete(json);

            // If the error is anything other than out of memory we still want to try to load the other layers
            if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
                goto out;
            }
        }
    }

    // Remove any extraneous override layers.
    RemoveAllNonValidOverrideLayers(inst, instance_layers);

    // Check to see if the override layer is present, and use it's override paths.
    for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) {
        struct loader_layer_properties *prop = &instance_layers->list[i];
        if (prop->is_override && loaderImplicitLayerIsEnabled(inst, prop) && prop->num_override_paths > 0) {
            char *cur_write_ptr = NULL;
            size_t override_path_size = 0;
            // First pass: compute the total buffer size for all joined paths.
            for (uint32_t j = 0; j < prop->num_override_paths; j++) {
                override_path_size += DetermineDataFilePathSize(prop->override_paths[j], 0);
            }
            override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
            if (override_paths == NULL) {
                goto out;
            }
            cur_write_ptr = &override_paths[0];
            // Second pass: copy each path (plus separator) into the buffer.
            for (uint32_t j = 0; j < prop->num_override_paths; j++) {
                CopyDataFilePath(prop->override_paths[j], NULL, 0, &cur_write_ptr);
            }
            // Remove the last path separator
            --cur_write_ptr;
            assert(cur_write_ptr - override_paths < (ptrdiff_t)override_path_size);
            *cur_write_ptr = '\0';
            loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "loaderScanForLayers: Override layer has override paths set to %s",
                       override_paths);
        }
    }

    // Get a list of manifest files for explicit layers
    if (VK_SUCCESS != loaderGetDataFiles(inst, LOADER_DATA_FILE_MANIFEST_LAYER, true, VK_LAYER_PATH_ENV_VAR, override_paths,
                                         VK_ELAYERS_INFO_REGISTRY_LOC, VK_ELAYERS_INFO_RELATIVE_DIR, &manifest_files)) {
        goto out;
    }

    // Make sure we have at least one layer, if not, go ahead and return
    if (manifest_files.count == 0 && total_count == 0) {
        goto out;
    } else {
        total_count += manifest_files.count;
        for (uint32_t i = 0; i < manifest_files.count; i++) {
            file_str = manifest_files.filename_list[i];
            if (file_str == NULL) {
                continue;
            }

            // Parse file into JSON struct
            VkResult res = loader_get_json(inst, file_str, &json);
            if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
                goto out;
            } else if (VK_SUCCESS != res || NULL == json) {
                continue;
            }

            VkResult local_res = loaderAddLayerProperties(inst, instance_layers, json, false, file_str);
            cJSON_Delete(json);

            // If the error is anything other than out of memory we still want to try to load the other layers
            if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
                goto out;
            }
        }
    }

    // Verify any meta-layers in the list are valid and all the component layers are
    // actually present in the available layer list
    VerifyAllMetaLayers(inst, instance_layers, &override_layer_valid);

    if (override_layer_valid) {
        loaderRemoveLayersInBlacklist(inst, instance_layers);
        if (NULL != inst) {
            inst->override_layer_present = true;
        }
    }

out:
    if (NULL != override_paths) {
        loader_instance_heap_free(inst, override_paths);
    }
    if (NULL != manifest_files.filename_list) {
        for (uint32_t i = 0; i < manifest_files.count; i++) {
            if (NULL != manifest_files.filename_list[i]) {
                loader_instance_heap_free(inst, manifest_files.filename_list[i]);
            }
        }
        loader_instance_heap_free(inst, manifest_files.filename_list);
    }
    loader_platform_thread_unlock_mutex(&loader_json_lock);
}
// Scan for implicit-layer manifest files and add those layers to
// 'instance_layers'.  If an enabled override layer or any implicit
// meta-layer is found, explicit layers are also scanned (the meta-layers
// need their component layers present); explicit layers that are not
// referenced by a meta-layer are removed again afterwards.
void loaderScanForImplicitLayers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
    char *file_str;
    struct loader_data_files manifest_files;
    cJSON *json;
    bool override_layer_valid = false;
    char *override_paths = NULL;
    bool implicit_metalayer_present = false;
    bool have_json_lock = false;

    // Before we begin anything, init manifest_files to avoid a delete of garbage memory if
    // a failure occurs before allocating the manifest filename_list.
    memset(&manifest_files, 0, sizeof(struct loader_data_files));

    // Pass NULL for environment variable override - implicit layers are not overridden by LAYERS_PATH_ENV
    VkResult res = loaderGetDataFiles(inst, LOADER_DATA_FILE_MANIFEST_LAYER, false, NULL, NULL, VK_ILAYERS_INFO_REGISTRY_LOC,
                                      VK_ILAYERS_INFO_RELATIVE_DIR, &manifest_files);
    if (VK_SUCCESS != res || manifest_files.count == 0) {
        goto out;
    }

    // Cleanup any previously scanned libraries
    loaderDeleteLayerListAndProperties(inst, instance_layers);

    loader_platform_thread_lock_mutex(&loader_json_lock);
    have_json_lock = true;

    for (uint32_t i = 0; i < manifest_files.count; i++) {
        file_str = manifest_files.filename_list[i];
        if (file_str == NULL) {
            continue;
        }

        // parse file into JSON struct
        res = loader_get_json(inst, file_str, &json);
        if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
            goto out;
        } else if (VK_SUCCESS != res || NULL == json) {
            continue;
        }

        res = loaderAddLayerProperties(inst, instance_layers, json, true, file_str);

        // Each filename is freed as soon as it is consumed (and NULLed in the
        // list) so the cleanup at 'out' only releases whatever remains.
        loader_instance_heap_free(inst, file_str);
        manifest_files.filename_list[i] = NULL;
        cJSON_Delete(json);

        if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
            goto out;
        }
    }

    // Remove any extraneous override layers.
    RemoveAllNonValidOverrideLayers(inst, instance_layers);

    // Check to see if either the override layer is present, or another implicit meta-layer.
    // Each of these may require explicit layers to be enabled at this time.
    for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) {
        struct loader_layer_properties *prop = &instance_layers->list[i];
        if (prop->is_override && loaderImplicitLayerIsEnabled(inst, prop)) {
            override_layer_valid = true;
            if (prop->num_override_paths > 0) {
                char *cur_write_ptr = NULL;
                size_t override_path_size = 0;
                // First pass: compute the total buffer size for all joined paths.
                for (uint32_t j = 0; j < prop->num_override_paths; j++) {
                    override_path_size += DetermineDataFilePathSize(prop->override_paths[j], 0);
                }
                override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
                if (override_paths == NULL) {
                    goto out;
                }
                cur_write_ptr = &override_paths[0];
                // Second pass: copy each path (plus separator) into the buffer.
                for (uint32_t j = 0; j < prop->num_override_paths; j++) {
                    CopyDataFilePath(prop->override_paths[j], NULL, 0, &cur_write_ptr);
                }
                // Remove the last path separator
                --cur_write_ptr;
                assert(cur_write_ptr - override_paths < (ptrdiff_t)override_path_size);
                *cur_write_ptr = '\0';
                loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                           "loaderScanForImplicitLayers: Override layer has override paths set to %s", override_paths);
            }
        } else if (!prop->is_override && prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
            implicit_metalayer_present = true;
        }
    }

    // If either the override layer or an implicit meta-layer are present, we need to add
    // explicit layer info as well.  Not to worry, though, all explicit layers not included
    // in the override layer will be removed below in loaderRemoveLayersInBlacklist().
    if (override_layer_valid || implicit_metalayer_present) {
        if (VK_SUCCESS != loaderGetDataFiles(inst, LOADER_DATA_FILE_MANIFEST_LAYER, true, VK_LAYER_PATH_ENV_VAR, override_paths,
                                             VK_ELAYERS_INFO_REGISTRY_LOC, VK_ELAYERS_INFO_RELATIVE_DIR, &manifest_files)) {
            goto out;
        }

        for (uint32_t i = 0; i < manifest_files.count; i++) {
            file_str = manifest_files.filename_list[i];
            if (file_str == NULL) {
                continue;
            }

            // parse file into JSON struct
            res = loader_get_json(inst, file_str, &json);
            if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
                goto out;
            } else if (VK_SUCCESS != res || NULL == json) {
                continue;
            }

            res = loaderAddLayerProperties(inst, instance_layers, json, true, file_str);
            loader_instance_heap_free(inst, file_str);
            manifest_files.filename_list[i] = NULL;
            cJSON_Delete(json);

            if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
                goto out;
            }
        }
    }

    // Verify any meta-layers in the list are valid and all the component layers are
    // actually present in the available layer list
    VerifyAllMetaLayers(inst, instance_layers, &override_layer_valid);

    if (override_layer_valid || implicit_metalayer_present) {
        loaderRemoveLayersNotInImplicitMetaLayers(inst, instance_layers);
        if (override_layer_valid && inst != NULL) {
            inst->override_layer_present = true;
        }
    }

out:
    if (NULL != override_paths) {
        loader_instance_heap_free(inst, override_paths);
    }
    for (uint32_t i = 0; i < manifest_files.count; i++) {
        if (NULL != manifest_files.filename_list[i]) {
            loader_instance_heap_free(inst, manifest_files.filename_list[i]);
        }
    }
    if (NULL != manifest_files.filename_list) {
        loader_instance_heap_free(inst, manifest_files.filename_list);
    }

    if (have_json_lock) {
        loader_platform_thread_unlock_mutex(&loader_json_lock);
    }
}
  4489. static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_internal(VkInstance inst, const char *pName) {
  4490. // inst is not wrapped
  4491. if (inst == VK_NULL_HANDLE) {
  4492. return NULL;
  4493. }
  4494. VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
  4495. void *addr;
  4496. if (disp_table == NULL) return NULL;
  4497. bool found_name;
  4498. addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
  4499. if (found_name) {
  4500. return addr;
  4501. }
  4502. if (loader_phys_dev_ext_gpa(loader_get_instance(inst), pName, true, NULL, &addr)) return addr;
  4503. // Don't call down the chain, this would be an infinite loop
  4504. loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "loader_gpdpa_instance_internal() unrecognized name %s", pName);
  4505. return NULL;
  4506. }
  4507. static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_terminator(VkInstance inst, const char *pName) {
  4508. // inst is not wrapped
  4509. if (inst == VK_NULL_HANDLE) {
  4510. return NULL;
  4511. }
  4512. VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
  4513. void *addr;
  4514. if (disp_table == NULL) return NULL;
  4515. bool found_name;
  4516. addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
  4517. if (found_name) {
  4518. return addr;
  4519. }
  4520. // Get the terminator, but don't perform checking since it should already
  4521. // have been setup if we get here.
  4522. if (loader_phys_dev_ext_gpa(loader_get_instance(inst), pName, false, NULL, &addr)) {
  4523. return addr;
  4524. }
  4525. // Don't call down the chain, this would be an infinite loop
  4526. loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "loader_gpdpa_instance_terminator() unrecognized name %s", pName);
  4527. return NULL;
  4528. }
  4529. static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_internal(VkInstance inst, const char *pName) {
  4530. if (!strcmp(pName, "vkGetInstanceProcAddr")) {
  4531. return (PFN_vkVoidFunction)loader_gpa_instance_internal;
  4532. }
  4533. if (!strcmp(pName, "vk_layerGetPhysicalDeviceProcAddr")) {
  4534. return (PFN_vkVoidFunction)loader_gpdpa_instance_terminator;
  4535. }
  4536. if (!strcmp(pName, "vkCreateInstance")) {
  4537. return (PFN_vkVoidFunction)terminator_CreateInstance;
  4538. }
  4539. if (!strcmp(pName, "vkCreateDevice")) {
  4540. return (PFN_vkVoidFunction)terminator_CreateDevice;
  4541. }
  4542. // The VK_EXT_debug_utils functions need a special case here so the terminators can still be found from vkGetInstanceProcAddr
  4543. if (!strcmp(pName, "vkSetDebugUtilsObjectNameEXT")) {
  4544. return (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectNameEXT;
  4545. }
  4546. if (!strcmp(pName, "vkSetDebugUtilsObjectTagEXT")) {
  4547. return (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectTagEXT;
  4548. }
  4549. if (!strcmp(pName, "vkQueueBeginDebugUtilsLabelEXT")) {
  4550. return (PFN_vkVoidFunction)terminator_QueueBeginDebugUtilsLabelEXT;
  4551. }
  4552. if (!strcmp(pName, "vkQueueEndDebugUtilsLabelEXT")) {
  4553. return (PFN_vkVoidFunction)terminator_QueueEndDebugUtilsLabelEXT;
  4554. }
  4555. if (!strcmp(pName, "vkQueueInsertDebugUtilsLabelEXT")) {
  4556. return (PFN_vkVoidFunction)terminator_QueueInsertDebugUtilsLabelEXT;
  4557. }
  4558. if (!strcmp(pName, "vkCmdBeginDebugUtilsLabelEXT")) {
  4559. return (PFN_vkVoidFunction)terminator_CmdBeginDebugUtilsLabelEXT;
  4560. }
  4561. if (!strcmp(pName, "vkCmdEndDebugUtilsLabelEXT")) {
  4562. return (PFN_vkVoidFunction)terminator_CmdEndDebugUtilsLabelEXT;
  4563. }
  4564. if (!strcmp(pName, "vkCmdInsertDebugUtilsLabelEXT")) {
  4565. return (PFN_vkVoidFunction)terminator_CmdInsertDebugUtilsLabelEXT;
  4566. }
  4567. // inst is not wrapped
  4568. if (inst == VK_NULL_HANDLE) {
  4569. return NULL;
  4570. }
  4571. VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
  4572. void *addr;
  4573. if (disp_table == NULL) return NULL;
  4574. bool found_name;
  4575. addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
  4576. if (found_name) {
  4577. return addr;
  4578. }
  4579. // Don't call down the chain, this would be an infinite loop
  4580. loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "loader_gpa_instance_internal() unrecognized name %s", pName);
  4581. return NULL;
  4582. }
// Internal GetDeviceProcAddr placed at the bottom of the device call chain:
// intercepts device entry points that need a loader terminator, otherwise
// forwards to the ICD's own GetDeviceProcAddr.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_device_internal(VkDevice device, const char *pName) {
    struct loader_device *dev;
    // NOTE(review): icd_term is dereferenced at the end without a NULL check;
    // presumably 'device' is always one the loader created - confirm callers.
    struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);

    // Return this function if a layer above here is asking for the vkGetDeviceProcAddr.
    // This is so we can properly intercept any device commands needing a terminator.
    if (!strcmp(pName, "vkGetDeviceProcAddr")) {
        return (PFN_vkVoidFunction)loader_gpa_device_internal;
    }

    // NOTE: Device Funcs needing Trampoline/Terminator.
    // Overrides for device functions needing a trampoline and
    // a terminator because certain device entry-points still need to go
    // through a terminator before hitting the ICD. This could be for
    // several reasons, but the main one is currently unwrapping an
    // object before passing the appropriate info along to the ICD.
    // This is why we also have to override the direct ICD call to
    // vkGetDeviceProcAddr to intercept those calls.
    PFN_vkVoidFunction addr = get_extension_device_proc_terminator(dev, pName);
    if (NULL != addr) {
        return addr;
    }

    // Everything else goes straight to the ICD.
    return icd_term->dispatch.GetDeviceProcAddr(device, pName);
}
  4605. // Initialize device_ext dispatch table entry as follows:
  4606. // If dev == NULL find all logical devices created within this instance and
  4607. // init the entry (given by idx) in the ext dispatch table.
  4608. // If dev != NULL only initialize the entry in the given dev's dispatch table.
  4609. // The initialization value is gotten by calling down the device chain with
  4610. // GDPA.
  4611. // If GDPA returns NULL then don't initialize the dispatch table entry.
  4612. static void loader_init_dispatch_dev_ext_entry(struct loader_instance *inst, struct loader_device *dev, uint32_t idx,
  4613. const char *funcName)
  4614. {
  4615. void *gdpa_value;
  4616. if (dev != NULL) {
  4617. gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr(dev->chain_device, funcName);
  4618. if (gdpa_value != NULL) dev->loader_dispatch.ext_dispatch.dev_ext[idx] = (PFN_vkDevExt)gdpa_value;
  4619. } else {
  4620. for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term != NULL; icd_term = icd_term->next) {
  4621. struct loader_device *ldev = icd_term->logical_device_list;
  4622. while (ldev) {
  4623. gdpa_value = ldev->loader_dispatch.core_dispatch.GetDeviceProcAddr(ldev->chain_device, funcName);
  4624. if (gdpa_value != NULL) ldev->loader_dispatch.ext_dispatch.dev_ext[idx] = (PFN_vkDevExt)gdpa_value;
  4625. ldev = ldev->next;
  4626. }
  4627. }
  4628. }
  4629. }
  4630. // Find all dev extension in the hash table and initialize the dispatch table
  4631. // for dev for each of those extension entrypoints found in hash table.
  4632. void loader_init_dispatch_dev_ext(struct loader_instance *inst, struct loader_device *dev) {
  4633. for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) {
  4634. if (inst->dev_ext_disp_hash[i].func_name != NULL)
  4635. loader_init_dispatch_dev_ext_entry(inst, dev, i, inst->dev_ext_disp_hash[i].func_name);
  4636. }
  4637. }
  4638. static bool loader_check_icds_for_dev_ext_address(struct loader_instance *inst, const char *funcName) {
  4639. struct loader_icd_term *icd_term;
  4640. icd_term = inst->icd_terms;
  4641. while (NULL != icd_term) {
  4642. if (icd_term->scanned_icd->GetInstanceProcAddr(icd_term->instance, funcName))
  4643. // this icd supports funcName
  4644. return true;
  4645. icd_term = icd_term->next;
  4646. }
  4647. return false;
  4648. }
  4649. static bool loader_check_layer_list_for_dev_ext_address(const struct loader_layer_list *const layers, const char *funcName) {
  4650. // Iterate over the layers.
  4651. for (uint32_t layer = 0; layer < layers->count; ++layer) {
  4652. // Iterate over the extensions.
  4653. const struct loader_device_extension_list *const extensions = &(layers->list[layer].device_extension_list);
  4654. for (uint32_t extension = 0; extension < extensions->count; ++extension) {
  4655. // Iterate over the entry points.
  4656. const struct loader_dev_ext_props *const property = &(extensions->list[extension]);
  4657. for (uint32_t entry = 0; entry < property->entrypoint_count; ++entry) {
  4658. if (strcmp(property->entrypoints[entry], funcName) == 0) {
  4659. return true;
  4660. }
  4661. }
  4662. }
  4663. }
  4664. return false;
  4665. }
  4666. static void loader_free_dev_ext_table(struct loader_instance *inst) {
  4667. for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) {
  4668. loader_instance_heap_free(inst, inst->dev_ext_disp_hash[i].func_name);
  4669. loader_instance_heap_free(inst, inst->dev_ext_disp_hash[i].list.index);
  4670. }
  4671. memset(inst->dev_ext_disp_hash, 0, sizeof(inst->dev_ext_disp_hash));
  4672. }
// Insert funcName into the unknown-device-extension hash table.
// On entry *ptr_idx is funcName's primary hash slot.  If that slot is free
// it is claimed directly; on a collision the name goes into the next free
// slot (linear probing), which is appended to the primary slot's overflow
// ("secondary location") list and returned through *ptr_idx.
// Returns false on allocation failure or when the table is full.
static bool loader_add_dev_ext_table(struct loader_instance *inst, uint32_t *ptr_idx, const char *funcName) {
    uint32_t i;
    uint32_t idx = *ptr_idx;
    struct loader_dispatch_hash_list *list = &inst->dev_ext_disp_hash[idx].list;

    if (!inst->dev_ext_disp_hash[idx].func_name) {
        // no entry here at this idx, so use it
        assert(list->capacity == 0);
        inst->dev_ext_disp_hash[idx].func_name =
            (char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (inst->dev_ext_disp_hash[idx].func_name == NULL) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_add_dev_ext_table: Failed to allocate memory "
                       "for func_name %s",
                       funcName);
            return false;
        }
        strncpy(inst->dev_ext_disp_hash[idx].func_name, funcName, strlen(funcName) + 1);
        return true;
    }

    // check for enough capacity
    if (list->capacity == 0) {
        // First collision on this slot: start the overflow list (room for 8 indices).
        list->index = loader_instance_heap_alloc(inst, 8 * sizeof(*(list->index)), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (list->index == NULL) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_add_dev_ext_table: Failed to allocate memory for list index of function %s", funcName);
            return false;
        }
        list->capacity = 8 * sizeof(*(list->index));
    } else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) {
        // Overflow list full: double its capacity.
        void *new_ptr = loader_instance_heap_realloc(inst, list->index, list->capacity, list->capacity * 2,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_add_dev_ext_table: Failed to reallocate memory for list index of function %s", funcName);
            return false;
        }
        list->index = new_ptr;
        list->capacity *= 2;
    }

    // find an unused index in the hash table and use it
    i = (idx + 1) % MAX_NUM_UNKNOWN_EXTS;
    do {
        if (!inst->dev_ext_disp_hash[i].func_name) {
            assert(inst->dev_ext_disp_hash[i].list.capacity == 0);
            inst->dev_ext_disp_hash[i].func_name =
                (char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            if (inst->dev_ext_disp_hash[i].func_name == NULL) {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "loader_add_dev_ext_table: Failed to allocate memory "
                           "for func_name %s",
                           funcName);
                return false;
            }
            strncpy(inst->dev_ext_disp_hash[i].func_name, funcName, strlen(funcName) + 1);
            // Record the secondary slot on the primary slot's overflow list.
            list->index[list->count] = i;
            list->count++;
            *ptr_idx = i;
            return true;
        }
        i = (i + 1) % MAX_NUM_UNKNOWN_EXTS;
    } while (i != idx);

    loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
               "loader_add_dev_ext_table: Could not insert into hash table; is "
               "it full?");

    return false;
}
  4739. static bool loader_name_in_dev_ext_table(struct loader_instance *inst, uint32_t *idx, const char *funcName) {
  4740. uint32_t alt_idx;
  4741. if (inst->dev_ext_disp_hash[*idx].func_name && !strcmp(inst->dev_ext_disp_hash[*idx].func_name, funcName)) return true;
  4742. // funcName wasn't at the primary spot in the hash table
  4743. // search the list of secondary locations (shallow search, not deep search)
  4744. for (uint32_t i = 0; i < inst->dev_ext_disp_hash[*idx].list.count; i++) {
  4745. alt_idx = inst->dev_ext_disp_hash[*idx].list.index[i];
  4746. if (!strcmp(inst->dev_ext_disp_hash[*idx].func_name, funcName)) {
  4747. *idx = alt_idx;
  4748. return true;
  4749. }
  4750. }
  4751. return false;
  4752. }
// This function returns a generic trampoline code address for unknown entry
// points. Presumably, these unknown entry points (as given by funcName) are
// device extension entrypoints. A hash table is used to keep a list of
// unknown entry points and their mapping to the device extension dispatch
// table (struct loader_dev_ext_dispatch_table).
// \returns
// For a given entry point string (funcName), if an existing mapping is found,
// the trampoline address for that mapping is returned. Otherwise, this
// unknown entry point has not been seen yet: next check whether a layer or
// ICD supports it. If so, a new entry in the hash table is initialized and
// the trampoline address for the new entry is returned. NULL is returned if
// the hash table is full or if no discovered layer or ICD returns a non-NULL
// GetProcAddr for it.
  4769. void *loader_dev_ext_gpa(struct loader_instance *inst, const char *funcName) {
  4770. uint32_t idx;
  4771. uint32_t seed = 0;
  4772. idx = murmurhash(funcName, strlen(funcName), seed) % MAX_NUM_UNKNOWN_EXTS;
  4773. if (loader_name_in_dev_ext_table(inst, &idx, funcName))
  4774. // found funcName already in hash
  4775. return loader_get_dev_ext_trampoline(idx);
  4776. // Check if funcName is supported in either ICDs or a layer library
  4777. if (!loader_check_icds_for_dev_ext_address(inst, funcName) &&
  4778. !loader_check_layer_list_for_dev_ext_address(&inst->app_activated_layer_list, funcName)) {
  4779. // if support found in layers continue on
  4780. return NULL;
  4781. }
  4782. if (loader_add_dev_ext_table(inst, &idx, funcName)) {
  4783. // successfully added new table entry
  4784. // init any dev dispatch table entries as needed
  4785. loader_init_dispatch_dev_ext_entry(inst, NULL, idx, funcName);
  4786. return loader_get_dev_ext_trampoline(idx);
  4787. }
  4788. return NULL;
  4789. }
  4790. static bool loader_check_icds_for_phys_dev_ext_address(struct loader_instance *inst, const char *funcName) {
  4791. struct loader_icd_term *icd_term;
  4792. icd_term = inst->icd_terms;
  4793. while (NULL != icd_term) {
  4794. if (icd_term->scanned_icd->interface_version >= MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION &&
  4795. icd_term->scanned_icd->GetPhysicalDeviceProcAddr(icd_term->instance, funcName))
  4796. // this icd supports funcName
  4797. return true;
  4798. icd_term = icd_term->next;
  4799. }
  4800. return false;
  4801. }
  4802. static bool loader_check_layer_list_for_phys_dev_ext_address(struct loader_instance *inst, const char *funcName) {
  4803. struct loader_layer_properties *layer_prop_list = inst->expanded_activated_layer_list.list;
  4804. for (uint32_t layer = 0; layer < inst->expanded_activated_layer_list.count; ++layer) {
  4805. // If this layer supports the vk_layerGetPhysicalDeviceProcAddr, then call
  4806. // it and see if it returns a valid pointer for this function name.
  4807. if (layer_prop_list[layer].interface_version > 1) {
  4808. const struct loader_layer_functions *const functions = &(layer_prop_list[layer].functions);
  4809. if (NULL != functions->get_physical_device_proc_addr &&
  4810. NULL != functions->get_physical_device_proc_addr((VkInstance)inst->instance, funcName)) {
  4811. return true;
  4812. }
  4813. }
  4814. }
  4815. return false;
  4816. }
  4817. static void loader_free_phys_dev_ext_table(struct loader_instance *inst) {
  4818. for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) {
  4819. loader_instance_heap_free(inst, inst->phys_dev_ext_disp_hash[i].func_name);
  4820. loader_instance_heap_free(inst, inst->phys_dev_ext_disp_hash[i].list.index);
  4821. }
  4822. memset(inst->phys_dev_ext_disp_hash, 0, sizeof(inst->phys_dev_ext_disp_hash));
  4823. }
  4824. static bool loader_add_phys_dev_ext_table(struct loader_instance *inst, uint32_t *ptr_idx, const char *funcName) {
  4825. uint32_t i;
  4826. uint32_t idx = *ptr_idx;
  4827. struct loader_dispatch_hash_list *list = &inst->phys_dev_ext_disp_hash[idx].list;
  4828. if (!inst->phys_dev_ext_disp_hash[idx].func_name) {
  4829. // no entry here at this idx, so use it
  4830. assert(list->capacity == 0);
  4831. inst->phys_dev_ext_disp_hash[idx].func_name =
  4832. (char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  4833. if (inst->phys_dev_ext_disp_hash[idx].func_name == NULL) {
  4834. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  4835. "loader_add_phys_dev_ext_table() can't allocate memory for "
  4836. "func_name");
  4837. return false;
  4838. }
  4839. strncpy(inst->phys_dev_ext_disp_hash[idx].func_name, funcName, strlen(funcName) + 1);
  4840. return true;
  4841. }
  4842. // check for enough capacity
  4843. if (list->capacity == 0) {
  4844. list->index = loader_instance_heap_alloc(inst, 8 * sizeof(*(list->index)), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  4845. if (list->index == NULL) {
  4846. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_add_phys_dev_ext_table() can't allocate list memory");
  4847. return false;
  4848. }
  4849. list->capacity = 8 * sizeof(*(list->index));
  4850. } else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) {
  4851. void *new_ptr = loader_instance_heap_realloc(inst, list->index, list->capacity, list->capacity * 2,
  4852. VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  4853. if (NULL == new_ptr) {
  4854. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_add_phys_dev_ext_table() can't reallocate list memory");
  4855. return false;
  4856. }
  4857. list->index = new_ptr;
  4858. list->capacity *= 2;
  4859. }
  4860. // find an unused index in the hash table and use it
  4861. i = (idx + 1) % MAX_NUM_UNKNOWN_EXTS;
  4862. do {
  4863. if (!inst->phys_dev_ext_disp_hash[i].func_name) {
  4864. assert(inst->phys_dev_ext_disp_hash[i].list.capacity == 0);
  4865. inst->phys_dev_ext_disp_hash[i].func_name =
  4866. (char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  4867. if (inst->phys_dev_ext_disp_hash[i].func_name == NULL) {
  4868. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  4869. "loader_add_dev_ext_table() can't reallocate "
  4870. "func_name memory");
  4871. return false;
  4872. }
  4873. strncpy(inst->phys_dev_ext_disp_hash[i].func_name, funcName, strlen(funcName) + 1);
  4874. list->index[list->count] = i;
  4875. list->count++;
  4876. *ptr_idx = i;
  4877. return true;
  4878. }
  4879. i = (i + 1) % MAX_NUM_UNKNOWN_EXTS;
  4880. } while (i != idx);
  4881. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  4882. "loader_add_phys_dev_ext_table() couldn't insert into hash table; is "
  4883. "it full?");
  4884. return false;
  4885. }
  4886. static bool loader_name_in_phys_dev_ext_table(struct loader_instance *inst, uint32_t *idx, const char *funcName) {
  4887. uint32_t alt_idx;
  4888. if (inst->phys_dev_ext_disp_hash[*idx].func_name && !strcmp(inst->phys_dev_ext_disp_hash[*idx].func_name, funcName))
  4889. return true;
  4890. // funcName wasn't at the primary spot in the hash table
  4891. // search the list of secondary locations (shallow search, not deep search)
  4892. for (uint32_t i = 0; i < inst->phys_dev_ext_disp_hash[*idx].list.count; i++) {
  4893. alt_idx = inst->phys_dev_ext_disp_hash[*idx].list.index[i];
  4894. if (!strcmp(inst->phys_dev_ext_disp_hash[*idx].func_name, funcName)) {
  4895. *idx = alt_idx;
  4896. return true;
  4897. }
  4898. }
  4899. return false;
  4900. }
  4901. // This function returns a generic trampoline and/or terminator function
  4902. // address for any unknown physical device extension commands. A hash
  4903. // table is used to keep a list of unknown entry points and their
  4904. // mapping to the physical device extension dispatch table (struct
  4905. // loader_phys_dev_ext_dispatch_table).
  4906. // For a given entry point string (funcName), if an existing mapping is
  4907. // found, then the trampoline address for that mapping is returned in
  4908. // tramp_addr (if it is not NULL) and the terminator address for that
  4909. // mapping is returned in term_addr (if it is not NULL). Otherwise,
  4910. // this unknown entry point has not been seen yet.
  4911. // If it has not been seen before, and perform_checking is 'true',
  4912. // check if a layer or and ICD supports it. If so then a new entry in
  4913. // the hash table is initialized and the trampoline and/or terminator
  4914. // addresses are returned.
  4915. // Null is returned if the hash table is full or if no discovered layer or
  4916. // ICD returns a non-NULL GetProcAddr for it.
  4917. bool loader_phys_dev_ext_gpa(struct loader_instance *inst, const char *funcName, bool perform_checking, void **tramp_addr,
  4918. void **term_addr) {
  4919. uint32_t idx;
  4920. uint32_t seed = 0;
  4921. bool success = false;
  4922. if (inst == NULL) {
  4923. goto out;
  4924. }
  4925. if (NULL != tramp_addr) {
  4926. *tramp_addr = NULL;
  4927. }
  4928. if (NULL != term_addr) {
  4929. *term_addr = NULL;
  4930. }
  4931. // We should always check to see if any ICD supports it.
  4932. if (!loader_check_icds_for_phys_dev_ext_address(inst, funcName)) {
  4933. // If we're not checking layers, or we are and it's not in a layer, just
  4934. // return
  4935. if (!perform_checking || !loader_check_layer_list_for_phys_dev_ext_address(inst, funcName)) {
  4936. goto out;
  4937. }
  4938. }
  4939. idx = murmurhash(funcName, strlen(funcName), seed) % MAX_NUM_UNKNOWN_EXTS;
  4940. if (perform_checking && !loader_name_in_phys_dev_ext_table(inst, &idx, funcName)) {
  4941. uint32_t i;
  4942. bool added = false;
  4943. // Only need to add first one to get index in Instance. Others will use
  4944. // the same index.
  4945. if (!added && loader_add_phys_dev_ext_table(inst, &idx, funcName)) {
  4946. added = true;
  4947. }
  4948. // Setup the ICD function pointers
  4949. struct loader_icd_term *icd_term = inst->icd_terms;
  4950. while (NULL != icd_term) {
  4951. if (MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION <= icd_term->scanned_icd->interface_version &&
  4952. NULL != icd_term->scanned_icd->GetPhysicalDeviceProcAddr) {
  4953. icd_term->phys_dev_ext[idx] =
  4954. (PFN_PhysDevExt)icd_term->scanned_icd->GetPhysicalDeviceProcAddr(icd_term->instance, funcName);
  4955. // Make sure we set the instance dispatch to point to the
  4956. // loader's terminator now since we can at least handle it
  4957. // in one ICD.
  4958. inst->disp->phys_dev_ext[idx] = loader_get_phys_dev_ext_termin(idx);
  4959. } else {
  4960. icd_term->phys_dev_ext[idx] = NULL;
  4961. }
  4962. icd_term = icd_term->next;
  4963. }
  4964. // Now, search for the first layer attached and query using it to get
  4965. // the first entry point.
  4966. for (i = 0; i < inst->expanded_activated_layer_list.count; i++) {
  4967. struct loader_layer_properties *layer_prop = &inst->expanded_activated_layer_list.list[i];
  4968. if (layer_prop->interface_version > 1 && NULL != layer_prop->functions.get_physical_device_proc_addr) {
  4969. inst->disp->phys_dev_ext[idx] =
  4970. (PFN_PhysDevExt)layer_prop->functions.get_physical_device_proc_addr((VkInstance)inst->instance, funcName);
  4971. if (NULL != inst->disp->phys_dev_ext[idx]) {
  4972. break;
  4973. }
  4974. }
  4975. }
  4976. }
  4977. if (NULL != tramp_addr) {
  4978. *tramp_addr = loader_get_phys_dev_ext_tramp(idx);
  4979. }
  4980. if (NULL != term_addr) {
  4981. *term_addr = loader_get_phys_dev_ext_termin(idx);
  4982. }
  4983. success = true;
  4984. out:
  4985. return success;
  4986. }
  4987. struct loader_instance *loader_get_instance(const VkInstance instance) {
  4988. // look up the loader_instance in our list by comparing dispatch tables, as
  4989. // there is no guarantee the instance is still a loader_instance* after any
  4990. // layers which wrap the instance object.
  4991. const VkLayerInstanceDispatchTable *disp;
  4992. struct loader_instance *ptr_instance = NULL;
  4993. disp = loader_get_instance_layer_dispatch(instance);
  4994. for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
  4995. if (&inst->disp->layer_inst_disp == disp) {
  4996. ptr_instance = inst;
  4997. break;
  4998. }
  4999. }
  5000. return ptr_instance;
  5001. }
  5002. static loader_platform_dl_handle loaderOpenLayerFile(const struct loader_instance *inst, const char *chain_type,
  5003. struct loader_layer_properties *prop) {
  5004. if ((prop->lib_handle = loader_platform_open_library(prop->lib_name)) == NULL) {
  5005. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, loader_platform_open_library_error(prop->lib_name));
  5006. } else {
  5007. loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Loading layer library %s", prop->lib_name);
  5008. }
  5009. return prop->lib_handle;
  5010. }
  5011. static void loaderCloseLayerFile(const struct loader_instance *inst, struct loader_layer_properties *prop) {
  5012. if (prop->lib_handle) {
  5013. loader_platform_close_library(prop->lib_handle);
  5014. loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Unloading layer library %s", prop->lib_name);
  5015. prop->lib_handle = NULL;
  5016. }
  5017. }
  5018. void loaderDeactivateLayers(const struct loader_instance *instance, struct loader_device *device, struct loader_layer_list *list) {
  5019. // Delete instance list of enabled layers and close any layer libraries
  5020. for (uint32_t i = 0; i < list->count; i++) {
  5021. struct loader_layer_properties *layer_prop = &list->list[i];
  5022. loaderCloseLayerFile(instance, layer_prop);
  5023. }
  5024. loaderDestroyLayerList(instance, device, list);
  5025. }
  5026. // Go through the search_list and find any layers which match type. If layer
  5027. // type match is found in then add it to ext_list.
  5028. static void loaderAddImplicitLayers(const struct loader_instance *inst, struct loader_layer_list *target_list,
  5029. struct loader_layer_list *expanded_target_list, const struct loader_layer_list *source_list) {
  5030. for (uint32_t src_layer = 0; src_layer < source_list->count; src_layer++) {
  5031. const struct loader_layer_properties *prop = &source_list->list[src_layer];
  5032. if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
  5033. loaderAddImplicitLayer(inst, prop, target_list, expanded_target_list, source_list);
  5034. }
  5035. }
  5036. }
  5037. // Get the layer name(s) from the env_name environment variable. If layer is found in
  5038. // search_list then add it to layer_list. But only add it to layer_list if type_flags matches.
  5039. static VkResult loaderAddEnvironmentLayers(struct loader_instance *inst, const enum layer_type_flags type_flags,
  5040. const char *env_name, struct loader_layer_list *target_list,
  5041. struct loader_layer_list *expanded_target_list,
  5042. const struct loader_layer_list *source_list) {
  5043. VkResult res = VK_SUCCESS;
  5044. char *next, *name;
  5045. char *layer_env = loader_getenv(env_name, inst);
  5046. if (layer_env == NULL) {
  5047. goto out;
  5048. }
  5049. name = loader_stack_alloc(strlen(layer_env) + 1);
  5050. if (name == NULL) {
  5051. goto out;
  5052. }
  5053. strcpy(name, layer_env);
  5054. while (name && *name) {
  5055. next = loader_get_next_path(name);
  5056. res = loaderAddLayerNameToList(inst, name, type_flags, source_list, target_list, expanded_target_list);
  5057. if (res != VK_SUCCESS) {
  5058. goto out;
  5059. }
  5060. name = next;
  5061. }
  5062. out:
  5063. if (layer_env != NULL) {
  5064. loader_free_getenv(layer_env, inst);
  5065. }
  5066. return res;
  5067. }
// Build the instance's activated-layer lists from three sources, in priority
// order: implicit layers, the VK_INSTANCE_LAYERS environment variable, and
// the application's ppEnabledLayerNames.  Both the application-facing list
// and the expanded list (which includes meta-layer components) are filled.
// Returns VK_SUCCESS, or an error from list allocation / layer lookup.
VkResult loaderEnableInstanceLayers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
                                    const struct loader_layer_list *instance_layers) {
    VkResult err = VK_SUCCESS;
    uint16_t layer_api_major_version;
    uint16_t layer_api_minor_version;
    uint32_t i;
    struct loader_layer_properties *prop;

    assert(inst && "Cannot have null instance");

    if (!loaderInitLayerList(inst, &inst->app_activated_layer_list)) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "loaderEnableInstanceLayers: Failed to initialize application version of the layer list");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    if (!loaderInitLayerList(inst, &inst->expanded_activated_layer_list)) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "loaderEnableInstanceLayers: Failed to initialize expanded version of the layer list");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    // Add any implicit layers first
    loaderAddImplicitLayers(inst, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list, instance_layers);

    // Add any layers specified via environment variable next
    err = loaderAddEnvironmentLayers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, "VK_INSTANCE_LAYERS", &inst->app_activated_layer_list,
                                     &inst->expanded_activated_layer_list, instance_layers);
    if (err != VK_SUCCESS) {
        goto out;
    }

    // Add layers specified by the application
    err = loaderAddLayerNamesToList(inst, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list,
                                    pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames, instance_layers);

    // NOTE(review): err from loaderAddLayerNamesToList is not checked here; the
    // loop below only emits warnings, and err is returned at 'out' regardless.
    for (i = 0; i < inst->expanded_activated_layer_list.count; i++) {
        // Verify that the layer api version is at least that of the application's request, if not, throw a warning since
        // undefined behavior could occur.
        prop = inst->expanded_activated_layer_list.list + i;
        layer_api_major_version = VK_VERSION_MAJOR(prop->info.specVersion);
        layer_api_minor_version = VK_VERSION_MINOR(prop->info.specVersion);
        if (inst->app_api_major_version > layer_api_major_version ||
            (inst->app_api_major_version == layer_api_major_version && inst->app_api_minor_version > layer_api_minor_version)) {
            loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                       "loader_add_to_layer_list: Explicit layer %s is using an old API version %" PRIu16 ".%" PRIu16
                       " versus application requested %" PRIu16 ".%" PRIu16,
                       prop->info.layerName, layer_api_major_version, layer_api_minor_version, inst->app_api_major_version,
                       inst->app_api_minor_version);
        }
    }

out:
    return err;
}
  5115. // Determine the layer interface version to use.
  5116. bool loaderGetLayerInterfaceVersion(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version,
  5117. VkNegotiateLayerInterface *interface_struct) {
  5118. memset(interface_struct, 0, sizeof(VkNegotiateLayerInterface));
  5119. interface_struct->sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
  5120. interface_struct->loaderLayerInterfaceVersion = 1;
  5121. interface_struct->pNext = NULL;
  5122. if (fp_negotiate_layer_version != NULL) {
  5123. // Layer supports the negotiation API, so call it with the loader's
  5124. // latest version supported
  5125. interface_struct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
  5126. VkResult result = fp_negotiate_layer_version(interface_struct);
  5127. if (result != VK_SUCCESS) {
  5128. // Layer no longer supports the loader's latest interface version so
  5129. // fail loading the Layer
  5130. return false;
  5131. }
  5132. }
  5133. if (interface_struct->loaderLayerInterfaceVersion < MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION) {
  5134. // Loader no longer supports the layer's latest interface version so
  5135. // fail loading the layer
  5136. return false;
  5137. }
  5138. return true;
  5139. }
// Callback handed to layers (via VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK) so a
// layer can create a VkDevice from within the instance chain.  Resolves the
// real (non-trampoline) physical device, gathers and validates device
// extensions, clones the instance's activated layer lists into the new
// loader_device, and builds the device call chain.  On any failure, the
// partially-built device is destroyed before returning.
VKAPI_ATTR VkResult VKAPI_CALL loader_layer_create_device(VkInstance instance, VkPhysicalDevice physicalDevice,
                                                          const VkDeviceCreateInfo *pCreateInfo,
                                                          const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
                                                          PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA) {
    VkResult res;
    VkPhysicalDevice internal_device = VK_NULL_HANDLE;
    struct loader_device *dev = NULL;
    struct loader_instance *inst = NULL;

    if (instance != NULL) {
        // Called with a real instance handle: physicalDevice is already the
        // internal (layer/ICD-facing) handle.
        inst = loader_get_instance(instance);
        internal_device = physicalDevice;
    } else {
        // No instance given: physicalDevice is a loader trampoline object, so
        // unwrap it to get the real handle and the owning instance.
        struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice;
        internal_device = phys_dev->phys_dev;
        inst = (struct loader_instance *)phys_dev->this_instance;
    }

    // Get the physical device (ICD) extensions
    struct loader_extension_list icd_exts;
    icd_exts.list = NULL;
    res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
    if (VK_SUCCESS != res) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "vkCreateDevice: Failed to create ICD extension list");
        goto out;
    }

    // Query device extensions through the calling layer's GIPA when provided,
    // otherwise through the instance dispatch table.
    PFN_vkEnumerateDeviceExtensionProperties enumDeviceExtensionProperties = NULL;
    if (layerGIPA != NULL) {
        enumDeviceExtensionProperties =
            (PFN_vkEnumerateDeviceExtensionProperties)layerGIPA(instance, "vkEnumerateDeviceExtensionProperties");
    } else {
        enumDeviceExtensionProperties = inst->disp->layer_inst_disp.EnumerateDeviceExtensionProperties;
    }
    res = loader_add_device_extensions(inst, enumDeviceExtensionProperties, internal_device, "Unknown", &icd_exts);
    if (res != VK_SUCCESS) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "vkCreateDevice: Failed to add extensions to list");
        goto out;
    }

    // Make sure requested extensions to be enabled are supported
    res = loader_validate_device_extensions(inst, &inst->expanded_activated_layer_list, &icd_exts, pCreateInfo);
    if (res != VK_SUCCESS) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "vkCreateDevice: Failed to validate extensions in list");
        goto out;
    }

    dev = loader_create_logical_device(inst, pAllocator);
    if (dev == NULL) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }

    // Copy the application enabled instance layer list into the device
    if (NULL != inst->app_activated_layer_list.list) {
        dev->app_activated_layer_list.capacity = inst->app_activated_layer_list.capacity;
        dev->app_activated_layer_list.count = inst->app_activated_layer_list.count;
        dev->app_activated_layer_list.list =
            loader_device_heap_alloc(dev, inst->app_activated_layer_list.capacity, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
        if (dev->app_activated_layer_list.list == NULL) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "vkCreateDevice: Failed to allocate application activated layer list of size %d.",
                       inst->app_activated_layer_list.capacity);
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        memcpy(dev->app_activated_layer_list.list, inst->app_activated_layer_list.list,
               sizeof(*dev->app_activated_layer_list.list) * dev->app_activated_layer_list.count);
    } else {
        dev->app_activated_layer_list.capacity = 0;
        dev->app_activated_layer_list.count = 0;
        dev->app_activated_layer_list.list = NULL;
    }

    // Copy the expanded enabled instance layer list into the device
    if (NULL != inst->expanded_activated_layer_list.list) {
        dev->expanded_activated_layer_list.capacity = inst->expanded_activated_layer_list.capacity;
        dev->expanded_activated_layer_list.count = inst->expanded_activated_layer_list.count;
        dev->expanded_activated_layer_list.list =
            loader_device_heap_alloc(dev, inst->expanded_activated_layer_list.capacity, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
        if (dev->expanded_activated_layer_list.list == NULL) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "vkCreateDevice: Failed to allocate expanded activated layer list of size %d.",
                       inst->expanded_activated_layer_list.capacity);
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        memcpy(dev->expanded_activated_layer_list.list, inst->expanded_activated_layer_list.list,
               sizeof(*dev->expanded_activated_layer_list.list) * dev->expanded_activated_layer_list.count);
    } else {
        dev->expanded_activated_layer_list.capacity = 0;
        dev->expanded_activated_layer_list.count = 0;
        dev->expanded_activated_layer_list.list = NULL;
    }

    res = loader_create_device_chain(internal_device, pCreateInfo, pAllocator, inst, dev, layerGIPA, nextGDPA);
    if (res != VK_SUCCESS) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "vkCreateDevice: Failed to create device chain.");
        goto out;
    }

    *pDevice = dev->chain_device;

    // Initialize any device extension dispatch entry's from the instance list
    loader_init_dispatch_dev_ext(inst, dev);

    // Initialize WSI device extensions as part of core dispatch since loader
    // has dedicated trampoline code for these
    loader_init_device_extension_dispatch_table(&dev->loader_dispatch, inst->disp->layer_inst_disp.GetInstanceProcAddr,
                                                dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, inst->instance, *pDevice);

out:
    // Failure cleanup
    if (VK_SUCCESS != res) {
        if (NULL != dev) {
            loader_destroy_logical_device(inst, dev, pAllocator);
        }
    }
    if (NULL != icd_exts.list) {
        loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
    }
    return res;
}
  5251. VKAPI_ATTR void VKAPI_CALL loader_layer_destroy_device(VkDevice device, const VkAllocationCallbacks *pAllocator,
  5252. PFN_vkDestroyDevice destroyFunction) {
  5253. struct loader_device *dev;
  5254. if (device == VK_NULL_HANDLE) {
  5255. return;
  5256. }
  5257. struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
  5258. const struct loader_instance *inst = icd_term->this_instance;
  5259. destroyFunction(device, pAllocator);
  5260. dev->chain_device = NULL;
  5261. dev->icd_device = NULL;
  5262. loader_remove_logical_device(inst, icd_term, dev, pAllocator);
  5263. }
// Given the list of layers to activate in the loader_instance
// structure. This function will add a VkLayerInstanceCreateInfo
// structure to the VkInstanceCreateInfo.pNext pointer.
// Each activated layer will have it's own VkLayerInstanceLink
// structure that tells the layer what Get*ProcAddr to call to
// get function pointers to the next layer down.
// Once the chain info has been created this function will
// execute the CreateInstance call chain. Each layer will
// then have an opportunity in it's CreateInstance function
// to setup it's dispatch table when the lower layer returns
// successfully.
// Each layer can wrap or not-wrap the returned VkInstance object
// as it sees fit.
// The instance chain is terminated by a loader function
// that will call CreateInstance on all available ICD's and
// cache those VkInstance objects for future use.
VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                      struct loader_instance *inst, VkInstance *created_instance) {
    uint32_t activated_layers = 0;
    VkLayerInstanceCreateInfo chain_info;
    VkLayerInstanceLink *layer_instance_link_info = NULL;
    VkInstanceCreateInfo loader_create_info;
    VkResult res;

    // "next_*" track the entry points of the layer below the one currently
    // being linked; they start at the loader's own terminators.
    PFN_vkGetInstanceProcAddr next_gipa = loader_gpa_instance_internal;
    PFN_vkGetInstanceProcAddr cur_gipa = loader_gpa_instance_internal;
    PFN_vkGetDeviceProcAddr cur_gdpa = loader_gpa_device_internal;
    PFN_GetPhysicalDeviceProcAddr next_gpdpa = loader_gpdpa_instance_internal;
    PFN_GetPhysicalDeviceProcAddr cur_gpdpa = loader_gpdpa_instance_internal;

    memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo));

    if (inst->expanded_activated_layer_list.count > 0) {
        chain_info.u.pLayerInfo = NULL;
        chain_info.pNext = pCreateInfo->pNext;
        chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
        chain_info.function = VK_LAYER_LINK_INFO;
        loader_create_info.pNext = &chain_info;

        layer_instance_link_info = loader_stack_alloc(sizeof(VkLayerInstanceLink) * inst->expanded_activated_layer_list.count);
        if (!layer_instance_link_info) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "loader_create_instance_chain: Failed to alloc Instance"
                       " objects for layer");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        // Create instance chain of enabled layers.  Iterate from the bottom
        // of the list upward so each link points at the layer below it.
        for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
            struct loader_layer_properties *layer_prop = &inst->expanded_activated_layer_list.list[i];
            loader_platform_dl_handle lib_handle;

            lib_handle = loaderOpenLayerFile(inst, "instance", layer_prop);
            if (!lib_handle) {
                // Layer library could not be opened; skip it rather than fail
                // the whole instance.
                continue;
            }

            if (NULL == layer_prop->functions.negotiate_layer_interface) {
                PFN_vkNegotiateLoaderLayerInterfaceVersion negotiate_interface = NULL;
                bool functions_in_interface = false;
                // Look up the negotiation entry point under either the default
                // name or the manifest-supplied override.
                if (strlen(layer_prop->functions.str_negotiate_interface) == 0) {
                    negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
                        lib_handle, "vkNegotiateLoaderLayerInterfaceVersion");
                } else {
                    negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
                        lib_handle, layer_prop->functions.str_negotiate_interface);
                }

                // If we can negotiate an interface version, then we can also
                // get everything we need from the one function call, so try
                // that first, and see if we can get all the function pointers
                // necessary from that one call.
                if (NULL != negotiate_interface) {
                    layer_prop->functions.negotiate_layer_interface = negotiate_interface;

                    VkNegotiateLayerInterface interface_struct;

                    if (loaderGetLayerInterfaceVersion(negotiate_interface, &interface_struct)) {
                        // Go ahead and set the properties version to the
                        // correct value.
                        layer_prop->interface_version = interface_struct.loaderLayerInterfaceVersion;

                        // If the interface is 2 or newer, we have access to the
                        // new GetPhysicalDeviceProcAddr function, so grab it,
                        // and the other necessary functions, from the
                        // structure.
                        if (interface_struct.loaderLayerInterfaceVersion > 1) {
                            cur_gipa = interface_struct.pfnGetInstanceProcAddr;
                            cur_gdpa = interface_struct.pfnGetDeviceProcAddr;
                            cur_gpdpa = interface_struct.pfnGetPhysicalDeviceProcAddr;
                            if (cur_gipa != NULL) {
                                // We've set the functions, so make sure we
                                // don't do the unnecessary calls later.
                                functions_in_interface = true;
                            }
                        }
                    }
                }

                if (!functions_in_interface) {
                    // Fall back to resolving vkGetInstanceProcAddr directly
                    // (by default name or manifest override).
                    if ((cur_gipa = layer_prop->functions.get_instance_proc_addr) == NULL) {
                        if (strlen(layer_prop->functions.str_gipa) == 0) {
                            cur_gipa =
                                (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
                            layer_prop->functions.get_instance_proc_addr = cur_gipa;
                        } else {
                            cur_gipa = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle,
                                                                                                   layer_prop->functions.str_gipa);
                        }

                        if (NULL == cur_gipa) {
                            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                                       "loader_create_instance_chain: Failed to"
                                       " find \'vkGetInstanceProcAddr\' in "
                                       "layer %s",
                                       layer_prop->lib_name);
                            continue;
                        }
                    }
                }
            }

            // Link this layer into the chain: it sees the previous layer's
            // entry points as "next".
            layer_instance_link_info[activated_layers].pNext = chain_info.u.pLayerInfo;
            layer_instance_link_info[activated_layers].pfnNextGetInstanceProcAddr = next_gipa;
            layer_instance_link_info[activated_layers].pfnNextGetPhysicalDeviceProcAddr = next_gpdpa;
            next_gipa = cur_gipa;
            if (layer_prop->interface_version > 1 && cur_gpdpa != NULL) {
                layer_prop->functions.get_physical_device_proc_addr = cur_gpdpa;
                next_gpdpa = cur_gpdpa;
            }
            if (layer_prop->interface_version > 1 && cur_gipa != NULL) {
                layer_prop->functions.get_instance_proc_addr = cur_gipa;
            }
            if (layer_prop->interface_version > 1 && cur_gdpa != NULL) {
                layer_prop->functions.get_device_proc_addr = cur_gdpa;
            }

            chain_info.u.pLayerInfo = &layer_instance_link_info[activated_layers];

            loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Insert instance layer %s (%s)", layer_prop->info.layerName,
                       layer_prop->lib_name);

            activated_layers++;
        }
    }

    VkLoaderFeatureFlags feature_flags = 0;
#if defined(_WIN32)
    // On Windows, advertise physical-device sorting support when DXGI 1.6
    // (IDXGIFactory6) is available.
    IDXGIFactory6* dxgi_factory = NULL;
    HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, (void **)&dxgi_factory);
    if (hres == S_OK) {
        feature_flags |= VK_LOADER_FEATURE_PHYSICAL_DEVICE_SORTING;
        dxgi_factory->lpVtbl->Release(dxgi_factory);
    }
#endif

    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)next_gipa(*created_instance, "vkCreateInstance");
    if (fpCreateInstance) {
        // Prepend loader-private create-info structs (dispatch-setter
        // callback, layer create-device callbacks, feature flags) ahead of
        // the layer link info before calling down the chain.
        const VkLayerInstanceCreateInfo instance_dispatch = {
            .sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO,
            .pNext = loader_create_info.pNext,
            .function = VK_LOADER_DATA_CALLBACK,
            .u = {
                .pfnSetInstanceLoaderData = vkSetInstanceDispatch,
            },
        };
        const VkLayerInstanceCreateInfo device_callback = {
            .sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO,
            .pNext = &instance_dispatch,
            .function = VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK,
            .u = {
                .layerDevice = {
                    .pfnLayerCreateDevice = loader_layer_create_device,
                    .pfnLayerDestroyDevice = loader_layer_destroy_device,
                },
            },
        };
        const VkLayerInstanceCreateInfo loader_features = {
            .sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO,
            .pNext = &device_callback,
            .function = VK_LOADER_FEATURES,
            .u = {
                .loaderFeatures = feature_flags,
            },
        };
        loader_create_info.pNext = &loader_features;

        res = fpCreateInstance(&loader_create_info, pAllocator, created_instance);
    } else {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "loader_create_instance_chain: Failed to find "
                   "\'vkCreateInstance\'");
        // Couldn't find CreateInstance function!
        res = VK_ERROR_INITIALIZATION_FAILED;
    }

    if (res == VK_SUCCESS) {
        // The chain succeeded: build the core dispatch table through the top
        // of the layer chain and remember the resulting instance handle.
        loader_init_instance_core_dispatch_table(&inst->disp->layer_inst_disp, next_gipa, *created_instance);
        inst->instance = *created_instance;
    }

    return res;
}
  5445. void loaderActivateInstanceLayerExtensions(struct loader_instance *inst, VkInstance created_inst) {
  5446. loader_init_instance_extension_dispatch_table(&inst->disp->layer_inst_disp, inst->disp->layer_inst_disp.GetInstanceProcAddr,
  5447. created_inst);
  5448. }
// Build and invoke the vkCreateDevice call chain for physical device 'pd'.
// Walks dev->expanded_activated_layer_list from the end (closest to the app)
// down toward the terminator, resolving each layer's vkGetInstanceProcAddr /
// vkGetDeviceProcAddr and linking them through a VkLayerDeviceCreateInfo
// (VK_LAYER_LINK_INFO) struct placed at the head of the pNext chain. The
// topmost vkCreateDevice is then called and the resulting device's dispatch
// table is initialized from the chain's GetDeviceProcAddr.
//
// If callingLayer is non-NULL (a layer is itself creating the device), chain
// construction stops when that layer is reached, and *layerNextGDPA (if
// provided) receives the GetDeviceProcAddr of the next entity below it.
// Returns VK_SUCCESS, an error from the chained vkCreateDevice,
// VK_ERROR_OUT_OF_HOST_MEMORY, or VK_ERROR_INITIALIZATION_FAILED if no
// vkCreateDevice could be found in the layers or ICD.
VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
                                    const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
                                    struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
                                    PFN_vkGetDeviceProcAddr *layerNextGDPA) {
    uint32_t activated_layers = 0;
    VkLayerDeviceLink *layer_device_link_info;
    VkLayerDeviceCreateInfo chain_info;
    VkDeviceCreateInfo loader_create_info;
    VkResult res;
    // Start with the loader's own terminators; each activated layer is pushed
    // in front of these as the chain is built.
    PFN_vkGetDeviceProcAddr fpGDPA = NULL, nextGDPA = loader_gpa_device_internal;
    PFN_vkGetInstanceProcAddr fpGIPA = NULL, nextGIPA = loader_gpa_instance_internal;
    // Shallow copy so the pNext chain head can be modified without touching
    // the caller's create info.
    memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo));
    // Before we continue, we need to find out if the KHR_device_group extension is in the enabled list. If it is, we then
    // need to look for the corresponding VkDeviceGroupDeviceCreateInfoKHR struct in the device list. This is because we
    // need to replace all the incoming physical device values (which are really loader trampoline physical device values)
    // with the layer/ICD version.
    {
        VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
        VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
        while (NULL != pNext) {
            if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
                VkDeviceGroupDeviceCreateInfoKHR *cur_struct = (VkDeviceGroupDeviceCreateInfoKHR *)pNext;
                if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
                    // Stack allocations live until this function returns, which
                    // covers the fpCreateDevice call below.
                    VkDeviceGroupDeviceCreateInfoKHR *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfoKHR));
                    VkPhysicalDevice *phys_dev_array = NULL;
                    if (NULL == temp_struct) {
                        return VK_ERROR_OUT_OF_HOST_MEMORY;
                    }
                    memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfoKHR));
                    phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
                    if (NULL == phys_dev_array) {
                        return VK_ERROR_OUT_OF_HOST_MEMORY;
                    }
                    // Before calling down, replace the incoming physical device values (which are really loader trampoline
                    // physical devices) with the next layer (or possibly even the terminator) physical device values.
                    struct loader_physical_device_tramp *cur_tramp;
                    for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
                        cur_tramp = (struct loader_physical_device_tramp *)cur_struct->pPhysicalDevices[phys_dev];
                        phys_dev_array[phys_dev] = cur_tramp->phys_dev;
                    }
                    temp_struct->pPhysicalDevices = phys_dev_array;
                    // Replace the old struct in the pNext chain with this one.
                    pPrev->pNext = (VkBaseOutStructure *)temp_struct;
                    pNext = (VkBaseOutStructure *)temp_struct;
                }
                break;
            }
            pPrev = pNext;
            pNext = pNext->pNext;
        }
    }
    layer_device_link_info = loader_stack_alloc(sizeof(VkLayerDeviceLink) * dev->expanded_activated_layer_list.count);
    if (!layer_device_link_info) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "loader_create_device_chain: Failed to alloc Device objects"
                   " for layer. Skipping Layer.");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    if (dev->expanded_activated_layer_list.count > 0) {
        chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
        chain_info.function = VK_LAYER_LINK_INFO;
        chain_info.u.pLayerInfo = NULL;
        chain_info.pNext = loader_create_info.pNext;
        loader_create_info.pNext = &chain_info;
        bool done = false;
        // Create instance chain of enabled layers
        // Iterate in reverse so the last list entry ends up closest to the app.
        for (int32_t i = dev->expanded_activated_layer_list.count - 1; i >= 0; i--) {
            struct loader_layer_properties *layer_prop = &dev->expanded_activated_layer_list.list[i];
            loader_platform_dl_handle lib_handle;
            lib_handle = loaderOpenLayerFile(inst, "device", layer_prop);
            if (!lib_handle || done) {
                continue;
            }
            // The Get*ProcAddr pointers will already be filled in if they were received from either the json file or the
            // version negotiation
            if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
                if (strlen(layer_prop->functions.str_gipa) == 0) {
                    fpGIPA = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
                    layer_prop->functions.get_instance_proc_addr = fpGIPA;
                } else
                    fpGIPA =
                        (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
                if (!fpGIPA) {
                    loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                               "loader_create_device_chain: Failed to find "
                               "\'vkGetInstanceProcAddr\' in layer %s. Skipping"
                               " layer.",
                               layer_prop->lib_name);
                    continue;
                }
            }
            // A layer creating a device ends the chain at itself: hand back the
            // next GDPA below it and skip everything above.
            if (fpGIPA == callingLayer) {
                if (layerNextGDPA != NULL) {
                    *layerNextGDPA = nextGDPA;
                }
                done = true;
                continue;
            }
            if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) {
                if (strlen(layer_prop->functions.str_gdpa) == 0) {
                    fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr");
                    layer_prop->functions.get_device_proc_addr = fpGDPA;
                } else
                    fpGDPA =
                        (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa);
                if (!fpGDPA) {
                    loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Failed to find vkGetDeviceProcAddr in layer %s",
                               layer_prop->lib_name);
                    continue;
                }
            }
            // Prepend this layer's link record so the layer above it can find
            // the next Get*ProcAddr entries to call down through.
            layer_device_link_info[activated_layers].pNext = chain_info.u.pLayerInfo;
            layer_device_link_info[activated_layers].pfnNextGetInstanceProcAddr = nextGIPA;
            layer_device_link_info[activated_layers].pfnNextGetDeviceProcAddr = nextGDPA;
            chain_info.u.pLayerInfo = &layer_device_link_info[activated_layers];
            nextGIPA = fpGIPA;
            nextGDPA = fpGDPA;
            loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Inserted device layer %s (%s)", layer_prop->info.layerName,
                       layer_prop->lib_name);
            activated_layers++;
        }
    }
    VkDevice created_device = (VkDevice)dev;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)nextGIPA(inst->instance, "vkCreateDevice");
    if (fpCreateDevice) {
        // Give the chain a callback for setting loader dispatch on
        // device-level dispatchable objects it creates.
        VkLayerDeviceCreateInfo create_info_disp;
        create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
        create_info_disp.function = VK_LOADER_DATA_CALLBACK;
        create_info_disp.u.pfnSetDeviceLoaderData = vkSetDeviceDispatch;
        create_info_disp.pNext = loader_create_info.pNext;
        loader_create_info.pNext = &create_info_disp;
        res = fpCreateDevice(pd, &loader_create_info, pAllocator, &created_device);
        if (res != VK_SUCCESS) {
            return res;
        }
        dev->chain_device = created_device;
    } else {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "loader_create_device_chain: Failed to find \'vkCreateDevice\' "
                   "in layers or ICD");
        // Couldn't find CreateDevice function!
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    // Initialize device dispatch table
    loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA, dev->chain_device);
    return res;
}
  5596. VkResult loaderValidateLayers(const struct loader_instance *inst, const uint32_t layer_count,
  5597. const char *const *ppEnabledLayerNames, const struct loader_layer_list *list) {
  5598. struct loader_layer_properties *prop;
  5599. for (uint32_t i = 0; i < layer_count; i++) {
  5600. VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]);
  5601. if (result != VK_STRING_ERROR_NONE) {
  5602. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  5603. "loaderValidateLayers: Device ppEnabledLayerNames "
  5604. "contains string that is too long or is badly formed");
  5605. return VK_ERROR_LAYER_NOT_PRESENT;
  5606. }
  5607. prop = loaderFindLayerProperty(ppEnabledLayerNames[i], list);
  5608. if (NULL == prop) {
  5609. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  5610. "loaderValidateLayers: Layer %d does not exist in the list of available layers", i);
  5611. return VK_ERROR_LAYER_NOT_PRESENT;
  5612. }
  5613. }
  5614. return VK_SUCCESS;
  5615. }
  5616. VkResult loader_validate_instance_extensions(struct loader_instance *inst, const struct loader_extension_list *icd_exts,
  5617. const struct loader_layer_list *instance_layers,
  5618. const VkInstanceCreateInfo *pCreateInfo) {
  5619. VkExtensionProperties *extension_prop;
  5620. char *env_value;
  5621. bool check_if_known = true;
  5622. VkResult res = VK_SUCCESS;
  5623. struct loader_layer_list active_layers;
  5624. struct loader_layer_list expanded_layers;
  5625. memset(&active_layers, 0, sizeof(active_layers));
  5626. memset(&expanded_layers, 0, sizeof(expanded_layers));
  5627. if (!loaderInitLayerList(inst, &active_layers)) {
  5628. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  5629. goto out;
  5630. }
  5631. if (!loaderInitLayerList(inst, &expanded_layers)) {
  5632. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  5633. goto out;
  5634. }
  5635. // Build the lists of active layers (including metalayers) and expanded layers (with metalayers resolved to their components)
  5636. loaderAddImplicitLayers(inst, &active_layers, &expanded_layers, instance_layers);
  5637. res = loaderAddEnvironmentLayers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, ENABLED_LAYERS_ENV, &active_layers, &expanded_layers,
  5638. instance_layers);
  5639. if (res != VK_SUCCESS) {
  5640. goto out;
  5641. }
  5642. res = loaderAddLayerNamesToList(inst, &active_layers, &expanded_layers, pCreateInfo->enabledLayerCount,
  5643. pCreateInfo->ppEnabledLayerNames, instance_layers);
  5644. if (VK_SUCCESS != res) {
  5645. goto out;
  5646. }
  5647. for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
  5648. VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
  5649. if (result != VK_STRING_ERROR_NONE) {
  5650. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  5651. "loader_validate_instance_extensions: Instance ppEnabledExtensionNames contains "
  5652. "string that is too long or is badly formed");
  5653. res = VK_ERROR_EXTENSION_NOT_PRESENT;
  5654. goto out;
  5655. }
  5656. // Check if a user wants to disable the instance extension filtering behavior
  5657. env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
  5658. if (NULL != env_value && atoi(env_value) != 0) {
  5659. check_if_known = false;
  5660. }
  5661. loader_free_getenv(env_value, inst);
  5662. if (check_if_known) {
  5663. // See if the extension is in the list of supported extensions
  5664. bool found = false;
  5665. for (uint32_t j = 0; LOADER_INSTANCE_EXTENSIONS[j] != NULL; j++) {
  5666. if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], LOADER_INSTANCE_EXTENSIONS[j]) == 0) {
  5667. found = true;
  5668. break;
  5669. }
  5670. }
  5671. // If it isn't in the list, return an error
  5672. if (!found) {
  5673. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  5674. "loader_validate_instance_extensions: Extension %s not found in list of known instance extensions.",
  5675. pCreateInfo->ppEnabledExtensionNames[i]);
  5676. res = VK_ERROR_EXTENSION_NOT_PRESENT;
  5677. goto out;
  5678. }
  5679. }
  5680. extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], icd_exts);
  5681. if (extension_prop) {
  5682. continue;
  5683. }
  5684. extension_prop = NULL;
  5685. // Not in global list, search layer extension lists
  5686. struct loader_layer_properties *layer_prop = NULL;
  5687. for (uint32_t j = 0; NULL == extension_prop && j < expanded_layers.count; ++j) {
  5688. extension_prop =
  5689. get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], &expanded_layers.list[j].instance_extension_list);
  5690. if (extension_prop) {
  5691. // Found the extension in one of the layers enabled by the app.
  5692. break;
  5693. }
  5694. layer_prop = loaderFindLayerProperty(expanded_layers.list[j].info.layerName, instance_layers);
  5695. if (NULL == layer_prop) {
  5696. // Should NOT get here, loaderValidateLayers should have already filtered this case out.
  5697. continue;
  5698. }
  5699. }
  5700. if (!extension_prop) {
  5701. // Didn't find extension name in any of the global layers, error out
  5702. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  5703. "loader_validate_instance_extensions: Instance extension %s not supported by available ICDs or enabled "
  5704. "layers.",
  5705. pCreateInfo->ppEnabledExtensionNames[i]);
  5706. res = VK_ERROR_EXTENSION_NOT_PRESENT;
  5707. goto out;
  5708. }
  5709. }
  5710. out:
  5711. loaderDestroyLayerList(inst, NULL, &active_layers);
  5712. loaderDestroyLayerList(inst, NULL, &expanded_layers);
  5713. return res;
  5714. }
  5715. VkResult loader_validate_device_extensions(struct loader_instance *this_instance,
  5716. const struct loader_layer_list *activated_device_layers,
  5717. const struct loader_extension_list *icd_exts, const VkDeviceCreateInfo *pCreateInfo) {
  5718. VkExtensionProperties *extension_prop;
  5719. struct loader_layer_properties *layer_prop;
  5720. for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
  5721. VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
  5722. if (result != VK_STRING_ERROR_NONE) {
  5723. loader_log(this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  5724. "loader_validate_device_extensions: Device ppEnabledExtensionNames contains "
  5725. "string that is too long or is badly formed");
  5726. return VK_ERROR_EXTENSION_NOT_PRESENT;
  5727. }
  5728. const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
  5729. extension_prop = get_extension_property(extension_name, icd_exts);
  5730. if (extension_prop) {
  5731. continue;
  5732. }
  5733. // Not in global list, search activated layer extension lists
  5734. for (uint32_t j = 0; j < activated_device_layers->count; j++) {
  5735. layer_prop = &activated_device_layers->list[j];
  5736. extension_prop = get_dev_extension_property(extension_name, &layer_prop->device_extension_list);
  5737. if (extension_prop) {
  5738. // Found the extension in one of the layers enabled by the app.
  5739. break;
  5740. }
  5741. }
  5742. if (!extension_prop) {
  5743. // Didn't find extension name in any of the device layers, error out
  5744. loader_log(this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  5745. "loader_validate_device_extensions: Device extension %s not supported by selected physical device "
  5746. "or enabled layers.",
  5747. pCreateInfo->ppEnabledExtensionNames[i]);
  5748. return VK_ERROR_EXTENSION_NOT_PRESENT;
  5749. }
  5750. }
  5751. return VK_SUCCESS;
  5752. }
  5753. // Terminator functions for the Instance chain
  5754. // All named terminator_<Vulkan API name>
// Terminator for vkCreateInstance: iterates every scanned ICD, creating one
// ICD-side VkInstance per driver that accepts the (filtered) create info.
// Layer names are stripped (ICDs never see layers) and the app's extension
// list is re-filtered per ICD to only those the ICD advertises. A per-ICD
// failure removes that ICD's terminator node and continues with the next;
// only out-of-host-memory aborts the whole call. Succeeds if at least one
// ICD instance was created; otherwise returns VK_ERROR_INCOMPATIBLE_DRIVER.
// On failure, every ICD instance created so far is destroyed.
VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    struct loader_icd_term *icd_term;
    VkExtensionProperties *prop;
    char **filtered_extension_names = NULL;
    VkInstanceCreateInfo icd_create_info;
    VkResult res = VK_SUCCESS;
    bool one_icd_successful = false;
    // *pInstance here is the loader_instance the trampoline allocated, not a
    // real ICD handle.
    struct loader_instance *ptr_instance = (struct loader_instance *)*pInstance;
    memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info));
    icd_create_info.enabledLayerCount = 0;
    icd_create_info.ppEnabledLayerNames = NULL;
    // NOTE: Need to filter the extensions to only those supported by the ICD.
    // No ICD will advertise support for layers. An ICD library could
    // support a layer, but it would be independent of the actual ICD,
    // just in the same library.
    filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *));
    if (!filtered_extension_names) {
        loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "terminator_CreateInstance: Failed create extension name array for %d extensions",
                   pCreateInfo->enabledExtensionCount);
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    icd_create_info.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
    for (uint32_t i = 0; i < ptr_instance->icd_tramp_list.count; i++) {
        icd_term = loader_icd_add(ptr_instance, &ptr_instance->icd_tramp_list.scanned_list[i]);
        if (NULL == icd_term) {
            loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                       "terminator_CreateInstance: Failed to add ICD %d to ICD trampoline list.", i);
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        // If any error happens after here, we need to remove the ICD from the list,
        // because we've already added it, but haven't validated it
        // Make sure that we reset the pApplicationInfo so we don't get an old pointer
        icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo;
        icd_create_info.enabledExtensionCount = 0;
        struct loader_extension_list icd_exts;
        loader_log(ptr_instance, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Build ICD instance extension list");
        // traverse scanned icd list adding non-duplicate extensions to the list
        res = loader_init_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
        if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
            // If out of memory, bail immediately.
            goto out;
        } else if (VK_SUCCESS != res) {
            // Something bad happened with this ICD, so free it and try the
            // next.
            ptr_instance->icd_terms = icd_term->next;
            icd_term->next = NULL;
            loader_icd_destroy(ptr_instance, icd_term, pAllocator);
            continue;
        }
        res = loader_add_instance_extensions(ptr_instance, icd_term->scanned_icd->EnumerateInstanceExtensionProperties,
                                             icd_term->scanned_icd->lib_name, &icd_exts);
        if (VK_SUCCESS != res) {
            loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
            if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
                // If out of memory, bail immediately.
                goto out;
            } else {
                // Something bad happened with this ICD, so free it and try the next.
                ptr_instance->icd_terms = icd_term->next;
                icd_term->next = NULL;
                loader_icd_destroy(ptr_instance, icd_term, pAllocator);
                continue;
            }
        }
        // Keep only the app-requested extensions this ICD actually supports.
        for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
            prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[j], &icd_exts);
            if (prop) {
                filtered_extension_names[icd_create_info.enabledExtensionCount] = (char *)pCreateInfo->ppEnabledExtensionNames[j];
                icd_create_info.enabledExtensionCount++;
            }
        }
        loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
        // Get the driver version from vkEnumerateInstanceVersion
        uint32_t icd_version = VK_API_VERSION_1_0;
        VkResult icd_result = VK_SUCCESS;
        if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
            PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version = (PFN_vkEnumerateInstanceVersion)
                icd_term->scanned_icd->GetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
            if (icd_enumerate_instance_version != NULL) {
                icd_result = icd_enumerate_instance_version(&icd_version);
                if (icd_result != VK_SUCCESS) {
                    icd_version = VK_API_VERSION_1_0;
                    loader_log(ptr_instance, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "terminator_CreateInstance: ICD \"%s\" "
                               "vkEnumerateInstanceVersion returned error. The ICD will be treated as a 1.0 ICD",
                               icd_term->scanned_icd->lib_name);
                }
            }
        }
        // Create an instance, substituting the version to 1.0 if necessary
        // (a 1.0 ICD may refuse an apiVersion newer than what it supports).
        VkApplicationInfo icd_app_info;
        uint32_t icd_version_nopatch = VK_MAKE_VERSION(VK_VERSION_MAJOR(icd_version), VK_VERSION_MINOR(icd_version), 0);
        uint32_t requested_version = pCreateInfo == NULL || pCreateInfo->pApplicationInfo == NULL ? VK_API_VERSION_1_0 : pCreateInfo->pApplicationInfo->apiVersion;
        if ((requested_version != 0) && (icd_version_nopatch == VK_API_VERSION_1_0)) {
            if (icd_create_info.pApplicationInfo == NULL) {
                memset(&icd_app_info, 0, sizeof(icd_app_info));
            } else {
                memcpy(&icd_app_info, icd_create_info.pApplicationInfo, sizeof(icd_app_info));
            }
            icd_app_info.apiVersion = icd_version;
            icd_create_info.pApplicationInfo = &icd_app_info;
        }
        icd_result = ptr_instance->icd_tramp_list.scanned_list[i].CreateInstance(&icd_create_info, pAllocator, &(icd_term->instance));
        if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_result) {
            // If out of memory, bail immediately.
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        } else if (VK_SUCCESS != icd_result) {
            loader_log(ptr_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                       "terminator_CreateInstance: Failed to CreateInstance in "
                       "ICD %d. Skipping ICD.",
                       i);
            ptr_instance->icd_terms = icd_term->next;
            icd_term->next = NULL;
            loader_icd_destroy(ptr_instance, icd_term, pAllocator);
            continue;
        }
        if (!loader_icd_init_entries(icd_term, icd_term->instance,
                                     ptr_instance->icd_tramp_list.scanned_list[i].GetInstanceProcAddr)) {
            loader_log(ptr_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                       "terminator_CreateInstance: Failed to CreateInstance and find "
                       "entrypoints with ICD. Skipping ICD.");
            ptr_instance->icd_terms = icd_term->next;
            icd_term->next = NULL;
            loader_icd_destroy(ptr_instance, icd_term, pAllocator);
            continue;
        }
        // If we made it this far, at least one ICD was successful
        one_icd_successful = true;
    }
    // If no ICDs were added to instance list and res is unchanged from it's initial value, the loader was unable to
    // find a suitable ICD.
    if (VK_SUCCESS == res && (ptr_instance->icd_terms == NULL || !one_icd_successful)) {
        res = VK_ERROR_INCOMPATIBLE_DRIVER;
    }
out:
    if (VK_SUCCESS != res) {
        // Unwind: destroy every ICD instance created so far and free the
        // terminator list nodes.
        while (NULL != ptr_instance->icd_terms) {
            icd_term = ptr_instance->icd_terms;
            ptr_instance->icd_terms = icd_term->next;
            if (NULL != icd_term->instance) {
                icd_term->dispatch.DestroyInstance(icd_term->instance, pAllocator);
            }
            loader_icd_destroy(ptr_instance, icd_term, pAllocator);
        }
    }
    return res;
}
// Terminator for vkDestroyInstance: unlinks this instance from the loader's
// global instance list, destroys each ICD's VkInstance and frees its
// terminator node, then releases the instance's layer list, scanned-ICD
// list, extension list, cached terminator-side physical devices / device
// groups, and the device/phys-dev extension dispatch tables.
VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    struct loader_instance *ptr_instance = loader_instance(instance);
    if (NULL == ptr_instance) {
        return;
    }
    struct loader_icd_term *icd_terms = ptr_instance->icd_terms;
    struct loader_icd_term *next_icd_term;
    // Remove this instance from the list of instances:
    struct loader_instance *prev = NULL;
    struct loader_instance *next = loader.instances;
    while (next != NULL) {
        if (next == ptr_instance) {
            // Remove this instance from the list:
            if (prev)
                prev->next = next->next;
            else
                loader.instances = next->next;
            break;
        }
        prev = next;
        next = next->next;
    }
    // Destroy every ICD's instance and free its terminator node. The node's
    // handle is cleared first so no stale handle survives the free.
    while (NULL != icd_terms) {
        if (icd_terms->instance) {
            icd_terms->dispatch.DestroyInstance(icd_terms->instance, pAllocator);
        }
        next_icd_term = icd_terms->next;
        icd_terms->instance = VK_NULL_HANDLE;
        loader_icd_destroy(ptr_instance, icd_terms, pAllocator);
        icd_terms = next_icd_term;
    }
    loaderDeleteLayerListAndProperties(ptr_instance, &ptr_instance->instance_layer_list);
    loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_tramp_list);
    loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list);
    // Free the cached terminator-side physical device array (each element was
    // heap-allocated individually).
    if (NULL != ptr_instance->phys_devs_term) {
        for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
            loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term[i]);
        }
        loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term);
    }
    // Same for the cached physical device groups.
    if (NULL != ptr_instance->phys_dev_groups_term) {
        for (uint32_t i = 0; i < ptr_instance->phys_dev_group_count_term; i++) {
            loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term[i]);
        }
        loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term);
    }
    loader_free_dev_ext_table(ptr_instance);
    loader_free_phys_dev_ext_table(ptr_instance);
}
  5955. VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
  5956. const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
  5957. VkResult res = VK_SUCCESS;
  5958. struct loader_physical_device_term *phys_dev_term;
  5959. phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  5960. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  5961. struct loader_device *dev = (struct loader_device *)*pDevice;
  5962. PFN_vkCreateDevice fpCreateDevice = icd_term->dispatch.CreateDevice;
  5963. struct loader_extension_list icd_exts;
  5964. VkBaseOutStructure *caller_dgci_container = NULL;
  5965. VkDeviceGroupDeviceCreateInfoKHR *caller_dgci = NULL;
  5966. dev->phys_dev_term = phys_dev_term;
  5967. icd_exts.list = NULL;
  5968. if (fpCreateDevice == NULL) {
  5969. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  5970. "terminator_CreateDevice: No vkCreateDevice command exposed "
  5971. "by ICD %s",
  5972. icd_term->scanned_icd->lib_name);
  5973. res = VK_ERROR_INITIALIZATION_FAILED;
  5974. goto out;
  5975. }
  5976. VkDeviceCreateInfo localCreateInfo;
  5977. memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo));
  5978. // NOTE: Need to filter the extensions to only those supported by the ICD.
  5979. // No ICD will advertise support for layers. An ICD library could support a layer,
  5980. // but it would be independent of the actual ICD, just in the same library.
  5981. char **filtered_extension_names = NULL;
  5982. if (0 < pCreateInfo->enabledExtensionCount) {
  5983. filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *));
  5984. if (NULL == filtered_extension_names) {
  5985. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  5986. "terminator_CreateDevice: Failed to create extension name "
  5987. "storage for %d extensions",
  5988. pCreateInfo->enabledExtensionCount);
  5989. return VK_ERROR_OUT_OF_HOST_MEMORY;
  5990. }
  5991. }
  5992. localCreateInfo.enabledLayerCount = 0;
  5993. localCreateInfo.ppEnabledLayerNames = NULL;
  5994. localCreateInfo.enabledExtensionCount = 0;
  5995. localCreateInfo.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
  5996. // Get the physical device (ICD) extensions
  5997. res = loader_init_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
  5998. if (VK_SUCCESS != res) {
  5999. goto out;
  6000. }
  6001. res = loader_add_device_extensions(icd_term->this_instance, icd_term->dispatch.EnumerateDeviceExtensionProperties,
  6002. phys_dev_term->phys_dev, icd_term->scanned_icd->lib_name, &icd_exts);
  6003. if (res != VK_SUCCESS) {
  6004. goto out;
  6005. }
  6006. for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
  6007. const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
  6008. VkExtensionProperties *prop = get_extension_property(extension_name, &icd_exts);
  6009. if (prop) {
  6010. filtered_extension_names[localCreateInfo.enabledExtensionCount] = (char *)extension_name;
  6011. localCreateInfo.enabledExtensionCount++;
  6012. } else {
  6013. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
  6014. "vkCreateDevice extension %s not available for "
  6015. "devices associated with ICD %s",
  6016. extension_name, icd_term->scanned_icd->lib_name);
  6017. }
  6018. }
  6019. // Before we continue, If KHX_device_group is the list of enabled and viable extensions, then we then need to look for the
  6020. // corresponding VkDeviceGroupDeviceCreateInfo struct in the device list and replace all the physical device values (which
  6021. // are really loader physical device terminator values) with the ICD versions.
  6022. //if (icd_term->this_instance->enabled_known_extensions.khr_device_group_creation == 1) {
  6023. {
  6024. VkBaseOutStructure *pNext = (VkBaseOutStructure *)localCreateInfo.pNext;
  6025. VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&localCreateInfo;
  6026. while (NULL != pNext) {
  6027. if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
  6028. VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
  6029. if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
  6030. VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
  6031. VkPhysicalDevice *phys_dev_array = NULL;
  6032. if (NULL == temp_struct) {
  6033. return VK_ERROR_OUT_OF_HOST_MEMORY;
  6034. }
  6035. memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
  6036. phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
  6037. if (NULL == phys_dev_array) {
  6038. return VK_ERROR_OUT_OF_HOST_MEMORY;
  6039. }
  6040. // Before calling down, replace the incoming physical device values (which are really loader terminator
  6041. // physical devices) with the ICDs physical device values.
  6042. struct loader_physical_device_term *cur_term;
  6043. for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
  6044. cur_term = (struct loader_physical_device_term *)cur_struct->pPhysicalDevices[phys_dev];
  6045. phys_dev_array[phys_dev] = cur_term->phys_dev;
  6046. }
  6047. temp_struct->pPhysicalDevices = phys_dev_array;
  6048. // Keep track of pointers to restore pNext chain before returning
  6049. caller_dgci_container = pPrev;
  6050. caller_dgci = cur_struct;
  6051. // Replace the old struct in the pNext chain with this one.
  6052. pPrev->pNext = (VkBaseOutStructure *)temp_struct;
  6053. pNext = (VkBaseOutStructure *)temp_struct;
  6054. }
  6055. break;
  6056. }
  6057. pPrev = pNext;
  6058. pNext = pNext->pNext;
  6059. }
  6060. }
  6061. // Handle loader emulation for structs that are not supported by the ICD:
  6062. // Presently, the emulation leaves the pNext chain alone. This means that the ICD will receive items in the chain which
  6063. // are not recognized by the ICD. If this causes the ICD to fail, then the items would have to be removed here. The current
  6064. // implementation does not remove them because copying the pNext chain would be impossible if the loader does not recognize
  6065. // the any of the struct types, as the loader would not know the size to allocate and copy.
  6066. //if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
  6067. {
  6068. const void *pNext = localCreateInfo.pNext;
  6069. while (pNext != NULL) {
  6070. switch (*(VkStructureType *)pNext) {
  6071. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
  6072. const VkPhysicalDeviceFeatures2KHR *features = pNext;
  6073. if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
  6074. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  6075. "vkCreateDevice: Emulating handling of VkPhysicalDeviceFeatures2 in pNext chain for ICD \"%s\"",
  6076. icd_term->scanned_icd->lib_name);
  6077. // Verify that VK_KHR_get_physical_device_properties2 is enabled
  6078. if (icd_term->this_instance->enabled_known_extensions.khr_get_physical_device_properties2) {
  6079. localCreateInfo.pEnabledFeatures = &features->features;
  6080. }
  6081. }
  6082. // Leave this item in the pNext chain for now
  6083. pNext = features->pNext;
  6084. break;
  6085. }
  6086. case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO: {
  6087. const VkDeviceGroupDeviceCreateInfoKHR *group_info = pNext;
  6088. if (icd_term->dispatch.EnumeratePhysicalDeviceGroups == NULL && icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR == NULL) {
  6089. loader_log(
  6090. icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  6091. "vkCreateDevice: Emulating handling of VkPhysicalDeviceGroupProperties in pNext chain for ICD \"%s\"",
  6092. icd_term->scanned_icd->lib_name);
  6093. // The group must contain only this one device, since physical device groups aren't actually supported
  6094. if (group_info->physicalDeviceCount != 1) {
  6095. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6096. "vkCreateDevice: Emulation failed to create device from device group info");
  6097. res = VK_ERROR_INITIALIZATION_FAILED;
  6098. goto out;
  6099. }
  6100. }
  6101. // Nothing needs to be done here because we're leaving the item in the pNext chain and because the spec states
  6102. // that the physicalDevice argument must be included in the device group, and we've already checked that it is
  6103. pNext = group_info->pNext;
  6104. break;
  6105. }
  6106. // Multiview properties are also allowed, but since VK_KHX_multiview is a device extension, we'll just let the ICD
  6107. // handle that error when the user enables the extension here
  6108. default: {
  6109. const VkBaseInStructure *header = pNext;
  6110. pNext = header->pNext;
  6111. break;
  6112. }
  6113. }
  6114. }
  6115. }
  6116. // Every extension that has a loader-defined terminator needs to be marked as enabled or disabled so that we know whether or
  6117. // not to return that terminator when vkGetDeviceProcAddr is called
  6118. for (uint32_t i = 0; i < localCreateInfo.enabledExtensionCount; ++i) {
  6119. if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
  6120. dev->extensions.khr_swapchain_enabled = true;
  6121. } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME)) {
  6122. dev->extensions.khr_display_swapchain_enabled = true;
  6123. } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
  6124. dev->extensions.khr_device_group_enabled = true;
  6125. } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
  6126. dev->extensions.ext_debug_marker_enabled = true;
  6127. } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], "VK_EXT_full_screen_exclusive")) {
  6128. dev->extensions.ext_full_screen_exclusive_enabled = true;
  6129. }
  6130. }
  6131. dev->extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_known_extensions.ext_debug_utils;
  6132. if (!dev->extensions.khr_device_group_enabled) {
  6133. VkPhysicalDeviceProperties properties;
  6134. icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &properties);
  6135. if (properties.apiVersion >= VK_API_VERSION_1_1) {
  6136. dev->extensions.khr_device_group_enabled = true;
  6137. }
  6138. }
  6139. res = fpCreateDevice(phys_dev_term->phys_dev, &localCreateInfo, pAllocator, &dev->icd_device);
  6140. if (res != VK_SUCCESS) {
  6141. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6142. "terminator_CreateDevice: Failed in ICD %s vkCreateDevice"
  6143. "call",
  6144. icd_term->scanned_icd->lib_name);
  6145. goto out;
  6146. }
  6147. *pDevice = dev->icd_device;
  6148. loader_add_logical_device(icd_term->this_instance, icd_term, dev);
  6149. // Init dispatch pointer in new device object
  6150. loader_init_dispatch(*pDevice, &dev->loader_dispatch);
  6151. out:
  6152. if (NULL != icd_exts.list) {
  6153. loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts);
  6154. }
  6155. // Restore pNext pointer to old VkDeviceGroupDeviceCreateInfoKHX
  6156. // in the chain to maintain consistency for the caller.
  6157. if (caller_dgci_container != NULL) {
  6158. caller_dgci_container->pNext = (VkBaseOutStructure *)caller_dgci;
  6159. }
  6160. return res;
  6161. }
  6162. VkResult setupLoaderTrampPhysDevs(VkInstance instance) {
  6163. VkResult res = VK_SUCCESS;
  6164. VkPhysicalDevice *local_phys_devs = NULL;
  6165. struct loader_instance *inst;
  6166. uint32_t total_count = 0;
  6167. struct loader_physical_device_tramp **new_phys_devs = NULL;
  6168. inst = loader_get_instance(instance);
  6169. if (NULL == inst) {
  6170. res = VK_ERROR_INITIALIZATION_FAILED;
  6171. goto out;
  6172. }
  6173. // Query how many GPUs there
  6174. res = inst->disp->layer_inst_disp.EnumeratePhysicalDevices(instance, &total_count, NULL);
  6175. if (res != VK_SUCCESS) {
  6176. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6177. "setupLoaderTrampPhysDevs: Failed during dispatch call "
  6178. "of \'vkEnumeratePhysicalDevices\' to lower layers or "
  6179. "loader to get count.");
  6180. goto out;
  6181. }
  6182. // Really use what the total GPU count is since Optimus and other layers may mess
  6183. // the count up.
  6184. total_count = inst->total_gpu_count;
  6185. // Create an array for the new physical devices, which will be stored
  6186. // in the instance for the trampoline code.
  6187. new_phys_devs = (struct loader_physical_device_tramp **)loader_instance_heap_alloc(
  6188. inst, total_count * sizeof(struct loader_physical_device_tramp *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  6189. if (NULL == new_phys_devs) {
  6190. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6191. "setupLoaderTrampPhysDevs: Failed to allocate new physical device"
  6192. " array of size %d",
  6193. total_count);
  6194. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6195. goto out;
  6196. }
  6197. memset(new_phys_devs, 0, total_count * sizeof(struct loader_physical_device_tramp *));
  6198. // Create a temporary array (on the stack) to keep track of the
  6199. // returned VkPhysicalDevice values.
  6200. local_phys_devs = loader_stack_alloc(sizeof(VkPhysicalDevice) * total_count);
  6201. if (NULL == local_phys_devs) {
  6202. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6203. "setupLoaderTrampPhysDevs: Failed to allocate local "
  6204. "physical device array of size %d",
  6205. total_count);
  6206. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6207. goto out;
  6208. }
  6209. memset(local_phys_devs, 0, sizeof(VkPhysicalDevice) * total_count);
  6210. res = inst->disp->layer_inst_disp.EnumeratePhysicalDevices(instance, &total_count, local_phys_devs);
  6211. if (VK_SUCCESS != res) {
  6212. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6213. "setupLoaderTrampPhysDevs: Failed during dispatch call "
  6214. "of \'vkEnumeratePhysicalDevices\' to lower layers or "
  6215. "loader to get content.");
  6216. goto out;
  6217. }
  6218. // Copy or create everything to fill the new array of physical devices
  6219. for (uint32_t new_idx = 0; new_idx < total_count; new_idx++) {
  6220. // Check if this physical device is already in the old buffer
  6221. for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_tramp; old_idx++) {
  6222. if (local_phys_devs[new_idx] == inst->phys_devs_tramp[old_idx]->phys_dev) {
  6223. new_phys_devs[new_idx] = inst->phys_devs_tramp[old_idx];
  6224. break;
  6225. }
  6226. }
  6227. // If this physical device isn't in the old buffer, create it
  6228. if (NULL == new_phys_devs[new_idx]) {
  6229. new_phys_devs[new_idx] = (struct loader_physical_device_tramp *)loader_instance_heap_alloc(
  6230. inst, sizeof(struct loader_physical_device_tramp), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  6231. if (NULL == new_phys_devs[new_idx]) {
  6232. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6233. "setupLoaderTrampPhysDevs: Failed to allocate "
  6234. "physical device trampoline object %d",
  6235. new_idx);
  6236. total_count = new_idx;
  6237. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6238. goto out;
  6239. }
  6240. // Initialize the new physicalDevice object
  6241. loader_set_dispatch((void *)new_phys_devs[new_idx], inst->disp);
  6242. new_phys_devs[new_idx]->this_instance = inst;
  6243. new_phys_devs[new_idx]->phys_dev = local_phys_devs[new_idx];
  6244. }
  6245. }
  6246. out:
  6247. if (VK_SUCCESS != res) {
  6248. if (NULL != new_phys_devs) {
  6249. for (uint32_t i = 0; i < total_count; i++) {
  6250. loader_instance_heap_free(inst, new_phys_devs[i]);
  6251. }
  6252. loader_instance_heap_free(inst, new_phys_devs);
  6253. }
  6254. total_count = 0;
  6255. } else {
  6256. // Free everything that didn't carry over to the new array of
  6257. // physical devices
  6258. if (NULL != inst->phys_devs_tramp) {
  6259. for (uint32_t i = 0; i < inst->phys_dev_count_tramp; i++) {
  6260. bool found = false;
  6261. for (uint32_t j = 0; j < total_count; j++) {
  6262. if (inst->phys_devs_tramp[i] == new_phys_devs[j]) {
  6263. found = true;
  6264. break;
  6265. }
  6266. }
  6267. if (!found) {
  6268. loader_instance_heap_free(inst, inst->phys_devs_tramp[i]);
  6269. }
  6270. }
  6271. loader_instance_heap_free(inst, inst->phys_devs_tramp);
  6272. }
  6273. // Swap in the new physical device list
  6274. inst->phys_dev_count_tramp = total_count;
  6275. inst->phys_devs_tramp = new_phys_devs;
  6276. }
  6277. return res;
  6278. }
// One entry per (DXGI adapter, ICD) pair produced by ReadSortedPhysicalDevices.
// The physical_devices array is heap-allocated and must be freed by whoever owns
// the containing array (see the cleanup at the end of setupLoaderTermPhysDevs).
struct LoaderSortedPhysicalDevice {
    uint32_t device_count;               // Number of valid entries in physical_devices
    VkPhysicalDevice* physical_devices;  // Devices this ICD exposes for the adapter (heap array)
    uint32_t icd_index;                  // Index of icd_term within the instance's ICD list
    struct loader_icd_term* icd_term;    // ICD that reported these physical devices
};
  6285. // This function allocates an array in sorted_devices which must be freed by the caller if not null
  6286. VkResult ReadSortedPhysicalDevices(struct loader_instance *inst, struct LoaderSortedPhysicalDevice **sorted_devices, uint32_t* sorted_count)
  6287. {
  6288. VkResult res = VK_SUCCESS;
  6289. #if defined(_WIN32)
  6290. uint32_t sorted_alloc = 0;
  6291. struct loader_icd_term *icd_term = NULL;
  6292. IDXGIFactory6* dxgi_factory = NULL;
  6293. HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, (void **)&dxgi_factory);
  6294. if (hres != S_OK) {
  6295. loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Failed to create DXGI factory 6. Physical devices will not be sorted");
  6296. }
  6297. else {
  6298. sorted_alloc = 16;
  6299. *sorted_devices = loader_instance_heap_alloc(inst, sorted_alloc * sizeof(struct LoaderSortedPhysicalDevice), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
  6300. if (*sorted_devices == NULL) {
  6301. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6302. goto out;
  6303. }
  6304. memset(*sorted_devices, 0, sorted_alloc * sizeof(struct LoaderSortedPhysicalDevice));
  6305. *sorted_count = 0;
  6306. for (uint32_t i = 0; ; ++i) {
  6307. IDXGIAdapter1* adapter;
  6308. hres = dxgi_factory->lpVtbl->EnumAdapterByGpuPreference(dxgi_factory, i, DXGI_GPU_PREFERENCE_UNSPECIFIED, &IID_IDXGIAdapter1, (void **)&adapter);
  6309. if (hres == DXGI_ERROR_NOT_FOUND) {
  6310. break; // No more adapters
  6311. }
  6312. else if (hres != S_OK) {
  6313. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "Failed to enumerate adapters by GPU preference at index %u. This adapter will not be sorted", i);
  6314. break;
  6315. }
  6316. DXGI_ADAPTER_DESC1 description;
  6317. hres = adapter->lpVtbl->GetDesc1(adapter, &description);
  6318. if (hres != S_OK) {
  6319. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "Failed to get adapter LUID index %u. This adapter will not be sorted", i);
  6320. continue;
  6321. }
  6322. if (sorted_alloc <= i) {
  6323. uint32_t old_size = sorted_alloc * sizeof(struct LoaderSortedPhysicalDevice);
  6324. *sorted_devices = loader_instance_heap_realloc(inst, *sorted_devices, old_size, 2 * old_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
  6325. if (*sorted_devices == NULL) {
  6326. adapter->lpVtbl->Release(adapter);
  6327. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6328. goto out;
  6329. }
  6330. sorted_alloc *= 2;
  6331. }
  6332. struct LoaderSortedPhysicalDevice *sorted_array = *sorted_devices;
  6333. sorted_array[*sorted_count].device_count = 0;
  6334. sorted_array[*sorted_count].physical_devices = NULL;
  6335. //*sorted_count = i;
  6336. icd_term = inst->icd_terms;
  6337. for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
  6338. // This is the new behavior, which cannot be run unless the ICD provides EnumerateAdapterPhysicalDevices
  6339. if (icd_term->scanned_icd->EnumerateAdapterPhysicalDevices == NULL) {
  6340. continue;
  6341. }
  6342. uint32_t count;
  6343. VkResult vkres = icd_term->scanned_icd->EnumerateAdapterPhysicalDevices(icd_term->instance, description.AdapterLuid, &count, NULL);
  6344. if (vkres == VK_ERROR_INCOMPATIBLE_DRIVER) {
  6345. continue; // This driver doesn't support the adapter
  6346. } else if (vkres == VK_ERROR_OUT_OF_HOST_MEMORY) {
  6347. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6348. goto out;
  6349. } else if (vkres != VK_SUCCESS) {
  6350. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "Failed to convert DXGI adapter into Vulkan physical device with unexpected error code");
  6351. continue;
  6352. }
  6353. // Get the actual physical devices
  6354. if (0 != count)
  6355. {
  6356. do {
  6357. sorted_array[*sorted_count].physical_devices = loader_instance_heap_realloc(inst, sorted_array[*sorted_count].physical_devices, sorted_array[*sorted_count].device_count * sizeof(VkPhysicalDevice), count * sizeof(VkPhysicalDevice), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
  6358. if (sorted_array[*sorted_count].physical_devices == NULL) {
  6359. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6360. break;
  6361. }
  6362. sorted_array[*sorted_count].device_count = count;
  6363. } while ((vkres = icd_term->scanned_icd->EnumerateAdapterPhysicalDevices(icd_term->instance, description.AdapterLuid, &count, sorted_array[*sorted_count].physical_devices)) == VK_INCOMPLETE);
  6364. }
  6365. if (vkres != VK_SUCCESS) {
  6366. loader_instance_heap_free(inst, sorted_array[*sorted_count].physical_devices);
  6367. sorted_array[*sorted_count].physical_devices = NULL;
  6368. if (vkres == VK_ERROR_OUT_OF_HOST_MEMORY) {
  6369. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6370. goto out;
  6371. } else {
  6372. loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  6373. "Failed to convert DXGI adapter into Vulkan physical device");
  6374. continue;
  6375. }
  6376. }
  6377. inst->total_gpu_count += (sorted_array[*sorted_count].device_count = count);
  6378. sorted_array[*sorted_count].icd_index = icd_idx;
  6379. sorted_array[*sorted_count].icd_term = icd_term;
  6380. ++(*sorted_count);
  6381. }
  6382. adapter->lpVtbl->Release(adapter);
  6383. }
  6384. dxgi_factory->lpVtbl->Release(dxgi_factory);
  6385. }
  6386. out:
  6387. #endif
  6388. if (*sorted_count == 0 && *sorted_devices != NULL) {
  6389. loader_instance_heap_free(inst, *sorted_devices);
  6390. *sorted_devices = NULL;
  6391. }
  6392. return res;
  6393. }
  6394. VkResult setupLoaderTermPhysDevs(struct loader_instance *inst) {
  6395. VkResult res = VK_SUCCESS;
  6396. struct loader_icd_term *icd_term;
  6397. struct loader_phys_dev_per_icd *icd_phys_dev_array = NULL;
  6398. struct loader_physical_device_term **new_phys_devs = NULL;
  6399. struct LoaderSortedPhysicalDevice *sorted_phys_dev_array = NULL;
  6400. uint32_t sorted_count = 0;
  6401. inst->total_gpu_count = 0;
  6402. // Allocate something to store the physical device characteristics
  6403. // that we read from each ICD.
  6404. icd_phys_dev_array =
  6405. (struct loader_phys_dev_per_icd *)loader_stack_alloc(sizeof(struct loader_phys_dev_per_icd) * inst->total_icd_count);
  6406. if (NULL == icd_phys_dev_array) {
  6407. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6408. "setupLoaderTermPhysDevs: Failed to allocate temporary "
  6409. "ICD Physical device info array of size %d",
  6410. inst->total_gpu_count);
  6411. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6412. goto out;
  6413. }
  6414. memset(icd_phys_dev_array, 0, sizeof(struct loader_phys_dev_per_icd) * inst->total_icd_count);
  6415. // Get the physical devices supported by platform sorting mechanism into a separate list
  6416. res = ReadSortedPhysicalDevices(inst, &sorted_phys_dev_array, &sorted_count);
  6417. if (VK_SUCCESS != res) {
  6418. goto out;
  6419. }
  6420. // For each ICD, query the number of physical devices, and then get an
  6421. // internal value for those physical devices.
  6422. icd_term = inst->icd_terms;
  6423. for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
  6424. icd_phys_dev_array[icd_idx].count = 0;
  6425. icd_phys_dev_array[icd_idx].phys_devs = NULL;
  6426. icd_phys_dev_array[icd_idx].this_icd_term = NULL;
  6427. // This is the legacy behavior which should be skipped if EnumerateAdapterPhysicalDevices is available
  6428. // and we successfully enumerated sorted adapters using ReadSortedPhysicalDevices.
  6429. #if defined(VK_USE_PLATFORM_WIN32_KHR)
  6430. if (sorted_count && icd_term->scanned_icd->EnumerateAdapterPhysicalDevices != NULL) {
  6431. continue;
  6432. }
  6433. #endif
  6434. res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[icd_idx].count, NULL);
  6435. if (VK_SUCCESS != res) {
  6436. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6437. "setupLoaderTermPhysDevs: Call to "
  6438. "ICD %d's \'vkEnumeratePhysicalDevices\' failed with"
  6439. " error 0x%08x",
  6440. icd_idx, res);
  6441. goto out;
  6442. }
  6443. icd_phys_dev_array[icd_idx].phys_devs =
  6444. (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[icd_idx].count * sizeof(VkPhysicalDevice));
  6445. if (NULL == icd_phys_dev_array[icd_idx].phys_devs) {
  6446. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6447. "setupLoaderTermPhysDevs: Failed to allocate temporary "
  6448. "ICD Physical device array for ICD %d of size %d",
  6449. icd_idx, inst->total_gpu_count);
  6450. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6451. goto out;
  6452. }
  6453. res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[icd_idx].count),
  6454. icd_phys_dev_array[icd_idx].phys_devs);
  6455. if (VK_SUCCESS != res) {
  6456. goto out;
  6457. }
  6458. inst->total_gpu_count += icd_phys_dev_array[icd_idx].count;
  6459. icd_phys_dev_array[icd_idx].this_icd_term = icd_term;
  6460. }
  6461. if (0 == inst->total_gpu_count) {
  6462. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6463. "setupLoaderTermPhysDevs: Failed to detect any valid"
  6464. " GPUs in the current config");
  6465. res = VK_ERROR_INITIALIZATION_FAILED;
  6466. goto out;
  6467. }
  6468. new_phys_devs = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term *) * inst->total_gpu_count,
  6469. VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  6470. if (NULL == new_phys_devs) {
  6471. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6472. "setupLoaderTermPhysDevs: Failed to allocate new physical"
  6473. " device array of size %d",
  6474. inst->total_gpu_count);
  6475. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6476. goto out;
  6477. }
  6478. memset(new_phys_devs, 0, sizeof(struct loader_physical_device_term *) * inst->total_gpu_count);
  6479. // Copy or create everything to fill the new array of physical devices
  6480. uint32_t idx = 0;
  6481. #if defined(_WIN32)
  6482. // Copy over everything found through sorted enumeration
  6483. for (uint32_t i = 0; i < sorted_count; ++i) {
  6484. for (uint32_t j = 0; j < sorted_phys_dev_array[i].device_count; ++j) {
  6485. // Check if this physical device is already in the old buffer
  6486. if (NULL != inst->phys_devs_term) {
  6487. for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
  6488. if (sorted_phys_dev_array[i].physical_devices[j] == inst->phys_devs_term[old_idx]->phys_dev) {
  6489. new_phys_devs[idx] = inst->phys_devs_term[old_idx];
  6490. break;
  6491. }
  6492. }
  6493. }
  6494. // If this physical device isn't in the old buffer, then we need to create it.
  6495. if (NULL == new_phys_devs[idx]) {
  6496. new_phys_devs[idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term),
  6497. VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  6498. if (NULL == new_phys_devs[idx]) {
  6499. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6500. "setupLoaderTermPhysDevs: Failed to allocate "
  6501. "physical device terminator object %d",
  6502. idx);
  6503. inst->total_gpu_count = idx;
  6504. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6505. goto out;
  6506. }
  6507. loader_set_dispatch((void *)new_phys_devs[idx], inst->disp);
  6508. new_phys_devs[idx]->this_icd_term = sorted_phys_dev_array[i].icd_term;
  6509. new_phys_devs[idx]->icd_index = (uint8_t)(sorted_phys_dev_array[i].icd_index);
  6510. new_phys_devs[idx]->phys_dev = sorted_phys_dev_array[i].physical_devices[j];
  6511. }
  6512. // Increment the count of new physical devices
  6513. idx++;
  6514. }
  6515. }
  6516. #endif
  6517. // Copy over everything found through EnumeratePhysicalDevices
  6518. for (uint32_t icd_idx = 0; icd_idx < inst->total_icd_count; icd_idx++) {
  6519. for (uint32_t pd_idx = 0; pd_idx < icd_phys_dev_array[icd_idx].count; pd_idx++) {
  6520. // Check if this physical device is already in the old buffer
  6521. if (NULL != inst->phys_devs_term) {
  6522. for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
  6523. if (icd_phys_dev_array[icd_idx].phys_devs[pd_idx] == inst->phys_devs_term[old_idx]->phys_dev) {
  6524. new_phys_devs[idx] = inst->phys_devs_term[old_idx];
  6525. break;
  6526. }
  6527. }
  6528. }
  6529. // If this physical device isn't in the old buffer, then we
  6530. // need to create it.
  6531. if (NULL == new_phys_devs[idx]) {
  6532. new_phys_devs[idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term),
  6533. VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  6534. if (NULL == new_phys_devs[idx]) {
  6535. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6536. "setupLoaderTermPhysDevs: Failed to allocate "
  6537. "physical device terminator object %d",
  6538. idx);
  6539. inst->total_gpu_count = idx;
  6540. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6541. goto out;
  6542. }
  6543. loader_set_dispatch((void *)new_phys_devs[idx], inst->disp);
  6544. new_phys_devs[idx]->this_icd_term = icd_phys_dev_array[icd_idx].this_icd_term;
  6545. new_phys_devs[idx]->icd_index = (uint8_t)(icd_idx);
  6546. new_phys_devs[idx]->phys_dev = icd_phys_dev_array[icd_idx].phys_devs[pd_idx];
  6547. }
  6548. idx++;
  6549. }
  6550. }
  6551. out:
  6552. if (VK_SUCCESS != res) {
  6553. if (NULL != new_phys_devs) {
  6554. // We've encountered an error, so we should free the new buffers.
  6555. for (uint32_t i = 0; i < inst->total_gpu_count; i++) {
  6556. loader_instance_heap_free(inst, new_phys_devs[i]);
  6557. }
  6558. loader_instance_heap_free(inst, new_phys_devs);
  6559. }
  6560. inst->total_gpu_count = 0;
  6561. } else {
  6562. // Free everything that didn't carry over to the new array of
  6563. // physical devices. Everything else will have been copied over
  6564. // to the new array.
  6565. if (NULL != inst->phys_devs_term) {
  6566. for (uint32_t cur_pd = 0; cur_pd < inst->phys_dev_count_term; cur_pd++) {
  6567. bool found = false;
  6568. for (uint32_t new_pd_idx = 0; new_pd_idx < inst->total_gpu_count; new_pd_idx++) {
  6569. if (inst->phys_devs_term[cur_pd] == new_phys_devs[new_pd_idx]) {
  6570. found = true;
  6571. break;
  6572. }
  6573. }
  6574. if (!found) {
  6575. loader_instance_heap_free(inst, inst->phys_devs_term[cur_pd]);
  6576. }
  6577. }
  6578. loader_instance_heap_free(inst, inst->phys_devs_term);
  6579. }
  6580. // Swap out old and new devices list
  6581. inst->phys_dev_count_term = inst->total_gpu_count;
  6582. inst->phys_devs_term = new_phys_devs;
  6583. }
  6584. if (sorted_phys_dev_array != NULL) {
  6585. for (uint32_t i = 0; i < sorted_count; ++i) {
  6586. if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) {
  6587. loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices);
  6588. }
  6589. }
  6590. loader_instance_heap_free(inst, sorted_phys_dev_array);
  6591. }
  6592. return res;
  6593. }
  6594. VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
  6595. VkPhysicalDevice *pPhysicalDevices) {
  6596. struct loader_instance *inst = (struct loader_instance *)instance;
  6597. VkResult res = VK_SUCCESS;
  6598. // Always call the setup loader terminator physical devices because they may
  6599. // have changed at any point.
  6600. res = setupLoaderTermPhysDevs(inst);
  6601. if (VK_SUCCESS != res) {
  6602. goto out;
  6603. }
  6604. uint32_t copy_count = inst->total_gpu_count;
  6605. if (NULL != pPhysicalDevices) {
  6606. if (copy_count > *pPhysicalDeviceCount) {
  6607. copy_count = *pPhysicalDeviceCount;
  6608. res = VK_INCOMPLETE;
  6609. }
  6610. for (uint32_t i = 0; i < copy_count; i++) {
  6611. pPhysicalDevices[i] = (VkPhysicalDevice)inst->phys_devs_term[i];
  6612. }
  6613. }
  6614. *pPhysicalDeviceCount = copy_count;
  6615. out:
  6616. return res;
  6617. }
  6618. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
  6619. VkPhysicalDeviceProperties *pProperties) {
  6620. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  6621. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  6622. if (NULL != icd_term->dispatch.GetPhysicalDeviceProperties) {
  6623. icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, pProperties);
  6624. }
  6625. }
  6626. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
  6627. uint32_t *pQueueFamilyPropertyCount,
  6628. VkQueueFamilyProperties *pProperties) {
  6629. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  6630. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  6631. if (NULL != icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties) {
  6632. icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties(phys_dev_term->phys_dev, pQueueFamilyPropertyCount, pProperties);
  6633. }
  6634. }
  6635. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice,
  6636. VkPhysicalDeviceMemoryProperties *pProperties) {
  6637. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  6638. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  6639. if (NULL != icd_term->dispatch.GetPhysicalDeviceMemoryProperties) {
  6640. icd_term->dispatch.GetPhysicalDeviceMemoryProperties(phys_dev_term->phys_dev, pProperties);
  6641. }
  6642. }
  6643. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
  6644. VkPhysicalDeviceFeatures *pFeatures) {
  6645. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  6646. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  6647. if (NULL != icd_term->dispatch.GetPhysicalDeviceFeatures) {
  6648. icd_term->dispatch.GetPhysicalDeviceFeatures(phys_dev_term->phys_dev, pFeatures);
  6649. }
  6650. }
  6651. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
  6652. VkFormatProperties *pFormatInfo) {
  6653. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  6654. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  6655. if (NULL != icd_term->dispatch.GetPhysicalDeviceFormatProperties) {
  6656. icd_term->dispatch.GetPhysicalDeviceFormatProperties(phys_dev_term->phys_dev, format, pFormatInfo);
  6657. }
  6658. }
  6659. VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
  6660. VkImageType type, VkImageTiling tiling,
  6661. VkImageUsageFlags usage, VkImageCreateFlags flags,
  6662. VkImageFormatProperties *pImageFormatProperties) {
  6663. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  6664. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  6665. if (NULL == icd_term->dispatch.GetPhysicalDeviceImageFormatProperties) {
  6666. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6667. "Encountered the vkEnumerateDeviceLayerProperties "
  6668. "terminator. This means a layer improperly continued.");
  6669. return VK_ERROR_INITIALIZATION_FAILED;
  6670. }
  6671. return icd_term->dispatch.GetPhysicalDeviceImageFormatProperties(phys_dev_term->phys_dev, format, type, tiling, usage, flags,
  6672. pImageFormatProperties);
  6673. }
  6674. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
  6675. VkImageType type, VkSampleCountFlagBits samples,
  6676. VkImageUsageFlags usage, VkImageTiling tiling,
  6677. uint32_t *pNumProperties,
  6678. VkSparseImageFormatProperties *pProperties) {
  6679. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  6680. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  6681. if (NULL != icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties) {
  6682. icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties(phys_dev_term->phys_dev, format, type, samples, usage,
  6683. tiling, pNumProperties, pProperties);
  6684. }
  6685. }
  6686. VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
  6687. const char *pLayerName, uint32_t *pPropertyCount,
  6688. VkExtensionProperties *pProperties) {
  6689. struct loader_physical_device_term *phys_dev_term;
  6690. struct loader_layer_list implicit_layer_list = {0};
  6691. struct loader_extension_list all_exts = {0};
  6692. struct loader_extension_list icd_exts = {0};
  6693. // Any layer or trampoline wrapping should be removed at this point in time can just cast to the expected
  6694. // type for VkPhysicalDevice.
  6695. phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  6696. // if we got here with a non-empty pLayerName, look up the extensions
  6697. // from the json
  6698. if (pLayerName != NULL && strlen(pLayerName) > 0) {
  6699. uint32_t count;
  6700. uint32_t copy_size;
  6701. const struct loader_instance *inst = phys_dev_term->this_icd_term->this_instance;
  6702. struct loader_device_extension_list *dev_ext_list = NULL;
  6703. struct loader_device_extension_list local_ext_list;
  6704. memset(&local_ext_list, 0, sizeof(local_ext_list));
  6705. if (vk_string_validate(MaxLoaderStringLength, pLayerName) == VK_STRING_ERROR_NONE) {
  6706. for (uint32_t i = 0; i < inst->instance_layer_list.count; i++) {
  6707. struct loader_layer_properties *props = &inst->instance_layer_list.list[i];
  6708. if (strcmp(props->info.layerName, pLayerName) == 0) {
  6709. dev_ext_list = &props->device_extension_list;
  6710. }
  6711. }
  6712. count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count;
  6713. if (pProperties == NULL) {
  6714. *pPropertyCount = count;
  6715. loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
  6716. return VK_SUCCESS;
  6717. }
  6718. copy_size = *pPropertyCount < count ? *pPropertyCount : count;
  6719. for (uint32_t i = 0; i < copy_size; i++) {
  6720. memcpy(&pProperties[i], &dev_ext_list->list[i].props, sizeof(VkExtensionProperties));
  6721. }
  6722. *pPropertyCount = copy_size;
  6723. loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
  6724. if (copy_size < count) {
  6725. return VK_INCOMPLETE;
  6726. }
  6727. } else {
  6728. loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6729. "vkEnumerateDeviceExtensionProperties: pLayerName "
  6730. "is too long or is badly formed");
  6731. return VK_ERROR_EXTENSION_NOT_PRESENT;
  6732. }
  6733. return VK_SUCCESS;
  6734. }
  6735. // This case is during the call down the instance chain with pLayerName == NULL
  6736. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  6737. uint32_t icd_ext_count = *pPropertyCount;
  6738. VkExtensionProperties *icd_props_list = pProperties;
  6739. VkResult res;
  6740. if (NULL == icd_props_list) {
  6741. // We need to find the count without duplicates. This requires querying the driver for the names of the extensions.
  6742. // A small amount of storage is then needed to facilitate the de-duplication.
  6743. res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &icd_ext_count, NULL);
  6744. if (res != VK_SUCCESS) {
  6745. goto out;
  6746. }
  6747. icd_props_list = loader_instance_heap_alloc(icd_term->this_instance, sizeof(VkExtensionProperties) * icd_ext_count,
  6748. VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
  6749. if (NULL == icd_props_list) {
  6750. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6751. goto out;
  6752. }
  6753. }
  6754. // Get the available device extension count, and if pProperties is not NULL, the extensions as well
  6755. res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &icd_ext_count, icd_props_list);
  6756. if (res != VK_SUCCESS) {
  6757. goto out;
  6758. }
  6759. if (!loaderInitLayerList(icd_term->this_instance, &implicit_layer_list)) {
  6760. res = VK_ERROR_OUT_OF_HOST_MEMORY;
  6761. goto out;
  6762. }
  6763. loaderAddImplicitLayers(icd_term->this_instance, &implicit_layer_list, NULL, &icd_term->this_instance->instance_layer_list);
  6764. // Initialize dev_extension list within the physicalDevice object
  6765. res = loader_init_device_extensions(icd_term->this_instance, phys_dev_term, icd_ext_count, icd_props_list, &icd_exts);
  6766. if (res != VK_SUCCESS) {
  6767. goto out;
  6768. }
  6769. // We need to determine which implicit layers are active, and then add their extensions. This can't be cached as
  6770. // it depends on results of environment variables (which can change).
  6771. res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, icd_exts.count, icd_exts.list);
  6772. if (res != VK_SUCCESS) {
  6773. goto out;
  6774. }
  6775. loaderAddImplicitLayers(icd_term->this_instance, &implicit_layer_list, NULL, &icd_term->this_instance->instance_layer_list);
  6776. for (uint32_t i = 0; i < implicit_layer_list.count; i++) {
  6777. for (uint32_t j = 0; j < implicit_layer_list.list[i].device_extension_list.count; j++) {
  6778. res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, 1,
  6779. &implicit_layer_list.list[i].device_extension_list.list[j].props);
  6780. if (res != VK_SUCCESS) {
  6781. goto out;
  6782. }
  6783. }
  6784. }
  6785. uint32_t capacity = *pPropertyCount;
  6786. VkExtensionProperties *props = pProperties;
  6787. res = VK_SUCCESS;
  6788. if (NULL != pProperties) {
  6789. for (uint32_t i = 0; i < all_exts.count && i < capacity; i++) {
  6790. props[i] = all_exts.list[i];
  6791. }
  6792. // Wasn't enough space for the extensions, we did partial copy now return VK_INCOMPLETE
  6793. if (capacity < all_exts.count) {
  6794. res = VK_INCOMPLETE;
  6795. } else {
  6796. *pPropertyCount = all_exts.count;
  6797. }
  6798. } else {
  6799. *pPropertyCount = all_exts.count;
  6800. }
  6801. out:
  6802. if (NULL != implicit_layer_list.list) {
  6803. loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&implicit_layer_list);
  6804. }
  6805. if (NULL != all_exts.list) {
  6806. loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&all_exts);
  6807. }
  6808. if (NULL != icd_exts.list) {
  6809. loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts);
  6810. }
  6811. if (NULL == pProperties && NULL != icd_props_list) {
  6812. loader_instance_heap_free(icd_term->this_instance, icd_props_list);
  6813. }
  6814. return res;
  6815. }
  6816. VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
  6817. VkLayerProperties *pProperties) {
  6818. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  6819. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  6820. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  6821. "Encountered the vkEnumerateDeviceLayerProperties "
  6822. "terminator. This means a layer improperly continued.");
  6823. // Should never get here this call isn't dispatched down the chain
  6824. return VK_ERROR_INITIALIZATION_FAILED;
  6825. }
// Validate that 'utf8' is a NUL-terminated, well-formed UTF-8 string of at
// most 'max_length' bytes. Returns a bitmask of VK_STRING_ERROR_* flags:
// VK_STRING_ERROR_NONE on success, VK_STRING_ERROR_NULL_PTR for a NULL
// pointer, VK_STRING_ERROR_LENGTH if no terminator is found within
// max_length bytes, and VK_STRING_ERROR_BAD_DATA for malformed byte
// sequences. Multiple flags may be set.
VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) {
    VkStringErrorFlags result = VK_STRING_ERROR_NONE;
    // Number of continuation bytes expected to follow the current lead byte.
    int num_char_bytes = 0;
    int i, j;
    if (utf8 == NULL) {
        return VK_STRING_ERROR_NULL_PTR;
    }
    for (i = 0; i <= max_length; i++) {
        if (utf8[i] == 0) {
            // Terminator found within bounds - string is valid so far.
            break;
        } else if (i == max_length) {
            // Scanned max_length bytes without finding a terminator.
            result |= VK_STRING_ERROR_LENGTH;
            break;
        } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) {
            // Printable ASCII: a complete single-byte character.
            num_char_bytes = 0;
        } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) {
            // Lead byte of a two-byte sequence (one continuation byte).
            num_char_bytes = 1;
        } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) {
            // Lead byte of a three-byte sequence (two continuation bytes).
            num_char_bytes = 2;
        } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) {
            // Lead byte of a four-byte sequence (three continuation bytes).
            num_char_bytes = 3;
        } else {
            // Not printable ASCII and not a recognized UTF-8 lead byte.
            // NOTE(review): scanning continues; the flag is overwritten (=),
            // not OR-ed, here, unlike the other error paths.
            result = VK_STRING_ERROR_BAD_DATA;
        }
        // Validate the following num_char_bytes of data
        for (j = 0; (j < num_char_bytes) && (i < max_length); j++) {
            if (++i == max_length) {
                // Sequence was cut off by the length limit.
                result |= VK_STRING_ERROR_LENGTH;
                break;
            }
            // Each continuation byte must carry the UTF-8 data-byte marker.
            if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) {
                result |= VK_STRING_ERROR_BAD_DATA;
            }
        }
    }
    return result;
}
  6863. VKAPI_ATTR VkResult VKAPI_CALL
  6864. terminator_EnumerateInstanceVersion(const VkEnumerateInstanceVersionChain *chain, uint32_t* pApiVersion) {
  6865. // NOTE: The Vulkan WG doesn't want us checking pApiVersion for NULL, but instead
  6866. // prefers us crashing.
  6867. *pApiVersion = VK_HEADER_VERSION_COMPLETE;
  6868. return VK_SUCCESS;
  6869. }
  6870. VKAPI_ATTR VkResult VKAPI_CALL
  6871. terminator_EnumerateInstanceExtensionProperties(const VkEnumerateInstanceExtensionPropertiesChain *chain, const char *pLayerName,
  6872. uint32_t *pPropertyCount, VkExtensionProperties *pProperties) {
  6873. struct loader_extension_list *global_ext_list = NULL;
  6874. struct loader_layer_list instance_layers;
  6875. struct loader_extension_list local_ext_list;
  6876. struct loader_icd_tramp_list icd_tramp_list;
  6877. uint32_t copy_size;
  6878. VkResult res = VK_SUCCESS;
  6879. // tls_instance = NULL;
  6880. memset(&local_ext_list, 0, sizeof(local_ext_list));
  6881. memset(&instance_layers, 0, sizeof(instance_layers));
  6882. // Get layer libraries if needed
  6883. if (pLayerName && strlen(pLayerName) != 0) {
  6884. if (vk_string_validate(MaxLoaderStringLength, pLayerName) != VK_STRING_ERROR_NONE) {
  6885. assert(VK_FALSE &&
  6886. "vkEnumerateInstanceExtensionProperties: "
  6887. "pLayerName is too long or is badly formed");
  6888. res = VK_ERROR_EXTENSION_NOT_PRESENT;
  6889. goto out;
  6890. }
  6891. loaderScanForLayers(NULL, &instance_layers);
  6892. for (uint32_t i = 0; i < instance_layers.count; i++) {
  6893. struct loader_layer_properties *props = &instance_layers.list[i];
  6894. if (strcmp(props->info.layerName, pLayerName) == 0) {
  6895. global_ext_list = &props->instance_extension_list;
  6896. break;
  6897. }
  6898. }
  6899. } else {
  6900. // Preload ICD libraries so subsequent calls to EnumerateInstanceExtensionProperties don't have to load them
  6901. loader_preload_icds();
  6902. // Scan/discover all ICD libraries
  6903. memset(&icd_tramp_list, 0, sizeof(icd_tramp_list));
  6904. res = loader_icd_scan(NULL, &icd_tramp_list);
  6905. // EnumerateInstanceExtensionProperties can't return anything other than OOM or VK_ERROR_LAYER_NOT_PRESENT
  6906. if ((VK_SUCCESS != res && icd_tramp_list.count > 0) || res == VK_ERROR_OUT_OF_HOST_MEMORY) {
  6907. goto out;
  6908. }
  6909. // Get extensions from all ICD's, merge so no duplicates
  6910. res = loader_get_icd_loader_instance_extensions(NULL, &icd_tramp_list, &local_ext_list);
  6911. if (VK_SUCCESS != res) {
  6912. goto out;
  6913. }
  6914. loader_scanned_icd_clear(NULL, &icd_tramp_list);
  6915. // Append enabled implicit layers.
  6916. loaderScanForImplicitLayers(NULL, &instance_layers);
  6917. for (uint32_t i = 0; i < instance_layers.count; i++) {
  6918. if (!loaderImplicitLayerIsEnabled(NULL, &instance_layers.list[i])) {
  6919. continue;
  6920. }
  6921. struct loader_extension_list *ext_list = &instance_layers.list[i].instance_extension_list;
  6922. loader_add_to_ext_list(NULL, &local_ext_list, ext_list->count, ext_list->list);
  6923. }
  6924. global_ext_list = &local_ext_list;
  6925. }
  6926. if (global_ext_list == NULL) {
  6927. res = VK_ERROR_LAYER_NOT_PRESENT;
  6928. goto out;
  6929. }
  6930. if (pProperties == NULL) {
  6931. *pPropertyCount = global_ext_list->count;
  6932. goto out;
  6933. }
  6934. copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count;
  6935. for (uint32_t i = 0; i < copy_size; i++) {
  6936. memcpy(&pProperties[i], &global_ext_list->list[i], sizeof(VkExtensionProperties));
  6937. }
  6938. *pPropertyCount = copy_size;
  6939. if (copy_size < global_ext_list->count) {
  6940. res = VK_INCOMPLETE;
  6941. goto out;
  6942. }
  6943. out:
  6944. loader_destroy_generic_list(NULL, (struct loader_generic_list *)&local_ext_list);
  6945. loaderDeleteLayerListAndProperties(NULL, &instance_layers);
  6946. return res;
  6947. }
  6948. VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties(const VkEnumerateInstanceLayerPropertiesChain *chain,
  6949. uint32_t *pPropertyCount,
  6950. VkLayerProperties *pProperties) {
  6951. VkResult result = VK_SUCCESS;
  6952. struct loader_layer_list instance_layer_list;
  6953. tls_instance = NULL;
  6954. LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize);
  6955. uint32_t copy_size;
  6956. // Get layer libraries
  6957. memset(&instance_layer_list, 0, sizeof(instance_layer_list));
  6958. loaderScanForLayers(NULL, &instance_layer_list);
  6959. if (pProperties == NULL) {
  6960. *pPropertyCount = instance_layer_list.count;
  6961. goto out;
  6962. }
  6963. copy_size = (*pPropertyCount < instance_layer_list.count) ? *pPropertyCount : instance_layer_list.count;
  6964. for (uint32_t i = 0; i < copy_size; i++) {
  6965. memcpy(&pProperties[i], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
  6966. }
  6967. *pPropertyCount = copy_size;
  6968. if (copy_size < instance_layer_list.count) {
  6969. result = VK_INCOMPLETE;
  6970. goto out;
  6971. }
  6972. out:
  6973. loaderDeleteLayerListAndProperties(NULL, &instance_layer_list);
  6974. return result;
  6975. }
#if defined(_WIN32) && defined(LOADER_DYNAMIC_LIB)
// Windows DLL entry point: set up loader globals when the process attaches
// and tear them down on a clean detach.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
    switch (reason) {
        case DLL_PROCESS_ATTACH:
            loader_initialize();
            break;
        case DLL_PROCESS_DETACH:
            // Per the DllMain contract, 'reserved' is non-NULL when the
            // process is terminating; skip cleanup then, as the OS reclaims
            // everything and other DLLs may already be gone.
            if (NULL == reserved) {
                loader_release();
            }
            break;
        default:
            // Do nothing
            break;
    }
    return TRUE;
}
#elif !defined(_WIN32)
// Non-Windows builds: use shared-library constructor/destructor hooks for the
// same initialization and teardown.
__attribute__((constructor)) void loader_init_library() { loader_initialize(); }
__attribute__((destructor)) void loader_free_library() { loader_release(); }
#endif
// ---- Vulkan Core 1.1 terminators

// Build (or rebuild) the terminator-side list of physical device groups for
// this instance: query each ICD for its groups (or synthesize one group per
// GPU when the ICD lacks EnumeratePhysicalDeviceGroups), translate driver
// VkPhysicalDevice handles into loader terminator objects, fold in the
// platform-sorted ordering on Windows, and reuse existing group objects so
// previously returned group contents remain valid. On failure all newly
// allocated groups are freed and the instance's old list is left in place.
// Requires that physical devices have already been enumerated
// (inst->phys_dev_count_term != 0).
VkResult setupLoaderTermPhysDevGroups(struct loader_instance *inst) {
    VkResult res = VK_SUCCESS;
    struct loader_icd_term *icd_term;
    uint32_t total_count = 0;
    uint32_t cur_icd_group_count = 0;
    VkPhysicalDeviceGroupPropertiesKHR **new_phys_dev_groups = NULL;
    VkPhysicalDeviceGroupPropertiesKHR *local_phys_dev_groups = NULL;
    bool *local_phys_dev_group_sorted = NULL;
    PFN_vkEnumeratePhysicalDeviceGroups fpEnumeratePhysicalDeviceGroups = NULL;
    struct LoaderSortedPhysicalDevice* sorted_phys_dev_array = NULL;
    uint32_t sorted_count = 0;

    // The handle-translation loop below requires the terminator physical
    // device list to exist already.
    if (0 == inst->phys_dev_count_term) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "setupLoaderTermPhysDevGroups: Loader failed to setup physical "
                   "device terminator info before calling \'EnumeratePhysicalDeviceGroups\'.");
        assert(false);
        res = VK_ERROR_INITIALIZATION_FAILED;
        goto out;
    }

    // For each ICD, query the number of physical device groups, and then get an
    // internal value for those physical devices.
    icd_term = inst->icd_terms;
    for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
        // Get the function pointer to use to call into the ICD. This could be the core or KHR version
        if (inst->enabled_known_extensions.khr_device_group_creation) {
            fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
        } else {
            fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
        }

        cur_icd_group_count = 0;
        if (NULL == fpEnumeratePhysicalDeviceGroups) {
            // Treat each ICD's GPU as it's own group if the extension isn't supported
            res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &cur_icd_group_count, NULL);
            if (res != VK_SUCCESS) {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "setupLoaderTermPhysDevGroups: Failed during dispatch call of "
                           "\'EnumeratePhysicalDevices\' to ICD %d to get plain phys dev count.",
                           icd_idx);
                goto out;
            }
        } else {
            // Query the actual group info
            res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &cur_icd_group_count, NULL);
            if (res != VK_SUCCESS) {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "setupLoaderTermPhysDevGroups: Failed during dispatch call of "
                           "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get count.",
                           icd_idx);
                goto out;
            }
        }
        total_count += cur_icd_group_count;
    }

    // Create an array for the new physical device groups, which will be stored
    // in the instance for the Terminator code.
    new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_alloc(
        inst, total_count * sizeof(VkPhysicalDeviceGroupProperties *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == new_phys_dev_groups) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "setupLoaderTermPhysDevGroups: Failed to allocate new physical device"
                   " group array of size %d",
                   total_count);
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    memset(new_phys_dev_groups, 0, total_count * sizeof(VkPhysicalDeviceGroupProperties *));

    // Create a temporary array (on the stack) to keep track of the
    // returned VkPhysicalDevice values.
    local_phys_dev_groups = loader_stack_alloc(sizeof(VkPhysicalDeviceGroupProperties) * total_count);
    local_phys_dev_group_sorted = loader_stack_alloc(sizeof(bool) * total_count);
    if (NULL == local_phys_dev_groups || NULL == local_phys_dev_group_sorted) {
        loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                   "setupLoaderTermPhysDevGroups: Failed to allocate local "
                   "physical device group array of size %d",
                   total_count);
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    // Initialize the memory to something valid
    memset(local_phys_dev_groups, 0, sizeof(VkPhysicalDeviceGroupProperties) * total_count);
    memset(local_phys_dev_group_sorted, 0, sizeof(bool) * total_count);
    for (uint32_t group = 0; group < total_count; group++) {
        local_phys_dev_groups[group].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR;
        local_phys_dev_groups[group].pNext = NULL;
        local_phys_dev_groups[group].subsetAllocation = false;
    }

    // Get the physical devices supported by platform sorting mechanism into a separate list
    res = ReadSortedPhysicalDevices(inst, &sorted_phys_dev_array, &sorted_count);
    if (VK_SUCCESS != res) {
        goto out;
    }

    // Second pass over the ICDs: fill local_phys_dev_groups with each ICD's
    // actual group contents and remember which entries came from an ICD that
    // supports platform sorting.
    cur_icd_group_count = 0;
    icd_term = inst->icd_terms;
    for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
        // Remaining capacity in the local array for this ICD.
        uint32_t count_this_time = total_count - cur_icd_group_count;

        // Check if this group can be sorted
#if defined(VK_USE_PLATFORM_WIN32_KHR)
        bool icd_sorted = sorted_count && (icd_term->scanned_icd->EnumerateAdapterPhysicalDevices != NULL);
#else
        bool icd_sorted = false;
#endif

        // Get the function pointer to use to call into the ICD. This could be the core or KHR version
        if (inst->enabled_known_extensions.khr_device_group_creation) {
            fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
        } else {
            fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
        }

        if (NULL == fpEnumeratePhysicalDeviceGroups) {
            VkPhysicalDevice* phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * count_this_time);
            if (NULL == phys_dev_array) {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "setupLoaderTermPhysDevGroups: Failed to allocate local "
                           "physical device array of size %d",
                           count_this_time);
                res = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto out;
            }

            res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, phys_dev_array);
            if (res != VK_SUCCESS) {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "setupLoaderTermPhysDevGroups: Failed during dispatch call of "
                           "\'EnumeratePhysicalDevices\' to ICD %d to get plain phys dev count.",
                           icd_idx);
                goto out;
            }

            // Add each GPU as it's own group
            for (uint32_t indiv_gpu = 0; indiv_gpu < count_this_time; indiv_gpu++) {
                local_phys_dev_groups[indiv_gpu + cur_icd_group_count].physicalDeviceCount = 1;
                local_phys_dev_groups[indiv_gpu + cur_icd_group_count].physicalDevices[0] = phys_dev_array[indiv_gpu];
                local_phys_dev_group_sorted[indiv_gpu + cur_icd_group_count] = icd_sorted;
            }

        } else {
            res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, &local_phys_dev_groups[cur_icd_group_count]);
            for (uint32_t group = 0; group < count_this_time; ++group) {
                local_phys_dev_group_sorted[group + cur_icd_group_count] = icd_sorted;
            }
            if (VK_SUCCESS != res) {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "setupLoaderTermPhysDevGroups: Failed during dispatch call of "
                           "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get content.",
                           icd_idx);
                goto out;
            }
        }

        cur_icd_group_count += count_this_time;
    }

    // Replace all the physical device IDs with the proper loader values
    // (driver handles -> loader_physical_device_term pointers).
    for (uint32_t group = 0; group < total_count; group++) {
        for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].physicalDeviceCount; group_gpu++) {
            bool found = false;
            for (uint32_t term_gpu = 0; term_gpu < inst->phys_dev_count_term; term_gpu++) {
                if (local_phys_dev_groups[group].physicalDevices[group_gpu] == inst->phys_devs_term[term_gpu]->phys_dev) {
                    local_phys_dev_groups[group].physicalDevices[group_gpu] = (VkPhysicalDevice)inst->phys_devs_term[term_gpu];
                    found = true;
                    break;
                }
            }
            if (!found) {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "setupLoaderTermPhysDevGroups: Failed to find GPU %d in group %d"
                           " returned by \'EnumeratePhysicalDeviceGroups\' in list returned"
                           " by \'EnumeratePhysicalDevices\'", group_gpu, group);
                res = VK_ERROR_INITIALIZATION_FAILED;
                goto out;
            }
        }
    }

    // idx walks the output array across both the sorted (Windows-only) pass
    // and the unsorted pass below.
    uint32_t idx = 0;

#if defined(_WIN32)
    // Copy over everything found through sorted enumeration
    for (uint32_t i = 0; i < sorted_count; ++i) {
        // Find the VkPhysicalDeviceGroupProperties object in local_phys_dev_groups
        // whose member set matches this sorted entry exactly.
        VkPhysicalDeviceGroupProperties *group_properties = NULL;
        for (uint32_t group = 0; group < total_count; group++) {
            if (sorted_phys_dev_array[i].device_count != local_phys_dev_groups[group].physicalDeviceCount) {
                continue;
            }

            bool match = true;
            for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].physicalDeviceCount; group_gpu++) {
                if (sorted_phys_dev_array[i].physical_devices[group_gpu] != ((struct loader_physical_device_term*) local_phys_dev_groups[group].physicalDevices[group_gpu])->phys_dev) {
                    match = false;
                    break;
                }
            }

            if (match) {
                group_properties = &local_phys_dev_groups[group];
            }
        }

        // Check if this physical device group with the same contents is already in the old buffer
        // (reuse keeps previously returned group objects stable).
        for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
            if (NULL != group_properties && group_properties->physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) {
                bool found_all_gpus = true;
                for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) {
                    bool found_gpu = false;
                    for (uint32_t new_gpu = 0; new_gpu < group_properties->physicalDeviceCount; new_gpu++) {
                        if (group_properties->physicalDevices[new_gpu] == inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) {
                            found_gpu = true;
                            break;
                        }
                    }

                    if (!found_gpu) {
                        found_all_gpus = false;
                        break;
                    }
                }
                if (!found_all_gpus) {
                    continue;
                }
                else {
                    new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx];
                    break;
                }
            }
        }

        // If this physical device group isn't in the old buffer, create it
        if (group_properties != NULL && NULL == new_phys_dev_groups[idx]) {
            new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupPropertiesKHR*)loader_instance_heap_alloc(
                inst, sizeof(VkPhysicalDeviceGroupPropertiesKHR), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            if (NULL == new_phys_dev_groups[idx]) {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "setupLoaderTermPhysDevGroups: Failed to allocate "
                           "physical device group Terminator object %d",
                           idx);
                // Shrink total_count so the error path at "out" only frees
                // the entries that were actually populated.
                total_count = idx;
                res = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto out;
            }
            memcpy(new_phys_dev_groups[idx], group_properties, sizeof(VkPhysicalDeviceGroupPropertiesKHR));
        }

        // NOTE(review): idx advances even when no matching group_properties
        // was found, which would leave a NULL slot in new_phys_dev_groups.
        // Presumably every sorted entry always matches a local group - TODO confirm.
        ++idx;
    }
#endif

    // Copy or create everything to fill the new array of physical device groups
    for (uint32_t new_idx = 0; new_idx < total_count; new_idx++) {
        // Skip groups which have been included through sorting
        if (local_phys_dev_group_sorted[new_idx] || local_phys_dev_groups[new_idx].physicalDeviceCount == 0) {
            continue;
        }

        // Check if this physical device group with the same contents is already in the old buffer
        for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
            if (local_phys_dev_groups[new_idx].physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) {
                bool found_all_gpus = true;
                for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) {
                    bool found_gpu = false;
                    for (uint32_t new_gpu = 0; new_gpu < local_phys_dev_groups[new_idx].physicalDeviceCount; new_gpu++) {
                        if (local_phys_dev_groups[new_idx].physicalDevices[new_gpu] == inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) {
                            found_gpu = true;
                            break;
                        }
                    }

                    if (!found_gpu) {
                        found_all_gpus = false;
                        break;
                    }
                }
                if (!found_all_gpus) {
                    continue;
                } else {
                    new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx];
                    break;
                }
            }
        }

        // If this physical device group isn't in the old buffer, create it
        if (NULL == new_phys_dev_groups[idx]) {
            new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupPropertiesKHR *)loader_instance_heap_alloc(
                inst, sizeof(VkPhysicalDeviceGroupPropertiesKHR), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            if (NULL == new_phys_dev_groups[idx]) {
                loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "setupLoaderTermPhysDevGroups: Failed to allocate "
                           "physical device group Terminator object %d",
                           idx);
                // Shrink total_count so the error path at "out" only frees
                // the entries that were actually populated.
                total_count = idx;
                res = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto out;
            }
            memcpy(new_phys_dev_groups[idx], &local_phys_dev_groups[new_idx],
                   sizeof(VkPhysicalDeviceGroupPropertiesKHR));
        }

        ++idx;
    }

out:

    if (VK_SUCCESS != res) {
        // Failure: free everything allocated for the new list; the instance
        // keeps its previous group list untouched.
        if (NULL != new_phys_dev_groups) {
            for (uint32_t i = 0; i < total_count; i++) {
                loader_instance_heap_free(inst, new_phys_dev_groups[i]);
            }
            loader_instance_heap_free(inst, new_phys_dev_groups);
        }
        total_count = 0;
    } else {
        // Free everything that didn't carry over to the new array of
        // physical device groups
        if (NULL != inst->phys_dev_groups_term) {
            for (uint32_t i = 0; i < inst->phys_dev_group_count_term; i++) {
                bool found = false;
                for (uint32_t j = 0; j < total_count; j++) {
                    if (inst->phys_dev_groups_term[i] == new_phys_dev_groups[j]) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    loader_instance_heap_free(inst, inst->phys_dev_groups_term[i]);
                }
            }
            loader_instance_heap_free(inst, inst->phys_dev_groups_term);
        }

        // Swap in the new physical device group list
        inst->phys_dev_group_count_term = total_count;
        inst->phys_dev_groups_term = new_phys_dev_groups;
    }

    // The sorted array is scratch data in both success and failure cases.
    if (sorted_phys_dev_array != NULL) {
        for (uint32_t i = 0; i < sorted_count; ++i) {
            if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) {
                loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices);
            }
        }
        loader_instance_heap_free(inst, sorted_phys_dev_array);
    }

    return res;
}
  7320. VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceGroups(
  7321. VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
  7322. VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) {
  7323. struct loader_instance *inst = (struct loader_instance *)instance;
  7324. VkResult res = VK_SUCCESS;
  7325. // Always call the setup loader terminator physical device groups because they may
  7326. // have changed at any point.
  7327. res = setupLoaderTermPhysDevGroups(inst);
  7328. if (VK_SUCCESS != res) {
  7329. goto out;
  7330. }
  7331. uint32_t copy_count = inst->phys_dev_group_count_term;
  7332. if (NULL != pPhysicalDeviceGroupProperties) {
  7333. if (copy_count > *pPhysicalDeviceGroupCount) {
  7334. copy_count = *pPhysicalDeviceGroupCount;
  7335. res = VK_INCOMPLETE;
  7336. }
  7337. for (uint32_t i = 0; i < copy_count; i++) {
  7338. memcpy(&pPhysicalDeviceGroupProperties[i], inst->phys_dev_groups_term[i],
  7339. sizeof(VkPhysicalDeviceGroupPropertiesKHR));
  7340. }
  7341. }
  7342. *pPhysicalDeviceGroupCount = copy_count;
  7343. out:
  7344. return res;
  7345. }
  7346. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
  7347. VkPhysicalDeviceFeatures2 *pFeatures) {
  7348. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  7349. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  7350. const struct loader_instance *inst = icd_term->this_instance;
  7351. // Get the function pointer to use to call into the ICD. This could be the core or KHR version
  7352. PFN_vkGetPhysicalDeviceFeatures2 fpGetPhysicalDeviceFeatures2 = NULL;
  7353. if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7354. fpGetPhysicalDeviceFeatures2 = icd_term->dispatch.GetPhysicalDeviceFeatures2KHR;
  7355. } else {
  7356. fpGetPhysicalDeviceFeatures2 = icd_term->dispatch.GetPhysicalDeviceFeatures2;
  7357. }
  7358. if (fpGetPhysicalDeviceFeatures2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7359. // Pass the call to the driver
  7360. fpGetPhysicalDeviceFeatures2(phys_dev_term->phys_dev, pFeatures);
  7361. } else {
  7362. // Emulate the call
  7363. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  7364. "vkGetPhysicalDeviceFeatures2: Emulating call in ICD \"%s\" using vkGetPhysicalDeviceFeatures",
  7365. icd_term->scanned_icd->lib_name);
  7366. // Write to the VkPhysicalDeviceFeatures2 struct
  7367. icd_term->dispatch.GetPhysicalDeviceFeatures(phys_dev_term->phys_dev, &pFeatures->features);
  7368. const VkBaseInStructure *pNext = pFeatures->pNext;
  7369. while (pNext != NULL) {
  7370. switch (pNext->sType) {
  7371. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
  7372. // Skip the check if VK_KHR_multiview is enabled because it's a device extension
  7373. // Write to the VkPhysicalDeviceMultiviewFeaturesKHR struct
  7374. VkPhysicalDeviceMultiviewFeaturesKHR *multiview_features = (VkPhysicalDeviceMultiviewFeaturesKHR *)pNext;
  7375. multiview_features->multiview = VK_FALSE;
  7376. multiview_features->multiviewGeometryShader = VK_FALSE;
  7377. multiview_features->multiviewTessellationShader = VK_FALSE;
  7378. pNext = multiview_features->pNext;
  7379. break;
  7380. }
  7381. default: {
  7382. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7383. "vkGetPhysicalDeviceFeatures2: Emulation found unrecognized structure type in pFeatures->pNext - "
  7384. "this struct will be ignored");
  7385. pNext = pNext->pNext;
  7386. break;
  7387. }
  7388. }
  7389. }
  7390. }
  7391. }
  7392. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
  7393. VkPhysicalDeviceProperties2 *pProperties) {
  7394. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  7395. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  7396. const struct loader_instance *inst = icd_term->this_instance;
  7397. // Get the function pointer to use to call into the ICD. This could be the core or KHR version
  7398. PFN_vkGetPhysicalDeviceProperties2 fpGetPhysicalDeviceProperties2 = NULL;
  7399. if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7400. fpGetPhysicalDeviceProperties2 = icd_term->dispatch.GetPhysicalDeviceProperties2KHR;
  7401. } else {
  7402. fpGetPhysicalDeviceProperties2 = icd_term->dispatch.GetPhysicalDeviceProperties2;
  7403. }
  7404. if (fpGetPhysicalDeviceProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7405. // Pass the call to the driver
  7406. fpGetPhysicalDeviceProperties2(phys_dev_term->phys_dev, pProperties);
  7407. } else {
  7408. // Emulate the call
  7409. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  7410. "vkGetPhysicalDeviceProperties2: Emulating call in ICD \"%s\" using vkGetPhysicalDeviceProperties",
  7411. icd_term->scanned_icd->lib_name);
  7412. // Write to the VkPhysicalDeviceProperties2 struct
  7413. icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &pProperties->properties);
  7414. const VkBaseInStructure *pNext = pProperties->pNext;
  7415. while (pNext != NULL) {
  7416. switch (pNext->sType) {
  7417. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
  7418. VkPhysicalDeviceIDPropertiesKHR *id_properties = (VkPhysicalDeviceIDPropertiesKHR *)pNext;
  7419. // Verify that "VK_KHR_external_memory_capabilities" is enabled
  7420. if (icd_term->this_instance->enabled_known_extensions.khr_external_memory_capabilities) {
  7421. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7422. "vkGetPhysicalDeviceProperties2: Emulation cannot generate unique IDs for struct "
  7423. "VkPhysicalDeviceIDProperties - setting IDs to zero instead");
  7424. // Write to the VkPhysicalDeviceIDPropertiesKHR struct
  7425. memset(id_properties->deviceUUID, 0, VK_UUID_SIZE);
  7426. memset(id_properties->driverUUID, 0, VK_UUID_SIZE);
  7427. id_properties->deviceLUIDValid = VK_FALSE;
  7428. }
  7429. pNext = id_properties->pNext;
  7430. break;
  7431. }
  7432. default: {
  7433. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7434. "vkGetPhysicalDeviceProperties2KHR: Emulation found unrecognized structure type in "
  7435. "pProperties->pNext - this struct will be ignored");
  7436. pNext = pNext->pNext;
  7437. break;
  7438. }
  7439. }
  7440. }
  7441. }
  7442. }
  7443. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice, VkFormat format,
  7444. VkFormatProperties2 *pFormatProperties) {
  7445. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  7446. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  7447. const struct loader_instance *inst = icd_term->this_instance;
  7448. // Get the function pointer to use to call into the ICD. This could be the core or KHR version
  7449. PFN_vkGetPhysicalDeviceFormatProperties2 fpGetPhysicalDeviceFormatProperties2 = NULL;
  7450. if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7451. fpGetPhysicalDeviceFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceFormatProperties2KHR;
  7452. } else {
  7453. fpGetPhysicalDeviceFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceFormatProperties2;
  7454. }
  7455. if (fpGetPhysicalDeviceFormatProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7456. // Pass the call to the driver
  7457. fpGetPhysicalDeviceFormatProperties2(phys_dev_term->phys_dev, format, pFormatProperties);
  7458. } else {
  7459. // Emulate the call
  7460. loader_log(
  7461. icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  7462. "vkGetPhysicalDeviceFormatProperties2: Emulating call in ICD \"%s\" using vkGetPhysicalDeviceFormatProperties",
  7463. icd_term->scanned_icd->lib_name);
  7464. // Write to the VkFormatProperties2 struct
  7465. icd_term->dispatch.GetPhysicalDeviceFormatProperties(phys_dev_term->phys_dev, format, &pFormatProperties->formatProperties);
  7466. if (pFormatProperties->pNext != NULL) {
  7467. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7468. "vkGetPhysicalDeviceFormatProperties2: Emulation found unrecognized structure type in "
  7469. "pFormatProperties->pNext - this struct will be ignored");
  7470. }
  7471. }
  7472. }
  7473. VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceImageFormatProperties2(
  7474. VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2KHR *pImageFormatInfo,
  7475. VkImageFormatProperties2KHR *pImageFormatProperties) {
  7476. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  7477. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  7478. const struct loader_instance *inst = icd_term->this_instance;
  7479. // Get the function pointer to use to call into the ICD. This could be the core or KHR version
  7480. PFN_vkGetPhysicalDeviceImageFormatProperties2 fpGetPhysicalDeviceImageFormatProperties2 = NULL;
  7481. if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7482. fpGetPhysicalDeviceImageFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceImageFormatProperties2KHR;
  7483. } else {
  7484. fpGetPhysicalDeviceImageFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceImageFormatProperties2;
  7485. }
  7486. if (fpGetPhysicalDeviceImageFormatProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7487. // Pass the call to the driver
  7488. return fpGetPhysicalDeviceImageFormatProperties2(phys_dev_term->phys_dev, pImageFormatInfo, pImageFormatProperties);
  7489. } else {
  7490. // Emulate the call
  7491. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  7492. "vkGetPhysicalDeviceImageFormatProperties2: Emulating call in ICD \"%s\" using "
  7493. "vkGetPhysicalDeviceImageFormatProperties",
  7494. icd_term->scanned_icd->lib_name);
  7495. // If there is more info in either pNext, then this is unsupported
  7496. if (pImageFormatInfo->pNext != NULL || pImageFormatProperties->pNext != NULL) {
  7497. return VK_ERROR_FORMAT_NOT_SUPPORTED;
  7498. }
  7499. // Write to the VkImageFormatProperties2KHR struct
  7500. return icd_term->dispatch.GetPhysicalDeviceImageFormatProperties(
  7501. phys_dev_term->phys_dev, pImageFormatInfo->format, pImageFormatInfo->type, pImageFormatInfo->tiling,
  7502. pImageFormatInfo->usage, pImageFormatInfo->flags, &pImageFormatProperties->imageFormatProperties);
  7503. }
  7504. }
  7505. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceQueueFamilyProperties2(
  7506. VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
  7507. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  7508. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  7509. const struct loader_instance *inst = icd_term->this_instance;
  7510. // Get the function pointer to use to call into the ICD. This could be the core or KHR version
  7511. PFN_vkGetPhysicalDeviceQueueFamilyProperties2 fpGetPhysicalDeviceQueueFamilyProperties2 = NULL;
  7512. if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7513. fpGetPhysicalDeviceQueueFamilyProperties2 = icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties2KHR;
  7514. } else {
  7515. fpGetPhysicalDeviceQueueFamilyProperties2 = icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties2;
  7516. }
  7517. if (fpGetPhysicalDeviceQueueFamilyProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7518. // Pass the call to the driver
  7519. fpGetPhysicalDeviceQueueFamilyProperties2(phys_dev_term->phys_dev, pQueueFamilyPropertyCount, pQueueFamilyProperties);
  7520. } else {
  7521. // Emulate the call
  7522. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  7523. "vkGetPhysicalDeviceQueueFamilyProperties2: Emulating call in ICD \"%s\" using "
  7524. "vkGetPhysicalDeviceQueueFamilyProperties",
  7525. icd_term->scanned_icd->lib_name);
  7526. if (pQueueFamilyProperties == NULL || *pQueueFamilyPropertyCount == 0) {
  7527. // Write to pQueueFamilyPropertyCount
  7528. icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties(phys_dev_term->phys_dev, pQueueFamilyPropertyCount, NULL);
  7529. } else {
  7530. // Allocate a temporary array for the output of the old function
  7531. VkQueueFamilyProperties *properties = loader_stack_alloc(*pQueueFamilyPropertyCount * sizeof(VkQueueFamilyProperties));
  7532. if (properties == NULL) {
  7533. *pQueueFamilyPropertyCount = 0;
  7534. loader_log(
  7535. icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  7536. "vkGetPhysicalDeviceQueueFamilyProperties2: Out of memory - Failed to allocate array for loader emulation.");
  7537. return;
  7538. }
  7539. icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties(phys_dev_term->phys_dev, pQueueFamilyPropertyCount,
  7540. properties);
  7541. for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; ++i) {
  7542. // Write to the VkQueueFamilyProperties2KHR struct
  7543. memcpy(&pQueueFamilyProperties[i].queueFamilyProperties, &properties[i], sizeof(VkQueueFamilyProperties));
  7544. if (pQueueFamilyProperties[i].pNext != NULL) {
  7545. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7546. "vkGetPhysicalDeviceQueueFamilyProperties2: Emulation found unrecognized structure type in "
  7547. "pQueueFamilyProperties[%d].pNext - this struct will be ignored",
  7548. i);
  7549. }
  7550. }
  7551. }
  7552. }
  7553. }
  7554. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceMemoryProperties2(
  7555. VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2 *pMemoryProperties) {
  7556. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  7557. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  7558. const struct loader_instance *inst = icd_term->this_instance;
  7559. // Get the function pointer to use to call into the ICD. This could be the core or KHR version
  7560. PFN_vkGetPhysicalDeviceMemoryProperties2 fpGetPhysicalDeviceMemoryProperties2 = NULL;
  7561. if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7562. fpGetPhysicalDeviceMemoryProperties2 = icd_term->dispatch.GetPhysicalDeviceMemoryProperties2KHR;
  7563. } else {
  7564. fpGetPhysicalDeviceMemoryProperties2 = icd_term->dispatch.GetPhysicalDeviceMemoryProperties2;
  7565. }
  7566. if (fpGetPhysicalDeviceMemoryProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7567. // Pass the call to the driver
  7568. fpGetPhysicalDeviceMemoryProperties2(phys_dev_term->phys_dev, pMemoryProperties);
  7569. } else {
  7570. // Emulate the call
  7571. loader_log(
  7572. icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  7573. "vkGetPhysicalDeviceMemoryProperties2: Emulating call in ICD \"%s\" using vkGetPhysicalDeviceMemoryProperties",
  7574. icd_term->scanned_icd->lib_name);
  7575. // Write to the VkPhysicalDeviceMemoryProperties2 struct
  7576. icd_term->dispatch.GetPhysicalDeviceMemoryProperties(phys_dev_term->phys_dev, &pMemoryProperties->memoryProperties);
  7577. if (pMemoryProperties->pNext != NULL) {
  7578. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7579. "vkGetPhysicalDeviceMemoryProperties2: Emulation found unrecognized structure type in "
  7580. "pMemoryProperties->pNext - this struct will be ignored");
  7581. }
  7582. }
  7583. }
  7584. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceSparseImageFormatProperties2(
  7585. VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
  7586. VkSparseImageFormatProperties2KHR *pProperties) {
  7587. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  7588. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  7589. const struct loader_instance *inst = icd_term->this_instance;
  7590. // Get the function pointer to use to call into the ICD. This could be the core or KHR version
  7591. PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 fpGetPhysicalDeviceSparseImageFormatProperties2 = NULL;
  7592. if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7593. fpGetPhysicalDeviceSparseImageFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties2KHR;
  7594. } else {
  7595. fpGetPhysicalDeviceSparseImageFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties2;
  7596. }
  7597. if (fpGetPhysicalDeviceSparseImageFormatProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) {
  7598. // Pass the call to the driver
  7599. fpGetPhysicalDeviceSparseImageFormatProperties2(phys_dev_term->phys_dev, pFormatInfo, pPropertyCount, pProperties);
  7600. } else {
  7601. // Emulate the call
  7602. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  7603. "vkGetPhysicalDeviceSparseImageFormatProperties2: Emulating call in ICD \"%s\" using "
  7604. "vkGetPhysicalDeviceSparseImageFormatProperties",
  7605. icd_term->scanned_icd->lib_name);
  7606. if (pFormatInfo->pNext != NULL) {
  7607. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7608. "vkGetPhysicalDeviceSparseImageFormatProperties2: Emulation found unrecognized structure type in "
  7609. "pFormatInfo->pNext - this struct will be ignored");
  7610. }
  7611. if (pProperties == NULL || *pPropertyCount == 0) {
  7612. // Write to pPropertyCount
  7613. icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties(
  7614. phys_dev_term->phys_dev, pFormatInfo->format, pFormatInfo->type, pFormatInfo->samples, pFormatInfo->usage,
  7615. pFormatInfo->tiling, pPropertyCount, NULL);
  7616. } else {
  7617. // Allocate a temporary array for the output of the old function
  7618. VkSparseImageFormatProperties *properties =
  7619. loader_stack_alloc(*pPropertyCount * sizeof(VkSparseImageMemoryRequirements));
  7620. if (properties == NULL) {
  7621. *pPropertyCount = 0;
  7622. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
  7623. "vkGetPhysicalDeviceSparseImageFormatProperties2: Out of memory - Failed to allocate array for "
  7624. "loader emulation.");
  7625. return;
  7626. }
  7627. icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties(
  7628. phys_dev_term->phys_dev, pFormatInfo->format, pFormatInfo->type, pFormatInfo->samples, pFormatInfo->usage,
  7629. pFormatInfo->tiling, pPropertyCount, properties);
  7630. for (uint32_t i = 0; i < *pPropertyCount; ++i) {
  7631. // Write to the VkSparseImageFormatProperties2KHR struct
  7632. memcpy(&pProperties[i].properties, &properties[i], sizeof(VkSparseImageFormatProperties));
  7633. if (pProperties[i].pNext != NULL) {
  7634. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7635. "vkGetPhysicalDeviceSparseImageFormatProperties2: Emulation found unrecognized structure type in "
  7636. "pProperties[%d].pNext - this struct will be ignored",
  7637. i);
  7638. }
  7639. }
  7640. }
  7641. }
  7642. }
  7643. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceExternalBufferProperties(
  7644. VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo,
  7645. VkExternalBufferProperties *pExternalBufferProperties) {
  7646. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  7647. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  7648. const struct loader_instance *inst = icd_term->this_instance;
  7649. // Get the function pointer to use to call into the ICD. This could be the core or KHR version
  7650. PFN_vkGetPhysicalDeviceExternalBufferProperties fpGetPhysicalDeviceExternalBufferProperties = NULL;
  7651. if (inst != NULL && inst->enabled_known_extensions.khr_external_memory_capabilities) {
  7652. fpGetPhysicalDeviceExternalBufferProperties = icd_term->dispatch.GetPhysicalDeviceExternalBufferPropertiesKHR;
  7653. } else {
  7654. fpGetPhysicalDeviceExternalBufferProperties = icd_term->dispatch.GetPhysicalDeviceExternalBufferProperties;
  7655. }
  7656. if (fpGetPhysicalDeviceExternalBufferProperties || !inst->enabled_known_extensions.khr_external_memory_capabilities) {
  7657. // Pass the call to the driver
  7658. fpGetPhysicalDeviceExternalBufferProperties(phys_dev_term->phys_dev, pExternalBufferInfo, pExternalBufferProperties);
  7659. } else {
  7660. // Emulate the call
  7661. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  7662. "vkGetPhysicalDeviceExternalBufferProperties: Emulating call in ICD \"%s\"", icd_term->scanned_icd->lib_name);
  7663. if (pExternalBufferInfo->pNext != NULL) {
  7664. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7665. "vkGetPhysicalDeviceExternalBufferProperties: Emulation found unrecognized structure type in "
  7666. "pExternalBufferInfo->pNext - this struct will be ignored");
  7667. }
  7668. // Fill in everything being unsupported
  7669. memset(&pExternalBufferProperties->externalMemoryProperties, 0, sizeof(VkExternalMemoryPropertiesKHR));
  7670. if (pExternalBufferProperties->pNext != NULL) {
  7671. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7672. "vkGetPhysicalDeviceExternalBufferProperties: Emulation found unrecognized structure type in "
  7673. "pExternalBufferProperties->pNext - this struct will be ignored");
  7674. }
  7675. }
  7676. }
  7677. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceExternalSemaphoreProperties(
  7678. VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
  7679. VkExternalSemaphoreProperties *pExternalSemaphoreProperties) {
  7680. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  7681. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  7682. const struct loader_instance *inst = icd_term->this_instance;
  7683. // Get the function pointer to use to call into the ICD. This could be the core or KHR version
  7684. PFN_vkGetPhysicalDeviceExternalSemaphoreProperties fpGetPhysicalDeviceExternalSemaphoreProperties = NULL;
  7685. if (inst != NULL && inst->enabled_known_extensions.khr_external_semaphore_capabilities) {
  7686. fpGetPhysicalDeviceExternalSemaphoreProperties = icd_term->dispatch.GetPhysicalDeviceExternalSemaphorePropertiesKHR;
  7687. } else {
  7688. fpGetPhysicalDeviceExternalSemaphoreProperties = icd_term->dispatch.GetPhysicalDeviceExternalSemaphoreProperties;
  7689. }
  7690. if (fpGetPhysicalDeviceExternalSemaphoreProperties != NULL || !inst->enabled_known_extensions.khr_external_semaphore_capabilities) {
  7691. // Pass the call to the driver
  7692. fpGetPhysicalDeviceExternalSemaphoreProperties(phys_dev_term->phys_dev, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
  7693. } else {
  7694. // Emulate the call
  7695. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  7696. "vkGetPhysicalDeviceExternalSemaphoreProperties: Emulating call in ICD \"%s\"",
  7697. icd_term->scanned_icd->lib_name);
  7698. if (pExternalSemaphoreInfo->pNext != NULL) {
  7699. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7700. "vkGetPhysicalDeviceExternalSemaphoreProperties: Emulation found unrecognized structure type in "
  7701. "pExternalSemaphoreInfo->pNext - this struct will be ignored");
  7702. }
  7703. // Fill in everything being unsupported
  7704. pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
  7705. pExternalSemaphoreProperties->compatibleHandleTypes = 0;
  7706. pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
  7707. if (pExternalSemaphoreProperties->pNext != NULL) {
  7708. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7709. "vkGetPhysicalDeviceExternalSemaphoreProperties: Emulation found unrecognized structure type in "
  7710. "pExternalSemaphoreProperties->pNext - this struct will be ignored");
  7711. }
  7712. }
  7713. }
  7714. VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceExternalFenceProperties(
  7715. VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
  7716. VkExternalFenceProperties *pExternalFenceProperties) {
  7717. struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
  7718. struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
  7719. const struct loader_instance *inst = icd_term->this_instance;
  7720. // Get the function pointer to use to call into the ICD. This could be the core or KHR version
  7721. PFN_vkGetPhysicalDeviceExternalFenceProperties fpGetPhysicalDeviceExternalFenceProperties = NULL;
  7722. if (inst != NULL && inst->enabled_known_extensions.khr_external_fence_capabilities) {
  7723. fpGetPhysicalDeviceExternalFenceProperties = icd_term->dispatch.GetPhysicalDeviceExternalFencePropertiesKHR;
  7724. } else {
  7725. fpGetPhysicalDeviceExternalFenceProperties = icd_term->dispatch.GetPhysicalDeviceExternalFenceProperties;
  7726. }
  7727. if (fpGetPhysicalDeviceExternalFenceProperties != NULL || !inst->enabled_known_extensions.khr_external_fence_capabilities) {
  7728. // Pass the call to the driver
  7729. fpGetPhysicalDeviceExternalFenceProperties(phys_dev_term->phys_dev, pExternalFenceInfo, pExternalFenceProperties);
  7730. } else {
  7731. // Emulate the call
  7732. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
  7733. "vkGetPhysicalDeviceExternalFenceProperties: Emulating call in ICD \"%s\"", icd_term->scanned_icd->lib_name);
  7734. if (pExternalFenceInfo->pNext != NULL) {
  7735. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7736. "vkGetPhysicalDeviceExternalFenceProperties: Emulation found unrecognized structure type in "
  7737. "pExternalFenceInfo->pNext - this struct will be ignored");
  7738. }
  7739. // Fill in everything being unsupported
  7740. pExternalFenceProperties->exportFromImportedHandleTypes = 0;
  7741. pExternalFenceProperties->compatibleHandleTypes = 0;
  7742. pExternalFenceProperties->externalFenceFeatures = 0;
  7743. if (pExternalFenceProperties->pNext != NULL) {
  7744. loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
  7745. "vkGetPhysicalDeviceExternalFenceProperties: Emulation found unrecognized structure type in "
  7746. "pExternalFenceProperties->pNext - this struct will be ignored");
  7747. }
  7748. }
  7749. }