/**************************************************************************/
/*  rendering_device_d3d12.cpp                                            */
/**************************************************************************/
/* This file is part of:                                                  */
/*                             GODOT ENGINE                               */
/*                        https://godotengine.org                         */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

#include "rendering_device_d3d12.h"

#include "core/config/project_settings.h"
#include "core/io/compression.h"
#include "core/io/file_access.h"
#include "core/io/marshalls.h"
#include "core/object/worker_thread_pool.h"
#include "core/os/os.h"
#include "core/templates/hashfuncs.h"
#include "d3d12_godot_nir_bridge.h"
#include "modules/regex/regex.h"
#include "thirdparty/zlib/zlib.h"

#ifdef DEV_ENABLED
#include "core/crypto/hashing_context.h"
#endif

// No point in fighting warnings in Mesa.
#pragma warning(push)
#pragma warning(disable : 4200) // "nonstandard extension used: zero-sized array in struct/union".
#pragma warning(disable : 4806) // "'&': unsafe operation: no value of type 'bool' promoted to type 'uint32_t' can equal the given constant".
#include "dxil_validator.h"
#include "nir_spirv.h"
#include "nir_to_dxil.h"
#include "spirv_to_dxil.h"
extern "C" {
#include "dxil_spirv_nir.h"
}
#pragma warning(pop)
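
// ALIGN rounds m_number up to the nearest multiple of m_alignment using
// integer arithmetic only; e.g. ALIGN(13, 8) == 16 and ALIGN(16, 8) == 16.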
#define ALIGN(m_number, m_alignment) ((((m_number) + ((m_alignment)-1)) / (m_alignment)) * (m_alignment))

#ifdef USE_SMALL_ALLOCS_POOL
static const uint32_t SMALL_ALLOCATION_MAX_SIZE = 4096;
#endif

static const D3D12_RANGE VOID_RANGE = {};

static const uint32_t MAX_VULKAN_SETS = 16;
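// Root constants and the driver-injected runtime data are bound in dedicated
// register spaces above those used for the Vulkan-style descriptor sets,
// keeping them clear of the registers used by user resources.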
static const uint32_t ROOT_CONSTANT_SPACE = MAX_VULKAN_SETS + 1;
static const uint32_t ROOT_CONSTANT_REGISTER = 0;
static const uint32_t RUNTIME_DATA_SPACE = MAX_VULKAN_SETS + 2;
static const uint32_t RUNTIME_DATA_REGISTER = 0;

static const uint32_t MAX_IMAGE_FORMAT_PLANES = 2;

#ifdef DEV_ENABLED
//#define DEBUG_COUNT_BARRIERS
#endif
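
// Returns the Buffer backing p_buffer from whichever owner pool holds it,
// or nullptr if the RID does not refer to a buffer type.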
RenderingDeviceD3D12::Buffer *RenderingDeviceD3D12::_get_buffer_from_owner(RID p_buffer) {
	Buffer *buffer = nullptr;
	if (vertex_buffer_owner.owns(p_buffer)) {
		buffer = vertex_buffer_owner.get_or_null(p_buffer);
	} else if (index_buffer_owner.owns(p_buffer)) {
		buffer = index_buffer_owner.get_or_null(p_buffer);
	} else if (uniform_buffer_owner.owns(p_buffer)) {
		buffer = uniform_buffer_owner.get_or_null(p_buffer);
	} else if (texture_buffer_owner.owns(p_buffer)) {
		buffer = &texture_buffer_owner.get_or_null(p_buffer)->buffer;
	} else if (storage_buffer_owner.owns(p_buffer)) {
		buffer = storage_buffer_owner.get_or_null(p_buffer);
	}
	return buffer;
}
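
// Dependency tracking: dependency_map maps a resource to everything that
// depends on it, and reverse_dependency_map stores the inverse relation, so
// both directions can be walked when a resource is freed.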
void RenderingDeviceD3D12::_add_dependency(RID p_id, RID p_depends_on) {
	if (!dependency_map.has(p_depends_on)) {
		dependency_map[p_depends_on] = HashSet<RID>();
	}
	dependency_map[p_depends_on].insert(p_id);

	if (!reverse_dependency_map.has(p_id)) {
		reverse_dependency_map[p_id] = HashSet<RID>();
	}
	reverse_dependency_map[p_id].insert(p_depends_on);
}

void RenderingDeviceD3D12::_free_dependencies(RID p_id) {
	// Direct dependencies must be freed.
	HashMap<RID, HashSet<RID>>::Iterator E = dependency_map.find(p_id);
	if (E) {
		while (E->value.size()) {
			free(*E->value.begin());
		}
		dependency_map.remove(E);
	}

	// Reverse dependencies must be unreferenced.
	E = reverse_dependency_map.find(p_id);
	if (E) {
		for (const RID &F : E->value) {
			HashMap<RID, HashSet<RID>>::Iterator G = dependency_map.find(F);
			ERR_CONTINUE(!G);
			ERR_CONTINUE(!G->value.has(p_id));
			G->value.erase(p_id);
		}
		reverse_dependency_map.remove(E);
	}
}

// NOTE: RD's packed format names are reversed in relation to DXGI's; e.g.:
// - DATA_FORMAT_A8B8G8R8_UNORM_PACK32 -> DXGI_FORMAT_R8G8B8A8_UNORM (packed; note ABGR vs. RGBA).
// - DATA_FORMAT_B8G8R8A8_UNORM -> DXGI_FORMAT_B8G8R8A8_UNORM (not packed; note BGRA order matches).
// TODO: Add YUV formats properly, which would require better support for planes in the RD API.

const RenderingDeviceD3D12::D3D12Format RenderingDeviceD3D12::d3d12_formats[RenderingDevice::DATA_FORMAT_MAX] = {
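	// Each entry is { family (typeless) format, general format, shader
	// component swizzle, depth-stencil format }, with unspecified fields
	// left at their defaults. An empty entry marks an RD format with no
	// direct D3D12 equivalent.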
	/* DATA_FORMAT_R4G4_UNORM_PACK8 */ {},
	/* DATA_FORMAT_R4G4B4A4_UNORM_PACK16 */ { DXGI_FORMAT_B4G4R4A4_UNORM, DXGI_FORMAT_B4G4R4A4_UNORM, D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(1, 2, 3, 0) },
	/* DATA_FORMAT_B4G4R4A4_UNORM_PACK16 */ { DXGI_FORMAT_B4G4R4A4_UNORM, DXGI_FORMAT_B4G4R4A4_UNORM, D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(3, 2, 1, 0) },
	/* DATA_FORMAT_R5G6B5_UNORM_PACK16 */ { DXGI_FORMAT_B5G6R5_UNORM, DXGI_FORMAT_B5G6R5_UNORM },
	/* DATA_FORMAT_B5G6R5_UNORM_PACK16 */ { DXGI_FORMAT_B5G6R5_UNORM, DXGI_FORMAT_B5G6R5_UNORM, D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(2, 1, 0, 3) },
	/* DATA_FORMAT_R5G5B5A1_UNORM_PACK16 */ { DXGI_FORMAT_B5G6R5_UNORM, DXGI_FORMAT_B5G5R5A1_UNORM, D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(1, 2, 3, 0) },
	/* DATA_FORMAT_B5G5R5A1_UNORM_PACK16 */ { DXGI_FORMAT_B5G6R5_UNORM, DXGI_FORMAT_B5G5R5A1_UNORM, D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(3, 2, 1, 0) },
	/* DATA_FORMAT_A1R5G5B5_UNORM_PACK16 */ { DXGI_FORMAT_B5G6R5_UNORM, DXGI_FORMAT_B5G5R5A1_UNORM },
	/* DATA_FORMAT_R8_UNORM */ { DXGI_FORMAT_R8_TYPELESS, DXGI_FORMAT_R8_UNORM },
	/* DATA_FORMAT_R8_SNORM */ { DXGI_FORMAT_R8_TYPELESS, DXGI_FORMAT_R8_SNORM },
	/* DATA_FORMAT_R8_USCALED */ { DXGI_FORMAT_R8_TYPELESS, DXGI_FORMAT_R8_UINT },
	/* DATA_FORMAT_R8_SSCALED */ { DXGI_FORMAT_R8_TYPELESS, DXGI_FORMAT_R8_SINT },
	/* DATA_FORMAT_R8_UINT */ { DXGI_FORMAT_R8_TYPELESS, DXGI_FORMAT_R8_UINT },
	/* DATA_FORMAT_R8_SINT */ { DXGI_FORMAT_R8_TYPELESS, DXGI_FORMAT_R8_SINT },
	/* DATA_FORMAT_R8_SRGB */ {},
	/* DATA_FORMAT_R8G8_UNORM */ { DXGI_FORMAT_R8G8_TYPELESS, DXGI_FORMAT_R8G8_UNORM },
	/* DATA_FORMAT_R8G8_SNORM */ { DXGI_FORMAT_R8G8_TYPELESS, DXGI_FORMAT_R8G8_SNORM },
	/* DATA_FORMAT_R8G8_USCALED */ { DXGI_FORMAT_R8G8_TYPELESS, DXGI_FORMAT_R8G8_UINT },
	/* DATA_FORMAT_R8G8_SSCALED */ { DXGI_FORMAT_R8G8_TYPELESS, DXGI_FORMAT_R8G8_SINT },
	/* DATA_FORMAT_R8G8_UINT */ { DXGI_FORMAT_R8G8_TYPELESS, DXGI_FORMAT_R8G8_UINT },
	/* DATA_FORMAT_R8G8_SINT */ { DXGI_FORMAT_R8G8_TYPELESS, DXGI_FORMAT_R8G8_SINT },
	/* DATA_FORMAT_R8G8_SRGB */ {},
	/* DATA_FORMAT_R8G8B8_UNORM */ {},
	/* DATA_FORMAT_R8G8B8_SNORM */ {},
	/* DATA_FORMAT_R8G8B8_USCALED */ {},
	/* DATA_FORMAT_R8G8B8_SSCALED */ {},
	/* DATA_FORMAT_R8G8B8_UINT */ {},
	/* DATA_FORMAT_R8G8B8_SINT */ {},
	/* DATA_FORMAT_R8G8B8_SRGB */ {},
	/* DATA_FORMAT_B8G8R8_UNORM */ {},
	/* DATA_FORMAT_B8G8R8_SNORM */ {},
	/* DATA_FORMAT_B8G8R8_USCALED */ {},
	/* DATA_FORMAT_B8G8R8_SSCALED */ {},
	/* DATA_FORMAT_B8G8R8_UINT */ {},
	/* DATA_FORMAT_B8G8R8_SINT */ {},
	/* DATA_FORMAT_B8G8R8_SRGB */ {},
	/* DATA_FORMAT_R8G8B8A8_UNORM */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UNORM },
	/* DATA_FORMAT_R8G8B8A8_SNORM */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_SNORM },
	/* DATA_FORMAT_R8G8B8A8_USCALED */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UINT },
	/* DATA_FORMAT_R8G8B8A8_SSCALED */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_SINT },
	/* DATA_FORMAT_R8G8B8A8_UINT */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UINT },
	/* DATA_FORMAT_R8G8B8A8_SINT */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_SINT },
	/* DATA_FORMAT_R8G8B8A8_SRGB */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UNORM_SRGB },
	/* DATA_FORMAT_B8G8R8A8_UNORM */ { DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_B8G8R8A8_UNORM },
	/* DATA_FORMAT_B8G8R8A8_SNORM */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_SNORM },
	/* DATA_FORMAT_B8G8R8A8_USCALED */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UINT },
	/* DATA_FORMAT_B8G8R8A8_SSCALED */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_SINT },
	/* DATA_FORMAT_B8G8R8A8_UINT */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UINT },
	/* DATA_FORMAT_B8G8R8A8_SINT */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_SINT },
	/* DATA_FORMAT_B8G8R8A8_SRGB */ { DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UNORM_SRGB },
	/* DATA_FORMAT_A8B8G8R8_UNORM_PACK32 */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UNORM },
	/* DATA_FORMAT_A8B8G8R8_SNORM_PACK32 */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_SNORM },
	/* DATA_FORMAT_A8B8G8R8_USCALED_PACK32 */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UINT },
	/* DATA_FORMAT_A8B8G8R8_SSCALED_PACK32 */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_SINT },
	/* DATA_FORMAT_A8B8G8R8_UINT_PACK32 */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UINT },
	/* DATA_FORMAT_A8B8G8R8_SINT_PACK32 */ { DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_SINT },
	/* DATA_FORMAT_A8B8G8R8_SRGB_PACK32 */ { DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_B8G8R8A8_UNORM_SRGB },
	/* DATA_FORMAT_A2R10G10B10_UNORM_PACK32 */ { DXGI_FORMAT_R10G10B10A2_TYPELESS, DXGI_FORMAT_R10G10B10A2_UNORM, D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(2, 1, 0, 3) },
	/* DATA_FORMAT_A2R10G10B10_SNORM_PACK32 */ {},
	/* DATA_FORMAT_A2R10G10B10_USCALED_PACK32 */ { DXGI_FORMAT_R10G10B10A2_TYPELESS, DXGI_FORMAT_R10G10B10A2_UINT, D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(2, 1, 0, 3) },
	/* DATA_FORMAT_A2R10G10B10_SSCALED_PACK32 */ {},
	/* DATA_FORMAT_A2R10G10B10_UINT_PACK32 */ { DXGI_FORMAT_R10G10B10A2_TYPELESS, DXGI_FORMAT_R10G10B10A2_UINT, D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(2, 1, 0, 3) },
	/* DATA_FORMAT_A2R10G10B10_SINT_PACK32 */ {},
	/* DATA_FORMAT_A2B10G10R10_UNORM_PACK32 */ { DXGI_FORMAT_R10G10B10A2_TYPELESS, DXGI_FORMAT_R10G10B10A2_UNORM },
	/* DATA_FORMAT_A2B10G10R10_SNORM_PACK32 */ {},
	/* DATA_FORMAT_A2B10G10R10_USCALED_PACK32 */ { DXGI_FORMAT_R10G10B10A2_TYPELESS, DXGI_FORMAT_R10G10B10A2_UINT },
	/* DATA_FORMAT_A2B10G10R10_SSCALED_PACK32 */ {},
	/* DATA_FORMAT_A2B10G10R10_UINT_PACK32 */ { DXGI_FORMAT_R10G10B10A2_TYPELESS, DXGI_FORMAT_R10G10B10A2_UINT },
	/* DATA_FORMAT_A2B10G10R10_SINT_PACK32 */ {},
	/* DATA_FORMAT_R16_UNORM */ { DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_UNORM },
	/* DATA_FORMAT_R16_SNORM */ { DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_SNORM },
	/* DATA_FORMAT_R16_USCALED */ { DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_UINT },
	/* DATA_FORMAT_R16_SSCALED */ { DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_SINT },
	/* DATA_FORMAT_R16_UINT */ { DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_UINT },
	/* DATA_FORMAT_R16_SINT */ { DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_SINT },
	/* DATA_FORMAT_R16_SFLOAT */ { DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_FLOAT },
	/* DATA_FORMAT_R16G16_UNORM */ { DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_UNORM },
	/* DATA_FORMAT_R16G16_SNORM */ { DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_SNORM },
	/* DATA_FORMAT_R16G16_USCALED */ { DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_UINT },
	/* DATA_FORMAT_R16G16_SSCALED */ { DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_SINT },
	/* DATA_FORMAT_R16G16_UINT */ { DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_UINT },
	/* DATA_FORMAT_R16G16_SINT */ { DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_SINT },
	/* DATA_FORMAT_R16G16_SFLOAT */ { DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_FLOAT },
	/* DATA_FORMAT_R16G16B16_UNORM */ {},
	/* DATA_FORMAT_R16G16B16_SNORM */ {},
	/* DATA_FORMAT_R16G16B16_USCALED */ {},
	/* DATA_FORMAT_R16G16B16_SSCALED */ {},
	/* DATA_FORMAT_R16G16B16_UINT */ {},
	/* DATA_FORMAT_R16G16B16_SINT */ {},
	/* DATA_FORMAT_R16G16B16_SFLOAT */ {},
	/* DATA_FORMAT_R16G16B16A16_UNORM */ { DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_UNORM },
	/* DATA_FORMAT_R16G16B16A16_SNORM */ { DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_SNORM },
	/* DATA_FORMAT_R16G16B16A16_USCALED */ { DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_UINT },
	/* DATA_FORMAT_R16G16B16A16_SSCALED */ { DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_SINT },
	/* DATA_FORMAT_R16G16B16A16_UINT */ { DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_UINT },
	/* DATA_FORMAT_R16G16B16A16_SINT */ { DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_SINT },
	/* DATA_FORMAT_R16G16B16A16_SFLOAT */ { DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_FLOAT },
	/* DATA_FORMAT_R32_UINT */ { DXGI_FORMAT_R32_TYPELESS, DXGI_FORMAT_R32_UINT },
	/* DATA_FORMAT_R32_SINT */ { DXGI_FORMAT_R32_TYPELESS, DXGI_FORMAT_R32_SINT },
	/* DATA_FORMAT_R32_SFLOAT */ { DXGI_FORMAT_R32_TYPELESS, DXGI_FORMAT_R32_FLOAT },
	/* DATA_FORMAT_R32G32_UINT */ { DXGI_FORMAT_R32G32_TYPELESS, DXGI_FORMAT_R32G32_UINT },
	/* DATA_FORMAT_R32G32_SINT */ { DXGI_FORMAT_R32G32_TYPELESS, DXGI_FORMAT_R32G32_SINT },
	/* DATA_FORMAT_R32G32_SFLOAT */ { DXGI_FORMAT_R32G32_TYPELESS, DXGI_FORMAT_R32G32_FLOAT },
	/* DATA_FORMAT_R32G32B32_UINT */ { DXGI_FORMAT_R32G32B32_TYPELESS, DXGI_FORMAT_R32G32B32_UINT },
	/* DATA_FORMAT_R32G32B32_SINT */ { DXGI_FORMAT_R32G32B32_TYPELESS, DXGI_FORMAT_R32G32B32_SINT },
	/* DATA_FORMAT_R32G32B32_SFLOAT */ { DXGI_FORMAT_R32G32B32_TYPELESS, DXGI_FORMAT_R32G32B32_FLOAT },
	/* DATA_FORMAT_R32G32B32A32_UINT */ { DXGI_FORMAT_R32G32B32A32_TYPELESS, DXGI_FORMAT_R32G32B32A32_UINT },
	/* DATA_FORMAT_R32G32B32A32_SINT */ { DXGI_FORMAT_R32G32B32A32_TYPELESS, DXGI_FORMAT_R32G32B32A32_SINT },
	/* DATA_FORMAT_R32G32B32A32_SFLOAT */ { DXGI_FORMAT_R32G32B32A32_TYPELESS, DXGI_FORMAT_R32G32B32A32_FLOAT },
	/* DATA_FORMAT_R64_UINT */ {},
	/* DATA_FORMAT_R64_SINT */ {},
	/* DATA_FORMAT_R64_SFLOAT */ {},
	/* DATA_FORMAT_R64G64_UINT */ {},
	/* DATA_FORMAT_R64G64_SINT */ {},
	/* DATA_FORMAT_R64G64_SFLOAT */ {},
	/* DATA_FORMAT_R64G64B64_UINT */ {},
	/* DATA_FORMAT_R64G64B64_SINT */ {},
	/* DATA_FORMAT_R64G64B64_SFLOAT */ {},
	/* DATA_FORMAT_R64G64B64A64_UINT */ {},
	/* DATA_FORMAT_R64G64B64A64_SINT */ {},
	/* DATA_FORMAT_R64G64B64A64_SFLOAT */ {},
	/* DATA_FORMAT_B10G11R11_UFLOAT_PACK32 */ { DXGI_FORMAT_R11G11B10_FLOAT, DXGI_FORMAT_R11G11B10_FLOAT },
	/* DATA_FORMAT_E5B9G9R9_UFLOAT_PACK32 */ { DXGI_FORMAT_R9G9B9E5_SHAREDEXP, DXGI_FORMAT_R9G9B9E5_SHAREDEXP },
	/* DATA_FORMAT_D16_UNORM */ { DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_UNORM, 0, DXGI_FORMAT_D16_UNORM },
	/* DATA_FORMAT_X8_D24_UNORM_PACK32 */ { DXGI_FORMAT_R24G8_TYPELESS, DXGI_FORMAT_UNKNOWN, 0, DXGI_FORMAT_D24_UNORM_S8_UINT },
	/* DATA_FORMAT_D32_SFLOAT */ { DXGI_FORMAT_R32_TYPELESS, DXGI_FORMAT_R32_FLOAT, D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING, DXGI_FORMAT_D32_FLOAT },
	/* DATA_FORMAT_S8_UINT */ {},
	/* DATA_FORMAT_D16_UNORM_S8_UINT */ {},
	/* DATA_FORMAT_D24_UNORM_S8_UINT */ { DXGI_FORMAT_R24G8_TYPELESS, DXGI_FORMAT_UNKNOWN, 0, DXGI_FORMAT_D24_UNORM_S8_UINT },
	/* DATA_FORMAT_D32_SFLOAT_S8_UINT */ { DXGI_FORMAT_R32G8X24_TYPELESS, DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS, D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING, DXGI_FORMAT_D32_FLOAT_S8X24_UINT },
	/* DATA_FORMAT_BC1_RGB_UNORM_BLOCK */ { DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM, D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(0, 1, 2, D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1) },
	/* DATA_FORMAT_BC1_RGB_SRGB_BLOCK */ { DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM_SRGB, D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(0, 1, 2, D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1) },
	/* DATA_FORMAT_BC1_RGBA_UNORM_BLOCK */ { DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM },
	/* DATA_FORMAT_BC1_RGBA_SRGB_BLOCK */ { DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM_SRGB },
	/* DATA_FORMAT_BC2_UNORM_BLOCK */ { DXGI_FORMAT_BC2_TYPELESS, DXGI_FORMAT_BC2_UNORM },
	/* DATA_FORMAT_BC2_SRGB_BLOCK */ { DXGI_FORMAT_BC2_TYPELESS, DXGI_FORMAT_BC2_UNORM_SRGB },
	/* DATA_FORMAT_BC3_UNORM_BLOCK */ { DXGI_FORMAT_BC3_TYPELESS, DXGI_FORMAT_BC3_UNORM },
	/* DATA_FORMAT_BC3_SRGB_BLOCK */ { DXGI_FORMAT_BC3_TYPELESS, DXGI_FORMAT_BC3_UNORM_SRGB },
	/* DATA_FORMAT_BC4_UNORM_BLOCK */ { DXGI_FORMAT_BC4_TYPELESS, DXGI_FORMAT_BC4_UNORM },
	/* DATA_FORMAT_BC4_SNORM_BLOCK */ { DXGI_FORMAT_BC4_TYPELESS, DXGI_FORMAT_BC4_SNORM },
	/* DATA_FORMAT_BC5_UNORM_BLOCK */ { DXGI_FORMAT_BC5_TYPELESS, DXGI_FORMAT_BC5_UNORM },
	/* DATA_FORMAT_BC5_SNORM_BLOCK */ { DXGI_FORMAT_BC5_TYPELESS, DXGI_FORMAT_BC5_SNORM },
	/* DATA_FORMAT_BC6H_UFLOAT_BLOCK */ { DXGI_FORMAT_BC6H_TYPELESS, DXGI_FORMAT_BC6H_UF16 },
	/* DATA_FORMAT_BC6H_SFLOAT_BLOCK */ { DXGI_FORMAT_BC6H_TYPELESS, DXGI_FORMAT_BC6H_SF16 },
	/* DATA_FORMAT_BC7_UNORM_BLOCK */ { DXGI_FORMAT_BC7_TYPELESS, DXGI_FORMAT_BC7_UNORM },
	/* DATA_FORMAT_BC7_SRGB_BLOCK */ { DXGI_FORMAT_BC7_TYPELESS, DXGI_FORMAT_BC7_UNORM_SRGB },
	/* DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK */ {},
	/* DATA_FORMAT_EAC_R11_UNORM_BLOCK */ {},
	/* DATA_FORMAT_EAC_R11_SNORM_BLOCK */ {},
	/* DATA_FORMAT_EAC_R11G11_UNORM_BLOCK */ {},
	/* DATA_FORMAT_EAC_R11G11_SNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_4x4_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_4x4_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_5x4_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_5x4_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_5x5_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_5x5_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_6x5_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_6x5_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_6x6_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_6x6_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_8x5_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_8x5_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_8x6_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_8x6_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_8x8_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_8x8_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_10x5_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_10x5_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_10x6_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_10x6_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_10x8_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_10x8_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_10x10_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_10x10_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_12x10_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_12x10_SRGB_BLOCK */ {},
	/* DATA_FORMAT_ASTC_12x12_UNORM_BLOCK */ {},
	/* DATA_FORMAT_ASTC_12x12_SRGB_BLOCK */ {},
	/* DATA_FORMAT_G8B8G8R8_422_UNORM */ {},
	/* DATA_FORMAT_B8G8R8G8_422_UNORM */ {},
	/* DATA_FORMAT_G8_B8_R8_3PLANE_420_UNORM */ {},
	/* DATA_FORMAT_G8_B8R8_2PLANE_420_UNORM */ {},
	/* DATA_FORMAT_G8_B8_R8_3PLANE_422_UNORM */ {},
	/* DATA_FORMAT_G8_B8R8_2PLANE_422_UNORM */ {},
	/* DATA_FORMAT_G8_B8_R8_3PLANE_444_UNORM */ {},
	/* DATA_FORMAT_R10X6_UNORM_PACK16 */ {},
	/* DATA_FORMAT_R10X6G10X6_UNORM_2PACK16 */ {},
	/* DATA_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 */ {},
	/* DATA_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 */ {},
	/* DATA_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 */ {},
	/* DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 */ {},
	/* DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 */ {},
	/* DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 */ {},
	/* DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 */ {},
	/* DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 */ {},
	/* DATA_FORMAT_R12X4_UNORM_PACK16 */ {},
	/* DATA_FORMAT_R12X4G12X4_UNORM_2PACK16 */ {},
	/* DATA_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 */ {},
	/* DATA_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 */ {},
	/* DATA_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 */ {},
	/* DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 */ {},
	/* DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 */ {},
	/* DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 */ {},
	/* DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 */ {},
	/* DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 */ {},
	/* DATA_FORMAT_G16B16G16R16_422_UNORM */ {},
	/* DATA_FORMAT_B16G16R16G16_422_UNORM */ {},
	/* DATA_FORMAT_G16_B16_R16_3PLANE_420_UNORM */ {},
	/* DATA_FORMAT_G16_B16R16_2PLANE_420_UNORM */ {},
	/* DATA_FORMAT_G16_B16_R16_3PLANE_422_UNORM */ {},
	/* DATA_FORMAT_G16_B16R16_2PLANE_422_UNORM */ {},
	/* DATA_FORMAT_G16_B16_R16_3PLANE_444_UNORM */ {},
};
const char *RenderingDeviceD3D12::named_formats[RenderingDevice::DATA_FORMAT_MAX] = {
	"R4G4_Unorm_Pack8",
	"R4G4B4A4_Unorm_Pack16",
	"B4G4R4A4_Unorm_Pack16",
	"R5G6B5_Unorm_Pack16",
	"B5G6R5_Unorm_Pack16",
	"R5G5B5A1_Unorm_Pack16",
	"B5G5R5A1_Unorm_Pack16",
	"A1R5G5B5_Unorm_Pack16",
	"R8_Unorm",
	"R8_Snorm",
	"R8_Uscaled",
	"R8_Sscaled",
	"R8_Uint",
	"R8_Sint",
	"R8_Srgb",
	"R8G8_Unorm",
	"R8G8_Snorm",
	"R8G8_Uscaled",
	"R8G8_Sscaled",
	"R8G8_Uint",
	"R8G8_Sint",
	"R8G8_Srgb",
	"R8G8B8_Unorm",
	"R8G8B8_Snorm",
	"R8G8B8_Uscaled",
	"R8G8B8_Sscaled",
	"R8G8B8_Uint",
	"R8G8B8_Sint",
	"R8G8B8_Srgb",
	"B8G8R8_Unorm",
	"B8G8R8_Snorm",
	"B8G8R8_Uscaled",
	"B8G8R8_Sscaled",
	"B8G8R8_Uint",
	"B8G8R8_Sint",
	"B8G8R8_Srgb",
	"R8G8B8A8_Unorm",
	"R8G8B8A8_Snorm",
	"R8G8B8A8_Uscaled",
	"R8G8B8A8_Sscaled",
	"R8G8B8A8_Uint",
	"R8G8B8A8_Sint",
	"R8G8B8A8_Srgb",
	"B8G8R8A8_Unorm",
	"B8G8R8A8_Snorm",
	"B8G8R8A8_Uscaled",
	"B8G8R8A8_Sscaled",
	"B8G8R8A8_Uint",
	"B8G8R8A8_Sint",
	"B8G8R8A8_Srgb",
	"A8B8G8R8_Unorm_Pack32",
	"A8B8G8R8_Snorm_Pack32",
	"A8B8G8R8_Uscaled_Pack32",
	"A8B8G8R8_Sscaled_Pack32",
	"A8B8G8R8_Uint_Pack32",
	"A8B8G8R8_Sint_Pack32",
	"A8B8G8R8_Srgb_Pack32",
	"A2R10G10B10_Unorm_Pack32",
	"A2R10G10B10_Snorm_Pack32",
	"A2R10G10B10_Uscaled_Pack32",
	"A2R10G10B10_Sscaled_Pack32",
	"A2R10G10B10_Uint_Pack32",
	"A2R10G10B10_Sint_Pack32",
	"A2B10G10R10_Unorm_Pack32",
	"A2B10G10R10_Snorm_Pack32",
	"A2B10G10R10_Uscaled_Pack32",
	"A2B10G10R10_Sscaled_Pack32",
	"A2B10G10R10_Uint_Pack32",
	"A2B10G10R10_Sint_Pack32",
	"R16_Unorm",
	"R16_Snorm",
	"R16_Uscaled",
	"R16_Sscaled",
	"R16_Uint",
	"R16_Sint",
	"R16_Sfloat",
	"R16G16_Unorm",
	"R16G16_Snorm",
	"R16G16_Uscaled",
	"R16G16_Sscaled",
	"R16G16_Uint",
	"R16G16_Sint",
	"R16G16_Sfloat",
	"R16G16B16_Unorm",
	"R16G16B16_Snorm",
	"R16G16B16_Uscaled",
	"R16G16B16_Sscaled",
	"R16G16B16_Uint",
	"R16G16B16_Sint",
	"R16G16B16_Sfloat",
	"R16G16B16A16_Unorm",
	"R16G16B16A16_Snorm",
	"R16G16B16A16_Uscaled",
	"R16G16B16A16_Sscaled",
	"R16G16B16A16_Uint",
	"R16G16B16A16_Sint",
	"R16G16B16A16_Sfloat",
	"R32_Uint",
	"R32_Sint",
	"R32_Sfloat",
	"R32G32_Uint",
	"R32G32_Sint",
	"R32G32_Sfloat",
	"R32G32B32_Uint",
	"R32G32B32_Sint",
	"R32G32B32_Sfloat",
	"R32G32B32A32_Uint",
	"R32G32B32A32_Sint",
	"R32G32B32A32_Sfloat",
	"R64_Uint",
	"R64_Sint",
	"R64_Sfloat",
	"R64G64_Uint",
	"R64G64_Sint",
	"R64G64_Sfloat",
	"R64G64B64_Uint",
	"R64G64B64_Sint",
	"R64G64B64_Sfloat",
	"R64G64B64A64_Uint",
	"R64G64B64A64_Sint",
	"R64G64B64A64_Sfloat",
	"B10G11R11_Ufloat_Pack32",
	"E5B9G9R9_Ufloat_Pack32",
	"D16_Unorm",
	"X8_D24_Unorm_Pack32",
	"D32_Sfloat",
	"S8_Uint",
	"D16_Unorm_S8_Uint",
	"D24_Unorm_S8_Uint",
	"D32_Sfloat_S8_Uint",
	"Bc1_Rgb_Unorm_Block",
	"Bc1_Rgb_Srgb_Block",
	"Bc1_Rgba_Unorm_Block",
	"Bc1_Rgba_Srgb_Block",
	"Bc2_Unorm_Block",
	"Bc2_Srgb_Block",
	"Bc3_Unorm_Block",
	"Bc3_Srgb_Block",
	"Bc4_Unorm_Block",
	"Bc4_Snorm_Block",
	"Bc5_Unorm_Block",
	"Bc5_Snorm_Block",
	"Bc6H_Ufloat_Block",
	"Bc6H_Sfloat_Block",
	"Bc7_Unorm_Block",
	"Bc7_Srgb_Block",
	"Etc2_R8G8B8_Unorm_Block",
	"Etc2_R8G8B8_Srgb_Block",
	"Etc2_R8G8B8A1_Unorm_Block",
	"Etc2_R8G8B8A1_Srgb_Block",
	"Etc2_R8G8B8A8_Unorm_Block",
	"Etc2_R8G8B8A8_Srgb_Block",
	"Eac_R11_Unorm_Block",
	"Eac_R11_Snorm_Block",
	"Eac_R11G11_Unorm_Block",
	"Eac_R11G11_Snorm_Block",
	"Astc_4X4_Unorm_Block",
	"Astc_4X4_Srgb_Block",
	"Astc_5X4_Unorm_Block",
	"Astc_5X4_Srgb_Block",
	"Astc_5X5_Unorm_Block",
	"Astc_5X5_Srgb_Block",
	"Astc_6X5_Unorm_Block",
	"Astc_6X5_Srgb_Block",
	"Astc_6X6_Unorm_Block",
	"Astc_6X6_Srgb_Block",
	"Astc_8X5_Unorm_Block",
	"Astc_8X5_Srgb_Block",
	"Astc_8X6_Unorm_Block",
	"Astc_8X6_Srgb_Block",
	"Astc_8X8_Unorm_Block",
	"Astc_8X8_Srgb_Block",
	"Astc_10X5_Unorm_Block",
	"Astc_10X5_Srgb_Block",
	"Astc_10X6_Unorm_Block",
	"Astc_10X6_Srgb_Block",
	"Astc_10X8_Unorm_Block",
	"Astc_10X8_Srgb_Block",
	"Astc_10X10_Unorm_Block",
	"Astc_10X10_Srgb_Block",
	"Astc_12X10_Unorm_Block",
	"Astc_12X10_Srgb_Block",
	"Astc_12X12_Unorm_Block",
	"Astc_12X12_Srgb_Block",
	"G8B8G8R8_422_Unorm",
	"B8G8R8G8_422_Unorm",
	"G8_B8_R8_3Plane_420_Unorm",
	"G8_B8R8_2Plane_420_Unorm",
	"G8_B8_R8_3Plane_422_Unorm",
	"G8_B8R8_2Plane_422_Unorm",
	"G8_B8_R8_3Plane_444_Unorm",
	"R10X6_Unorm_Pack16",
	"R10X6G10X6_Unorm_2Pack16",
	"R10X6G10X6B10X6A10X6_Unorm_4Pack16",
	"G10X6B10X6G10X6R10X6_422_Unorm_4Pack16",
	"B10X6G10X6R10X6G10X6_422_Unorm_4Pack16",
	"G10X6_B10X6_R10X6_3Plane_420_Unorm_3Pack16",
	"G10X6_B10X6R10X6_2Plane_420_Unorm_3Pack16",
	"G10X6_B10X6_R10X6_3Plane_422_Unorm_3Pack16",
	"G10X6_B10X6R10X6_2Plane_422_Unorm_3Pack16",
	"G10X6_B10X6_R10X6_3Plane_444_Unorm_3Pack16",
	"R12X4_Unorm_Pack16",
	"R12X4G12X4_Unorm_2Pack16",
	"R12X4G12X4B12X4A12X4_Unorm_4Pack16",
	"G12X4B12X4G12X4R12X4_422_Unorm_4Pack16",
	"B12X4G12X4R12X4G12X4_422_Unorm_4Pack16",
	"G12X4_B12X4_R12X4_3Plane_420_Unorm_3Pack16",
	"G12X4_B12X4R12X4_2Plane_420_Unorm_3Pack16",
	"G12X4_B12X4_R12X4_3Plane_422_Unorm_3Pack16",
	"G12X4_B12X4R12X4_2Plane_422_Unorm_3Pack16",
	"G12X4_B12X4_R12X4_3Plane_444_Unorm_3Pack16",
	"G16B16G16R16_422_Unorm",
	"B16G16R16G16_422_Unorm",
	"G16_B16_R16_3Plane_420_Unorm",
	"G16_B16R16_2Plane_420_Unorm",
	"G16_B16_R16_3Plane_422_Unorm",
	"G16_B16R16_2Plane_422_Unorm",
	"G16_B16_R16_3Plane_444_Unorm",
};
int RenderingDeviceD3D12::get_format_vertex_size(DataFormat p_format) {
	switch (p_format) {
		case DATA_FORMAT_R8_UNORM:
		case DATA_FORMAT_R8_SNORM:
		case DATA_FORMAT_R8_UINT:
		case DATA_FORMAT_R8_SINT:
		case DATA_FORMAT_R8G8_UNORM:
		case DATA_FORMAT_R8G8_SNORM:
		case DATA_FORMAT_R8G8_UINT:
		case DATA_FORMAT_R8G8_SINT:
		case DATA_FORMAT_R8G8B8_UNORM:
		case DATA_FORMAT_R8G8B8_SNORM:
		case DATA_FORMAT_R8G8B8_UINT:
		case DATA_FORMAT_R8G8B8_SINT:
		case DATA_FORMAT_B8G8R8_UNORM:
		case DATA_FORMAT_B8G8R8_SNORM:
		case DATA_FORMAT_B8G8R8_UINT:
		case DATA_FORMAT_B8G8R8_SINT:
		case DATA_FORMAT_R8G8B8A8_UNORM:
		case DATA_FORMAT_R8G8B8A8_SNORM:
		case DATA_FORMAT_R8G8B8A8_UINT:
		case DATA_FORMAT_R8G8B8A8_SINT:
		case DATA_FORMAT_B8G8R8A8_UNORM:
		case DATA_FORMAT_B8G8R8A8_SNORM:
		case DATA_FORMAT_B8G8R8A8_UINT:
		case DATA_FORMAT_B8G8R8A8_SINT:
		case DATA_FORMAT_A2B10G10R10_UNORM_PACK32:
			return 4;
		case DATA_FORMAT_R16_UNORM:
		case DATA_FORMAT_R16_SNORM:
		case DATA_FORMAT_R16_UINT:
		case DATA_FORMAT_R16_SINT:
		case DATA_FORMAT_R16_SFLOAT:
			return 4; // 2 bytes of data, padded to 4-byte alignment.
		case DATA_FORMAT_R16G16_UNORM:
		case DATA_FORMAT_R16G16_SNORM:
		case DATA_FORMAT_R16G16_UINT:
		case DATA_FORMAT_R16G16_SINT:
		case DATA_FORMAT_R16G16_SFLOAT:
			return 4;
		case DATA_FORMAT_R16G16B16_UNORM:
		case DATA_FORMAT_R16G16B16_SNORM:
		case DATA_FORMAT_R16G16B16_UINT:
		case DATA_FORMAT_R16G16B16_SINT:
		case DATA_FORMAT_R16G16B16_SFLOAT:
			return 8; // 6 bytes of data, padded to 8.
		case DATA_FORMAT_R16G16B16A16_UNORM:
		case DATA_FORMAT_R16G16B16A16_SNORM:
		case DATA_FORMAT_R16G16B16A16_UINT:
		case DATA_FORMAT_R16G16B16A16_SINT:
		case DATA_FORMAT_R16G16B16A16_SFLOAT:
			return 8;
		case DATA_FORMAT_R32_UINT:
		case DATA_FORMAT_R32_SINT:
		case DATA_FORMAT_R32_SFLOAT:
			return 4;
		case DATA_FORMAT_R32G32_UINT:
		case DATA_FORMAT_R32G32_SINT:
		case DATA_FORMAT_R32G32_SFLOAT:
			return 8;
		case DATA_FORMAT_R32G32B32_UINT:
		case DATA_FORMAT_R32G32B32_SINT:
		case DATA_FORMAT_R32G32B32_SFLOAT:
			return 12;
		case DATA_FORMAT_R32G32B32A32_UINT:
		case DATA_FORMAT_R32G32B32A32_SINT:
		case DATA_FORMAT_R32G32B32A32_SFLOAT:
			return 16;
		case DATA_FORMAT_R64_UINT:
		case DATA_FORMAT_R64_SINT:
		case DATA_FORMAT_R64_SFLOAT:
			return 8;
		case DATA_FORMAT_R64G64_UINT:
		case DATA_FORMAT_R64G64_SINT:
		case DATA_FORMAT_R64G64_SFLOAT:
			return 16;
		case DATA_FORMAT_R64G64B64_UINT:
		case DATA_FORMAT_R64G64B64_SINT:
		case DATA_FORMAT_R64G64B64_SFLOAT:
			return 24;
		case DATA_FORMAT_R64G64B64A64_UINT:
		case DATA_FORMAT_R64G64B64A64_SINT:
		case DATA_FORMAT_R64G64B64A64_SFLOAT:
			return 32;
		default:
			return 0;
	}
}
uint32_t RenderingDeviceD3D12::get_image_format_pixel_size(DataFormat p_format) {
	switch (p_format) {
		case DATA_FORMAT_R4G4_UNORM_PACK8:
			return 1;
		case DATA_FORMAT_R4G4B4A4_UNORM_PACK16:
		case DATA_FORMAT_B4G4R4A4_UNORM_PACK16:
		case DATA_FORMAT_R5G6B5_UNORM_PACK16:
		case DATA_FORMAT_B5G6R5_UNORM_PACK16:
		case DATA_FORMAT_R5G5B5A1_UNORM_PACK16:
		case DATA_FORMAT_B5G5R5A1_UNORM_PACK16:
		case DATA_FORMAT_A1R5G5B5_UNORM_PACK16:
			return 2;
		case DATA_FORMAT_R8_UNORM:
		case DATA_FORMAT_R8_SNORM:
		case DATA_FORMAT_R8_USCALED:
		case DATA_FORMAT_R8_SSCALED:
		case DATA_FORMAT_R8_UINT:
		case DATA_FORMAT_R8_SINT:
		case DATA_FORMAT_R8_SRGB:
			return 1;
		case DATA_FORMAT_R8G8_UNORM:
		case DATA_FORMAT_R8G8_SNORM:
		case DATA_FORMAT_R8G8_USCALED:
		case DATA_FORMAT_R8G8_SSCALED:
		case DATA_FORMAT_R8G8_UINT:
		case DATA_FORMAT_R8G8_SINT:
		case DATA_FORMAT_R8G8_SRGB:
			return 2;
		case DATA_FORMAT_R8G8B8_UNORM:
		case DATA_FORMAT_R8G8B8_SNORM:
		case DATA_FORMAT_R8G8B8_USCALED:
		case DATA_FORMAT_R8G8B8_SSCALED:
		case DATA_FORMAT_R8G8B8_UINT:
		case DATA_FORMAT_R8G8B8_SINT:
		case DATA_FORMAT_R8G8B8_SRGB:
		case DATA_FORMAT_B8G8R8_UNORM:
		case DATA_FORMAT_B8G8R8_SNORM:
		case DATA_FORMAT_B8G8R8_USCALED:
		case DATA_FORMAT_B8G8R8_SSCALED:
		case DATA_FORMAT_B8G8R8_UINT:
		case DATA_FORMAT_B8G8R8_SINT:
		case DATA_FORMAT_B8G8R8_SRGB:
			return 3;
		case DATA_FORMAT_R8G8B8A8_UNORM:
		case DATA_FORMAT_R8G8B8A8_SNORM:
		case DATA_FORMAT_R8G8B8A8_USCALED:
		case DATA_FORMAT_R8G8B8A8_SSCALED:
		case DATA_FORMAT_R8G8B8A8_UINT:
		case DATA_FORMAT_R8G8B8A8_SINT:
		case DATA_FORMAT_R8G8B8A8_SRGB:
		case DATA_FORMAT_B8G8R8A8_UNORM:
		case DATA_FORMAT_B8G8R8A8_SNORM:
		case DATA_FORMAT_B8G8R8A8_USCALED:
		case DATA_FORMAT_B8G8R8A8_SSCALED:
		case DATA_FORMAT_B8G8R8A8_UINT:
		case DATA_FORMAT_B8G8R8A8_SINT:
		case DATA_FORMAT_B8G8R8A8_SRGB:
			return 4;
		case DATA_FORMAT_A8B8G8R8_UNORM_PACK32:
		case DATA_FORMAT_A8B8G8R8_SNORM_PACK32:
		case DATA_FORMAT_A8B8G8R8_USCALED_PACK32:
		case DATA_FORMAT_A8B8G8R8_SSCALED_PACK32:
		case DATA_FORMAT_A8B8G8R8_UINT_PACK32:
		case DATA_FORMAT_A8B8G8R8_SINT_PACK32:
		case DATA_FORMAT_A8B8G8R8_SRGB_PACK32:
		case DATA_FORMAT_A2R10G10B10_UNORM_PACK32:
		case DATA_FORMAT_A2R10G10B10_SNORM_PACK32:
		case DATA_FORMAT_A2R10G10B10_USCALED_PACK32:
		case DATA_FORMAT_A2R10G10B10_SSCALED_PACK32:
		case DATA_FORMAT_A2R10G10B10_UINT_PACK32:
		case DATA_FORMAT_A2R10G10B10_SINT_PACK32:
		case DATA_FORMAT_A2B10G10R10_UNORM_PACK32:
		case DATA_FORMAT_A2B10G10R10_SNORM_PACK32:
		case DATA_FORMAT_A2B10G10R10_USCALED_PACK32:
		case DATA_FORMAT_A2B10G10R10_SSCALED_PACK32:
		case DATA_FORMAT_A2B10G10R10_UINT_PACK32:
		case DATA_FORMAT_A2B10G10R10_SINT_PACK32:
			return 4;
		case DATA_FORMAT_R16_UNORM:
		case DATA_FORMAT_R16_SNORM:
		case DATA_FORMAT_R16_USCALED:
		case DATA_FORMAT_R16_SSCALED:
		case DATA_FORMAT_R16_UINT:
		case DATA_FORMAT_R16_SINT:
		case DATA_FORMAT_R16_SFLOAT:
			return 2;
		case DATA_FORMAT_R16G16_UNORM:
		case DATA_FORMAT_R16G16_SNORM:
		case DATA_FORMAT_R16G16_USCALED:
		case DATA_FORMAT_R16G16_SSCALED:
		case DATA_FORMAT_R16G16_UINT:
		case DATA_FORMAT_R16G16_SINT:
		case DATA_FORMAT_R16G16_SFLOAT:
			return 4;
		case DATA_FORMAT_R16G16B16_UNORM:
		case DATA_FORMAT_R16G16B16_SNORM:
		case DATA_FORMAT_R16G16B16_USCALED:
		case DATA_FORMAT_R16G16B16_SSCALED:
		case DATA_FORMAT_R16G16B16_UINT:
		case DATA_FORMAT_R16G16B16_SINT:
		case DATA_FORMAT_R16G16B16_SFLOAT:
			return 6;
		case DATA_FORMAT_R16G16B16A16_UNORM:
		case DATA_FORMAT_R16G16B16A16_SNORM:
		case DATA_FORMAT_R16G16B16A16_USCALED:
		case DATA_FORMAT_R16G16B16A16_SSCALED:
		case DATA_FORMAT_R16G16B16A16_UINT:
		case DATA_FORMAT_R16G16B16A16_SINT:
		case DATA_FORMAT_R16G16B16A16_SFLOAT:
			return 8;
		case DATA_FORMAT_R32_UINT:
		case DATA_FORMAT_R32_SINT:
		case DATA_FORMAT_R32_SFLOAT:
			return 4;
		case DATA_FORMAT_R32G32_UINT:
		case DATA_FORMAT_R32G32_SINT:
		case DATA_FORMAT_R32G32_SFLOAT:
			return 8;
		case DATA_FORMAT_R32G32B32_UINT:
		case DATA_FORMAT_R32G32B32_SINT:
		case DATA_FORMAT_R32G32B32_SFLOAT:
			return 12;
		case DATA_FORMAT_R32G32B32A32_UINT:
		case DATA_FORMAT_R32G32B32A32_SINT:
		case DATA_FORMAT_R32G32B32A32_SFLOAT:
			return 16;
		case DATA_FORMAT_R64_UINT:
		case DATA_FORMAT_R64_SINT:
		case DATA_FORMAT_R64_SFLOAT:
			return 8;
		case DATA_FORMAT_R64G64_UINT:
		case DATA_FORMAT_R64G64_SINT:
		case DATA_FORMAT_R64G64_SFLOAT:
			return 16;
		case DATA_FORMAT_R64G64B64_UINT:
		case DATA_FORMAT_R64G64B64_SINT:
		case DATA_FORMAT_R64G64B64_SFLOAT:
			return 24;
		case DATA_FORMAT_R64G64B64A64_UINT:
		case DATA_FORMAT_R64G64B64A64_SINT:
		case DATA_FORMAT_R64G64B64A64_SFLOAT:
			return 32;
		case DATA_FORMAT_B10G11R11_UFLOAT_PACK32:
		case DATA_FORMAT_E5B9G9R9_UFLOAT_PACK32:
			return 4;
		case DATA_FORMAT_D16_UNORM:
			return 2;
		case DATA_FORMAT_X8_D24_UNORM_PACK32:
			return 4;
		case DATA_FORMAT_D32_SFLOAT:
			return 4;
		case DATA_FORMAT_S8_UINT:
			return 1;
		case DATA_FORMAT_D16_UNORM_S8_UINT:
			return 4;
		case DATA_FORMAT_D24_UNORM_S8_UINT:
			return 4;
		case DATA_FORMAT_D32_SFLOAT_S8_UINT:
			return 5; // 4 bytes of depth + 1 byte of stencil (nominal; drivers may pad the actual layout).
		case DATA_FORMAT_BC1_RGB_UNORM_BLOCK:
		case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
		case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
		case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK:
		case DATA_FORMAT_BC2_UNORM_BLOCK:
		case DATA_FORMAT_BC2_SRGB_BLOCK:
		case DATA_FORMAT_BC3_UNORM_BLOCK:
		case DATA_FORMAT_BC3_SRGB_BLOCK:
		case DATA_FORMAT_BC4_UNORM_BLOCK:
		case DATA_FORMAT_BC4_SNORM_BLOCK:
		case DATA_FORMAT_BC5_UNORM_BLOCK:
		case DATA_FORMAT_BC5_SNORM_BLOCK:
		case DATA_FORMAT_BC6H_UFLOAT_BLOCK:
		case DATA_FORMAT_BC6H_SFLOAT_BLOCK:
		case DATA_FORMAT_BC7_UNORM_BLOCK:
		case DATA_FORMAT_BC7_SRGB_BLOCK:
			return 1;
		case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
			return 1;
		case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
		case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
		case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK:
		case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK:
			return 1;
		case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_5x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_5x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_6x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_6x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_6x6_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_6x6_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_8x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_8x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_8x6_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x6_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x6_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x8_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x8_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x10_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x10_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_12x10_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK:
			return 1;
		case DATA_FORMAT_G8B8G8R8_422_UNORM:
		case DATA_FORMAT_B8G8R8G8_422_UNORM:
			return 4;
		case DATA_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
		case DATA_FORMAT_G8_B8R8_2PLANE_420_UNORM:
		case DATA_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
		case DATA_FORMAT_G8_B8R8_2PLANE_422_UNORM:
		case DATA_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
			return 4;
		case DATA_FORMAT_R10X6_UNORM_PACK16:
		case DATA_FORMAT_R10X6G10X6_UNORM_2PACK16:
		case DATA_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16:
		case DATA_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16:
		case DATA_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16:
		case DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
		case DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
		case DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
		case DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
		case DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
		case DATA_FORMAT_R12X4_UNORM_PACK16:
		case DATA_FORMAT_R12X4G12X4_UNORM_2PACK16:
		case DATA_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16:
		case DATA_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16:
		case DATA_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16:
		case DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
		case DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
		case DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
		case DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
		case DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
			return 2;
		case DATA_FORMAT_G16B16G16R16_422_UNORM:
		case DATA_FORMAT_B16G16R16G16_422_UNORM:
		case DATA_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
		case DATA_FORMAT_G16_B16R16_2PLANE_420_UNORM:
		case DATA_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
		case DATA_FORMAT_G16_B16R16_2PLANE_422_UNORM:
		case DATA_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
			return 8;
		default: {
			ERR_PRINT("Format not handled, bug");
		}
	}
	return 1;
}
// https://www.khronos.org/registry/DataFormat/specs/1.1/dataformat.1.1.pdf
void RenderingDeviceD3D12::get_compressed_image_format_block_dimensions(DataFormat p_format, uint32_t &r_w, uint32_t &r_h) {
	switch (p_format) {
		case DATA_FORMAT_BC1_RGB_UNORM_BLOCK:
		case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
		case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
		case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK:
		case DATA_FORMAT_BC2_UNORM_BLOCK:
		case DATA_FORMAT_BC2_SRGB_BLOCK:
		case DATA_FORMAT_BC3_UNORM_BLOCK:
		case DATA_FORMAT_BC3_SRGB_BLOCK:
		case DATA_FORMAT_BC4_UNORM_BLOCK:
		case DATA_FORMAT_BC4_SNORM_BLOCK:
		case DATA_FORMAT_BC5_UNORM_BLOCK:
		case DATA_FORMAT_BC5_SNORM_BLOCK:
		case DATA_FORMAT_BC6H_UFLOAT_BLOCK:
		case DATA_FORMAT_BC6H_SFLOAT_BLOCK:
		case DATA_FORMAT_BC7_UNORM_BLOCK:
		case DATA_FORMAT_BC7_SRGB_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
		case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
		case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
		case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK:
		case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK:
		case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK: // Note: only correct for the 4x4 footprint; the larger ASTC footprints below are also (incorrectly) reported as 4x4.
		case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_5x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_5x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_6x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_6x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_6x6_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_6x6_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_8x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_8x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_8x6_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x6_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x6_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x8_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x8_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x10_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x10_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_12x10_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK:
			r_w = 4;
			r_h = 4;
			return;
		default: {
			r_w = 1;
			r_h = 1;
		}
	}
}
uint32_t RenderingDeviceD3D12::get_compressed_image_format_block_byte_size(DataFormat p_format) {
	switch (p_format) {
		case DATA_FORMAT_BC1_RGB_UNORM_BLOCK:
		case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
		case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
		case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK:
			return 8;
		case DATA_FORMAT_BC2_UNORM_BLOCK:
		case DATA_FORMAT_BC2_SRGB_BLOCK:
			return 16;
		case DATA_FORMAT_BC3_UNORM_BLOCK:
		case DATA_FORMAT_BC3_SRGB_BLOCK:
			return 16;
		case DATA_FORMAT_BC4_UNORM_BLOCK:
		case DATA_FORMAT_BC4_SNORM_BLOCK:
			return 8;
		case DATA_FORMAT_BC5_UNORM_BLOCK:
		case DATA_FORMAT_BC5_SNORM_BLOCK:
			return 16;
		case DATA_FORMAT_BC6H_UFLOAT_BLOCK:
		case DATA_FORMAT_BC6H_SFLOAT_BLOCK:
			return 16;
		case DATA_FORMAT_BC7_UNORM_BLOCK:
		case DATA_FORMAT_BC7_SRGB_BLOCK:
			return 16;
		case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
			return 8;
		case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
			return 8;
		case DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
			return 16;
		case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
		case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
			return 8;
		case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK:
		case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK:
			return 16;
		case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_5x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_5x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_6x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_6x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_6x6_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_6x6_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_8x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_8x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_8x6_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x5_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x6_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x6_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x8_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x8_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_10x10_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_10x10_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_12x10_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK:
		case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK:
		case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK:
			return 16; // ASTC blocks are always 128 bits (16 bytes), regardless of footprint.
		default: {
		}
	}
	return 1;
}
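// For the 8-bytes-per-block formats above, a 4x4 block packs 16 pixels into 8 bytes, i.e. half
// a byte per pixel. Integer pixel sizes can't express that, so those formats report a pixel
// size of 1 plus a pixel right-shift of 1 (below), making size math come out as (pixels * 1) >> 1.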
uint32_t RenderingDeviceD3D12::get_compressed_image_format_pixel_rshift(DataFormat p_format) {
	switch (p_format) {
		case DATA_FORMAT_BC1_RGB_UNORM_BLOCK: // These formats are half byte size, so rshift is 1.
		case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
		case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
		case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK:
		case DATA_FORMAT_BC4_UNORM_BLOCK:
		case DATA_FORMAT_BC4_SNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
		case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
		case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
		case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
			return 1;
		default: {
		}
	}
	return 0;
}
uint32_t RenderingDeviceD3D12::get_image_format_plane_count(DataFormat p_format) {
	uint32_t planes = 1;
	switch (p_format) {
		case DATA_FORMAT_D16_UNORM_S8_UINT:
		case DATA_FORMAT_D24_UNORM_S8_UINT:
		case DATA_FORMAT_D32_SFLOAT_S8_UINT: {
			planes = 2;
		} break;
		default: {
		}
	}
	DEV_ASSERT(planes <= MAX_IMAGE_FORMAT_PLANES);
	return planes;
}
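// Example: a 16x16 BC1 texture with 2 mipmaps (block 4x4, pixel size 1, rshift 1) takes
// (16 * 16 * 1) >> 1 = 128 bytes for mip 0 plus (8 * 8 * 1) >> 1 = 32 bytes for mip 1,
// 160 bytes in total.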
uint32_t RenderingDeviceD3D12::get_image_format_required_size(DataFormat p_format, uint32_t p_width, uint32_t p_height, uint32_t p_depth, uint32_t p_mipmaps, uint32_t *r_blockw, uint32_t *r_blockh, uint32_t *r_depth) {
	ERR_FAIL_COND_V(p_mipmaps == 0, 0);
	uint32_t w = p_width;
	uint32_t h = p_height;
	uint32_t d = p_depth;
	uint32_t size = 0;
	uint32_t pixel_size = get_image_format_pixel_size(p_format);
	uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(p_format);
	uint32_t blockw, blockh;
	get_compressed_image_format_block_dimensions(p_format, blockw, blockh);
	for (uint32_t i = 0; i < p_mipmaps; i++) {
		uint32_t bw = w % blockw != 0 ? w + (blockw - w % blockw) : w;
		uint32_t bh = h % blockh != 0 ? h + (blockh - h % blockh) : h;
		uint32_t s = bw * bh;
		s *= pixel_size;
		s >>= pixel_rshift;
		size += s * d;
		if (r_blockw) {
			*r_blockw = bw;
		}
		if (r_blockh) {
			*r_blockh = bh;
		}
		if (r_depth) {
			*r_depth = d;
		}
		w = MAX(blockw, w >> 1);
		h = MAX(blockh, h >> 1);
		d = MAX(1u, d >> 1);
	}
	return size;
}
uint32_t RenderingDeviceD3D12::get_image_required_mipmaps(uint32_t p_width, uint32_t p_height, uint32_t p_depth) {
	// Formats and block size don't really matter here since they can all go down to 1px (even if block is larger).
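	// E.g., a 16x8x1 image shrinks as 16x8, 8x4, 4x2, 2x1, 1x1, giving 5 mipmaps.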
	uint32_t w = p_width;
	uint32_t h = p_height;
	uint32_t d = p_depth;
	uint32_t mipmaps = 1;
	while (true) {
		if (w == 1 && h == 1 && d == 1) {
			break;
		}
		w = MAX(1u, w >> 1);
		h = MAX(1u, h >> 1);
		d = MAX(1u, d >> 1);
		mipmaps++;
	}
	return mipmaps;
}
///////////////////////
const D3D12_COMPARISON_FUNC RenderingDeviceD3D12::compare_operators[RenderingDevice::COMPARE_OP_MAX] = {
	D3D12_COMPARISON_FUNC_NEVER,
	D3D12_COMPARISON_FUNC_LESS,
	D3D12_COMPARISON_FUNC_EQUAL,
	D3D12_COMPARISON_FUNC_LESS_EQUAL,
	D3D12_COMPARISON_FUNC_GREATER,
	D3D12_COMPARISON_FUNC_NOT_EQUAL,
	D3D12_COMPARISON_FUNC_GREATER_EQUAL,
	D3D12_COMPARISON_FUNC_ALWAYS,
};
const D3D12_STENCIL_OP RenderingDeviceD3D12::stencil_operations[RenderingDevice::STENCIL_OP_MAX] = {
	D3D12_STENCIL_OP_KEEP,
	D3D12_STENCIL_OP_ZERO,
	D3D12_STENCIL_OP_REPLACE,
	D3D12_STENCIL_OP_INCR_SAT,
	D3D12_STENCIL_OP_DECR_SAT,
	D3D12_STENCIL_OP_INVERT,
	D3D12_STENCIL_OP_INCR,
	D3D12_STENCIL_OP_DECR,
};
const UINT RenderingDeviceD3D12::rasterization_sample_count[RenderingDevice::TEXTURE_SAMPLES_MAX] = {
	1,
	2,
	4,
	8,
	16,
	32,
	64,
};
const D3D12_LOGIC_OP RenderingDeviceD3D12::logic_operations[RenderingDevice::LOGIC_OP_MAX] = {
	D3D12_LOGIC_OP_CLEAR,
	D3D12_LOGIC_OP_AND,
	D3D12_LOGIC_OP_AND_REVERSE,
	D3D12_LOGIC_OP_COPY,
	D3D12_LOGIC_OP_AND_INVERTED,
	D3D12_LOGIC_OP_NOOP,
	D3D12_LOGIC_OP_XOR,
	D3D12_LOGIC_OP_OR,
	D3D12_LOGIC_OP_NOR,
	D3D12_LOGIC_OP_EQUIV,
	D3D12_LOGIC_OP_INVERT,
	D3D12_LOGIC_OP_OR_REVERSE,
	D3D12_LOGIC_OP_COPY_INVERTED,
	D3D12_LOGIC_OP_OR_INVERTED,
	D3D12_LOGIC_OP_NAND,
	D3D12_LOGIC_OP_SET,
};
const D3D12_BLEND RenderingDeviceD3D12::blend_factors[RenderingDevice::BLEND_FACTOR_MAX] = {
	D3D12_BLEND_ZERO,
	D3D12_BLEND_ONE,
	D3D12_BLEND_SRC_COLOR,
	D3D12_BLEND_INV_SRC_COLOR,
	D3D12_BLEND_DEST_COLOR,
	D3D12_BLEND_INV_DEST_COLOR,
	D3D12_BLEND_SRC_ALPHA,
	D3D12_BLEND_INV_SRC_ALPHA,
	D3D12_BLEND_DEST_ALPHA,
	D3D12_BLEND_INV_DEST_ALPHA,
	D3D12_BLEND_BLEND_FACTOR,
	D3D12_BLEND_INV_BLEND_FACTOR,
	D3D12_BLEND_BLEND_FACTOR,
	D3D12_BLEND_INV_BLEND_FACTOR,
	D3D12_BLEND_SRC_ALPHA_SAT,
	D3D12_BLEND_SRC1_COLOR,
	D3D12_BLEND_INV_SRC1_COLOR,
	D3D12_BLEND_SRC1_ALPHA,
	D3D12_BLEND_INV_SRC1_ALPHA,
};
const D3D12_BLEND_OP RenderingDeviceD3D12::blend_operations[RenderingDevice::BLEND_OP_MAX] = {
	D3D12_BLEND_OP_ADD,
	D3D12_BLEND_OP_SUBTRACT,
	D3D12_BLEND_OP_REV_SUBTRACT,
	D3D12_BLEND_OP_MIN,
	D3D12_BLEND_OP_MAX,
};
const D3D12_TEXTURE_ADDRESS_MODE RenderingDeviceD3D12::address_modes[RenderingDevice::SAMPLER_REPEAT_MODE_MAX] = {
	D3D12_TEXTURE_ADDRESS_MODE_WRAP,
	D3D12_TEXTURE_ADDRESS_MODE_MIRROR,
	D3D12_TEXTURE_ADDRESS_MODE_CLAMP,
	D3D12_TEXTURE_ADDRESS_MODE_BORDER,
	D3D12_TEXTURE_ADDRESS_MODE_MIRROR_ONCE,
};
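// Assuming RenderingDevice's SamplerBorderColor order: transparent black (float/int),
// opaque black (float/int), opaque white (float/int). D3D12 sampler descs only take FLOAT
// border colors, so each int variant reuses the float values.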
const FLOAT RenderingDeviceD3D12::sampler_border_colors[RenderingDevice::SAMPLER_BORDER_COLOR_MAX][4] = {
	{ 0, 0, 0, 0 },
	{ 0, 0, 0, 0 },
	{ 0, 0, 0, 1 },
	{ 0, 0, 0, 1 },
	{ 1, 1, 1, 1 },
	{ 1, 1, 1, 1 },
};
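// Indexed by RenderingDevice::TextureType: 1D, 2D, 3D, cube, 1D array, 2D array, cube array.
// Cube and array variants are regular 1D/2D resources in D3D12; the layers (and cube faces)
// go in DepthOrArraySize.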
const D3D12_RESOURCE_DIMENSION RenderingDeviceD3D12::d3d12_texture_dimension[RenderingDevice::TEXTURE_TYPE_MAX] = {
	D3D12_RESOURCE_DIMENSION_TEXTURE1D,
	D3D12_RESOURCE_DIMENSION_TEXTURE2D,
	D3D12_RESOURCE_DIMENSION_TEXTURE3D,
	D3D12_RESOURCE_DIMENSION_TEXTURE2D,
	D3D12_RESOURCE_DIMENSION_TEXTURE1D,
	D3D12_RESOURCE_DIMENSION_TEXTURE2D,
	D3D12_RESOURCE_DIMENSION_TEXTURE2D,
};
/******************/
/**** RESOURCE ****/
/******************/
static const D3D12_RESOURCE_STATES RESOURCE_READ_STATES =
		D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER |
		D3D12_RESOURCE_STATE_INDEX_BUFFER |
		D3D12_RESOURCE_STATE_DEPTH_READ |
		D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE |
		D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
		D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT |
		D3D12_RESOURCE_STATE_COPY_SOURCE |
		D3D12_RESOURCE_STATE_RESOLVE_SOURCE |
		D3D12_RESOURCE_STATE_SHADING_RATE_SOURCE;
static const D3D12_RESOURCE_STATES RESOURCE_WRITE_STATES =
		D3D12_RESOURCE_STATE_RENDER_TARGET |
		D3D12_RESOURCE_STATE_DEPTH_WRITE |
		D3D12_RESOURCE_STATE_COPY_DEST |
		D3D12_RESOURCE_STATE_RESOLVE_DEST;
static const D3D12_RESOURCE_STATES RESOURCE_RW_STATES =
		D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
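// These masks classify D3D12 states as read-only, write-only or read-write (UAV).
// ResourceState::extend() below uses them to validate merged state masks, since mixing
// read and write states, or combining multiple write states, is not legal in D3D12.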
void RenderingDeviceD3D12::ResourceState::extend(D3D12_RESOURCE_STATES p_states_to_add) {
	states |= p_states_to_add;
#ifdef DEV_ENABLED
	if ((states & RESOURCE_RW_STATES)) {
		if ((states & RESOURCE_READ_STATES)) {
			// Thanks to [[SRV_UAV_AMBIGUITY]], this is not necessarily an error.
		}
		if ((states & RESOURCE_WRITE_STATES)) {
			ERR_PRINT("Error in new state mask: has R/W state plus some W/O state(s).");
		}
	} else {
		if ((states & RESOURCE_WRITE_STATES)) {
			if ((states & RESOURCE_READ_STATES)) {
				ERR_PRINT("Error in new state mask: mixes R/O and W/O states.");
			} else {
				uint32_t num_w_states = 0;
				for (uint32_t i = 0; i < sizeof(D3D12_RESOURCE_STATES) * 8; i++) {
					num_w_states += ((states & RESOURCE_WRITE_STATES) & (1 << i)) ? 1 : 0;
				}
				if (num_w_states > 1) {
					ERR_PRINT("Error in new state mask: has multiple W/O states.");
				}
			}
		}
	}
#endif
}
void RenderingDeviceD3D12::_resource_transition_batch(Resource *p_resource, uint32_t p_subresource, uint32_t p_num_planes, D3D12_RESOURCE_STATES p_new_state, ID3D12Resource *p_resource_override) {
	DEV_ASSERT(p_subresource != UINT32_MAX); // We don't support an "all-resources" command here.
	DEV_ASSERT(p_new_state != D3D12_RESOURCE_STATE_COMMON); // No need to support this for now.
#ifdef DEBUG_COUNT_BARRIERS
	uint64_t start = OS::get_singleton()->get_ticks_usec();
#endif
	Resource::States *res_states = p_resource->get_states_ptr();
	D3D12_RESOURCE_STATES *curr_state = &res_states->subresource_states[p_subresource];
	ID3D12Resource *res_to_transition = p_resource_override ? p_resource_override : p_resource->resource;
	bool redundant_transition = ((*curr_state) & p_new_state) == p_new_state;
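	// Even a redundant transition may still require a UAV barrier: back-to-back UAV accesses
	// both happen in D3D12_RESOURCE_STATE_UNORDERED_ACCESS, but the second must wait for the
	// first one's writes, and one UAV barrier per batch is enough to guarantee that.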
	if (redundant_transition) {
		bool just_written = *curr_state == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
		bool needs_uav_barrier = just_written && res_states->last_batch_with_uav_barrier != res_barriers_batch;
		if (needs_uav_barrier) {
			if (res_barriers.size() < res_barriers_count + 1) {
				res_barriers.resize(res_barriers_count + 1);
			}
			res_barriers[res_barriers_count] = CD3DX12_RESOURCE_BARRIER::UAV(res_to_transition);
			res_barriers_count++;
			res_states->last_batch_with_uav_barrier = res_barriers_batch;
		}
	} else {
		uint64_t subres_mask_piece = ((uint64_t)1 << (p_subresource & 0b111111));
		uint8_t subres_qword = p_subresource >> 6;
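		// Subresources are tracked as a bitmask in 64-bit chunks: bit (p_subresource % 64)
		// of qword (p_subresource / 64).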
		if (res_barriers_requests.has(res_states)) {
			BarrierRequest &br = res_barriers_requests.get(res_states);
			DEV_ASSERT(br.dx_resource == res_to_transition);
			DEV_ASSERT(br.subres_mask_qwords == ALIGN(res_states->subresource_states.size(), 64) / 64);
			DEV_ASSERT(br.planes == p_num_planes);
			// First, find if the subresource already has a barrier scheduled.
			uint8_t curr_group_idx = 0;
			bool same_transition_scheduled = false;
			for (curr_group_idx = 0; curr_group_idx < br.groups_count; curr_group_idx++) {
				if (unlikely(br.groups[curr_group_idx].state.get_state_mask() == BarrierRequest::DELETED_GROUP)) {
					continue;
				}
				if ((br.groups[curr_group_idx].subres_mask[subres_qword] & subres_mask_piece)) {
					uint32_t state_mask = br.groups[curr_group_idx].state.get_state_mask();
					same_transition_scheduled = (state_mask & (uint32_t)p_new_state) == (uint32_t)p_new_state;
					break;
				}
			}
			if (!same_transition_scheduled) {
				bool subres_already_there = curr_group_idx != br.groups_count;
				ResourceState final_state;
				if (subres_already_there) {
					final_state = br.groups[curr_group_idx].state;
					final_state.extend(p_new_state);
					bool subres_alone = true;
					for (uint8_t i = 0; i < br.subres_mask_qwords; i++) {
						if (i == subres_qword) {
							if (br.groups[curr_group_idx].subres_mask[i] != subres_mask_piece) {
								subres_alone = false;
								break;
							}
						} else {
							if (br.groups[curr_group_idx].subres_mask[i] != 0) {
								subres_alone = false;
								break;
							}
						}
					}
					bool relocated = false;
					if (subres_alone) {
						// Subresource is there by itself.
						for (uint8_t i = 0; i < br.groups_count; i++) {
							if (unlikely(i == curr_group_idx)) {
								continue;
							}
							if (unlikely(br.groups[i].state.get_state_mask() == BarrierRequest::DELETED_GROUP)) {
								continue;
							}
							// There's another group with the final state; relocate to it.
							if (br.groups[i].state.get_state_mask() == final_state.get_state_mask()) {
								br.groups[curr_group_idx].subres_mask[subres_qword] &= ~subres_mask_piece;
								relocated = true;
								break;
							}
						}
						if (relocated) {
							// Let's delete the group where it used to be by itself.
							if (curr_group_idx == br.groups_count - 1) {
								br.groups_count--;
							} else {
								br.groups[curr_group_idx].state = ResourceState(BarrierRequest::DELETED_GROUP);
							}
						} else {
							// Its current group, where it's alone, can extend its state.
							br.groups[curr_group_idx].state = final_state;
						}
					} else {
						// Already there, but not by itself and the state mask is different, so it now belongs to a different group.
						br.groups[curr_group_idx].subres_mask[subres_qword] &= ~subres_mask_piece;
						subres_already_there = false;
					}
				} else {
					final_state = p_new_state;
				}
				if (!subres_already_there) {
					// See if it exactly matches the state of one of the other groups, so it can go there.
					for (uint8_t i = 0; i < br.groups_count; i++) {
						if (unlikely(i == curr_group_idx)) {
							continue;
						}
						if (unlikely(br.groups[i].state.get_state_mask() == BarrierRequest::DELETED_GROUP)) {
							continue;
						}
						if (br.groups[i].state.get_state_mask() == final_state.get_state_mask()) {
							br.groups[i].subres_mask[subres_qword] |= subres_mask_piece;
							subres_already_there = true;
							break;
						}
					}
					if (!subres_already_there) {
						// Add a new group to accommodate this subresource.
						uint8_t group_to_fill = 0;
						if (br.groups_count < BarrierRequest::MAX_GROUPS) {
							// There are still free groups.
							group_to_fill = br.groups_count;
							br.groups_count++;
						} else {
							// Let's try to take over a deleted one.
							for (; group_to_fill < br.groups_count; group_to_fill++) {
								if (unlikely(br.groups[group_to_fill].state.get_state_mask() == BarrierRequest::DELETED_GROUP)) {
									break;
								}
							}
							CRASH_COND(group_to_fill == br.groups_count);
						}
						br.groups[group_to_fill].state = final_state;
						for (uint8_t i = 0; i < br.subres_mask_qwords; i++) {
							if (unlikely(i == subres_qword)) {
								br.groups[group_to_fill].subres_mask[i] = subres_mask_piece;
							} else {
								br.groups[group_to_fill].subres_mask[i] = 0;
							}
						}
					}
				}
			}
		} else {
			BarrierRequest &br = res_barriers_requests[res_states];
			br.dx_resource = res_to_transition;
			br.subres_mask_qwords = ALIGN(p_resource->get_states_ptr()->subresource_states.size(), 64) / 64;
			CRASH_COND(p_resource->get_states_ptr()->subresource_states.size() > BarrierRequest::MAX_SUBRESOURCES);
			br.planes = p_num_planes;
			br.groups[0].state = p_new_state;
			for (uint8_t i = 0; i < br.subres_mask_qwords; i++) {
				if (unlikely(i == subres_qword)) {
					br.groups[0].subres_mask[i] = subres_mask_piece;
				} else {
					br.groups[0].subres_mask[i] = 0;
				}
			}
			br.groups_count = 1;
		}
	}
	if (p_new_state == D3D12_RESOURCE_STATE_UNORDERED_ACCESS) {
		res_states->last_batch_transitioned_to_uav = res_barriers_batch;
	}
#ifdef DEBUG_COUNT_BARRIERS
	frame_barriers_cpu_time += OS::get_singleton()->get_ticks_usec() - start;
#endif
}
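// _resource_transition_batch() only records desired transitions; this function converts the
// accumulated requests into as few ResourceBarrier() calls as possible, preferring a single
// D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES transition when every subresource shares the same
// source and destination states.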
void RenderingDeviceD3D12::_resource_transitions_flush(ID3D12GraphicsCommandList *p_command_list) {
#ifdef DEBUG_COUNT_BARRIERS
	uint64_t start = OS::get_singleton()->get_ticks_usec();
#endif
	for (const KeyValue<Resource::States *, BarrierRequest> &E : res_barriers_requests) {
		Resource::States *res_states = E.key;
		const BarrierRequest &br = E.value;
		uint32_t num_subresources = res_states->subresource_states.size();
		// When there are only a few subresources, the empirical finding is that it's better
		// not to attempt the single-barrier optimization at all.
		static const uint32_t SINGLE_BARRIER_ATTEMPT_MAX_NUM_SUBRESOURCES = 48;
		bool may_do_single_barrier = br.groups_count == 1 && num_subresources * br.planes >= SINGLE_BARRIER_ATTEMPT_MAX_NUM_SUBRESOURCES;
		if (may_do_single_barrier) {
			// A single group means we may be able to do a single all-subresources barrier.
			{
				// First requisite is that all subresources are involved.
				uint8_t subres_mask_full_qwords = num_subresources / 64;
				for (uint32_t i = 0; i < subres_mask_full_qwords; i++) {
					if (br.groups[0].subres_mask[i] != UINT64_MAX) {
						may_do_single_barrier = false;
						break;
					}
				}
				if (may_do_single_barrier) {
					if (num_subresources % 64) {
						DEV_ASSERT(br.subres_mask_qwords == subres_mask_full_qwords + 1);
						uint64_t mask_tail_qword = 0;
						for (uint8_t i = 0; i < num_subresources % 64; i++) {
							mask_tail_qword |= ((uint64_t)1 << i);
						}
						if ((br.groups[0].subres_mask[subres_mask_full_qwords] & mask_tail_qword) != mask_tail_qword) {
							may_do_single_barrier = false;
						}
					}
				}
			}
			if (may_do_single_barrier) {
				// Second requisite is that the source state is the same for all.
				for (uint32_t i = 1; i < num_subresources; i++) {
					if (res_states->subresource_states[i] != res_states->subresource_states[0]) {
						may_do_single_barrier = false;
						break;
					}
				}
				if (may_do_single_barrier) {
					// Hurray! We can do a single barrier (plus maybe a UAV one, too).
					bool just_written = res_states->subresource_states[0] == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
					bool needs_uav_barrier = just_written && res_states->last_batch_with_uav_barrier != res_barriers_batch;
					uint32_t needed_barriers = (needs_uav_barrier ? 1 : 0) + 1;
					if (res_barriers.size() < res_barriers_count + needed_barriers) {
						res_barriers.resize(res_barriers_count + needed_barriers);
					}
					if (needs_uav_barrier) {
						res_barriers[res_barriers_count] = CD3DX12_RESOURCE_BARRIER::UAV(br.dx_resource);
						res_barriers_count++;
						res_states->last_batch_with_uav_barrier = res_barriers_batch;
					}
					if (res_states->subresource_states[0] != br.groups[0].state.get_state_mask()) {
						res_barriers[res_barriers_count] = CD3DX12_RESOURCE_BARRIER::Transition(br.dx_resource, res_states->subresource_states[0], br.groups[0].state.get_state_mask(), D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES);
						res_barriers_count++;
					}
					for (uint32_t i = 0; i < num_subresources; i++) {
						res_states->subresource_states[i] = br.groups[0].state.get_state_mask();
					}
				}
			}
		}
		if (!may_do_single_barrier) {
			for (uint8_t i = 0; i < br.groups_count; i++) {
				const BarrierRequest::Group &g = E.value.groups[i];
				if (unlikely(g.state.get_state_mask() == BarrierRequest::DELETED_GROUP)) {
					continue;
				}
				uint32_t subresource = 0;
				do {
					uint64_t subres_mask_piece = ((uint64_t)1 << (subresource % 64));
					uint8_t subres_qword = subresource / 64;
					if (likely(g.subres_mask[subres_qword] == 0)) {
						subresource += 64;
						continue;
					}
					if (likely(!(g.subres_mask[subres_qword] & subres_mask_piece))) {
						subresource++;
						continue;
					}
					D3D12_RESOURCE_STATES *curr_state = &res_states->subresource_states[subresource];
					bool just_written = *curr_state == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
					bool needs_uav_barrier = just_written && res_states->last_batch_with_uav_barrier != res_barriers_batch;
					uint32_t needed_barriers = (needs_uav_barrier ? 1 : 0) + br.planes;
					if (res_barriers.size() < res_barriers_count + needed_barriers) {
						res_barriers.resize(res_barriers_count + needed_barriers);
					}
					if (needs_uav_barrier) {
						res_barriers[res_barriers_count] = CD3DX12_RESOURCE_BARRIER::UAV(br.dx_resource);
						res_barriers_count++;
						res_states->last_batch_with_uav_barrier = res_barriers_batch;
					}
					if (*curr_state != g.state.get_state_mask()) {
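						// D3D12 subresource indices are plane-major, so plane k of this
						// mip/array slice lives at subresource + k * num_subresources.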
						for (uint8_t k = 0; k < br.planes; k++) {
							res_barriers[res_barriers_count] = CD3DX12_RESOURCE_BARRIER::Transition(br.dx_resource, *curr_state, g.state.get_state_mask(), subresource + k * num_subresources);
							res_barriers_count++;
						}
					}
					*curr_state = g.state.get_state_mask();
					subresource++;
				} while (subresource < num_subresources);
			}
		}
	}
	if (res_barriers_count) {
		p_command_list->ResourceBarrier(res_barriers_count, res_barriers.ptr());
		res_barriers_requests.clear();
	}
#ifdef DEBUG_COUNT_BARRIERS
	frame_barriers_count += res_barriers_count;
	frame_barriers_batches_count++;
	frame_barriers_cpu_time += OS::get_singleton()->get_ticks_usec() - start;
#endif
	res_barriers_count = 0;
	res_barriers_batch++;
}
/***************************/
/**** BUFFER MANAGEMENT ****/
/***************************/
Error RenderingDeviceD3D12::_buffer_allocate(Buffer *p_buffer, uint32_t p_size, D3D12_RESOURCE_STATES p_usage, D3D12_HEAP_TYPE p_heap_type) {
	ERR_FAIL_COND_V(p_heap_type != D3D12_HEAP_TYPE_DEFAULT && p_heap_type != D3D12_HEAP_TYPE_READBACK, ERR_INVALID_PARAMETER);
	// The D3D12 debug layers complain at CBV creation time if the size is not a multiple of the
	// value the spec requires, but also if you pass a rounded-up size at that point, because the
	// view would then extend beyond the memory of the resource. Therefore, the only way out seems
	// to be creating the resource itself with a rounded-up size.
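	// (D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT is 256, so sizes get rounded up to the next multiple of 256 bytes.)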
	CD3DX12_RESOURCE_DESC resource_desc = CD3DX12_RESOURCE_DESC::Buffer(ALIGN(p_size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT));
	if ((p_usage & D3D12_RESOURCE_STATE_UNORDERED_ACCESS)) {
		resource_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
	}
	D3D12MA::ALLOCATION_DESC allocation_desc = {};
	allocation_desc.HeapType = p_heap_type;
#ifdef USE_SMALL_ALLOCS_POOL
	if (p_size <= SMALL_ALLOCATION_MAX_SIZE) {
		allocation_desc.CustomPool = _find_or_create_small_allocs_pool(p_heap_type, D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS);
	}
#endif
	HRESULT res = context->get_allocator()->CreateResource(
			&allocation_desc,
			&resource_desc,
			D3D12_RESOURCE_STATE_COPY_DEST,
			nullptr,
			&p_buffer->allocation,
			IID_PPV_ARGS(&p_buffer->resource));
	ERR_FAIL_COND_V_MSG(res, ERR_CANT_CREATE, "Can't create buffer of size: " + itos(p_size) + ", error " + vformat("0x%08x", res) + ".");
	p_buffer->size = p_size;
	p_buffer->usage = p_usage;
	p_buffer->own_states.subresource_states.push_back(D3D12_RESOURCE_STATE_COPY_DEST);
	buffer_memory += p_size;
	return OK;
}
Error RenderingDeviceD3D12::_buffer_free(Buffer *p_buffer) {
	ERR_FAIL_COND_V(p_buffer->size == 0, ERR_INVALID_PARAMETER);
	buffer_memory -= p_buffer->size;
	p_buffer->resource->Release();
	p_buffer->resource = nullptr;
	p_buffer->allocation->Release();
	p_buffer->allocation = nullptr;
	p_buffer->size = 0;
	return OK;
}
Error RenderingDeviceD3D12::_insert_staging_block() {
	StagingBufferBlock block;
	D3D12_RESOURCE_DESC resource_desc = {};
	resource_desc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
	resource_desc.Alignment = 0;
	resource_desc.Width = staging_buffer_block_size;
	resource_desc.Height = 1;
	resource_desc.DepthOrArraySize = 1;
	resource_desc.MipLevels = 1;
	resource_desc.Format = DXGI_FORMAT_UNKNOWN;
	resource_desc.SampleDesc.Count = 1;
	resource_desc.SampleDesc.Quality = 0;
	resource_desc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
	resource_desc.Flags = D3D12_RESOURCE_FLAG_NONE;
	D3D12MA::ALLOCATION_DESC allocation_desc = {};
	allocation_desc.HeapType = D3D12_HEAP_TYPE_UPLOAD;
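	// Upload-heap resources must be created in (and stay in) D3D12_RESOURCE_STATE_GENERIC_READ.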
	HRESULT res = context->get_allocator()->CreateResource(
			&allocation_desc,
			&resource_desc,
			D3D12_RESOURCE_STATE_GENERIC_READ,
			nullptr,
			&block.allocation,
			IID_PPV_ARGS(&block.resource));
	ERR_FAIL_COND_V_MSG(res, ERR_CANT_CREATE, "CreateResource failed with error " + vformat("0x%08x", res) + ".");
	staging_buffer_blocks.insert(staging_buffer_current, block);
	return OK;
}
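// Staging memory works as a ring of fixed-size upload blocks. Each block records the frame that
// last wrote to it: blocks from frames known to be finished get reused, new blocks get inserted
// while the total stays under staging_buffer_max_size, and as a last resort the device is
// flushed and the ring is reset.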
Error RenderingDeviceD3D12::_staging_buffer_allocate(uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, bool p_can_segment) {
	// Determine a block to use.
	r_alloc_size = p_amount;
	while (true) {
		r_alloc_offset = 0;
		// See if we can use current block.
		if (staging_buffer_blocks[staging_buffer_current].frame_used == frames_drawn) {
			// We used this block this frame, let's see if there is still room.
			uint32_t write_from = staging_buffer_blocks[staging_buffer_current].fill_amount;
			{
				uint32_t align_remainder = write_from % p_required_align;
				if (align_remainder != 0) {
					write_from += p_required_align - align_remainder;
				}
			}
			int32_t available_bytes = int32_t(staging_buffer_block_size) - int32_t(write_from);
			if ((int32_t)p_amount < available_bytes) {
				// All is good, we should be ok, all will fit.
				r_alloc_offset = write_from;
			} else if (p_can_segment && available_bytes >= (int32_t)p_required_align) {
				// Ok all won't fit but at least we can fit a chunkie.
				// All is good, update what needs to be written to.
				r_alloc_offset = write_from;
				r_alloc_size = available_bytes - (available_bytes % p_required_align);
			} else {
				// Can't fit it into this buffer.
				// Will need to try next buffer.
				staging_buffer_current = (staging_buffer_current + 1) % staging_buffer_blocks.size();
				// Before doing anything, though, let's check that we didn't manage to fill all blocks; possible in a single frame.
				if (staging_buffer_blocks[staging_buffer_current].frame_used == frames_drawn) {
					// Guess we did. OK, let's see if we can insert a new block.
					if ((uint64_t)staging_buffer_blocks.size() * staging_buffer_block_size < staging_buffer_max_size) {
						// We can, so we are safe.
						Error err = _insert_staging_block();
						if (err) {
							return err;
						}
						// Claim for this frame.
						staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
					} else {
						// OK, worst case scenario: all the staging buffers belong to this frame and this frame is not even done.
						// If this is the main thread, it means the user is likely loading a lot of resources at once.
						// Otherwise, the thread should just be blocked until the next frame (currently unimplemented).
						if (false) { // Separate thread from render.
							//block_until_next_frame()
							continue;
						} else {
							// Flush EVERYTHING, including setup commands. If not immediate, the draw commands also need to be flushed.
  1683. _flush(true);
  1684. // Clear the whole staging buffer.
  1685. for (int i = 0; i < staging_buffer_blocks.size(); i++) {
  1686. staging_buffer_blocks.write[i].frame_used = 0;
  1687. staging_buffer_blocks.write[i].fill_amount = 0;
  1688. }
  1689. // Claim current.
  1690. staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
  1691. }
  1692. }
  1693. } else {
  1694. // Not from current frame, so continue and try again.
  1695. continue;
  1696. }
  1697. }
  1698. } else if (staging_buffer_blocks[staging_buffer_current].frame_used <= frames_drawn - frame_count) {
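			// NOTE (explanatory): frames_drawn advances once per frame and frame_count
			// is the number of frames kept in flight, so a block last used at least
			// frame_count frames ago can no longer be referenced by any command list
			// the GPU may still be executing; it is safe to recycle.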
			// This is an old block, which was already processed, let's reuse.
			staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
			staging_buffer_blocks.write[staging_buffer_current].fill_amount = 0;
		} else {
			// This block may still be in use, let's not touch it unless we have to, so.. can we create a new one?
			if ((uint64_t)staging_buffer_blocks.size() * staging_buffer_block_size < staging_buffer_max_size) {
				// We are still allowed to create a new block, so let's do that and insert it for current pos.
				Error err = _insert_staging_block();
				if (err) {
					return err;
				}
				// Claim for this frame.
				staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
			} else {
				// Oops, we are out of room and we can't create more.
				// Let's flush older frames.
				// The logic here is that if a game is loading a lot of data from the main thread, it will need to be stalled anyway.
				// If loading from a separate thread, we can block that thread until next frame when more room is made (not currently implemented, though).
				if (false) {
					// Separate thread from render.
					//block_until_next_frame()
					continue; // And try again.
				} else {
					_flush(false);

					for (int i = 0; i < staging_buffer_blocks.size(); i++) {
						// Clear all blocks but the ones from this frame.
						int block_idx = (i + staging_buffer_current) % staging_buffer_blocks.size();
						if (staging_buffer_blocks[block_idx].frame_used == frames_drawn) {
							break; // Ok, we reached something from this frame, abort.
						}
						staging_buffer_blocks.write[block_idx].frame_used = 0;
						staging_buffer_blocks.write[block_idx].fill_amount = 0;
					}

					// Claim for current frame.
					staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
				}
			}
		}

		// All was good, break.
		break;
	}

	staging_buffer_used = true;

	return OK;
}
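
// Illustrative caller pattern (a sketch; _buffer_update() below is the actual
// in-tree consumer). The allocator may hand back less than requested when
// p_can_segment is true, so callers loop until everything has been submitted:
//
//	uint32_t offset = 0, size = 0;
//	while (remaining > 0) {
//		_staging_buffer_allocate(MIN(remaining, staging_buffer_block_size), align, offset, size);
//		// ...memcpy `size` bytes into the current block at `offset` and record a GPU copy...
//		remaining -= size;
//	}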

Error RenderingDeviceD3D12::_buffer_update(Buffer *p_buffer, size_t p_offset, const uint8_t *p_data, size_t p_data_size, bool p_use_draw_command_list, uint32_t p_required_align) {
	// Submitting may get chunked for various reasons, so convert this to a task.
	size_t to_submit = p_data_size;
	size_t submit_from = 0;

	while (to_submit > 0) {
		uint32_t block_write_offset;
		uint32_t block_write_amount;

		Error err = _staging_buffer_allocate(MIN(to_submit, staging_buffer_block_size), p_required_align, block_write_offset, block_write_amount);
		if (err) {
			return err;
		}

		// Map staging buffer.
		void *data_ptr = nullptr;
		{
			HRESULT res = staging_buffer_blocks[staging_buffer_current].resource->Map(0, &VOID_RANGE, &data_ptr);
			ERR_FAIL_COND_V_MSG(res, ERR_CANT_CREATE, "Map failed with error " + vformat("0x%08ux", res) + ".");
		}

		// Copy to staging buffer.
		memcpy(((uint8_t *)data_ptr) + block_write_offset, p_data + submit_from, block_write_amount);

		// Unmap.
		staging_buffer_blocks[staging_buffer_current].resource->Unmap(0, &VOID_RANGE);

		// Insert a command to copy this.
		ID3D12GraphicsCommandList *command_list = (p_use_draw_command_list ? frames[frame].draw_command_list : frames[frame].setup_command_list).Get();
		command_list->CopyBufferRegion(p_buffer->resource, submit_from + p_offset, staging_buffer_blocks[staging_buffer_current].resource, block_write_offset, block_write_amount);

		staging_buffer_blocks.write[staging_buffer_current].fill_amount = block_write_offset + block_write_amount;

		to_submit -= block_write_amount;
		submit_from += block_write_amount;
	}

	return OK;
}

/*****************/
/**** TEXTURE ****/
/*****************/

RID RenderingDeviceD3D12::texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<Vector<uint8_t>> &p_data) {
	_THREAD_SAFE_METHOD_

	D3D12_RESOURCE_DESC1 resource_desc = {}; // Using D3D12_RESOURCE_DESC1. Thanks to the layout, it's sliceable down to D3D12_RESOURCE_DESC if needed.
	resource_desc.Alignment = 0; // D3D12MA will override this to use a smaller alignment than the default if possible.

	Vector<DataFormat> allowed_formats;
	if (p_format.shareable_formats.size()) {
		ERR_FAIL_COND_V_MSG(p_format.shareable_formats.find(p_format.format) == -1, RID(),
				"If supplied a list of shareable formats, the current format must be present in the list.");
		ERR_FAIL_COND_V_MSG(p_view.format_override != DATA_FORMAT_MAX && p_format.shareable_formats.find(p_view.format_override) == -1, RID(),
				"If supplied a list of shareable formats, the current view format override must be present in the list.");
		allowed_formats = p_format.shareable_formats;
	} else {
		allowed_formats.push_back(p_format.format);
		if (p_view.format_override != DATA_FORMAT_MAX) {
			allowed_formats.push_back(p_view.format_override);
		}
	}

	ERR_FAIL_INDEX_V(p_format.texture_type, TEXTURE_TYPE_MAX, RID());
	resource_desc.Dimension = d3d12_texture_dimension[p_format.texture_type];

	ERR_FAIL_COND_V_MSG(p_format.width < 1, RID(), "Width must be equal or greater than 1 for all textures.");
	resource_desc.Format = d3d12_formats[p_format.format].family;
	resource_desc.Width = p_format.width;
	if (resource_desc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D || resource_desc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE2D) {
		ERR_FAIL_COND_V_MSG(p_format.height < 1, RID(), "Height must be equal or greater than 1 for 2D and 3D textures.");
		resource_desc.Height = p_format.height;
	} else {
		resource_desc.Height = 1;
	}
	if (resource_desc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D) {
		ERR_FAIL_COND_V_MSG(p_format.depth < 1, RID(), "Depth must be equal or greater than 1 for 3D textures.");
		resource_desc.DepthOrArraySize = p_format.depth;
	} else {
		resource_desc.DepthOrArraySize = 1;
	}

	ERR_FAIL_COND_V(p_format.mipmaps < 1, RID());
	resource_desc.MipLevels = p_format.mipmaps;

	if (p_format.texture_type == TEXTURE_TYPE_1D_ARRAY || p_format.texture_type == TEXTURE_TYPE_2D_ARRAY || p_format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || p_format.texture_type == TEXTURE_TYPE_CUBE) {
		ERR_FAIL_COND_V_MSG(p_format.array_layers < 1, RID(),
				"Amount of layers must be equal or greater than 1 for arrays and cubemaps.");
		ERR_FAIL_COND_V_MSG((p_format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || p_format.texture_type == TEXTURE_TYPE_CUBE) && (p_format.array_layers % 6) != 0, RID(),
				"Cubemap and cubemap array textures must provide a layer number that is a multiple of 6.");
		resource_desc.DepthOrArraySize *= p_format.array_layers;
	}

	ERR_FAIL_INDEX_V(p_format.samples, TEXTURE_SAMPLES_MAX, RID());

	// Usage.
	if ((p_format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
		resource_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
	} else {
		if ((p_format.usage_bits & TEXTURE_USAGE_CAN_COPY_TO_BIT)) {
			resource_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS; // For clearing via UAV.
		}
	}
	if (p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
		resource_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
	}
	if (p_format.usage_bits & TEXTURE_USAGE_STORAGE_BIT) {
		resource_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
	}

	resource_desc.SampleDesc = {};
	DXGI_FORMAT format_to_test = (resource_desc.Flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ? d3d12_formats[p_format.format].dsv_format : d3d12_formats[p_format.format].general_format;
	if (!(resource_desc.Flags & D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS)) {
		resource_desc.SampleDesc.Count = MIN(
				_find_max_common_supported_sample_count(&format_to_test, 1),
				rasterization_sample_count[p_format.samples]);
	} else {
		// No MSAA in D3D12 if storage. May have become possible recently where supported, though.
		resource_desc.SampleDesc.Count = 1;
	}
	resource_desc.SampleDesc.Quality = resource_desc.SampleDesc.Count == 1 ? 0 : DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN;
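	// NOTE (explanatory): for Count > 1, DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN
	// requests the standard sample pattern, which avoids having to query and pick a
	// device-specific quality level; single-sample resources must use quality 0.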

	uint32_t required_mipmaps = get_image_required_mipmaps(p_format.width, p_format.height, p_format.depth);
	ERR_FAIL_COND_V_MSG(required_mipmaps < p_format.mipmaps, RID(),
			"Too many mipmaps requested for texture format and dimensions (" + itos(p_format.mipmaps) + "), maximum allowed: (" + itos(required_mipmaps) + ").");

	if (p_data.size()) {
		ERR_FAIL_COND_V_MSG(!(p_format.usage_bits & TEXTURE_USAGE_CAN_UPDATE_BIT), RID(),
				"Texture needs the TEXTURE_USAGE_CAN_UPDATE_BIT usage flag in order to be updated at initialization or later.");

		int expected_images = p_format.array_layers;
		ERR_FAIL_COND_V_MSG(p_data.size() != expected_images, RID(),
				"Default supplied data for image format is of invalid length (" + itos(p_data.size()) + "), should be (" + itos(expected_images) + ").");

		for (uint32_t i = 0; i < p_format.array_layers; i++) {
			uint32_t required_size = get_image_format_required_size(p_format.format, p_format.width, p_format.height, p_format.depth, p_format.mipmaps);
			ERR_FAIL_COND_V_MSG((uint32_t)p_data[i].size() != required_size, RID(),
					"Data for slice index " + itos(i) + " (mapped to layer " + itos(i) + ") differs in size (supplied: " + itos(p_data[i].size()) + ") from what is required by the format (" + itos(required_size) + ").");
		}
	}

	// Validate that this image is supported for the intended use.
	// If views of different families are wanted, special setup is needed for proper sharing among them.
	// Two options here:
	// 1. If ID3D12Device10 is present and the driver reports relaxed casting is supported, leverage its new extended resource creation API (via D3D12MA).
	// 2. Otherwise, fall back to an approach based on abusing aliasing, hoping for the best.
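	// NOTE (explanatory): "relaxed casting" here refers to the enhanced-barriers
	// resource creation path (CreateResource3 below), which takes an explicit list
	// of castable formats and an initial D3D12_BARRIER_LAYOUT instead of a legacy
	// initial resource state.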
	bool cross_family_sharing = false;
	ComPtr<ID3D12Device10> device10;
	device.As(&device10);
	bool relaxed_casting_available = device10.Get() && context->get_format_capabilities().relaxed_casting_supported;
	LocalVector<DXGI_FORMAT> castable_formats;

	HashMap<DataFormat, D3D12_RESOURCE_FLAGS> aliases_forbidden_flags;
	D3D12_RESOURCE_FLAGS accum_forbidden_flags = {};
	for (DataFormat curr_format : allowed_formats) {
		// For now, we'll validate usages only for the main format, to match what the Vulkan RD does.
		// TODO: The aliasing trick assumes the main format is the only writable one. We should either validate for that or handle a different order gracefully.
		bool checking_main_format = curr_format == p_format.format;

		String format_text = "'" + String(named_formats[p_format.format]) + "'";

		ERR_FAIL_COND_V_MSG(d3d12_formats[curr_format].family == DXGI_FORMAT_UNKNOWN, RID(), "Format " + format_text + " is not supported.");

		if (d3d12_formats[curr_format].family != d3d12_formats[allowed_formats[0]].family) {
			cross_family_sharing = true;
		}
		if (relaxed_casting_available) {
			castable_formats.push_back(d3d12_formats[curr_format].general_format);
		}

		D3D12_FEATURE_DATA_FORMAT_SUPPORT srv_rtv_support = {};
		srv_rtv_support.Format = d3d12_formats[curr_format].general_format;
		HRESULT res = device->CheckFeatureSupport(D3D12_FEATURE_FORMAT_SUPPORT, &srv_rtv_support, sizeof(srv_rtv_support));
		ERR_FAIL_COND_V_MSG(res, RID(), "CheckFeatureSupport failed with error " + vformat("0x%08ux", res) + ".");

		D3D12_FEATURE_DATA_FORMAT_SUPPORT uav_support = srv_rtv_support; // Fine for now.

		D3D12_FEATURE_DATA_FORMAT_SUPPORT dsv_support = {};
		dsv_support.Format = d3d12_formats[curr_format].dsv_format;
		res = device->CheckFeatureSupport(D3D12_FEATURE_FORMAT_SUPPORT, &dsv_support, sizeof(dsv_support));
		ERR_FAIL_COND_V_MSG(res, RID(), "CheckFeatureSupport failed with error " + vformat("0x%08ux", res) + ".");

		if (checking_main_format) {
			if ((p_format.usage_bits & (TEXTURE_USAGE_SAMPLING_BIT | TEXTURE_USAGE_COLOR_ATTACHMENT_BIT))) {
				if (p_format.mipmaps && !(srv_rtv_support.Support1 & D3D12_FORMAT_SUPPORT1_MIP)) {
					ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support mipmaps.");
				}
			}

			// Per https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_format_support1,
			// as long as the resource can be used as a texture, Sample() will work with point filter at least.
			// However, we've empirically found that checking for at least D3D12_FORMAT_SUPPORT1_SHADER_LOAD is needed.
			// That's almost good for integer formats. The problem is that theoretically there may be
			// float formats that support LOAD but not SAMPLE fully, so this check will not detect
			// such a flaw in the format. Linearly interpolated sampling would just not work on them.
			// [[IMPLICIT_SAMPLE]]
			if ((p_format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT) && !(srv_rtv_support.Support1 & (D3D12_FORMAT_SUPPORT1_SHADER_LOAD | D3D12_FORMAT_SUPPORT1_SHADER_SAMPLE))) {
				ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as a sampled texture.");
			}
			if ((p_format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT) && d3d12_formats[curr_format].general_format == DXGI_FORMAT_UNKNOWN) {
				ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as a sampled texture.");
			}
			if ((p_format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) && !(srv_rtv_support.Support1 & D3D12_FORMAT_SUPPORT1_RENDER_TARGET)) {
				ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as color attachment.");
			}
		}
		if ((p_format.usage_bits & TEXTURE_USAGE_CAN_COPY_TO_BIT)) {
			// We need to check if the texture can be cleared; if it's not flagged for color attachment, we have to see if it's possible via a UAV.
			if (!(p_format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
				if (!(uav_support.Support1 & D3D12_FORMAT_SUPPORT1_TYPED_UNORDERED_ACCESS_VIEW)) {
					if (checking_main_format) {
						ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as a copy-to texture, because clearing it is not supported.");
					} else {
						aliases_forbidden_flags[curr_format] |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
						accum_forbidden_flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
					}
				}
			}
		}
		if ((p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !(dsv_support.Support1 & D3D12_FORMAT_SUPPORT1_DEPTH_STENCIL)) {
			if (checking_main_format) {
				printf("dxgiformat: %x\n", resource_desc.Format);
				ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as depth-stencil attachment.");
			} else {
				aliases_forbidden_flags[curr_format] |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
				accum_forbidden_flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
			}
		}
		if ((p_format.usage_bits & TEXTURE_USAGE_STORAGE_BIT)) {
			if (!(uav_support.Support1 & D3D12_FORMAT_SUPPORT1_TYPED_UNORDERED_ACCESS_VIEW)) { // Maybe check LOAD/STORE, too?
				if (checking_main_format) {
					ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as storage image.");
				} else {
					aliases_forbidden_flags[curr_format] |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
					accum_forbidden_flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
				}
			}
		}
		if (checking_main_format) {
			if ((p_format.usage_bits & TEXTURE_USAGE_STORAGE_ATOMIC_BIT) && !(uav_support.Support2 & D3D12_FORMAT_SUPPORT2_UAV_ATOMIC_ADD)) { // Check a basic atomic at least.
				ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as atomic storage image.");
			}
			if ((p_format.usage_bits & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) && d3d12_formats[curr_format].general_format != DXGI_FORMAT_R8_UINT) {
				ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as VRS attachment.");
			}
		}
	}

	if (cross_family_sharing && !relaxed_casting_available) {
		// At least guarantee the same layout among aliases.
		resource_desc.Layout = D3D12_TEXTURE_LAYOUT_64KB_UNDEFINED_SWIZZLE;

		// Per https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_texture_layout.
		if (p_format.texture_type == TEXTURE_TYPE_1D) {
			ERR_FAIL_V_MSG(RID(), "This texture's views require aliasing, but that's not supported for a 1D texture.");
		}
		if (p_format.samples != TEXTURE_SAMPLES_1) {
			ERR_FAIL_V_MSG(RID(), "This texture's views require aliasing, but that's not supported for a multi-sample texture.");
		}
		if ((p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
			ERR_FAIL_V_MSG(RID(), "This texture's views require aliasing, but that's not supported for a depth-stencil texture.");
		}
		if (d3d12_formats[p_format.format].family == DXGI_FORMAT_R32G32B32_TYPELESS) {
			ERR_FAIL_V_MSG(RID(), "This texture's views require aliasing, but that's not supported for an R32G32B32 texture.");
		}
	} else {
		resource_desc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
	}

	if ((p_format.usage_bits & TEXTURE_USAGE_VRS_ATTACHMENT_BIT)) {
		// For VRS images we can't use the typeless format.
		resource_desc.Format = DXGI_FORMAT_R8_UINT;
	}

	// Some view validation.

	if (p_view.format_override != DATA_FORMAT_MAX) {
		ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
	}
	ERR_FAIL_INDEX_V(p_view.swizzle_r, TEXTURE_SWIZZLE_MAX, RID());
	ERR_FAIL_INDEX_V(p_view.swizzle_g, TEXTURE_SWIZZLE_MAX, RID());
	ERR_FAIL_INDEX_V(p_view.swizzle_b, TEXTURE_SWIZZLE_MAX, RID());
	ERR_FAIL_INDEX_V(p_view.swizzle_a, TEXTURE_SWIZZLE_MAX, RID());

	// Allocate memory.

	D3D12MA::ALLOCATION_DESC allocation_desc = {};
	if (cross_family_sharing && !relaxed_casting_available) {
		allocation_desc.Flags = D3D12MA::ALLOCATION_FLAG_CAN_ALIAS;
	}
	allocation_desc.HeapType = (p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT) ? D3D12_HEAP_TYPE_READBACK : D3D12_HEAP_TYPE_DEFAULT;
	if ((resource_desc.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL))) {
		if (!(accum_forbidden_flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL))) {
			allocation_desc.ExtraHeapFlags = D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
		}
	} else {
		allocation_desc.ExtraHeapFlags = D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
	}
	if ((resource_desc.Flags & D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS)) {
		if (!(accum_forbidden_flags & D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS)) {
			allocation_desc.ExtraHeapFlags |= D3D12_HEAP_FLAG_ALLOW_SHADER_ATOMICS;
		}
	}

#ifdef USE_SMALL_ALLOCS_POOL
	uint32_t width, height;
	uint32_t image_size = get_image_format_required_size(p_format.format, p_format.width, p_format.height, p_format.depth, p_format.mipmaps, &width, &height);
	if (image_size <= SMALL_ALLOCATION_MAX_SIZE) {
		allocation_desc.CustomPool = _find_or_create_small_allocs_pool(allocation_desc.HeapType, allocation_desc.ExtraHeapFlags);
	}
#endif

	Texture texture;

	D3D12_RESOURCE_STATES initial_state = p_data.size() || (p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT) ? D3D12_RESOURCE_STATE_COPY_DEST : D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE;
	FLOAT black[4] = {};
	D3D12_CLEAR_VALUE clear_value = CD3DX12_CLEAR_VALUE(d3d12_formats[p_format.format].general_format, black);
	D3D12_CLEAR_VALUE *clear_value_ptr = (resource_desc.Flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET) ? &clear_value : nullptr;

	HRESULT res = {};
	if (cross_family_sharing && relaxed_casting_available) {
		res = context->get_allocator()->CreateResource3(
				&allocation_desc,
				&resource_desc,
				D3D12_BARRIER_LAYOUT_COMMON, // Needed for barrier interop.
				clear_value_ptr,
				castable_formats.size(),
				castable_formats.ptr(),
				&texture.allocation,
				IID_PPV_ARGS(&texture.owner_resource));
		initial_state = D3D12_RESOURCE_STATE_COMMON; // Needed for barrier interop.
	} else {
		res = context->get_allocator()->CreateResource(
				&allocation_desc,
				(D3D12_RESOURCE_DESC *)&resource_desc,
				initial_state,
				clear_value_ptr,
				&texture.allocation,
				IID_PPV_ARGS(&texture.owner_resource));
	}
	ERR_FAIL_COND_V_MSG(res, RID(), "CreateResource failed with error " + vformat("0x%08ux", res) + ".");

	texture.resource = texture.owner_resource;
	image_memory += texture.allocation->GetSize();
	texture.type = p_format.texture_type;
	texture.format = p_format.format;
	texture.planes = get_image_format_plane_count(p_format.format);
	texture.width = p_format.width;
	texture.height = p_format.height;
	texture.depth = p_format.depth;
	texture.layers = p_format.array_layers;
	texture.mipmaps = p_format.mipmaps;
	texture.owner_layers = texture.layers;
	texture.owner_mipmaps = texture.mipmaps;
	texture.base_mipmap = 0;
	texture.base_layer = 0;
	texture.is_resolve_buffer = p_format.is_resolve_buffer;
	texture.usage_flags = p_format.usage_bits;
	texture.samples = p_format.samples;
	texture.allowed_shared_formats = p_format.shareable_formats;
	texture.own_states.subresource_states.resize(texture.mipmaps * texture.layers);
	for (uint32_t i = 0; i < texture.own_states.subresource_states.size(); i++) {
		texture.own_states.subresource_states[i] = initial_state;
	}
	texture.bound = false;

	// Describe view.

	static const D3D12_SRV_DIMENSION view_dimensions[TEXTURE_TYPE_MAX] = {
		D3D12_SRV_DIMENSION_TEXTURE1D,
		D3D12_SRV_DIMENSION_TEXTURE2D,
		D3D12_SRV_DIMENSION_TEXTURE3D,
		D3D12_SRV_DIMENSION_TEXTURECUBE,
		D3D12_SRV_DIMENSION_TEXTURE1DARRAY,
		D3D12_SRV_DIMENSION_TEXTURE2DARRAY,
		D3D12_SRV_DIMENSION_TEXTURECUBEARRAY,
	};
	static const D3D12_SRV_DIMENSION view_dimensions_ms[TEXTURE_TYPE_MAX] = {
		D3D12_SRV_DIMENSION_UNKNOWN,
		D3D12_SRV_DIMENSION_TEXTURE2DMS,
		D3D12_SRV_DIMENSION_UNKNOWN,
		D3D12_SRV_DIMENSION_UNKNOWN,
		D3D12_SRV_DIMENSION_UNKNOWN,
		D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY,
		D3D12_SRV_DIMENSION_UNKNOWN,
	};
	static const D3D12_UAV_DIMENSION uav_dimensions[TEXTURE_TYPE_MAX] = {
		D3D12_UAV_DIMENSION_TEXTURE1D,
		D3D12_UAV_DIMENSION_TEXTURE2D,
		D3D12_UAV_DIMENSION_TEXTURE3D,
		D3D12_UAV_DIMENSION_TEXTURE2DARRAY,
		D3D12_UAV_DIMENSION_TEXTURE1DARRAY,
		D3D12_UAV_DIMENSION_TEXTURE2DARRAY,
		D3D12_UAV_DIMENSION_TEXTURE2DARRAY,
	};

	texture.srv_desc.ViewDimension = p_format.samples == TEXTURE_SAMPLES_1 ? view_dimensions[p_format.texture_type] : view_dimensions_ms[p_format.texture_type];
	texture.owner_uav_desc.Format = d3d12_formats[p_format.format].general_format;
	texture.owner_uav_desc.ViewDimension = p_format.samples == TEXTURE_SAMPLES_1 ? uav_dimensions[p_format.texture_type] : D3D12_UAV_DIMENSION_UNKNOWN;

	UINT base_swizzle = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
	if (p_view.format_override == DATA_FORMAT_MAX) {
		texture.srv_desc.Format = d3d12_formats[p_format.format].general_format;
		base_swizzle = d3d12_formats[p_format.format].swizzle;
	} else {
		texture.srv_desc.Format = d3d12_formats[p_view.format_override].general_format;
		base_swizzle = d3d12_formats[p_view.format_override].swizzle;
	}

	// Apply requested swizzle (component mapping) on top of the one from the format database.
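	// Illustrative example (explanatory, not driver logic): if the database swizzle
	// for the chosen format decodes to (R, 0, 0, 1) and the view requests
	// swizzle_a = TEXTURE_SWIZZLE_R, the mapping encoded below reads alpha from the
	// red memory channel while identity green/blue stay forced to zero.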
	D3D12_SHADER_COMPONENT_MAPPING component_swizzles[TEXTURE_SWIZZLE_MAX] = {
		D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0, // Unused.
		D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
		D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1,
		// These will be D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_*.
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(0, base_swizzle),
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(1, base_swizzle),
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(2, base_swizzle),
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(3, base_swizzle),
	};
	texture.srv_desc.Shader4ComponentMapping = D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
			p_view.swizzle_r == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_R] : component_swizzles[p_view.swizzle_r],
			p_view.swizzle_g == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_G] : component_swizzles[p_view.swizzle_g],
			p_view.swizzle_b == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_B] : component_swizzles[p_view.swizzle_b],
			p_view.swizzle_a == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_A] : component_swizzles[p_view.swizzle_a]);

	switch (texture.srv_desc.ViewDimension) {
		case D3D12_SRV_DIMENSION_TEXTURE1D: {
			texture.srv_desc.Texture1D.MipLevels = p_format.mipmaps;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE1DARRAY: {
			texture.srv_desc.Texture1DArray.MipLevels = p_format.mipmaps;
			texture.srv_desc.Texture1DArray.ArraySize = p_format.array_layers;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2D: {
			texture.srv_desc.Texture2D.MipLevels = p_format.mipmaps;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2DMS: {
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2DARRAY: {
			texture.srv_desc.Texture2DArray.MipLevels = p_format.mipmaps;
			texture.srv_desc.Texture2DArray.ArraySize = p_format.array_layers;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY: {
			texture.srv_desc.Texture2DMSArray.ArraySize = p_format.array_layers;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURECUBEARRAY: {
			texture.srv_desc.TextureCubeArray.MipLevels = p_format.mipmaps;
			texture.srv_desc.TextureCubeArray.NumCubes = p_format.array_layers / 6;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE3D: {
			texture.srv_desc.Texture3D.MipLevels = p_format.mipmaps;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURECUBE: {
			texture.srv_desc.TextureCube.MipLevels = p_format.mipmaps;
		} break;
	}

	switch (texture.owner_uav_desc.ViewDimension) {
		case D3D12_UAV_DIMENSION_TEXTURE1DARRAY: {
			texture.owner_uav_desc.Texture1DArray.ArraySize = p_format.array_layers;
		} break;
		case D3D12_UAV_DIMENSION_TEXTURE2DARRAY: {
			// Either for an actual 2D texture array, cubemap or cubemap array.
			texture.owner_uav_desc.Texture2DArray.ArraySize = p_format.array_layers;
		} break;
		case D3D12_UAV_DIMENSION_TEXTURE3D: {
			texture.owner_uav_desc.Texture3D.WSize = p_format.depth;
		} break;
		default: {
		}
	}

	texture.uav_desc = texture.owner_uav_desc;
	if (p_view.format_override != DATA_FORMAT_MAX) {
		texture.uav_desc.Format = d3d12_formats[p_view.format_override].general_format;
	}

	if (cross_family_sharing && !relaxed_casting_available) {
		D3D12_RESOURCE_DESC resource_desc_backup = *(D3D12_RESOURCE_DESC *)&resource_desc;
		D3D12MA::ALLOCATION_DESC allocation_desc_backup = allocation_desc;

		texture.aliases.resize(texture.allowed_shared_formats.size());
		for (int i = 0; i < texture.allowed_shared_formats.size(); i++) {
			DataFormat curr_format = texture.allowed_shared_formats[i];

			DXGI_FORMAT format_family = d3d12_formats[curr_format].family;
			if (format_family == d3d12_formats[p_format.format].family) {
				texture.aliases[i] = nullptr;
				continue;
			}

			D3D12_RESOURCE_DESC alias_resource_desc = *(D3D12_RESOURCE_DESC *)&resource_desc;
			alias_resource_desc.Format = format_family;
			if (aliases_forbidden_flags.has(curr_format)) {
				alias_resource_desc.Flags &= ~aliases_forbidden_flags[curr_format];
			}
			clear_value.Format = format_family;
			res = context->get_allocator()->CreateAliasingResource(
					texture.allocation,
					0,
					&alias_resource_desc,
					initial_state,
					(alias_resource_desc.Flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET) ? clear_value_ptr : nullptr,
					IID_PPV_ARGS(&texture.aliases[i]));
			ERR_FAIL_COND_V_MSG(res, RID(), "CreateAliasingResource failed with error " + vformat("0x%08ux", res) + ".");

			if (curr_format == p_view.format_override) {
				texture.resource = texture.aliases[i];
			}
		}
	}

	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif

	if (p_data.size()) {
		Texture *texture_ptr = texture_owner.get_or_null(id);
		ERR_FAIL_NULL_V(texture_ptr, RID());

		ID3D12GraphicsCommandList *command_list = frames[frame].setup_command_list.Get();
		for (uint32_t i = 0; i < p_format.array_layers; i++) {
			_texture_update(texture_ptr, i, p_data[i], RD::BARRIER_MASK_ALL_BARRIERS, command_list);
		}
	}
	return id;
}

RID RenderingDeviceD3D12::texture_create_shared(const TextureView &p_view, RID p_with_texture) {
	_THREAD_SAFE_METHOD_

	Texture *src_texture = texture_owner.get_or_null(p_with_texture);
	ERR_FAIL_NULL_V(src_texture, RID());

	if (src_texture->owner.is_valid()) { // Ahh, this is a share.
		p_with_texture = src_texture->owner;
		src_texture = texture_owner.get_or_null(src_texture->owner);
		ERR_FAIL_NULL_V(src_texture, RID()); // This is a bug.
	}

	// Describe view.

	Texture texture = *src_texture;
	texture.own_states.subresource_states.clear();
	texture.states = &src_texture->own_states;

	UINT base_swizzle = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
	if (p_view.format_override == DATA_FORMAT_MAX || p_view.format_override == texture.format) {
		texture.srv_desc.Format = d3d12_formats[texture.format].general_format;
		base_swizzle = d3d12_formats[texture.format].swizzle;
		texture.uav_desc.Format = d3d12_formats[texture.format].general_format;
	} else {
		ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
		ERR_FAIL_COND_V_MSG(texture.allowed_shared_formats.find(p_view.format_override) == -1, RID(),
				"Format override is not in the list of allowed shareable formats for the original texture.");
		texture.srv_desc.Format = d3d12_formats[p_view.format_override].general_format;
		base_swizzle = d3d12_formats[p_view.format_override].swizzle;
		texture.uav_desc.Format = d3d12_formats[p_view.format_override].general_format;
		if (texture.aliases.size()) {
			for (int i = 0; i < texture.allowed_shared_formats.size(); i++) {
				if (texture.allowed_shared_formats[i] == p_view.format_override) {
					texture.resource = texture.aliases[i];
					break;
				}
			}
		}
	}

	// Apply requested swizzle (component mapping) on top of the one from the format database.
	D3D12_SHADER_COMPONENT_MAPPING component_swizzles[TEXTURE_SWIZZLE_MAX] = {
		D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0, // Unused.
		D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
		D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1,
		// These will be D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_*.
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(0, base_swizzle),
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(1, base_swizzle),
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(2, base_swizzle),
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(3, base_swizzle),
	};
	texture.srv_desc.Shader4ComponentMapping = D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
			p_view.swizzle_r == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_R] : component_swizzles[p_view.swizzle_r],
			p_view.swizzle_g == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_G] : component_swizzles[p_view.swizzle_g],
			p_view.swizzle_b == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_B] : component_swizzles[p_view.swizzle_b],
			p_view.swizzle_a == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_A] : component_swizzles[p_view.swizzle_a]);

	texture.owner = p_with_texture;
	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	_add_dependency(id, p_with_texture);

	return id;
}

RID RenderingDeviceD3D12::texture_create_from_extension(TextureType p_type, DataFormat p_format, TextureSamples p_samples, BitField<RenderingDevice::TextureUsageBits> p_flags, uint64_t p_image, uint64_t p_width, uint64_t p_height, uint64_t p_depth, uint64_t p_layers) {
	ERR_FAIL_V_MSG(RID(), "Unimplemented!");
}

RID RenderingDeviceD3D12::texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps, TextureSliceType p_slice_type, uint32_t p_layers) {
	_THREAD_SAFE_METHOD_

	Texture *src_texture = texture_owner.get_or_null(p_with_texture);
	ERR_FAIL_NULL_V(src_texture, RID());

	if (src_texture->owner.is_valid()) { // Ahh, this is a share.
		p_with_texture = src_texture->owner;
		src_texture = texture_owner.get_or_null(src_texture->owner);
		ERR_FAIL_NULL_V(src_texture, RID()); // This is a bug.
	}

	ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_CUBEMAP && (src_texture->type != TEXTURE_TYPE_CUBE && src_texture->type != TEXTURE_TYPE_CUBE_ARRAY), RID(),
			"Can only create a cubemap slice from a cubemap or cubemap array mipmap.");
	ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_3D && src_texture->type != TEXTURE_TYPE_3D, RID(),
			"Can only create a 3D slice from a 3D texture.");
	ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_2D_ARRAY && (src_texture->type != TEXTURE_TYPE_2D_ARRAY), RID(),
			"Can only create an array slice from a 2D array mipmap.");

	// Describe view.

	ERR_FAIL_UNSIGNED_INDEX_V(p_mipmap, src_texture->mipmaps, RID());
	ERR_FAIL_COND_V(p_mipmap + p_mipmaps > src_texture->mipmaps, RID());
	ERR_FAIL_UNSIGNED_INDEX_V(p_layer, src_texture->layers, RID());

	int slice_layers = 1;
	if (p_layers != 0) {
		ERR_FAIL_COND_V_MSG(p_layers > 1 && p_slice_type != TEXTURE_SLICE_2D_ARRAY, RID(), "Layer slicing is only supported for 2D arrays.");
		ERR_FAIL_COND_V_MSG(p_layer + p_layers > src_texture->layers, RID(), "Layer slice is out of bounds.");
		slice_layers = p_layers;
	} else if (p_slice_type == TEXTURE_SLICE_2D_ARRAY) {
		ERR_FAIL_COND_V_MSG(p_layer != 0, RID(), "Layer must be 0 when obtaining a 2D array mipmap slice.");
		slice_layers = src_texture->layers;
	} else if (p_slice_type == TEXTURE_SLICE_CUBEMAP) {
		slice_layers = 6;
	}

	Texture texture = *src_texture;
	get_image_format_required_size(texture.format, texture.width, texture.height, texture.depth, p_mipmap + 1, &texture.width, &texture.height);
	texture.mipmaps = p_mipmaps;
	texture.layers = slice_layers;
	texture.base_mipmap = p_mipmap;
	texture.base_layer = p_layer;

	texture.own_states.subresource_states.clear();
	texture.states = &src_texture->own_states;

	UINT base_swizzle = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
	if (p_view.format_override == DATA_FORMAT_MAX || p_view.format_override == texture.format) {
		texture.srv_desc.Format = d3d12_formats[texture.format].general_format;
		base_swizzle = d3d12_formats[texture.format].swizzle;
		texture.uav_desc.Format = d3d12_formats[texture.format].general_format;
	} else {
		ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
		ERR_FAIL_COND_V_MSG(texture.allowed_shared_formats.find(p_view.format_override) == -1, RID(),
				"Format override is not in the list of allowed shareable formats for the original texture.");
		texture.srv_desc.Format = d3d12_formats[p_view.format_override].general_format;
		base_swizzle = d3d12_formats[p_view.format_override].swizzle;
		texture.uav_desc.Format = d3d12_formats[p_view.format_override].general_format;
		if (texture.aliases.size()) {
			for (int i = 0; i < texture.allowed_shared_formats.size(); i++) {
				if (texture.allowed_shared_formats[i] == p_view.format_override) {
					texture.resource = texture.aliases[i];
					break;
				}
			}
		}
	}

	// Apply requested swizzle (component mapping) on top of the one from the format database.
	D3D12_SHADER_COMPONENT_MAPPING component_swizzles[TEXTURE_SWIZZLE_MAX] = {
		D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0, // Unused.
		D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
		D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1,
		// These will be D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_*.
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(0, base_swizzle),
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(1, base_swizzle),
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(2, base_swizzle),
		D3D12_DECODE_SHADER_4_COMPONENT_MAPPING(3, base_swizzle),
	};
	texture.srv_desc.Shader4ComponentMapping = D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
			p_view.swizzle_r == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_R] : component_swizzles[p_view.swizzle_r],
			p_view.swizzle_g == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_G] : component_swizzles[p_view.swizzle_g],
			p_view.swizzle_b == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_B] : component_swizzles[p_view.swizzle_b],
			p_view.swizzle_a == TEXTURE_SWIZZLE_IDENTITY ? component_swizzles[TEXTURE_SWIZZLE_A] : component_swizzles[p_view.swizzle_a]);

	if (p_slice_type == TEXTURE_SLICE_CUBEMAP) {
		ERR_FAIL_COND_V_MSG(p_layer >= src_texture->layers, RID(),
				"Specified layer is invalid for cubemap.");
		ERR_FAIL_COND_V_MSG((p_layer % 6) != 0, RID(),
				"Specified layer must be a multiple of 6.");
	}

	// Leveraging aliasing in members of the union as much as possible.

	texture.srv_desc.Texture1D.MostDetailedMip = p_mipmap;
	texture.srv_desc.Texture1D.MipLevels = 1;

	texture.uav_desc.Texture1D.MipSlice = p_mipmap;

	switch (p_slice_type) {
		case TEXTURE_SLICE_2D: {
			if (texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURE2D && p_layer == 0) {
				CRASH_COND(texture.uav_desc.ViewDimension != D3D12_UAV_DIMENSION_TEXTURE2D);
			} else if (texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURE2DMS && p_layer == 0) {
				CRASH_COND(texture.uav_desc.ViewDimension != D3D12_UAV_DIMENSION_UNKNOWN);
			} else if ((texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURE2DARRAY || (texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURE2D && p_layer)) || texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURECUBE || texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURECUBEARRAY) {
				texture.srv_desc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
				texture.srv_desc.Texture2DArray.FirstArraySlice = p_layer;
				texture.srv_desc.Texture2DArray.ArraySize = 1;
				texture.srv_desc.Texture2DArray.PlaneSlice = 0;
				texture.srv_desc.Texture2DArray.ResourceMinLODClamp = 0.0f;
				texture.uav_desc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
				texture.uav_desc.Texture2DArray.FirstArraySlice = p_layer;
				texture.uav_desc.Texture2DArray.ArraySize = 1;
				texture.uav_desc.Texture2DArray.PlaneSlice = 0;
			} else if ((texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY || (texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURE2DMS && p_layer))) {
				texture.srv_desc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
				texture.srv_desc.Texture2DMSArray.FirstArraySlice = p_layer;
				texture.srv_desc.Texture2DMSArray.ArraySize = 1;
				texture.uav_desc.ViewDimension = D3D12_UAV_DIMENSION_UNKNOWN;
			} else {
				CRASH_NOW();
			}
		} break;
		case TEXTURE_SLICE_CUBEMAP: {
			if (texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURECUBE) {
				CRASH_COND(texture.uav_desc.ViewDimension != D3D12_UAV_DIMENSION_TEXTURE2DARRAY);
			} else if (texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURECUBE || p_layer == 0) {
				texture.srv_desc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBE;
				CRASH_COND(texture.uav_desc.ViewDimension != D3D12_UAV_DIMENSION_TEXTURE2DARRAY);
				texture.uav_desc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
				texture.uav_desc.Texture2DArray.FirstArraySlice = 0;
				texture.uav_desc.Texture2DArray.ArraySize = 6;
				texture.uav_desc.Texture2DArray.PlaneSlice = 0;
			} else if (texture.srv_desc.ViewDimension == D3D12_SRV_DIMENSION_TEXTURECUBEARRAY || p_layer != 0) {
				texture.srv_desc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
				texture.srv_desc.TextureCubeArray.First2DArrayFace = p_layer;
				texture.srv_desc.TextureCubeArray.NumCubes = 1;
				texture.srv_desc.TextureCubeArray.ResourceMinLODClamp = 0.0f;
				CRASH_COND(texture.uav_desc.ViewDimension != D3D12_UAV_DIMENSION_TEXTURE2DARRAY);
				texture.uav_desc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
				texture.uav_desc.Texture2DArray.FirstArraySlice = p_layer;
				texture.uav_desc.Texture2DArray.ArraySize = 6;
				texture.uav_desc.Texture2DArray.PlaneSlice = 0;
			} else {
				CRASH_NOW();
			}
		} break;
		case TEXTURE_SLICE_3D: {
			CRASH_COND(texture.srv_desc.ViewDimension != D3D12_SRV_DIMENSION_TEXTURE3D);
			CRASH_COND(texture.uav_desc.ViewDimension != D3D12_UAV_DIMENSION_TEXTURE3D);
			texture.uav_desc.Texture3D.WSize = -1;
		} break;
		case TEXTURE_SLICE_2D_ARRAY: {
			CRASH_COND(texture.srv_desc.ViewDimension != D3D12_SRV_DIMENSION_TEXTURE2DARRAY);
			texture.srv_desc.Texture2DArray.FirstArraySlice = p_layer;
			texture.srv_desc.Texture2DArray.ArraySize = slice_layers;
			CRASH_COND(texture.uav_desc.ViewDimension != D3D12_UAV_DIMENSION_TEXTURE2DARRAY);
			texture.uav_desc.Texture2DArray.FirstArraySlice = p_layer;
			texture.uav_desc.Texture2DArray.ArraySize = slice_layers;
		} break;
	}

	texture.owner = p_with_texture;
	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	_add_dependency(id, p_with_texture);

	return id;
}

Error RenderingDeviceD3D12::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier) {
	ERR_FAIL_COND_V_MSG((draw_list || compute_list), ERR_INVALID_PARAMETER,
			"Updating textures is forbidden during creation of a draw or compute list.");

	Texture *texture = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(texture, ERR_INVALID_PARAMETER);

	if (texture->owner != RID()) {
		texture = texture_owner.get_or_null(texture->owner);
		ERR_FAIL_NULL_V(texture, ERR_BUG); // This is a bug.
	}

	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();
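	// NOTE (explanatory): D3D12CalcSubresource() is the d3dx12 helper implementing
	// MipSlice + ArraySlice * MipLevels + PlaneSlice * MipLevels * ArraySize.
	// E.g. mip 0 of layer 2 on a 5-mip texture is subresource 10.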
	uint32_t subresource = D3D12CalcSubresource(0, p_layer, 0, texture->mipmaps, texture->layers);
	_resource_transition_batch(texture, subresource, texture->planes, D3D12_RESOURCE_STATE_COPY_DEST);
	_resource_transitions_flush(command_list);

	Error err = _texture_update(texture, p_layer, p_data, p_post_barrier, command_list);

	return err;
}

static _ALWAYS_INLINE_ void _copy_region(uint8_t const *__restrict p_src, uint8_t *__restrict p_dst, uint32_t p_src_x, uint32_t p_src_y, uint32_t p_src_w, uint32_t p_src_h, uint32_t p_src_full_w, uint32_t p_dst_pitch, uint32_t p_unit_size) {
	uint32_t src_offset = (p_src_y * p_src_full_w + p_src_x) * p_unit_size;
	uint32_t dst_offset = 0;
	for (uint32_t y = p_src_h; y > 0; y--) {
		uint8_t const *__restrict src = p_src + src_offset;
		uint8_t *__restrict dst = p_dst + dst_offset;
		for (uint32_t x = p_src_w * p_unit_size; x > 0; x--) {
			*dst = *src;
			src++;
			dst++;
		}
		src_offset += p_src_full_w * p_unit_size;
		dst_offset += p_dst_pitch;
	}
}

Error RenderingDeviceD3D12::_texture_update(Texture *p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier, ID3D12GraphicsCommandList *p_command_list) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V_MSG(p_texture->bound, ERR_CANT_ACQUIRE_RESOURCE,
			"Texture can't be updated while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
	ERR_FAIL_COND_V_MSG(!(p_texture->usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT), ERR_INVALID_PARAMETER,
			"Texture requires the TEXTURE_USAGE_CAN_UPDATE_BIT in order to be updatable.");

	uint32_t layer_count = p_texture->layers;
	if (p_texture->type == TEXTURE_TYPE_CUBE || p_texture->type == TEXTURE_TYPE_CUBE_ARRAY) {
		layer_count *= 6;
	}
	ERR_FAIL_COND_V(p_layer >= layer_count, ERR_INVALID_PARAMETER);

	uint32_t width, height;
	uint32_t image_size = get_image_format_required_size(p_texture->format, p_texture->width, p_texture->height, p_texture->depth, p_texture->mipmaps, &width, &height);
	uint32_t required_size = image_size;
	uint32_t required_align = get_compressed_image_format_block_byte_size(p_texture->format);
	if (required_align == 1) {
		required_align = get_image_format_pixel_size(p_texture->format);
	}
	if ((required_align % 4) != 0) { // Alignment rules are really strange.
		required_align *= 4;
	}
	required_align = ALIGN(required_align, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
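	// Worked example (illustrative): for RGBA8, the block byte size is 1, so the
	// align starts as the pixel size (4); 4 is already a multiple of 4, and the
	// final ALIGN() against D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512) makes
	// every staging allocation for this copy start at a 512-byte boundary.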
	ERR_FAIL_COND_V_MSG(required_size != (uint32_t)p_data.size(), ERR_INVALID_PARAMETER,
			"Required size for texture update (" + itos(required_size) + ") does not match data supplied size (" + itos(p_data.size()) + ").");

	uint32_t region_size = texture_upload_region_size_px;

	const uint8_t *r = p_data.ptr();

	uint32_t mipmap_offset = 0;

	uint32_t logic_width = p_texture->width;
	uint32_t logic_height = p_texture->height;

	for (uint32_t mm_i = 0; mm_i < p_texture->mipmaps; mm_i++) {
		uint32_t depth;
		uint32_t image_total = get_image_format_required_size(p_texture->format, p_texture->width, p_texture->height, p_texture->depth, mm_i + 1, &width, &height, &depth);

		const uint8_t *read_ptr_mipmap = r + mipmap_offset;
		image_size = image_total - mipmap_offset;

		UINT dst_subresource = D3D12CalcSubresource(mm_i, p_layer, 0, p_texture->mipmaps, p_texture->layers);
		CD3DX12_TEXTURE_COPY_LOCATION copy_dst(p_texture->resource, dst_subresource);

		for (uint32_t z = 0; z < depth; z++) { // For 3D textures, depth may be > 0.
			const uint8_t *read_ptr = read_ptr_mipmap + image_size * z / depth;
			for (uint32_t y = 0; y < height; y += region_size) {
				for (uint32_t x = 0; x < width; x += region_size) {
					uint32_t region_w = MIN(region_size, width - x);
					uint32_t region_h = MIN(region_size, height - y);

					uint32_t pixel_size = get_image_format_pixel_size(p_texture->format);
					uint32_t block_w, block_h;
					get_compressed_image_format_block_dimensions(p_texture->format, block_w, block_h);

					uint32_t region_pitch = (region_w * pixel_size * block_w) >> get_compressed_image_format_pixel_rshift(p_texture->format);
					region_pitch = ALIGN(region_pitch, D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
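					// Worked example (illustrative): a 37-pixel-wide RGBA8 region has a
					// tight pitch of 37 * 4 = 148 bytes, which ALIGN() pads up to
					// D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256) as CopyTextureRegion() requires.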
					uint32_t to_allocate = region_pitch * region_h;

					uint32_t alloc_offset, alloc_size;
					Error err = _staging_buffer_allocate(to_allocate, required_align, alloc_offset, alloc_size, false);
					ERR_FAIL_COND_V(err, ERR_CANT_CREATE);

					uint8_t *write_ptr;

					{ // Map.
						void *data_ptr = nullptr;
						HRESULT res = staging_buffer_blocks[staging_buffer_current].resource->Map(0, &VOID_RANGE, &data_ptr);
						ERR_FAIL_COND_V_MSG(res, ERR_CANT_CREATE, "Map failed with error " + vformat("0x%08ux", res) + ".");
						write_ptr = (uint8_t *)data_ptr;
						write_ptr += alloc_offset;
					}

					ERR_FAIL_COND_V(region_w % block_w, ERR_BUG);
					ERR_FAIL_COND_V(region_pitch % block_w, ERR_BUG);
					ERR_FAIL_COND_V(region_h % block_h, ERR_BUG);

					if (block_w != 1 || block_h != 1) {
						// Compressed image (blocks).
						// Must copy a block region.

						uint32_t block_size = get_compressed_image_format_block_byte_size(p_texture->format);

						// Re-create current variables in blocky format.
						uint32_t xb = x / block_w;
						uint32_t yb = y / block_h;
						uint32_t wb = width / block_w;
						//uint32_t hb = height / block_h;
						uint32_t region_wb = region_w / block_w;
						uint32_t region_hb = region_h / block_h;
						_copy_region(read_ptr, write_ptr, xb, yb, region_wb, region_hb, wb, region_pitch, block_size);
					} else {
						// Regular image (pixels).
						// Must copy a pixel region.
						_copy_region(read_ptr, write_ptr, x, y, region_w, region_h, width, region_pitch, pixel_size);
					}

					{ // Unmap.
						staging_buffer_blocks[staging_buffer_current].resource->Unmap(0, &VOID_RANGE);
					}

					D3D12_PLACED_SUBRESOURCE_FOOTPRINT src_footprint = {};
					src_footprint.Offset = alloc_offset;
					src_footprint.Footprint = CD3DX12_SUBRESOURCE_FOOTPRINT(
							d3d12_formats[p_texture->format].family,
							region_w,
							region_h,
							1,
							region_pitch);
					CD3DX12_TEXTURE_COPY_LOCATION copy_src(staging_buffer_blocks[staging_buffer_current].resource, src_footprint);

					CD3DX12_BOX src_box(0, 0, region_w, region_h);
					p_command_list->CopyTextureRegion(&copy_dst, x, y, z, &copy_src, &src_box);

					staging_buffer_blocks.write[staging_buffer_current].fill_amount = alloc_offset + alloc_size;
				}
			}
		}

		mipmap_offset = image_total;
		logic_width = MAX(1u, logic_width >> 1);
		logic_height = MAX(1u, logic_height >> 1);
	}

	return OK;
}

Vector<uint8_t> RenderingDeviceD3D12::_texture_get_data_from_image(Texture *tex, uint32_t p_layer, bool p_2d) {
	uint32_t width, height, depth;
	uint32_t image_size = get_image_format_required_size(tex->format, tex->width, tex->height, p_2d ? 1 : tex->depth, tex->mipmaps, &width, &height, &depth);
	Vector<uint8_t> image_data;
	image_data.resize(image_size);

	D3D12_RESOURCE_DESC res_desc = tex->resource->GetDesc();

	uint32_t blockw, blockh;
	get_compressed_image_format_block_dimensions(tex->format, blockw, blockh);
	uint32_t block_size = get_compressed_image_format_block_byte_size(tex->format);
	uint32_t pixel_size = get_image_format_pixel_size(tex->format);

	{
		uint8_t *w = image_data.ptrw();

		uint32_t mipmap_offset = 0;
		for (uint32_t mm_i = 0; mm_i < tex->mipmaps; mm_i++) {
			uint32_t image_total = get_image_format_required_size(tex->format, tex->width, tex->height, p_2d ? 1 : tex->depth, mm_i + 1, &width, &height, &depth);

			uint8_t *write_ptr_mipmap = w + mipmap_offset;
			image_size = image_total - mipmap_offset;

			UINT subresource = 0;

			uint64_t image_total_src = 0;
			D3D12_PLACED_SUBRESOURCE_FOOTPRINT layout = {};
			device->GetCopyableFootprints(
					&res_desc,
					subresource,
					1,
					0,
					&layout,
					nullptr,
					nullptr,
					&image_total_src);

			void *img_mem;
			HRESULT res = tex->resource->Map(subresource, nullptr, &img_mem);
			ERR_FAIL_COND_V_MSG(res, Vector<uint8_t>(), "Map failed with error " + vformat("0x%08ux", res) + ".");

			for (uint32_t z = 0; z < depth; z++) {
				uint8_t *write_ptr = write_ptr_mipmap + z * image_size / depth;
				const uint8_t *slice_read_ptr = ((uint8_t *)img_mem) + layout.Offset + z * image_total_src / depth;

				if (block_size > 1) {
					// Compressed.
					uint32_t line_width = (block_size * (width / blockw));
					for (uint32_t y = 0; y < height / blockh; y++) {
						const uint8_t *rptr = slice_read_ptr + y * layout.Footprint.RowPitch;
						uint8_t *wptr = write_ptr + y * line_width;
						memcpy(wptr, rptr, line_width);
					}
				} else {
					// Uncompressed.
					for (uint32_t y = 0; y < height; y++) {
						const uint8_t *rptr = slice_read_ptr + y * layout.Footprint.RowPitch;
						uint8_t *wptr = write_ptr + y * pixel_size * width;
						memcpy(wptr, rptr, (uint64_t)pixel_size * width);
					}
				}
			}

			tex->resource->Unmap(subresource, nullptr);

			mipmap_offset = image_total;
		}
	}

	return image_data;
}
Vector<uint8_t> RenderingDeviceD3D12::texture_get_data(RID p_texture, uint32_t p_layer) {
	_THREAD_SAFE_METHOD_

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, Vector<uint8_t>());

	ERR_FAIL_COND_V_MSG(tex->bound, Vector<uint8_t>(),
			"Texture can't be retrieved while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
	ERR_FAIL_COND_V_MSG(!(tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), Vector<uint8_t>(),
			"Texture requires the TEXTURE_USAGE_CAN_COPY_FROM_BIT in order to be retrieved.");

	uint32_t layer_count = tex->layers;
	if (tex->type == TEXTURE_TYPE_CUBE || tex->type == TEXTURE_TYPE_CUBE_ARRAY) {
		layer_count *= 6;
	}
	ERR_FAIL_COND_V(p_layer >= layer_count, Vector<uint8_t>());

	if (tex->usage_flags & TEXTURE_USAGE_CPU_READ_BIT) {
		// Does not need anything fancy; map and read.
		return _texture_get_data_from_image(tex, p_layer);
	} else {
		// Compute total image size.
		uint32_t width, height, depth;
		uint32_t final_buffer_size = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, tex->mipmaps, &width, &height, &depth);

		uint32_t block_w, block_h;
		get_compressed_image_format_block_dimensions(tex->format, block_w, block_h);

		uint32_t alignment = D3D12_TEXTURE_DATA_PITCH_ALIGNMENT;

		// We'll use a potentially bigger buffer to account for mip sizes in which we need to use a bigger pitch to keep D3D12 happy.
		uint32_t buffer_size = 0;
		{
			uint32_t computed_h = tex->height;
			uint32_t computed_d = tex->depth;

			uint32_t prev_size = 0;
			for (uint32_t i = 0; i < tex->mipmaps; i++) {
				uint32_t image_size = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, i + 1);
				uint32_t inferred_row_pitch = image_size / (computed_h * computed_d) * block_h;
				uint32_t adjusted_row_pitch = ALIGN(inferred_row_pitch, alignment);
				uint32_t adjusted_image_size = adjusted_row_pitch / block_h * computed_h * tex->depth;
				uint32_t size = adjusted_image_size - prev_size;
				prev_size = image_size;

				buffer_size = ALIGN(buffer_size + size, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);

				computed_h = MAX(1u, computed_h >> 1);
				computed_d = MAX(1u, computed_d >> 1);
			}
		}
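
		// Illustrative numbers, assuming an RGBA8 100x100 mip: the tight row
		// pitch is 400 bytes, but copy pitches must be multiples of
		// D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256), so the copy uses
		// ALIGN(400, 256) = 512 bytes per row; each mip's region is additionally
		// rounded up to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512).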

		// Allocate buffer.
		ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get(); // Makes more sense to retrieve.
		Buffer tmp_buffer;
		Error err = _buffer_allocate(&tmp_buffer, buffer_size, D3D12_RESOURCE_STATE_COPY_DEST, D3D12_HEAP_TYPE_READBACK);
		ERR_FAIL_COND_V(err != OK, Vector<uint8_t>());

		for (uint32_t i = 0; i < tex->mipmaps; i++) {
			uint32_t subresource = D3D12CalcSubresource(i, p_layer, 0, tex->owner_mipmaps, tex->owner_layers);
			_resource_transition_batch(tex, subresource, tex->planes, D3D12_RESOURCE_STATE_COPY_SOURCE);
		}
		_resource_transitions_flush(command_list);
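
		// D3D12CalcSubresource flattens (mip, layer, plane) into a single index:
		//   subresource = mip + layer * mip_count + plane * mip_count * layer_count
		// e.g., mip 2 of layer 1 in a 5-mip, single-plane texture is index 7.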

		uint32_t computed_w = tex->width;
		uint32_t computed_h = tex->height;
		uint32_t computed_d = tex->depth;

		uint32_t prev_size = 0;
		uint32_t offset = 0;
		for (uint32_t i = 0; i < tex->mipmaps; i++) {
			uint32_t image_size = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, i + 1);
			uint32_t size = image_size - prev_size;
			prev_size = image_size;

			D3D12_PLACED_SUBRESOURCE_FOOTPRINT dst_footprint = {};
			dst_footprint.Offset = offset;
			dst_footprint.Footprint.Width = MAX(block_w, computed_w);
			dst_footprint.Footprint.Height = MAX(block_h, computed_h);
			dst_footprint.Footprint.Depth = computed_d;
			uint32_t inferred_row_pitch = size / (dst_footprint.Footprint.Height * computed_d) * block_h;
			dst_footprint.Footprint.RowPitch = inferred_row_pitch;
			dst_footprint.Footprint.Format = d3d12_formats[tex->format].family;
			CD3DX12_TEXTURE_COPY_LOCATION copy_dst(tmp_buffer.resource, dst_footprint);

			UINT src_subresource = D3D12CalcSubresource(i, p_layer, 0, tex->owner_mipmaps, tex->owner_layers);
			CD3DX12_TEXTURE_COPY_LOCATION copy_src(tex->resource, src_subresource);

			if (dst_footprint.Footprint.RowPitch % alignment) {
				// Dammit! Now we must copy with an imposed pitch and then adjust the data, row by row.
				copy_dst.PlacedFootprint.Offset = ALIGN(offset, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
				uint32_t adjusted_row_pitch = ALIGN(inferred_row_pitch, alignment);
				copy_dst.PlacedFootprint.Footprint.RowPitch = adjusted_row_pitch;
				command_list->CopyTextureRegion(&copy_dst, 0, 0, 0, &copy_src, nullptr);

				_flush(true);

				void *buffer_mem;
				uint32_t adjusted_size = adjusted_row_pitch / block_h * dst_footprint.Footprint.Height * computed_d;
				CD3DX12_RANGE range(offset, copy_dst.PlacedFootprint.Offset + adjusted_size);
				HRESULT res = tmp_buffer.resource->Map(0, &range, &buffer_mem);
				ERR_FAIL_COND_V_MSG(res, Vector<uint8_t>(), "Map failed with error " + vformat("0x%08x", res) + ".");

				// Compact the rows back to the tight pitch, in place.
				for (uint32_t j = 0; j < dst_footprint.Footprint.Height / block_h * computed_d; j++) {
					memmove((uint8_t *)buffer_mem + offset + j * inferred_row_pitch, (uint8_t *)buffer_mem + copy_dst.PlacedFootprint.Offset + j * adjusted_row_pitch, inferred_row_pitch);
				}

				tmp_buffer.resource->Unmap(0, nullptr);
			} else if (offset % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) {
				// Row pitch is fine, but the offset alignment is not.
				copy_dst.PlacedFootprint.Offset = ALIGN(offset, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
				command_list->CopyTextureRegion(&copy_dst, 0, 0, 0, &copy_src, nullptr);

				_flush(true);

				void *buffer_mem;
				CD3DX12_RANGE range(copy_dst.PlacedFootprint.Offset, size);
				HRESULT res = tmp_buffer.resource->Map(0, &range, &buffer_mem);
				ERR_FAIL_COND_V_MSG(res, Vector<uint8_t>(), "Map failed with error " + vformat("0x%08x", res) + ".");

				memmove((uint8_t *)buffer_mem + offset, (uint8_t *)buffer_mem + copy_dst.PlacedFootprint.Offset, size);

				tmp_buffer.resource->Unmap(0, nullptr);
			} else {
				command_list->CopyTextureRegion(&copy_dst, 0, 0, 0, &copy_src, nullptr);
			}

			computed_w = MAX(1u, computed_w >> 1);
			computed_h = MAX(1u, computed_h >> 1);
			computed_d = MAX(1u, computed_d >> 1);
			offset += size;
		}

		_flush(true);

		void *buffer_mem;
		CD3DX12_RANGE range(0, final_buffer_size);
		HRESULT res = tmp_buffer.resource->Map(0, &range, &buffer_mem);
		ERR_FAIL_COND_V_MSG(res, Vector<uint8_t>(), "Map failed with error " + vformat("0x%08x", res) + ".");

		Vector<uint8_t> buffer_data;
		buffer_data.resize(final_buffer_size);
		{
			uint8_t *w = buffer_data.ptrw();
			memcpy(w, buffer_mem, final_buffer_size);
		}

		tmp_buffer.resource->Unmap(0, nullptr);

		_buffer_free(&tmp_buffer);

		return buffer_data;
	}
}

bool RenderingDeviceD3D12::texture_is_shared(RID p_texture) {
	_THREAD_SAFE_METHOD_

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, false);
	return tex->owner.is_valid();
}

bool RenderingDeviceD3D12::texture_is_valid(RID p_texture) {
	return texture_owner.owns(p_texture);
}

RenderingDevice::TextureFormat RenderingDeviceD3D12::texture_get_format(RID p_texture) {
	_THREAD_SAFE_METHOD_

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, TextureFormat());

	TextureFormat tf;
	tf.format = tex->format;
	tf.width = tex->width;
	tf.height = tex->height;
	tf.depth = tex->depth;
	tf.array_layers = tex->layers;
	tf.mipmaps = tex->mipmaps;
	tf.texture_type = tex->type;
	tf.samples = tex->samples;
	tf.usage_bits = tex->usage_flags;
	tf.shareable_formats = tex->allowed_shared_formats;
	tf.is_resolve_buffer = tex->is_resolve_buffer;

	return tf;
}

Size2i RenderingDeviceD3D12::texture_size(RID p_texture) {
	_THREAD_SAFE_METHOD_

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, Size2i());
	return Size2i(tex->width, tex->height);
}

uint64_t RenderingDeviceD3D12::texture_get_native_handle(RID p_texture) {
	_THREAD_SAFE_METHOD_

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, 0);

	return (uint64_t)tex->resource;
}

Error RenderingDeviceD3D12::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, BitField<BarrierMask> p_post_barrier) {
	_THREAD_SAFE_METHOD_

	Texture *src_tex = texture_owner.get_or_null(p_from_texture);
	ERR_FAIL_NULL_V(src_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
			"Source texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
	ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER,
			"Source texture requires the TEXTURE_USAGE_CAN_COPY_FROM_BIT in order to be copied from.");

	uint32_t src_layer_count = src_tex->layers;
	uint32_t src_width, src_height, src_depth;
	get_image_format_required_size(src_tex->format, src_tex->width, src_tex->height, src_tex->depth, p_src_mipmap + 1, &src_width, &src_height, &src_depth);
	if (src_tex->type == TEXTURE_TYPE_CUBE || src_tex->type == TEXTURE_TYPE_CUBE_ARRAY) {
		src_layer_count *= 6;
	}

	ERR_FAIL_COND_V(p_from.x < 0 || p_from.x + p_size.x > src_width, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_from.y < 0 || p_from.y + p_size.y > src_height, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_from.z < 0 || p_from.z + p_size.z > src_depth, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_src_mipmap >= src_tex->mipmaps, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_src_layer >= src_layer_count, ERR_INVALID_PARAMETER);

	Texture *dst_tex = texture_owner.get_or_null(p_to_texture);
	ERR_FAIL_NULL_V(dst_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(dst_tex->bound, ERR_INVALID_PARAMETER,
			"Destination texture can't be copied to while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
	ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
			"Destination texture requires the TEXTURE_USAGE_CAN_COPY_TO_BIT in order to be copied to.");

	uint32_t dst_layer_count = dst_tex->layers;
	uint32_t dst_width, dst_height, dst_depth;
	get_image_format_required_size(dst_tex->format, dst_tex->width, dst_tex->height, dst_tex->depth, p_dst_mipmap + 1, &dst_width, &dst_height, &dst_depth);
	if (dst_tex->type == TEXTURE_TYPE_CUBE || dst_tex->type == TEXTURE_TYPE_CUBE_ARRAY) {
		dst_layer_count *= 6;
	}

	ERR_FAIL_COND_V(p_to.x < 0 || p_to.x + p_size.x > dst_width, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_to.y < 0 || p_to.y + p_size.y > dst_height, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_to.z < 0 || p_to.z + p_size.z > dst_depth, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_dst_mipmap >= dst_tex->mipmaps, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_dst_layer >= dst_layer_count, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG((src_tex->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) != (dst_tex->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT), ERR_INVALID_PARAMETER,
			"Source and destination texture must be of the same type (color or depth).");

	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();

	uint32_t src_subresource = D3D12CalcSubresource(p_src_mipmap, p_src_layer, 0, src_tex->owner_mipmaps, src_tex->owner_layers);
	_resource_transition_batch(src_tex, src_subresource, src_tex->planes, D3D12_RESOURCE_STATE_COPY_SOURCE);

	uint32_t dst_subresource = D3D12CalcSubresource(p_dst_mipmap, p_dst_layer, 0, dst_tex->owner_mipmaps, dst_tex->owner_layers);
	_resource_transition_batch(dst_tex, dst_subresource, dst_tex->planes, D3D12_RESOURCE_STATE_COPY_DEST);

	_resource_transitions_flush(command_list);

	{
		CD3DX12_TEXTURE_COPY_LOCATION src_location(src_tex->resource, src_subresource);
		// D3D12_BOX bounds are half-open: right/bottom/back are exclusive.
		CD3DX12_BOX src_box(p_from.x, p_from.y, p_from.z, p_from.x + p_size.x, p_from.y + p_size.y, p_from.z + p_size.z);
		CD3DX12_TEXTURE_COPY_LOCATION dst_location(dst_tex->resource, dst_subresource);
		command_list->CopyTextureRegion(
				&dst_location,
				p_to.x, p_to.y, p_to.z,
				&src_location,
				&src_box);
	}

	return OK;
}
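
// Resolves a multisampled 2D texture into a matching single-sample texture via
// ResolveSubresource; both textures must share format and dimensions.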
Error RenderingDeviceD3D12::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, BitField<BarrierMask> p_post_barrier) {
	_THREAD_SAFE_METHOD_

	Texture *src_tex = texture_owner.get_or_null(p_from_texture);
	ERR_FAIL_NULL_V(src_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
			"Source texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
	ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER,
			"Source texture requires the TEXTURE_USAGE_CAN_COPY_FROM_BIT in order to be copied from.");

	ERR_FAIL_COND_V_MSG(src_tex->type != TEXTURE_TYPE_2D, ERR_INVALID_PARAMETER, "Source texture must be 2D (or a slice of a 3D/Cube texture).");
	ERR_FAIL_COND_V_MSG(src_tex->samples == TEXTURE_SAMPLES_1, ERR_INVALID_PARAMETER, "Source texture must be multisampled.");

	Texture *dst_tex = texture_owner.get_or_null(p_to_texture);
	ERR_FAIL_NULL_V(dst_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(dst_tex->bound, ERR_INVALID_PARAMETER,
			"Destination texture can't be copied to while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
	ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
			"Destination texture requires the TEXTURE_USAGE_CAN_COPY_TO_BIT in order to be copied to.");

	ERR_FAIL_COND_V_MSG(dst_tex->type != TEXTURE_TYPE_2D, ERR_INVALID_PARAMETER, "Destination texture must be 2D (or a slice of a 3D/Cube texture).");
	ERR_FAIL_COND_V_MSG(dst_tex->samples != TEXTURE_SAMPLES_1, ERR_INVALID_PARAMETER, "Destination texture must not be multisampled.");

	ERR_FAIL_COND_V_MSG(src_tex->format != dst_tex->format, ERR_INVALID_PARAMETER, "Source and destination textures must be the same format.");
	ERR_FAIL_COND_V_MSG(src_tex->width != dst_tex->width || src_tex->height != dst_tex->height || src_tex->depth != dst_tex->depth, ERR_INVALID_PARAMETER, "Source and destination textures must have the same dimensions.");

	ERR_FAIL_COND_V_MSG((src_tex->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) != (dst_tex->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT), ERR_INVALID_PARAMETER,
			"Source and destination texture must be of the same type (color or depth).");

	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();

	uint32_t src_subresource = D3D12CalcSubresource(src_tex->base_mipmap, src_tex->base_layer, 0, src_tex->owner_mipmaps, src_tex->owner_layers);
	_resource_transition_batch(src_tex, src_subresource, src_tex->planes, D3D12_RESOURCE_STATE_RESOLVE_SOURCE);

	uint32_t dst_subresource = D3D12CalcSubresource(dst_tex->base_mipmap, dst_tex->base_layer, 0, dst_tex->owner_mipmaps, dst_tex->owner_layers);
	_resource_transition_batch(dst_tex, dst_subresource, dst_tex->planes, D3D12_RESOURCE_STATE_RESOLVE_DEST);

	_resource_transitions_flush(command_list);

	command_list->ResolveSubresource(dst_tex->resource, dst_subresource, src_tex->resource, src_subresource, d3d12_formats[src_tex->format].general_format);

	return OK;
}

Error RenderingDeviceD3D12::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, BitField<BarrierMask> p_post_barrier) {
	_THREAD_SAFE_METHOD_

	Texture *src_tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(src_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
			"Source texture can't be cleared while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");

	ERR_FAIL_COND_V(p_layers == 0, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_mipmaps == 0, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
			"Source texture requires the TEXTURE_USAGE_CAN_COPY_TO_BIT in order to be cleared.");

	uint32_t src_layer_count = src_tex->layers;
	if (src_tex->type == TEXTURE_TYPE_CUBE || src_tex->type == TEXTURE_TYPE_CUBE_ARRAY) {
		src_layer_count *= 6;
	}

	ERR_FAIL_COND_V(p_base_mipmap + p_mipmaps > src_tex->mipmaps, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_base_layer + p_layers > src_layer_count, ERR_INVALID_PARAMETER);

	if ((src_tex->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
		// Clear via RTV.

		if (frames[frame].desc_heap_walkers.rtv.is_at_eof()) {
			if (!frames[frame].desc_heaps_exhausted_reported.rtv) {
				frames[frame].desc_heaps_exhausted_reported.rtv = true;
				ERR_FAIL_V_MSG(ERR_BUSY,
						"Cannot clear texture because there's not enough room in the current frame's RENDER TARGET descriptors heap.\n"
						"Please increase the value of the rendering/rendering_device/d3d12/max_misc_descriptors_per_frame project setting.");
			} else {
				return ERR_BUSY;
			}
		}

		D3D12_RENDER_TARGET_VIEW_DESC rtv_desc = _make_rtv_for_texture(src_tex, p_base_mipmap, p_base_layer, p_layers);
		rtv_desc.Format = src_tex->owner_uav_desc.Format;

		ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();

		for (uint32_t i = 0; i < p_layers; i++) {
			for (uint32_t j = 0; j < p_mipmaps; j++) {
				uint32_t subresource = D3D12CalcSubresource(src_tex->base_mipmap + p_base_mipmap + j, src_tex->base_layer + p_base_layer + i, 0, src_tex->owner_mipmaps, src_tex->owner_layers);
				_resource_transition_batch(src_tex, subresource, src_tex->planes, D3D12_RESOURCE_STATE_RENDER_TARGET, src_tex->owner_resource);
			}
		}
		_resource_transitions_flush(command_list);

		device->CreateRenderTargetView(
				src_tex->owner_resource,
				&rtv_desc,
				frames[frame].desc_heap_walkers.rtv.get_curr_cpu_handle());

		command_list->ClearRenderTargetView(
				frames[frame].desc_heap_walkers.rtv.get_curr_cpu_handle(),
				p_color.components,
				0,
				nullptr);

		frames[frame].desc_heap_walkers.rtv.advance();
	} else {
		// Clear via UAV.

		if (frames[frame].desc_heap_walkers.resources.is_at_eof()) {
			if (!frames[frame].desc_heaps_exhausted_reported.resources) {
				frames[frame].desc_heaps_exhausted_reported.resources = true;
				ERR_FAIL_V_MSG(ERR_BUSY,
						"Cannot clear texture because there's not enough room in the current frame's RESOURCE descriptors heap.\n"
						"Please increase the value of the rendering/rendering_device/d3d12/max_resource_descriptors_per_frame project setting.");
			} else {
				return ERR_BUSY;
			}
		}
		if (frames[frame].desc_heap_walkers.aux.is_at_eof()) {
			if (!frames[frame].desc_heaps_exhausted_reported.aux) {
				frames[frame].desc_heaps_exhausted_reported.aux = true;
				ERR_FAIL_V_MSG(ERR_BUSY,
						"Cannot clear texture because there's not enough room in the current frame's AUX descriptors heap.\n"
						"Please increase the value of the rendering/rendering_device/d3d12/max_misc_descriptors_per_frame project setting.");
			} else {
				return ERR_BUSY;
			}
		}

		ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();

		for (uint32_t i = 0; i < p_layers; i++) {
			for (uint32_t j = 0; j < p_mipmaps; j++) {
				uint32_t subresource = D3D12CalcSubresource(src_tex->base_mipmap + p_base_mipmap + j, src_tex->base_layer + p_base_layer + i, 0, src_tex->owner_mipmaps, src_tex->owner_layers);
				_resource_transition_batch(src_tex, subresource, src_tex->planes, D3D12_RESOURCE_STATE_UNORDERED_ACCESS, src_tex->owner_resource);
			}
		}
		_resource_transitions_flush(command_list);

		device->CreateUnorderedAccessView(
				src_tex->owner_resource,
				nullptr,
				&src_tex->owner_uav_desc,
				frames[frame].desc_heap_walkers.aux.get_curr_cpu_handle());

		device->CopyDescriptorsSimple(
				1,
				frames[frame].desc_heap_walkers.resources.get_curr_cpu_handle(),
				frames[frame].desc_heap_walkers.aux.get_curr_cpu_handle(),
				D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
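
		// ClearUnorderedAccessViewUint needs the UAV descriptor twice: a GPU
		// handle living in a shader-visible heap and a CPU handle living in a
		// non-shader-visible one. Hence the descriptor is created in the aux
		// (CPU-only) heap and copied into the shader-visible resources heap.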
		UINT values[4] = {
			(UINT)p_color.get_r8(),
			(UINT)p_color.get_g8(),
			(UINT)p_color.get_b8(),
			(UINT)p_color.get_a8(),
		};
		command_list->ClearUnorderedAccessViewUint(
				frames[frame].desc_heap_walkers.resources.get_curr_gpu_handle(),
				frames[frame].desc_heap_walkers.aux.get_curr_cpu_handle(),
				src_tex->owner_resource,
				values,
				0,
				nullptr);

		frames[frame].desc_heap_walkers.resources.advance();
		frames[frame].desc_heap_walkers.aux.advance();
	}

	return OK;
}

bool RenderingDeviceD3D12::texture_is_format_supported_for_usage(DataFormat p_format, BitField<RenderingDevice::TextureUsageBits> p_usage) const {
	ERR_FAIL_INDEX_V(p_format, DATA_FORMAT_MAX, false);

	_THREAD_SAFE_METHOD_

	D3D12_FEATURE_DATA_FORMAT_SUPPORT srv_rtv_support = {};
	srv_rtv_support.Format = d3d12_formats[p_format].general_format;
	HRESULT res = device->CheckFeatureSupport(D3D12_FEATURE_FORMAT_SUPPORT, &srv_rtv_support, sizeof(srv_rtv_support));
	ERR_FAIL_COND_V_MSG(res, false, "CheckFeatureSupport failed with error " + vformat("0x%08x", res) + ".");

	D3D12_FEATURE_DATA_FORMAT_SUPPORT &uav_support = srv_rtv_support; // Fine for now.

	D3D12_FEATURE_DATA_FORMAT_SUPPORT dsv_support = {};
	dsv_support.Format = d3d12_formats[p_format].dsv_format;
	res = device->CheckFeatureSupport(D3D12_FEATURE_FORMAT_SUPPORT, &dsv_support, sizeof(dsv_support));
	ERR_FAIL_COND_V_MSG(res, false, "CheckFeatureSupport failed with error " + vformat("0x%08x", res) + ".");

	if ((p_usage & TEXTURE_USAGE_SAMPLING_BIT) && !(srv_rtv_support.Support1 & (D3D12_FORMAT_SUPPORT1_SHADER_LOAD | D3D12_FORMAT_SUPPORT1_SHADER_SAMPLE)) && d3d12_formats[p_format].general_format != DXGI_FORMAT_UNKNOWN) {
		return false;
	}
	if ((p_usage & TEXTURE_USAGE_SAMPLING_BIT) && d3d12_formats[p_format].general_format == DXGI_FORMAT_UNKNOWN) {
		return false;
	}

	if ((p_usage & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) && !(srv_rtv_support.Support1 & D3D12_FORMAT_SUPPORT1_RENDER_TARGET)) {
		return false;
	}

	if ((p_usage & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !(dsv_support.Support1 & D3D12_FORMAT_SUPPORT1_DEPTH_STENCIL)) {
		return false;
	}

	if ((p_usage & TEXTURE_USAGE_STORAGE_BIT) && !(uav_support.Support1 & D3D12_FORMAT_SUPPORT1_TYPED_UNORDERED_ACCESS_VIEW)) { // Maybe check LOAD/STORE, too?
		return false;
	}

	if ((p_usage & TEXTURE_USAGE_STORAGE_ATOMIC_BIT) && !(uav_support.Support2 & D3D12_FORMAT_SUPPORT2_UAV_ATOMIC_ADD)) { // Check a basic atomic at least.
		return false;
	}

	if ((p_usage & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) && d3d12_formats[p_format].general_format != DXGI_FORMAT_R8_UINT) {
		return false;
	}

	return true;
}

/********************/
/**** ATTACHMENT ****/
/********************/

bool RenderingDeviceD3D12::_framebuffer_format_preprocess(FramebufferFormat *p_fb_format, uint32_t p_view_count) {
	const Vector<AttachmentFormat> &attachments = p_fb_format->attachments;

	LocalVector<int32_t> attachment_last_pass;
	attachment_last_pass.resize(attachments.size());

	if (p_view_count > 1) {
		const D3D12Context::MultiviewCapabilities &capabilities = context->get_multiview_capabilities();

		// This only works with multiview!
		ERR_FAIL_COND_V_MSG(!capabilities.is_supported, false, "Multiview not supported.");

		// Make sure we limit this to the number of views we support.
		ERR_FAIL_COND_V_MSG(p_view_count > capabilities.max_view_count, false, "Hardware does not support the requested number of views for a multiview render pass.");
	}

	int attachment_count = 0;
	HashSet<DXGI_FORMAT> ms_attachment_formats;
	for (int i = 0; i < attachments.size(); i++) {
		if (attachments[i].usage_flags == AttachmentFormat::UNUSED_ATTACHMENT) {
			continue;
		}

		ERR_FAIL_INDEX_V(attachments[i].format, DATA_FORMAT_MAX, false);
		ERR_FAIL_INDEX_V(attachments[i].samples, TEXTURE_SAMPLES_MAX, false);
		ERR_FAIL_COND_V_MSG(!(attachments[i].usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_INPUT_ATTACHMENT_BIT | TEXTURE_USAGE_VRS_ATTACHMENT_BIT)),
				false, "Texture format for index (" + itos(i) + ") requires an attachment (color, depth-stencil, input or VRS) bit set.");

		attachment_last_pass[i] = -1;
		attachment_count++;

		if (attachments[i].samples != TEXTURE_SAMPLES_1) {
			if ((attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
				ms_attachment_formats.insert(d3d12_formats[attachments[i].format].general_format);
			} else if ((attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
				ms_attachment_formats.insert(d3d12_formats[attachments[i].format].dsv_format);
			}
		}
	}

	Vector<FramebufferPass> &passes = p_fb_format->passes;
	for (int i = 0; i < passes.size(); i++) {
		FramebufferPass *pass = &passes.write[i];

		TextureSamples texture_samples = TEXTURE_SAMPLES_1;
		bool is_multisample_first = true;

		ERR_FAIL_COND_V(pass->color_attachments.size() > D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT, false);

		for (int j = 0; j < pass->color_attachments.size(); j++) {
			int32_t attachment = pass->color_attachments[j];
			if (attachment != FramebufferPass::ATTACHMENT_UNUSED) {
				ERR_FAIL_INDEX_V_MSG(attachment, attachments.size(), false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "), color attachment (" + itos(j) + ").");
				ERR_FAIL_COND_V_MSG(!(attachments[attachment].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT), false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): it's marked as depth, but it's not usable as a color attachment.");
				ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): it was already used for something else earlier in this pass.");

				if (is_multisample_first) {
					texture_samples = attachments[attachment].samples;
					is_multisample_first = false;
				} else {
					ERR_FAIL_COND_V_MSG(texture_samples != attachments[attachment].samples, false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): if an attachment is marked as multisample, all of them should be multisample and use the same number of samples.");
				}
				attachment_last_pass[attachment] = i;
			}
		}

		for (int j = 0; j < pass->input_attachments.size(); j++) {
			int32_t attachment = pass->input_attachments[j];
			if (attachment != FramebufferPass::ATTACHMENT_UNUSED) {
				ERR_FAIL_INDEX_V_MSG(attachment, attachments.size(), false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "), input attachment (" + itos(j) + ").");
				ERR_FAIL_COND_V_MSG(!(attachments[attachment].usage_flags & TEXTURE_USAGE_INPUT_ATTACHMENT_BIT), false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): it isn't marked as an input texture.");
				ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): it was already used for something else earlier in this pass.");
				if ((attachments[attachment].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
					ERR_FAIL_V_MSG(false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): the D3D12 driver doesn't yet support using depth-stencil targets as input attachments.");
				}
				attachment_last_pass[attachment] = i;
			}
		}

		if (pass->resolve_attachments.size() > 0) {
			ERR_FAIL_COND_V_MSG(pass->resolve_attachments.size() != pass->color_attachments.size(), false, "The amount of resolve attachments (" + itos(pass->resolve_attachments.size()) + ") must match the number of color attachments (" + itos(pass->color_attachments.size()) + ").");
			ERR_FAIL_COND_V_MSG(texture_samples == TEXTURE_SAMPLES_1, false, "Resolve attachments specified, but color attachments are not multisample.");
		}
		for (int j = 0; j < pass->resolve_attachments.size(); j++) {
			int32_t attachment = pass->resolve_attachments[j];
			if (attachment != FramebufferPass::ATTACHMENT_UNUSED) {
				ERR_FAIL_INDEX_V_MSG(attachment, attachments.size(), false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachment (" + itos(j) + ").");
				ERR_FAIL_COND_V_MSG(pass->color_attachments[j] == FramebufferPass::ATTACHMENT_UNUSED, false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachment (" + itos(j) + "): the respective color attachment is marked as unused.");
				ERR_FAIL_COND_V_MSG(!(attachments[attachment].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT), false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachment: it isn't marked as a color texture.");
				ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): it was already used for something else earlier in this pass.");
				bool multisample = attachments[attachment].samples > TEXTURE_SAMPLES_1;
				ERR_FAIL_COND_V_MSG(multisample, false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): resolve attachments can't be multisample.");
				attachment_last_pass[attachment] = i;
			}
		}

		if (pass->depth_attachment != FramebufferPass::ATTACHMENT_UNUSED) {
			int32_t attachment = pass->depth_attachment;
			ERR_FAIL_INDEX_V_MSG(attachment, attachments.size(), false, "Invalid framebuffer depth format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "), depth attachment.");
			ERR_FAIL_COND_V_MSG(!(attachments[attachment].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT), false, "Invalid framebuffer depth format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): it's marked as depth, but it's not a depth attachment.");
			ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, false, "Invalid framebuffer depth format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): it was already used for something else earlier in this pass.");
			attachment_last_pass[attachment] = i;

			if (is_multisample_first) {
				texture_samples = attachments[attachment].samples;
				is_multisample_first = false;
			} else {
				ERR_FAIL_COND_V_MSG(texture_samples != attachments[attachment].samples, false, "Invalid framebuffer depth format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): if an attachment is marked as multisample, all of them should be multisample and use the same number of samples, including the depth.");
			}
		}

		if (context->get_vrs_capabilities().ss_image_supported && pass->vrs_attachment != FramebufferPass::ATTACHMENT_UNUSED) {
			int32_t attachment = pass->vrs_attachment;
			ERR_FAIL_INDEX_V_MSG(attachment, attachments.size(), false, "Invalid framebuffer VRS format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "), VRS attachment.");
			ERR_FAIL_COND_V_MSG(!(attachments[attachment].usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT), false, "Invalid framebuffer VRS format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): it's marked as VRS, but it's not a VRS attachment.");
			ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, false, "Invalid framebuffer VRS attachment (" + itos(attachment) + "), in pass (" + itos(i) + "): it was already used for something else earlier in this pass.");
			attachment_last_pass[attachment] = i;
		}

		for (int j = 0; j < pass->preserve_attachments.size(); j++) {
			int32_t attachment = pass->preserve_attachments[j];
			ERR_FAIL_COND_V_MSG(attachment == FramebufferPass::ATTACHMENT_UNUSED, false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "), preserve attachment (" + itos(j) + "). Preserve attachments can't be unused.");
			ERR_FAIL_INDEX_V_MSG(attachment, attachments.size(), false, "Invalid framebuffer format attachment (" + itos(attachment) + "), in pass (" + itos(i) + "), preserve attachment (" + itos(j) + ").");
			if (attachment_last_pass[attachment] != i) {
				// Preserve can still be used to keep depth or color from being discarded after use.
				attachment_last_pass[attachment] = i;
			}
		}

		p_fb_format->pass_samples.push_back(texture_samples);
	}

	if (p_fb_format->view_count > 1) {
		const D3D12Context::MultiviewCapabilities capabilities = context->get_multiview_capabilities();

		// For now this only works with multiview!
		ERR_FAIL_COND_V_MSG(!capabilities.is_supported, false, "Multiview not supported.");

		// Make sure we limit this to the number of views we support.
		ERR_FAIL_COND_V_MSG(p_fb_format->view_count > capabilities.max_view_count, false, "Hardware does not support the requested number of views for a multiview render pass.");
	}

	if (!ms_attachment_formats.is_empty()) {
		LocalVector<DXGI_FORMAT> formats;
		for (DXGI_FORMAT f : ms_attachment_formats) {
			formats.push_back(f);
		}
		p_fb_format->max_supported_sample_count = _find_max_common_supported_sample_count(formats.ptr(), formats.size());
	}

	return true;
}

uint32_t RenderingDeviceD3D12::_find_max_common_supported_sample_count(const DXGI_FORMAT *p_formats, uint32_t p_num_formats) {
	// Bit i of each per-format mask means 2^i samples are supported; the masks
	// are cached per format, and the intersection across formats is returned.
	uint32_t common = UINT32_MAX;
	for (uint32_t i = 0; i < p_num_formats; i++) {
		if (format_sample_counts_mask_cache.has(p_formats[i])) {
			common &= format_sample_counts_mask_cache[p_formats[i]];
		} else {
			D3D12_FEATURE_DATA_MULTISAMPLE_QUALITY_LEVELS msql = {};
			msql.Format = p_formats[i];
			uint32_t mask = 0;
			for (int samples = 1 << (TEXTURE_SAMPLES_MAX - 1); samples >= 1; samples /= 2) {
				msql.SampleCount = (UINT)samples;
				HRESULT res = device->CheckFeatureSupport(D3D12_FEATURE_MULTISAMPLE_QUALITY_LEVELS, &msql, sizeof(msql));
				if (SUCCEEDED(res) && msql.NumQualityLevels) {
					int bit = get_shift_from_power_of_2(samples);
					ERR_FAIL_COND_V(bit == -1, 1);
					mask |= (uint32_t)(1 << bit);
				}
			}
			format_sample_counts_mask_cache.insert(p_formats[i], mask);
			common &= mask;
		}
	}
	if (common == UINT32_MAX || common == 0) {
		return 1;
	} else {
		// Highest sample count whose bit survives in the common mask.
		return (uint32_t)1 << (nearest_shift(common) - 1);
	}
}

RenderingDevice::FramebufferFormatID RenderingDeviceD3D12::framebuffer_format_create(const Vector<AttachmentFormat> &p_format, uint32_t p_view_count) {
	FramebufferPass pass;
	for (int i = 0; i < p_format.size(); i++) {
		if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
			pass.depth_attachment = i;
		} else {
			pass.color_attachments.push_back(i);
		}
	}

	Vector<FramebufferPass> passes;
	passes.push_back(pass);
	return framebuffer_format_create_multipass(p_format, passes, p_view_count);
}

RenderingDevice::FramebufferFormatID RenderingDeviceD3D12::framebuffer_format_create_multipass(const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, uint32_t p_view_count) {
	_THREAD_SAFE_METHOD_

	FramebufferFormat fb_format;
	fb_format.attachments = p_attachments;
	fb_format.passes = p_passes;
	fb_format.view_count = p_view_count;

	if (!_framebuffer_format_preprocess(&fb_format, p_view_count)) {
		return INVALID_ID;
	}

	FramebufferFormatID id = FramebufferFormatID(framebuffer_formats.size()) | (FramebufferFormatID(ID_TYPE_FRAMEBUFFER_FORMAT) << FramebufferFormatID(ID_BASE_SHIFT));
	framebuffer_formats[id] = fb_format;
	return id;
}

RenderingDevice::FramebufferFormatID RenderingDeviceD3D12::framebuffer_format_create_empty(TextureSamples p_samples) {
	_THREAD_SAFE_METHOD_

	FramebufferFormat fb_format;
	fb_format.passes.push_back(FramebufferPass());
	fb_format.pass_samples.push_back(p_samples);

	FramebufferFormatID id = FramebufferFormatID(framebuffer_formats.size()) | (FramebufferFormatID(ID_TYPE_FRAMEBUFFER_FORMAT) << FramebufferFormatID(ID_BASE_SHIFT));
	framebuffer_formats[id] = fb_format;
	return id;
}

RenderingDevice::TextureSamples RenderingDeviceD3D12::framebuffer_format_get_texture_samples(FramebufferFormatID p_format, uint32_t p_pass) {
	HashMap<FramebufferFormatID, FramebufferFormat>::Iterator E = framebuffer_formats.find(p_format);
	ERR_FAIL_NULL_V(E, TEXTURE_SAMPLES_1);
	ERR_FAIL_COND_V(p_pass >= uint32_t(E->value.pass_samples.size()), TEXTURE_SAMPLES_1);

	return E->value.pass_samples[p_pass];
}

/***********************/
/**** RENDER TARGET ****/
/***********************/

RID RenderingDeviceD3D12::framebuffer_create_empty(const Size2i &p_size, TextureSamples p_samples, FramebufferFormatID p_format_check) {
	_THREAD_SAFE_METHOD_

	Framebuffer framebuffer;
	framebuffer.format_id = framebuffer_format_create_empty(p_samples);
	ERR_FAIL_COND_V(p_format_check != INVALID_FORMAT_ID && framebuffer.format_id != p_format_check, RID());
	framebuffer.size = p_size;

	return framebuffer_owner.make_rid(framebuffer);
}

RID RenderingDeviceD3D12::framebuffer_create(const Vector<RID> &p_texture_attachments, FramebufferFormatID p_format_check, uint32_t p_view_count) {
	_THREAD_SAFE_METHOD_

	FramebufferPass pass;
	for (int i = 0; i < p_texture_attachments.size(); i++) {
		Texture *texture = texture_owner.get_or_null(p_texture_attachments[i]);

		ERR_FAIL_COND_V_MSG(texture && texture->layers != p_view_count, RID(), "Layer count of the texture doesn't match the view count for this framebuffer.");

		if (texture && texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
			pass.depth_attachment = i;
		} else if (texture && texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) {
			pass.vrs_attachment = i;
		} else {
			if (texture && texture->is_resolve_buffer) {
				pass.resolve_attachments.push_back(i);
			} else {
				pass.color_attachments.push_back(texture ? i : FramebufferPass::ATTACHMENT_UNUSED);
			}
		}
	}

	Vector<FramebufferPass> passes;
	passes.push_back(pass);

	return framebuffer_create_multipass(p_texture_attachments, passes, p_format_check, p_view_count);
}

D3D12_RENDER_TARGET_VIEW_DESC RenderingDeviceD3D12::_make_rtv_for_texture(const RenderingDeviceD3D12::Texture *p_texture, uint32_t p_mipmap_offset, uint32_t p_layer_offset, uint32_t p_layers) {
	D3D12_RENDER_TARGET_VIEW_DESC rtv_desc = {};
	rtv_desc.Format = p_texture->srv_desc.Format;

	switch (p_texture->srv_desc.ViewDimension) {
		case D3D12_SRV_DIMENSION_TEXTURE1D: {
			rtv_desc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE1D;
			rtv_desc.Texture1D.MipSlice = p_texture->srv_desc.Texture1D.MostDetailedMip + p_mipmap_offset;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE1DARRAY: {
			rtv_desc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE1DARRAY;
			rtv_desc.Texture1DArray.MipSlice = p_texture->srv_desc.Texture1DArray.MostDetailedMip + p_mipmap_offset;
			rtv_desc.Texture1DArray.FirstArraySlice = p_texture->srv_desc.Texture1DArray.FirstArraySlice + p_layer_offset;
			rtv_desc.Texture1DArray.ArraySize = p_layers == UINT32_MAX ? p_texture->srv_desc.Texture1DArray.ArraySize : p_layers;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2D: {
			rtv_desc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2D;
			rtv_desc.Texture2D.MipSlice = p_texture->srv_desc.Texture2D.MostDetailedMip + p_mipmap_offset;
			rtv_desc.Texture2D.PlaneSlice = p_texture->srv_desc.Texture2D.PlaneSlice;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2DARRAY: {
			rtv_desc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
			rtv_desc.Texture2DArray.MipSlice = p_texture->srv_desc.Texture2DArray.MostDetailedMip + p_mipmap_offset;
			rtv_desc.Texture2DArray.FirstArraySlice = p_texture->srv_desc.Texture2DArray.FirstArraySlice + p_layer_offset;
			rtv_desc.Texture2DArray.ArraySize = p_layers == UINT32_MAX ? p_texture->srv_desc.Texture2DArray.ArraySize : p_layers;
			rtv_desc.Texture2DArray.PlaneSlice = p_texture->srv_desc.Texture2DArray.PlaneSlice;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2DMS: {
			rtv_desc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY: {
			rtv_desc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMSARRAY;
			rtv_desc.Texture2DMSArray.FirstArraySlice = p_texture->srv_desc.Texture2DMSArray.FirstArraySlice + p_layer_offset;
			rtv_desc.Texture2DMSArray.ArraySize = p_layers == UINT32_MAX ? p_texture->srv_desc.Texture2DMSArray.ArraySize : p_layers;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE3D: {
			rtv_desc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
			rtv_desc.Texture3D.MipSlice = p_texture->srv_desc.Texture3D.MostDetailedMip + p_mipmap_offset;
			rtv_desc.Texture3D.FirstWSlice = 0;
			rtv_desc.Texture3D.WSize = p_texture->depth;
		} break;
		default: {
			ERR_FAIL_V_MSG(D3D12_RENDER_TARGET_VIEW_DESC(), "Can't create an RTV from an SRV whose view dimension is " + itos(p_texture->srv_desc.ViewDimension) + ".");
		}
	}

	return rtv_desc;
}

D3D12_DEPTH_STENCIL_VIEW_DESC RenderingDeviceD3D12::_make_dsv_for_texture(const RenderingDeviceD3D12::Texture *p_texture) {
	D3D12_DEPTH_STENCIL_VIEW_DESC dsv_desc = {};
	dsv_desc.Format = d3d12_formats[p_texture->format].dsv_format;
	dsv_desc.Flags = D3D12_DSV_FLAG_NONE;

	switch (p_texture->srv_desc.ViewDimension) {
		case D3D12_SRV_DIMENSION_TEXTURE1D: {
			dsv_desc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE1D;
			dsv_desc.Texture1D.MipSlice = p_texture->srv_desc.Texture1D.MostDetailedMip;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE1DARRAY: {
			dsv_desc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE1DARRAY;
			dsv_desc.Texture1DArray.MipSlice = p_texture->srv_desc.Texture1DArray.MostDetailedMip;
			dsv_desc.Texture1DArray.FirstArraySlice = p_texture->srv_desc.Texture1DArray.FirstArraySlice;
			dsv_desc.Texture1DArray.ArraySize = p_texture->srv_desc.Texture1DArray.ArraySize;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2D: {
			dsv_desc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2D;
			dsv_desc.Texture2D.MipSlice = p_texture->srv_desc.Texture2D.MostDetailedMip;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2DARRAY: {
			dsv_desc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DARRAY;
			dsv_desc.Texture2DArray.MipSlice = p_texture->srv_desc.Texture2DArray.MostDetailedMip;
			dsv_desc.Texture2DArray.FirstArraySlice = p_texture->srv_desc.Texture2DArray.FirstArraySlice;
			dsv_desc.Texture2DArray.ArraySize = p_texture->srv_desc.Texture2DArray.ArraySize;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2DMS: {
			dsv_desc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DMS;
			dsv_desc.Texture2DMS.UnusedField_NothingToDefine = p_texture->srv_desc.Texture2DMS.UnusedField_NothingToDefine;
		} break;
		case D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY: {
			dsv_desc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DMSARRAY;
			dsv_desc.Texture2DMSArray.FirstArraySlice = p_texture->srv_desc.Texture2DMSArray.FirstArraySlice;
			dsv_desc.Texture2DMSArray.ArraySize = p_texture->srv_desc.Texture2DMSArray.ArraySize;
		} break;
		default: {
			ERR_FAIL_V_MSG(D3D12_DEPTH_STENCIL_VIEW_DESC(), "Can't create a DSV from an SRV whose view dimension is " + itos(p_texture->srv_desc.ViewDimension) + ".");
		}
	}

	return dsv_desc;
}
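
// Builds a framebuffer from existing texture attachments: classifies each one
// (color, depth-stencil, VRS), validates the resulting format, and fills the
// per-framebuffer RTV/DSV descriptor heaps with ready-made views.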
RID RenderingDeviceD3D12::framebuffer_create_multipass(const Vector<RID> &p_texture_attachments, const Vector<FramebufferPass> &p_passes, FramebufferFormatID p_format_check, uint32_t p_view_count) {
	_THREAD_SAFE_METHOD_

	Vector<AttachmentFormat> attachments;
	attachments.resize(p_texture_attachments.size());
	Vector<uint32_t> attachments_handle_inds;
	attachments_handle_inds.resize(p_texture_attachments.size());

	Size2i size;
	bool size_set = false;

	int num_color = 0;
	int num_depth = 0;
	for (int i = 0; i < p_texture_attachments.size(); i++) {
		AttachmentFormat af;
		Texture *texture = texture_owner.get_or_null(p_texture_attachments[i]);
		if (!texture) {
			af.usage_flags = AttachmentFormat::UNUSED_ATTACHMENT;
			attachments_handle_inds.write[i] = UINT32_MAX;
		} else {
			ERR_FAIL_COND_V_MSG(texture->layers != p_view_count, RID(), "Layer count of the texture doesn't match the view count for this framebuffer.");

			if (!size_set) {
				size.width = texture->width;
				size.height = texture->height;
				size_set = true;
			} else if (texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) {
				// If this is not the first attachment we assume this is used as the VRS attachment.
				// In this case this texture will be 1/16th the size of the color attachment.
				// So we skip the size check.
			} else {
				ERR_FAIL_COND_V_MSG((uint32_t)size.width != texture->width || (uint32_t)size.height != texture->height, RID(),
						"All textures in a framebuffer should be the same size.");
			}

			af.format = texture->format;
			af.samples = texture->samples;
			af.usage_flags = texture->usage_flags;

			bool is_vrs = texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT && i == p_passes[0].vrs_attachment;
			if (is_vrs) {
				// VRS attachments don't get an RTV/DSV handle.
			} else if ((texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
				attachments_handle_inds.write[i] = num_color;
				num_color++;
			} else if ((texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
				attachments_handle_inds.write[i] = num_depth;
				num_depth++;
			} else {
				attachments_handle_inds.write[i] = UINT32_MAX;
			}
		}
		attachments.write[i] = af;
	}

	ERR_FAIL_COND_V_MSG(!size_set, RID(), "All attachments unused.");

	FramebufferFormatID format_id = framebuffer_format_create_multipass(attachments, p_passes, p_view_count);
	if (format_id == INVALID_ID) {
		return RID();
	}

	ERR_FAIL_COND_V_MSG(p_format_check != INVALID_ID && format_id != p_format_check, RID(),
			"The format used to check this framebuffer differs from the intended framebuffer format.");

	Framebuffer framebuffer;
	framebuffer.format_id = format_id;
	framebuffer.texture_ids = p_texture_attachments;
	framebuffer.attachments_handle_inds = attachments_handle_inds;
	framebuffer.size = size;
	framebuffer.view_count = p_view_count;

	{
		if (num_color) {
			Error err = framebuffer.rtv_heap.allocate(device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_RTV, num_color, false);
			ERR_FAIL_COND_V(err, RID());
		}
		DescriptorsHeap::Walker rtv_heap_walker = framebuffer.rtv_heap.make_walker();

		if (num_depth) {
			Error err = framebuffer.dsv_heap.allocate(device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_DSV, num_depth, false);
			ERR_FAIL_COND_V(err, RID());
		}
		DescriptorsHeap::Walker dsv_heap_walker = framebuffer.dsv_heap.make_walker();

		for (int i = 0; i < p_texture_attachments.size(); i++) {
			Texture *texture = texture_owner.get_or_null(p_texture_attachments[i]);
			if (!texture) {
				continue;
			}

			bool is_vrs = texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT && i == p_passes[0].vrs_attachment;
			if (is_vrs) {
				// VRS attachments don't get an RTV/DSV handle.
			} else if ((texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
				D3D12_RENDER_TARGET_VIEW_DESC rtv_desc = _make_rtv_for_texture(texture);
				device->CreateRenderTargetView(texture->resource, &rtv_desc, rtv_heap_walker.get_curr_cpu_handle());
				rtv_heap_walker.advance();
			} else if ((texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
				D3D12_DEPTH_STENCIL_VIEW_DESC dsv_desc = _make_dsv_for_texture(texture);
				device->CreateDepthStencilView(texture->resource, &dsv_desc, dsv_heap_walker.get_curr_cpu_handle());
				dsv_heap_walker.advance();
			}
		}

		DEV_ASSERT(rtv_heap_walker.is_at_eof());
		DEV_ASSERT(dsv_heap_walker.is_at_eof());
	}

	RID id = framebuffer_owner.make_rid(framebuffer);

	for (int i = 0; i < p_texture_attachments.size(); i++) {
		if (p_texture_attachments[i].is_valid()) {
			_add_dependency(id, p_texture_attachments[i]);
		}
	}

	return id;
}

RenderingDevice::FramebufferFormatID RenderingDeviceD3D12::framebuffer_get_format(RID p_framebuffer) {
	_THREAD_SAFE_METHOD_

	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL_V(framebuffer, INVALID_ID);

	return framebuffer->format_id;
}

bool RenderingDeviceD3D12::framebuffer_is_valid(RID p_framebuffer) const {
	_THREAD_SAFE_METHOD_

	return framebuffer_owner.owns(p_framebuffer);
}

void RenderingDeviceD3D12::framebuffer_set_invalidation_callback(RID p_framebuffer, InvalidationCallback p_callback, void *p_userdata) {
	_THREAD_SAFE_METHOD_

	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL(framebuffer);

	framebuffer->invalidated_callback = p_callback;
	framebuffer->invalidated_callback_userdata = p_userdata;
}

/*****************/
/**** SAMPLER ****/
/*****************/

RID RenderingDeviceD3D12::sampler_create(const SamplerState &p_state) {
	_THREAD_SAFE_METHOD_

	D3D12_SAMPLER_DESC sampler_desc = {};

	if (p_state.use_anisotropy) {
		sampler_desc.Filter = D3D12_ENCODE_ANISOTROPIC_FILTER(D3D12_FILTER_REDUCTION_TYPE_STANDARD);
		sampler_desc.MaxAnisotropy = p_state.anisotropy_max;
	} else {
		static const D3D12_FILTER_TYPE d3d12_filter_types[] = {
			D3D12_FILTER_TYPE_POINT, // SAMPLER_FILTER_NEAREST.
			D3D12_FILTER_TYPE_LINEAR, // SAMPLER_FILTER_LINEAR.
		};
		sampler_desc.Filter = D3D12_ENCODE_BASIC_FILTER(
				d3d12_filter_types[p_state.min_filter],
				d3d12_filter_types[p_state.mag_filter],
				d3d12_filter_types[p_state.mip_filter],
				p_state.enable_compare ? D3D12_FILTER_REDUCTION_TYPE_COMPARISON : D3D12_FILTER_REDUCTION_TYPE_STANDARD);
	}

	ERR_FAIL_INDEX_V(p_state.repeat_u, SAMPLER_REPEAT_MODE_MAX, RID());
	sampler_desc.AddressU = address_modes[p_state.repeat_u];
	ERR_FAIL_INDEX_V(p_state.repeat_v, SAMPLER_REPEAT_MODE_MAX, RID());
	sampler_desc.AddressV = address_modes[p_state.repeat_v];
	ERR_FAIL_INDEX_V(p_state.repeat_w, SAMPLER_REPEAT_MODE_MAX, RID());
	sampler_desc.AddressW = address_modes[p_state.repeat_w];

	ERR_FAIL_INDEX_V(p_state.border_color, SAMPLER_BORDER_COLOR_MAX, RID());
	for (int i = 0; i < 4; i++) {
		sampler_desc.BorderColor[i] = sampler_border_colors[p_state.border_color][i];
	}

	sampler_desc.MinLOD = p_state.min_lod;
	sampler_desc.MaxLOD = p_state.max_lod;
	sampler_desc.MipLODBias = p_state.lod_bias;

	ERR_FAIL_INDEX_V(p_state.compare_op, COMPARE_OP_MAX, RID());
	sampler_desc.ComparisonFunc = p_state.enable_compare ? compare_operators[p_state.compare_op] : D3D12_COMPARISON_FUNC_NEVER;

	// TODO: Emulate somehow?
	if (p_state.unnormalized_uvw) {
		WARN_PRINT("Creating a sampler with unnormalized UVW, which is not supported.");
	}

	return sampler_owner.make_rid(sampler_desc);
}
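
// Note: D3D12_FORMAT_SUPPORT1_SHADER_SAMPLE implies support for filtered
// sampling, so the check below conservatively gives the same answer for both
// NEAREST and LINEAR; formats restricted to point loads report SHADER_LOAD
// instead.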
bool RenderingDeviceD3D12::sampler_is_format_supported_for_filter(DataFormat p_format, SamplerFilter p_sampler_filter) const {
	ERR_FAIL_INDEX_V(p_format, DATA_FORMAT_MAX, false);

	_THREAD_SAFE_METHOD_

	D3D12_FEATURE_DATA_FORMAT_SUPPORT srv_rtv_support = {};
	srv_rtv_support.Format = d3d12_formats[p_format].general_format;
	HRESULT res = device->CheckFeatureSupport(D3D12_FEATURE_FORMAT_SUPPORT, &srv_rtv_support, sizeof(srv_rtv_support));
	ERR_FAIL_COND_V_MSG(res, false, "CheckFeatureSupport failed with error " + vformat("0x%08x", res) + ".");

	return (srv_rtv_support.Support1 & D3D12_FORMAT_SUPPORT1_SHADER_SAMPLE);
}

/**********************/
/**** VERTEX ARRAY ****/
/**********************/

RID RenderingDeviceD3D12::vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, bool p_use_as_storage) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());

	Buffer buffer;
	D3D12_RESOURCE_STATES usage = D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
	if (p_use_as_storage) {
		usage |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
	}
	Error err = _buffer_allocate(&buffer, p_size_bytes, usage, D3D12_HEAP_TYPE_DEFAULT);
	ERR_FAIL_COND_V(err != OK, RID());

	if (p_data.size()) {
		uint64_t data_size = p_data.size();
		const uint8_t *r = p_data.ptr();
		_buffer_update(&buffer, 0, r, data_size);
	}

	RID id = vertex_buffer_owner.make_rid(buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

// Internally reference counted; this ID is guaranteed to be unique for the same description, but needs to be freed as many times as it was allocated.
RenderingDevice::VertexFormatID RenderingDeviceD3D12::vertex_format_create(const Vector<VertexAttribute> &p_vertex_formats) {
	_THREAD_SAFE_METHOD_

	VertexDescriptionKey key;
	key.vertex_formats = p_vertex_formats;

	VertexFormatID *idptr = vertex_format_cache.getptr(key);
	if (idptr) {
		return *idptr;
	}

	// Does not exist, create one and cache it.
	VertexDescriptionCache vdcache;
	vdcache.elements_desc.resize(p_vertex_formats.size());

	HashSet<int> used_locations;
	for (int i = 0; i < p_vertex_formats.size(); i++) {
		ERR_CONTINUE(p_vertex_formats[i].format >= DATA_FORMAT_MAX);
		ERR_FAIL_COND_V(used_locations.has(p_vertex_formats[i].location), INVALID_ID);

		ERR_FAIL_COND_V_MSG(get_format_vertex_size(p_vertex_formats[i].format) == 0, INVALID_ID,
				"Data format for attachment (" + itos(i) + "), '" + named_formats[p_vertex_formats[i].format] + "', is not valid for a vertex array.");

		// SPIRV-Cross maps `layout(location = <N>) in` to the `TEXCOORD<N>` semantic.
		vdcache.elements_desc.write[i].SemanticName = "TEXCOORD";
		vdcache.elements_desc.write[i].SemanticIndex = p_vertex_formats[i].location;
		vdcache.elements_desc.write[i].Format = d3d12_formats[p_vertex_formats[i].format].general_format;
		vdcache.elements_desc.write[i].InputSlot = i; // TODO: Can the same slot be used if data comes from the same buffer (regardless of format)?
		vdcache.elements_desc.write[i].AlignedByteOffset = p_vertex_formats[i].offset;
		if (p_vertex_formats[i].frequency == VERTEX_FREQUENCY_INSTANCE) {
			vdcache.elements_desc.write[i].InputSlotClass = D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA;
			vdcache.elements_desc.write[i].InstanceDataStepRate = 1;
		} else {
			vdcache.elements_desc.write[i].InputSlotClass = D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
			vdcache.elements_desc.write[i].InstanceDataStepRate = 0;
		}

		used_locations.insert(p_vertex_formats[i].location);
	}

	vdcache.vertex_formats = p_vertex_formats;

	VertexFormatID id = VertexFormatID(vertex_format_cache.size()) | (VertexFormatID(ID_TYPE_VERTEX_FORMAT) << ID_BASE_SHIFT);
	vertex_format_cache[key] = id;
	vertex_formats[id] = vdcache;
	return id;
}

RID RenderingDeviceD3D12::vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers, const Vector<uint64_t> &p_offsets) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
	const VertexDescriptionCache &vd = vertex_formats[p_vertex_format];

	ERR_FAIL_COND_V(vd.vertex_formats.size() != p_src_buffers.size(), RID());

	for (int i = 0; i < p_src_buffers.size(); i++) {
		ERR_FAIL_COND_V(!vertex_buffer_owner.owns(p_src_buffers[i]), RID());
	}

	VertexArray vertex_array;

	if (!p_offsets.is_empty()) {
		ERR_FAIL_COND_V(p_offsets.size() != p_src_buffers.size(), RID());
	}

	vertex_array.vertex_count = p_vertex_count;
	vertex_array.description = p_vertex_format;
	vertex_array.max_instances_allowed = 0xFFFFFFFF; // By default, as many as you want.
	HashSet<Buffer *> unique_buffers;
	for (int i = 0; i < p_src_buffers.size(); i++) {
		Buffer *buffer = vertex_buffer_owner.get_or_null(p_src_buffers[i]);

		const VertexAttribute &atf = vd.vertex_formats[i];

		// Validate with buffer.
		{
			uint32_t element_size = get_format_vertex_size(atf.format);
			ERR_FAIL_COND_V(element_size == 0, RID()); // Should never happen, since this was prevalidated.

			if (atf.frequency == VERTEX_FREQUENCY_VERTEX) {
				// Validate size for regular drawing.
				uint64_t total_size = uint64_t(atf.stride) * (p_vertex_count - 1) + atf.offset + element_size;
				ERR_FAIL_COND_V_MSG(total_size > buffer->size, RID(),
						"Attachment (" + itos(i) + ") will read past the end of the buffer.");
			} else {
				// Validate size for instanced drawing.
				uint64_t available = buffer->size - atf.offset;
				ERR_FAIL_COND_V_MSG(available < element_size, RID(),
						"Attachment (" + itos(i) + ") uses instancing, but the buffer is too small.");

				uint32_t instances_allowed = available / atf.stride;
				vertex_array.max_instances_allowed = MIN(instances_allowed, vertex_array.max_instances_allowed);
			}
		}

		unique_buffers.insert(buffer);

		D3D12_VERTEX_BUFFER_VIEW view = {};
		uint64_t data_offset = p_offsets.is_empty() ? 0 : p_offsets[i];
		view.BufferLocation = buffer->resource->GetGPUVirtualAddress() + data_offset;
		view.SizeInBytes = buffer->size;
		view.StrideInBytes = atf.stride;
		vertex_array.views.push_back(view);
	}

	for (Buffer *buffer : unique_buffers) {
		vertex_array.unique_buffers.push_back(buffer);
	}

	RID id = vertex_array_owner.make_rid(vertex_array);
	for (int i = 0; i < p_src_buffers.size(); i++) {
		_add_dependency(id, p_src_buffers[i]);
	}
	return id;
}

RID RenderingDeviceD3D12::index_buffer_create(uint32_t p_index_count, IndexBufferFormat p_format, const Vector<uint8_t> &p_data, bool p_use_restart_indices) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V(p_index_count == 0, RID());

	IndexBuffer index_buffer;
	index_buffer.index_format = (p_format == INDEX_BUFFER_FORMAT_UINT16) ? DXGI_FORMAT_R16_UINT : DXGI_FORMAT_R32_UINT;
	index_buffer.supports_restart_indices = p_use_restart_indices;
	index_buffer.index_count = p_index_count;
	uint32_t size_bytes = p_index_count * ((p_format == INDEX_BUFFER_FORMAT_UINT16) ? 2 : 4);
#ifdef DEBUG_ENABLED
	if (p_data.size()) {
		index_buffer.max_index = 0;
		ERR_FAIL_COND_V_MSG((uint32_t)p_data.size() != size_bytes, RID(),
				"Default index buffer initializer array size (" + itos(p_data.size()) + ") does not match format required size (" + itos(size_bytes) + ").");
		const uint8_t *r = p_data.ptr();
		if (p_format == INDEX_BUFFER_FORMAT_UINT16) {
			const uint16_t *index16 = (const uint16_t *)r;
			for (uint32_t i = 0; i < p_index_count; i++) {
				if (p_use_restart_indices && index16[i] == 0xFFFF) {
					continue; // Restart index, ignore.
				}
				index_buffer.max_index = MAX(index16[i], index_buffer.max_index);
			}
		} else {
			const uint32_t *index32 = (const uint32_t *)r;
			for (uint32_t i = 0; i < p_index_count; i++) {
				if (p_use_restart_indices && index32[i] == 0xFFFFFFFF) {
					continue; // Restart index, ignore.
				}
				index_buffer.max_index = MAX(index32[i], index_buffer.max_index);
			}
		}
	} else {
		index_buffer.max_index = 0xFFFFFFFF;
	}
#else
	index_buffer.max_index = 0xFFFFFFFF;
#endif
	Error err = _buffer_allocate(&index_buffer, size_bytes, D3D12_RESOURCE_STATE_INDEX_BUFFER, D3D12_HEAP_TYPE_DEFAULT);
	ERR_FAIL_COND_V(err != OK, RID());

	if (p_data.size()) {
		uint64_t data_size = p_data.size();
		const uint8_t *r = p_data.ptr();
		_buffer_update(&index_buffer, 0, r, data_size);
	}

	RID id = index_buffer_owner.make_rid(index_buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

RID RenderingDeviceD3D12::index_array_create(RID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V(!index_buffer_owner.owns(p_index_buffer), RID());

	IndexBuffer *index_buffer = index_buffer_owner.get_or_null(p_index_buffer);

	ERR_FAIL_COND_V(p_index_count == 0, RID());
	ERR_FAIL_COND_V(p_index_offset + p_index_count > index_buffer->index_count, RID());

	IndexArray index_array;
	index_array.buffer = index_buffer;
	index_array.max_index = index_buffer->max_index;
	index_array.offset = p_index_offset;
	index_array.indices = p_index_count;
	index_array.supports_restart_indices = index_buffer->supports_restart_indices;
	index_array.view.BufferLocation = index_buffer->resource->GetGPUVirtualAddress();
	index_array.view.SizeInBytes = p_index_count * (index_buffer->index_format == DXGI_FORMAT_R16_UINT ? 2 : 4);
	index_array.view.Format = index_buffer->index_format;

	RID id = index_array_owner.make_rid(index_array);
	_add_dependency(id, p_index_buffer);
	return id;
}

/****************/
/**** SHADER ****/
/****************/

static const char *shader_uniform_names[RenderingDevice::UNIFORM_TYPE_MAX + 1] = {
	"Sampler", "CombinedSampler", "Texture", "Image", "TextureBuffer", "SamplerTextureBuffer", "ImageBuffer", "UniformBuffer", "StorageBuffer", "InputAttachment", "N/A"
};
static uint32_t shader_stage_bit_offset_indices[RenderingDevice::SHADER_STAGE_MAX] = {
	/* SHADER_STAGE_VERTEX */ 0,
	/* SHADER_STAGE_FRAGMENT */ 1,
	/* SHADER_STAGE_TESSELATION_CONTROL */ UINT32_MAX,
	/* SHADER_STAGE_TESSELATION_EVALUATION */ UINT32_MAX,
	/* SHADER_STAGE_COMPUTE */ 2,
};

String RenderingDeviceD3D12::_shader_uniform_debug(RID p_shader, int p_set) {
	String ret;
	const Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, String());
	for (int i = 0; i < shader->sets.size(); i++) {
		if (p_set >= 0 && i != p_set) {
			continue;
		}
		for (int j = 0; j < shader->sets[i].uniforms.size(); j++) {
			const UniformInfo &ui = shader->sets[i].uniforms[j].info;
			if (!ret.is_empty()) {
				ret += "\n";
			}
			ret += "Set: " + itos(i) + " Binding: " + itos(ui.binding) + " Type: " + shader_uniform_names[ui.type] + " Writable: " + (ui.writable ? "Y" : "N") + " Length: " + itos(ui.length);
		}
	}
	return ret;
}

uint32_t RenderingDeviceD3D12::_shader_patch_dxil_specialization_constant(
		PipelineSpecializationConstantType p_type,
		const void *p_value,
		const uint64_t (&p_stages_bit_offsets)[D3D12_BITCODE_OFFSETS_NUM_STAGES],
		HashMap<ShaderStage, Vector<uint8_t>> &r_stages_bytecodes,
		bool p_is_first_patch) {
	uint32_t patch_val = 0;
	switch (p_type) {
		case PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT: {
			uint32_t int_value = *((const int *)p_value);
			ERR_FAIL_COND_V(int_value & (1 << 31), 0);
			patch_val = int_value;
		} break;
		case PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL: {
			bool bool_value = *((const bool *)p_value);
			patch_val = (uint32_t)bool_value;
		} break;
		case PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT: {
			uint32_t int_value = *((const int *)p_value);
			ERR_FAIL_COND_V(int_value & (1 << 31), 0);
			patch_val = (int_value >> 1);
		} break;
	}
	// For VBR encoding to encode the number of bits we expect (32), we need to set the MSB unconditionally.
	// However, signed VBR moves the MSB to the LSB, so setting the MSB to 1 wouldn't help. Therefore,
	// the bit we set to 1 is the one at index 30.
	patch_val |= (1 << 30);
	patch_val <<= 1; // What signed VBR does.
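	// Illustrative walkthrough (derived from tamper_bits() below, not normative):
	// patching the value 5 gives patch_val = (5 | (1 << 30)) << 1 = 0x8000000A.
	// Encoded as 8-bit VBR chunks (7 payload bits plus a continuation bit each),
	// the payload splits as 0x0A, 0x00, 0x00, 0x00, 0x08. Because bit 31 is set,
	// the value always spans exactly five chunks, with the continuation bit set
	// on the first four and clear on the last, which the DEV_ASSERT verifies.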
	auto tamper_bits = [](uint8_t *p_start, uint64_t p_bit_offset, uint64_t p_value) -> uint64_t {
		uint64_t original = 0;
		uint32_t curr_input_byte = p_bit_offset / 8;
		uint8_t curr_input_bit = p_bit_offset % 8;
		auto get_curr_input_bit = [&]() -> bool {
			return ((p_start[curr_input_byte] >> curr_input_bit) & 1);
		};
		auto move_to_next_input_bit = [&]() {
			if (curr_input_bit == 7) {
				curr_input_bit = 0;
				curr_input_byte++;
			} else {
				curr_input_bit++;
			}
		};
		auto tamper_input_bit = [&](bool p_new_bit) {
			p_start[curr_input_byte] &= ~((uint8_t)1 << curr_input_bit);
			if (p_new_bit) {
				p_start[curr_input_byte] |= (uint8_t)1 << curr_input_bit;
			}
		};
		uint8_t value_bit_idx = 0;
		for (uint32_t i = 0; i < 5; i++) { // 32 bits take 5 full bytes in VBR.
			for (uint32_t j = 0; j < 7; j++) {
				bool input_bit = get_curr_input_bit();
				original |= (uint64_t)(input_bit ? 1 : 0) << value_bit_idx;
				tamper_input_bit((p_value >> value_bit_idx) & 1);
				move_to_next_input_bit();
				value_bit_idx++;
			}
#ifdef DEV_ENABLED
			bool input_bit = get_curr_input_bit();
			DEV_ASSERT((i < 4 && input_bit) || (i == 4 && !input_bit));
#endif
			move_to_next_input_bit();
		}
		return original;
	};

	uint32_t stages_patched_mask = 0;
	for (int stage = 0; stage < SHADER_STAGE_MAX; stage++) {
		if (!r_stages_bytecodes.has((ShaderStage)stage)) {
			continue;
		}

		uint64_t offset = p_stages_bit_offsets[shader_stage_bit_offset_indices[stage]];
		if (offset == 0) {
			// This constant does not appear at this stage.
			continue;
		}

		Vector<uint8_t> &bytecode = r_stages_bytecodes[(ShaderStage)stage];
#ifdef DEV_ENABLED
		uint64_t orig_patch_val = tamper_bits(bytecode.ptrw(), offset, patch_val);
		// Checking against the value the NIR patch should have set.
		DEV_ASSERT(!p_is_first_patch || ((orig_patch_val >> 1) & GODOT_NIR_SC_SENTINEL_MAGIC_MASK) == GODOT_NIR_SC_SENTINEL_MAGIC);
		uint64_t readback_patch_val = tamper_bits(bytecode.ptrw(), offset, patch_val);
		DEV_ASSERT(readback_patch_val == patch_val);
#else
		tamper_bits(bytecode.ptrw(), offset, patch_val);
#endif
		stages_patched_mask |= (1 << stage);
	}
	return stages_patched_mask;
}

bool RenderingDeviceD3D12::_shader_sign_dxil_bytecode(ShaderStage p_stage, Vector<uint8_t> &r_dxil_blob) {
	dxil_validator *validator = get_dxil_validator_for_current_thread();

	char *err = nullptr;
	bool res = dxil_validate_module(validator, r_dxil_blob.ptrw(), r_dxil_blob.size(), &err);
	if (!res) {
		if (err) {
			ERR_FAIL_V_MSG(false, "Shader signing invocation at stage " + String(shader_stage_names[p_stage]) + " failed:\n" + String(err));
		} else {
			ERR_FAIL_V_MSG(false, "Shader signing invocation at stage " + String(shader_stage_names[p_stage]) + " failed.");
		}
	}

	return true;
}

// Version 1: Initial.
// Version 2: 64-bit vertex input mask.
#define SHADER_BINARY_VERSION 2

String RenderingDeviceD3D12::shader_get_binary_cache_key() const {
	return "D3D12-SV" + itos(SHADER_BINARY_VERSION);
}

enum RootSignatureLocationType {
	RS_LOC_TYPE_RESOURCE,
	RS_LOC_TYPE_SAMPLER,
};

enum ResourceClass {
	RES_CLASS_INVALID,
	RES_CLASS_CBV,
	RES_CLASS_SRV,
	RES_CLASS_UAV,
};

// Phase 1: SPIR-V reflection, where the Vulkan/RD interface of the shader is discovered.
// Phase 2: SPIR-V to DXIL translation, where the DXIL interface is discovered, which may have gaps due to optimizations.

struct RenderingDeviceD3D12ShaderBinaryDataBinding {
	// - Phase 1.
	uint32_t type;
	uint32_t binding;
	uint32_t stages;
	uint32_t length; // Size of arrays (in total elements), or UBOs (in bytes * total elements).
	uint32_t writable;
	// - Phase 2.
	uint32_t res_class;
	uint32_t has_sampler;
	uint32_t dxil_stages;
	struct RootSignatureLocation {
		uint32_t root_param_idx = UINT32_MAX; // UINT32_MAX if unused.
		uint32_t range_idx = UINT32_MAX; // UINT32_MAX if unused.
	};
	RootSignatureLocation root_sig_locations[2]; // Index is RootSignatureLocationType.

	// We need to sort these to fill the root signature locations properly.
	bool operator<(const RenderingDeviceD3D12ShaderBinaryDataBinding &p_other) const {
		return binding < p_other.binding;
	}
};

struct RenderingDeviceD3D12ShaderBinarySpecializationConstant {
	// - Phase 1.
	uint32_t type;
	uint32_t constant_id;
	union {
		uint32_t int_value;
		float float_value;
		bool bool_value;
	};
	// - Phase 2.
	uint64_t stages_bit_offsets[D3D12_BITCODE_OFFSETS_NUM_STAGES];
};

struct RenderingDeviceD3D12ShaderBinaryData {
	uint64_t vertex_input_mask;
	uint32_t fragment_output_mask;
	uint32_t specialization_constants_count;
	uint32_t spirv_specialization_constants_ids_mask;
	uint32_t is_compute;
	uint32_t compute_local_size[3];
	uint32_t set_count;
	uint32_t push_constant_size;
	uint32_t dxil_push_constant_stages; // Phase 2.
	uint32_t nir_runtime_data_root_param_idx; // Phase 2.
	uint32_t stage_count;
	uint32_t shader_name_len;
	uint32_t root_signature_len;
	uint32_t root_signature_crc;
};
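
// Serialized layout written by shader_compile_binary_from_spirv() (informal
// sketch; the serialization code below is the source of truth):
//   4 bytes - 'G', 'S', 'B', 'D' magic.
//   4 bytes - SHADER_BINARY_VERSION.
//   4 bytes - sizeof(RenderingDeviceD3D12ShaderBinaryData).
//   RenderingDeviceD3D12ShaderBinaryData - main header.
//   Shader name (UTF-8, padded to 4 bytes).
//   Per set: uint32_t binding count, then that many RenderingDeviceD3D12ShaderBinaryDataBinding.
//   specialization_constants_count entries of RenderingDeviceD3D12ShaderBinarySpecializationConstant.
//   Per stage: stage id, uncompressed DXIL size, zstd size, then the zstd-compressed DXIL (padded to 4 bytes).
//   Root signature blob (root_signature_len bytes; its CRC32 is stored in the header).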
Vector<uint8_t> RenderingDeviceD3D12::shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name) {
	SpirvReflectionData spirv_data;
	if (_reflect_spirv(p_spirv, spirv_data) != OK) {
		return Vector<uint8_t>();
	}

	// Collect reflection data into binary data.
	RenderingDeviceD3D12ShaderBinaryData binary_data = {};
	Vector<Vector<RenderingDeviceD3D12ShaderBinaryDataBinding>> uniform_info;
	Vector<RenderingDeviceD3D12ShaderBinarySpecializationConstant> specialization_constants;
	{
		binary_data.vertex_input_mask = spirv_data.vertex_input_mask;
		binary_data.fragment_output_mask = spirv_data.fragment_output_mask;
		binary_data.specialization_constants_count = spirv_data.specialization_constants.size();
		binary_data.is_compute = spirv_data.is_compute;
		binary_data.compute_local_size[0] = spirv_data.compute_local_size[0];
		binary_data.compute_local_size[1] = spirv_data.compute_local_size[1];
		binary_data.compute_local_size[2] = spirv_data.compute_local_size[2];
		binary_data.set_count = spirv_data.uniforms.size();
		binary_data.push_constant_size = spirv_data.push_constant_size;
		binary_data.nir_runtime_data_root_param_idx = UINT32_MAX;
		binary_data.stage_count = p_spirv.size();

		for (const Vector<SpirvReflectionData::Uniform> &spirv_set : spirv_data.uniforms) {
			Vector<RenderingDeviceD3D12ShaderBinaryDataBinding> set_bindings;
			for (const SpirvReflectionData::Uniform &spirv_uniform : spirv_set) {
				RenderingDeviceD3D12ShaderBinaryDataBinding binding{};
				binding.type = (uint32_t)spirv_uniform.type;
				binding.binding = spirv_uniform.binding;
				binding.stages = (uint32_t)spirv_uniform.stages_mask;
				binding.length = spirv_uniform.length;
				binding.writable = (uint32_t)spirv_uniform.writable;
				set_bindings.push_back(binding);
			}
			uniform_info.push_back(set_bindings);
		}

		for (const SpirvReflectionData::SpecializationConstant &spirv_sc : spirv_data.specialization_constants) {
			RenderingDeviceD3D12ShaderBinarySpecializationConstant spec_constant{};
			spec_constant.type = (uint32_t)spirv_sc.type;
			spec_constant.constant_id = spirv_sc.constant_id;
			spec_constant.int_value = spirv_sc.int_value;
			specialization_constants.push_back(spec_constant);

			binary_data.spirv_specialization_constants_ids_mask |= (1 << spirv_sc.constant_id);
		}
	}

	// Translate SPIR-V shaders to DXIL, and collect shader info from the new representation.
	HashMap<ShaderStage, Vector<uint8_t>> dxil_blobs;
	BitField<ShaderStage> stages_processed;
	{
		HashMap<int, nir_shader *> stages_nir_shaders;

		auto free_nir_shaders = [&]() {
			for (KeyValue<int, nir_shader *> &E : stages_nir_shaders) {
				ralloc_free(E.value);
			}
			stages_nir_shaders.clear();
		};

		// This is based on spirv2dxil.c. May need updates when it changes.
		// Also, this has to stay around until after linking.
		nir_shader_compiler_options nir_options = *dxil_get_nir_compiler_options();
		nir_options.lower_base_vertex = false;

		dxil_spirv_runtime_conf dxil_runtime_conf = {};
		dxil_runtime_conf.runtime_data_cbv.register_space = RUNTIME_DATA_SPACE;
		dxil_runtime_conf.runtime_data_cbv.base_shader_register = RUNTIME_DATA_REGISTER;
		dxil_runtime_conf.push_constant_cbv.register_space = ROOT_CONSTANT_SPACE;
		dxil_runtime_conf.push_constant_cbv.base_shader_register = ROOT_CONSTANT_REGISTER;
		dxil_runtime_conf.zero_based_vertex_instance_id = true;
		dxil_runtime_conf.zero_based_compute_workgroup_id = true;
		dxil_runtime_conf.declared_read_only_images_as_srvs = true;
		// Making this explicit to let maintainers know that in practice this didn't improve performance,
		// probably because data generated by one shader and consumed by another one forces the resource
		// to transition from UAV to SRV, and back, instead of being a UAV all the time.
		// In case someone wants to try, care must be taken so that, if incompatible bindings across stages
		// happen as a result, all the stages are re-translated. That can happen if, for instance, a stage
		// uses an allegedly writable resource only for reading while the next stage doesn't.
		dxil_runtime_conf.inferred_read_only_images_as_srvs = false;

		// - Translate SPIR-V to NIR.
		for (int i = 0; i < p_spirv.size(); i++) {
			ShaderStage stage = (ShaderStage)p_spirv[i].shader_stage;
			ShaderStage stage_flag = (ShaderStage)(1 << p_spirv[i].shader_stage);

			stages_processed.set_flag(stage_flag);

			{
				const char *entry_point = "main";

				static const gl_shader_stage SPIRV_TO_MESA_STAGES[SHADER_STAGE_MAX] = {
					/* SHADER_STAGE_VERTEX */ MESA_SHADER_VERTEX,
					/* SHADER_STAGE_FRAGMENT */ MESA_SHADER_FRAGMENT,
					/* SHADER_STAGE_TESSELATION_CONTROL */ MESA_SHADER_TESS_CTRL,
					/* SHADER_STAGE_TESSELATION_EVALUATION */ MESA_SHADER_TESS_EVAL,
					/* SHADER_STAGE_COMPUTE */ MESA_SHADER_COMPUTE,
				};

				nir_shader *nir_shader = spirv_to_nir(
						(const uint32_t *)p_spirv[i].spir_v.ptr(),
						p_spirv[i].spir_v.size() / sizeof(uint32_t),
						nullptr,
						0,
						SPIRV_TO_MESA_STAGES[stage],
						entry_point,
						dxil_spirv_nir_get_spirv_options(), &nir_options);
				if (!nir_shader) {
					free_nir_shaders();
					ERR_FAIL_V_MSG(Vector<uint8_t>(), "Shader translation (step 1) at stage " + String(shader_stage_names[stage]) + " failed.");
				}

#ifdef DEV_ENABLED
				nir_validate_shader(nir_shader, "Validate before feeding NIR to the DXIL compiler");
#endif

				if (stage == SHADER_STAGE_VERTEX) {
					dxil_runtime_conf.yz_flip.y_mask = 0xffff;
					dxil_runtime_conf.yz_flip.mode = DXIL_SPIRV_Y_FLIP_UNCONDITIONAL;
				} else {
					dxil_runtime_conf.yz_flip.y_mask = 0;
					dxil_runtime_conf.yz_flip.mode = DXIL_SPIRV_YZ_FLIP_NONE;
				}

				// This is based on spirv2dxil.c. May need updates when it changes.
				dxil_spirv_nir_prep(nir_shader);
				bool requires_runtime_data = {};
				dxil_spirv_nir_passes(nir_shader, &dxil_runtime_conf, &requires_runtime_data);

				stages_nir_shaders[stage] = nir_shader;
			}
		}

		// - Link NIR shaders.
		for (int i = SHADER_STAGE_MAX - 1; i >= 0; i--) {
			if (!stages_nir_shaders.has(i)) {
				continue;
			}
			nir_shader *shader = stages_nir_shaders[i];
			nir_shader *prev_shader = nullptr;
			for (int j = i - 1; j >= 0; j--) {
				if (stages_nir_shaders.has(j)) {
					prev_shader = stages_nir_shaders[j];
					break;
				}
			}
			if (prev_shader) {
				bool requires_runtime_data = {};
				dxil_spirv_nir_link(shader, prev_shader, &dxil_runtime_conf, &requires_runtime_data);
			}
		}

		// - Translate NIR to DXIL.
		for (int i = 0; i < p_spirv.size(); i++) {
			ShaderStage stage = (ShaderStage)p_spirv[i].shader_stage;

			struct ShaderData {
				ShaderStage stage;
				RenderingDeviceD3D12ShaderBinaryData &binary_data;
				Vector<Vector<RenderingDeviceD3D12ShaderBinaryDataBinding>> &uniform_info;
				Vector<RenderingDeviceD3D12ShaderBinarySpecializationConstant> &specialization_constants;
			} shader_data{ stage, binary_data, uniform_info, specialization_constants };

			GodotNirCallbacks godot_nir_callbacks = {};
			godot_nir_callbacks.data = &shader_data;

			godot_nir_callbacks.report_resource = [](uint32_t p_register, uint32_t p_space, uint32_t p_dxil_type, void *p_data) {
				ShaderData &shader_data = *(ShaderData *)p_data;

				// Types based on Mesa's dxil_container.h.
				static const uint32_t DXIL_RES_SAMPLER = 1;
				static const ResourceClass DXIL_TYPE_TO_CLASS[] = {
					/* DXIL_RES_INVALID */ RES_CLASS_INVALID,
					/* DXIL_RES_SAMPLER */ RES_CLASS_INVALID, // Handling sampler as a flag.
					/* DXIL_RES_CBV */ RES_CLASS_CBV,
					/* DXIL_RES_SRV_TYPED */ RES_CLASS_SRV,
					/* DXIL_RES_SRV_RAW */ RES_CLASS_SRV,
					/* DXIL_RES_SRV_STRUCTURED */ RES_CLASS_SRV,
					/* DXIL_RES_UAV_TYPED */ RES_CLASS_UAV,
					/* DXIL_RES_UAV_RAW */ RES_CLASS_UAV,
					/* DXIL_RES_UAV_STRUCTURED */ RES_CLASS_UAV,
					/* DXIL_RES_UAV_STRUCTURED_WITH_COUNTER */ RES_CLASS_INVALID,
				};
				DEV_ASSERT(p_dxil_type < ARRAY_SIZE(DXIL_TYPE_TO_CLASS));
				ResourceClass res_class = DXIL_TYPE_TO_CLASS[p_dxil_type];

				if (p_register == ROOT_CONSTANT_REGISTER && p_space == ROOT_CONSTANT_SPACE) {
					DEV_ASSERT(res_class == RES_CLASS_CBV);
					shader_data.binary_data.dxil_push_constant_stages |= (1 << shader_data.stage);
				} else if (p_register == RUNTIME_DATA_REGISTER && p_space == RUNTIME_DATA_SPACE) {
					DEV_ASSERT(res_class == RES_CLASS_CBV);
					shader_data.binary_data.nir_runtime_data_root_param_idx = 1; // Temporary, to be determined later.
				} else {
					DEV_ASSERT(p_space == 0);

					uint32_t set = p_register / GODOT_NIR_DESCRIPTOR_SET_MULTIPLIER;
					uint32_t binding = (p_register % GODOT_NIR_DESCRIPTOR_SET_MULTIPLIER) / GODOT_NIR_BINDING_MULTIPLIER;
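					// Hypothetical numbers, purely illustrative (the actual multipliers are
					// defined in the NIR glue code): with a descriptor set multiplier of 1000
					// and a binding multiplier of 10, register 2030 in space 0 would decode to
					// set 2, binding 3. The dxil_register computation in the root signature
					// code below performs the inverse mapping.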
					DEV_ASSERT(set < (uint32_t)shader_data.uniform_info.size());
					bool found = false;
					for (int i = 0; i < shader_data.uniform_info[set].size(); i++) {
						if (shader_data.uniform_info[set][i].binding != binding) {
							continue;
						}

						RenderingDeviceD3D12ShaderBinaryDataBinding &binding_info = shader_data.uniform_info.write[set].write[i];

						binding_info.dxil_stages |= (1 << shader_data.stage);

						if (res_class != RES_CLASS_INVALID) {
							DEV_ASSERT(binding_info.res_class == (uint32_t)RES_CLASS_INVALID || binding_info.res_class == (uint32_t)res_class);
							binding_info.res_class = res_class;
						} else if (p_dxil_type == DXIL_RES_SAMPLER) {
							binding_info.has_sampler = (uint32_t)true;
						} else {
							CRASH_NOW();
						}

						found = true;
						break;
					}
					DEV_ASSERT(found);
				}
			};

			godot_nir_callbacks.report_sc_bit_offset_fn = [](uint32_t p_sc_id, uint64_t p_bit_offset, void *p_data) {
				ShaderData &shader_data = *(ShaderData *)p_data;

				bool found = false;
				for (int i = 0; i < shader_data.specialization_constants.size(); i++) {
					if (shader_data.specialization_constants[i].constant_id != p_sc_id) {
						continue;
					}

					uint32_t offset_idx = shader_stage_bit_offset_indices[shader_data.stage];
					DEV_ASSERT(shader_data.specialization_constants.write[i].stages_bit_offsets[offset_idx] == 0);
					shader_data.specialization_constants.write[i].stages_bit_offsets[offset_idx] = p_bit_offset;

					found = true;
					break;
				}
				DEV_ASSERT(found);
			};

			godot_nir_callbacks.report_bitcode_bit_offset_fn = [](uint64_t p_bit_offset, void *p_data) {
				DEV_ASSERT(p_bit_offset % 8 == 0);
				ShaderData &shader_data = *(ShaderData *)p_data;
				uint32_t offset_idx = shader_stage_bit_offset_indices[shader_data.stage];
				for (int i = 0; i < shader_data.specialization_constants.size(); i++) {
					if (shader_data.specialization_constants.write[i].stages_bit_offsets[offset_idx] == 0) {
						// This SC has been optimized out from this stage.
						continue;
					}

					shader_data.specialization_constants.write[i].stages_bit_offsets[offset_idx] += p_bit_offset;
				}
			};

			auto shader_model_d3d_to_dxil = [](D3D_SHADER_MODEL p_d3d_shader_model) -> dxil_shader_model {
				static_assert(SHADER_MODEL_6_0 == 0x60000);
				static_assert(SHADER_MODEL_6_3 == 0x60003);
				static_assert(D3D_SHADER_MODEL_6_0 == 0x60);
				static_assert(D3D_SHADER_MODEL_6_3 == 0x63);
				return (dxil_shader_model)((p_d3d_shader_model >> 4) * 0x10000 + (p_d3d_shader_model & 0xf));
			};
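			// E.g., per the static_asserts above: D3D_SHADER_MODEL_6_3 (0x63) maps to
			// (0x6 * 0x10000) + 0x3 = 0x60003, i.e. Mesa's SHADER_MODEL_6_3.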

			nir_to_dxil_options nir_to_dxil_options = {};
			nir_to_dxil_options.environment = DXIL_ENVIRONMENT_VULKAN;
			nir_to_dxil_options.shader_model_max = shader_model_d3d_to_dxil(context->get_shader_capabilities().shader_model);
			nir_to_dxil_options.validator_version_max = dxil_get_validator_version(get_dxil_validator_for_current_thread());
			nir_to_dxil_options.godot_nir_callbacks = &godot_nir_callbacks;

			dxil_logger logger = {};
			logger.log = [](void *p_priv, const char *p_msg) {
#ifdef DEBUG_ENABLED
				print_verbose(p_msg);
#endif
			};

			blob dxil_blob = {};
			bool ok = nir_to_dxil(stages_nir_shaders[stage], &nir_to_dxil_options, &logger, &dxil_blob);
			ralloc_free(stages_nir_shaders[stage]);
			stages_nir_shaders.erase(stage);
			if (!ok) {
				free_nir_shaders();
				ERR_FAIL_V_MSG(Vector<uint8_t>(), "Shader translation at stage " + String(shader_stage_names[stage]) + " failed.");
			}

			Vector<uint8_t> blob_copy;
			blob_copy.resize(dxil_blob.size);
			memcpy(blob_copy.ptrw(), dxil_blob.data, dxil_blob.size);
			blob_finish(&dxil_blob);
			dxil_blobs.insert(stage, blob_copy);
		}
	}

#if 0
	if (dxil_blobs.has(SHADER_STAGE_FRAGMENT)) {
		Ref<FileAccess> f = FileAccess::open("res://1.dxil", FileAccess::WRITE);
		f->store_buffer(dxil_blobs[SHADER_STAGE_FRAGMENT].ptr(), dxil_blobs[SHADER_STAGE_FRAGMENT].size());
	}
#endif

	// Patch with default values of specialization constants.
	if (specialization_constants.size()) {
		for (const RenderingDeviceD3D12ShaderBinarySpecializationConstant &sc : specialization_constants) {
			_shader_patch_dxil_specialization_constant((PipelineSpecializationConstantType)sc.type, &sc.int_value, sc.stages_bit_offsets, dxil_blobs, true);
		}
#if 0
		if (dxil_blobs.has(SHADER_STAGE_FRAGMENT)) {
			Ref<FileAccess> f = FileAccess::open("res://2.dxil", FileAccess::WRITE);
			f->store_buffer(dxil_blobs[SHADER_STAGE_FRAGMENT].ptr(), dxil_blobs[SHADER_STAGE_FRAGMENT].size());
		}
#endif
	}

	// Sign.
	for (KeyValue<ShaderStage, Vector<uint8_t>> &E : dxil_blobs) {
		ShaderStage stage = E.key;
		Vector<uint8_t> &dxil_blob = E.value;
		bool sign_ok = _shader_sign_dxil_bytecode(stage, dxil_blob);
		ERR_FAIL_COND_V(!sign_ok, Vector<uint8_t>());
	}

	// Build the root signature.
	ComPtr<ID3DBlob> root_sig_blob;
	{
		auto stages_to_d3d12_visibility = [](uint32_t p_stages_mask) -> D3D12_SHADER_VISIBILITY {
			switch (p_stages_mask) {
				case SHADER_STAGE_VERTEX_BIT: {
					return D3D12_SHADER_VISIBILITY_VERTEX;
				}
				case SHADER_STAGE_FRAGMENT_BIT: {
					return D3D12_SHADER_VISIBILITY_PIXEL;
				}
				default: {
					return D3D12_SHADER_VISIBILITY_ALL;
				}
			}
		};

		LocalVector<D3D12_ROOT_PARAMETER1> root_params;

		// Root (push) constants.
		if (binary_data.dxil_push_constant_stages) {
			CD3DX12_ROOT_PARAMETER1 push_constant;
			push_constant.InitAsConstants(
					binary_data.push_constant_size / sizeof(uint32_t),
					ROOT_CONSTANT_REGISTER,
					ROOT_CONSTANT_SPACE,
					stages_to_d3d12_visibility(binary_data.dxil_push_constant_stages));
			root_params.push_back(push_constant);
		}

		// NIR-DXIL runtime data.
		if (binary_data.nir_runtime_data_root_param_idx == 1) { // Set above to 1 when it was discovered that runtime data is needed.
			DEV_ASSERT(!binary_data.is_compute); // Could be supported if needed, but it's pointless as of now.
			binary_data.nir_runtime_data_root_param_idx = root_params.size();
			CD3DX12_ROOT_PARAMETER1 nir_runtime_data;
			nir_runtime_data.InitAsConstants(
					sizeof(dxil_spirv_vertex_runtime_data) / sizeof(uint32_t),
					RUNTIME_DATA_REGISTER,
					RUNTIME_DATA_SPACE,
					D3D12_SHADER_VISIBILITY_VERTEX);
			root_params.push_back(nir_runtime_data);
		}

		// Descriptor tables (up to two per uniform set, for resources and/or samplers).

		// These have to stay around until serialization!
		struct TraceableDescriptorTable {
			uint32_t stages_mask = {};
			Vector<D3D12_DESCRIPTOR_RANGE1> ranges;
			Vector<RenderingDeviceD3D12ShaderBinaryDataBinding::RootSignatureLocation *> root_sig_locations;
		};
		Vector<TraceableDescriptorTable> resource_tables_maps;
		Vector<TraceableDescriptorTable> sampler_tables_maps;

		for (int set = 0; set < uniform_info.size(); set++) {
			bool first_resource_in_set = true;
			bool first_sampler_in_set = true;
			uniform_info.write[set].sort();
			for (int i = 0; i < uniform_info[set].size(); i++) {
				const RenderingDeviceD3D12ShaderBinaryDataBinding &binding = uniform_info[set][i];

				bool really_used = binding.dxil_stages != 0;
#ifdef DEV_ENABLED
				bool anybody_home = (ResourceClass)binding.res_class != RES_CLASS_INVALID || binding.has_sampler;
				DEV_ASSERT(anybody_home == really_used);
#endif
				if (!really_used) {
					continue; // Existed in SPIR-V; went away in DXIL.
				}

				auto insert_range = [](D3D12_DESCRIPTOR_RANGE_TYPE p_range_type,
						uint32_t p_num_descriptors,
						uint32_t p_dxil_register,
						uint32_t p_dxil_stages_mask,
						RenderingDeviceD3D12ShaderBinaryDataBinding::RootSignatureLocation &p_root_sig_locations,
						Vector<TraceableDescriptorTable> &r_tables,
						bool &r_first_in_set) {
					if (r_first_in_set) {
						r_tables.resize(r_tables.size() + 1);
						r_first_in_set = false;
					}

					TraceableDescriptorTable &table = r_tables.write[r_tables.size() - 1];
					table.stages_mask |= p_dxil_stages_mask;

					CD3DX12_DESCRIPTOR_RANGE1 range;
					// Due to the aliasing hack for SRV-UAV of different families,
					// we can be causing an unintended change of data (sometimes the validation layers catch it).
					D3D12_DESCRIPTOR_RANGE_FLAGS flags = D3D12_DESCRIPTOR_RANGE_FLAG_NONE;
					if (p_range_type == D3D12_DESCRIPTOR_RANGE_TYPE_SRV || p_range_type == D3D12_DESCRIPTOR_RANGE_TYPE_UAV) {
						flags = D3D12_DESCRIPTOR_RANGE_FLAG_DATA_VOLATILE;
					} else if (p_range_type == D3D12_DESCRIPTOR_RANGE_TYPE_CBV) {
						flags = D3D12_DESCRIPTOR_RANGE_FLAG_DATA_STATIC_WHILE_SET_AT_EXECUTE;
					}
					range.Init(p_range_type, p_num_descriptors, p_dxil_register, 0, flags);

					table.ranges.push_back(range);
					table.root_sig_locations.push_back(&p_root_sig_locations);
				};

				uint32_t num_descriptors = 1;

				D3D12_DESCRIPTOR_RANGE_TYPE resource_range_type = {};
				switch ((ResourceClass)binding.res_class) {
					case RES_CLASS_INVALID: {
						num_descriptors = binding.length;
						DEV_ASSERT(binding.has_sampler);
					} break;
					case RES_CLASS_CBV: {
						resource_range_type = D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
						DEV_ASSERT(!binding.has_sampler);
					} break;
					case RES_CLASS_SRV: {
						resource_range_type = D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
						num_descriptors = MAX(1u, binding.length); // An unbound R/O buffer is reflected as zero-size.
					} break;
					case RES_CLASS_UAV: {
						resource_range_type = D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
						num_descriptors = MAX(1u, binding.length); // An unbound R/W buffer is reflected as zero-size.
						DEV_ASSERT(!binding.has_sampler);
					} break;
				}

				uint32_t dxil_register = set * GODOT_NIR_DESCRIPTOR_SET_MULTIPLIER + binding.binding * GODOT_NIR_BINDING_MULTIPLIER;

				if (binding.res_class != RES_CLASS_INVALID) {
					insert_range(
							resource_range_type,
							num_descriptors,
							dxil_register,
							uniform_info[set][i].dxil_stages,
							uniform_info.write[set].write[i].root_sig_locations[RS_LOC_TYPE_RESOURCE],
							resource_tables_maps,
							first_resource_in_set);
				}
				if (binding.has_sampler) {
					insert_range(
							D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER,
							num_descriptors,
							dxil_register,
							uniform_info[set][i].dxil_stages,
							uniform_info.write[set].write[i].root_sig_locations[RS_LOC_TYPE_SAMPLER],
							sampler_tables_maps,
							first_sampler_in_set);
				}
			}
		}

		auto make_descriptor_tables = [&root_params, &stages_to_d3d12_visibility](const Vector<TraceableDescriptorTable> &p_tables) {
			for (const TraceableDescriptorTable &table : p_tables) {
				D3D12_SHADER_VISIBILITY visibility = stages_to_d3d12_visibility(table.stages_mask);
				DEV_ASSERT(table.ranges.size() == table.root_sig_locations.size());
				for (int i = 0; i < table.ranges.size(); i++) {
					// By now we know very well which root signature location corresponds to the pointed uniform.
					table.root_sig_locations[i]->root_param_idx = root_params.size();
					table.root_sig_locations[i]->range_idx = i;
				}

				CD3DX12_ROOT_PARAMETER1 root_table;
				root_table.InitAsDescriptorTable(table.ranges.size(), table.ranges.ptr(), visibility);
				root_params.push_back(root_table);
			}
		};
		make_descriptor_tables(resource_tables_maps);
		make_descriptor_tables(sampler_tables_maps);

		CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC root_sig_desc = {};
		D3D12_ROOT_SIGNATURE_FLAGS root_sig_flags =
				D3D12_ROOT_SIGNATURE_FLAG_DENY_HULL_SHADER_ROOT_ACCESS |
				D3D12_ROOT_SIGNATURE_FLAG_DENY_DOMAIN_SHADER_ROOT_ACCESS |
				D3D12_ROOT_SIGNATURE_FLAG_DENY_GEOMETRY_SHADER_ROOT_ACCESS |
				D3D12_ROOT_SIGNATURE_FLAG_DENY_AMPLIFICATION_SHADER_ROOT_ACCESS |
				D3D12_ROOT_SIGNATURE_FLAG_DENY_MESH_SHADER_ROOT_ACCESS;
		if (!stages_processed.has_flag(SHADER_STAGE_VERTEX_BIT)) {
			root_sig_flags |= D3D12_ROOT_SIGNATURE_FLAG_DENY_VERTEX_SHADER_ROOT_ACCESS;
		}
		if (!stages_processed.has_flag(SHADER_STAGE_FRAGMENT_BIT)) {
			root_sig_flags |= D3D12_ROOT_SIGNATURE_FLAG_DENY_PIXEL_SHADER_ROOT_ACCESS;
		}
		if (binary_data.vertex_input_mask) {
			root_sig_flags |= D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT;
		}
		root_sig_desc.Init_1_1(root_params.size(), root_params.ptr(), 0, nullptr, root_sig_flags);

		ComPtr<ID3DBlob> error_blob;
		HRESULT res = D3DX12SerializeVersionedRootSignature(&root_sig_desc, D3D_ROOT_SIGNATURE_VERSION_1_1, root_sig_blob.GetAddressOf(), error_blob.GetAddressOf());
		ERR_FAIL_COND_V_MSG(res, Vector<uint8_t>(),
				"Serialization of root signature failed with error " + vformat("0x%08x", res) + " and the following message:\n" + String((char *)error_blob->GetBufferPointer(), error_blob->GetBufferSize()));

		binary_data.root_signature_crc = crc32(0, nullptr, 0);
		binary_data.root_signature_crc = crc32(binary_data.root_signature_crc, (const Bytef *)root_sig_blob->GetBufferPointer(), root_sig_blob->GetBufferSize());
	}

	Vector<Vector<uint8_t>> compressed_stages;
	Vector<uint32_t> zstd_size;

	uint32_t stages_binary_size = 0;

	for (int i = 0; i < p_spirv.size(); i++) {
		Vector<uint8_t> zstd;
		Vector<uint8_t> &dxil_blob = dxil_blobs[p_spirv[i].shader_stage];
		zstd.resize(Compression::get_max_compressed_buffer_size(dxil_blob.size(), Compression::MODE_ZSTD));
		int dst_size = Compression::compress(zstd.ptrw(), dxil_blob.ptr(), dxil_blob.size(), Compression::MODE_ZSTD);

		zstd_size.push_back(dst_size);
		zstd.resize(dst_size);
		compressed_stages.push_back(zstd);

		uint32_t s = compressed_stages[i].size();
		if (s % 4 != 0) {
			s += 4 - (s % 4);
		}
		stages_binary_size += s;
	}

	CharString shader_name_utf = p_shader_name.utf8();
	binary_data.shader_name_len = shader_name_utf.length();

	uint32_t total_size = sizeof(uint32_t) * 3; // Header + version + main data size.
	total_size += sizeof(RenderingDeviceD3D12ShaderBinaryData);
	total_size += binary_data.shader_name_len;
	if ((binary_data.shader_name_len % 4) != 0) { // Alignment rules are really strange.
		total_size += 4 - (binary_data.shader_name_len % 4);
	}

	for (int i = 0; i < uniform_info.size(); i++) {
		total_size += sizeof(uint32_t);
		total_size += uniform_info[i].size() * sizeof(RenderingDeviceD3D12ShaderBinaryDataBinding);
	}

	total_size += sizeof(RenderingDeviceD3D12ShaderBinarySpecializationConstant) * specialization_constants.size();

	total_size += compressed_stages.size() * sizeof(uint32_t) * 3; // Sizes.
	total_size += stages_binary_size;

	binary_data.root_signature_len = root_sig_blob->GetBufferSize();
	total_size += binary_data.root_signature_len;

	Vector<uint8_t> ret;
	ret.resize(total_size);
	{
		uint32_t offset = 0;
		uint8_t *binptr = ret.ptrw();
		binptr[0] = 'G';
		binptr[1] = 'S';
		binptr[2] = 'B';
		binptr[3] = 'D'; // Godot shader binary data.
		offset += 4;
		encode_uint32(SHADER_BINARY_VERSION, binptr + offset);
		offset += sizeof(uint32_t);
		encode_uint32(sizeof(RenderingDeviceD3D12ShaderBinaryData), binptr + offset);
		offset += sizeof(uint32_t);
		memcpy(binptr + offset, &binary_data, sizeof(RenderingDeviceD3D12ShaderBinaryData));
		offset += sizeof(RenderingDeviceD3D12ShaderBinaryData);

		if (binary_data.shader_name_len > 0) {
			memcpy(binptr + offset, shader_name_utf.ptr(), binary_data.shader_name_len);
			offset += binary_data.shader_name_len;
			if ((binary_data.shader_name_len % 4) != 0) { // Alignment rules are really strange.
				offset += 4 - (binary_data.shader_name_len % 4);
			}
		}

		for (int i = 0; i < uniform_info.size(); i++) {
			int count = uniform_info[i].size();
			encode_uint32(count, binptr + offset);
			offset += sizeof(uint32_t);
			if (count > 0) {
				memcpy(binptr + offset, uniform_info[i].ptr(), sizeof(RenderingDeviceD3D12ShaderBinaryDataBinding) * count);
				offset += sizeof(RenderingDeviceD3D12ShaderBinaryDataBinding) * count;
			}
		}

		if (specialization_constants.size()) {
			memcpy(binptr + offset, specialization_constants.ptr(), sizeof(RenderingDeviceD3D12ShaderBinarySpecializationConstant) * specialization_constants.size());
			offset += sizeof(RenderingDeviceD3D12ShaderBinarySpecializationConstant) * specialization_constants.size();
		}

		for (int i = 0; i < compressed_stages.size(); i++) {
			encode_uint32(p_spirv[i].shader_stage, binptr + offset);
			offset += sizeof(uint32_t);
			encode_uint32(dxil_blobs[p_spirv[i].shader_stage].size(), binptr + offset);
			offset += sizeof(uint32_t);
			encode_uint32(zstd_size[i], binptr + offset);
			offset += sizeof(uint32_t);
			memcpy(binptr + offset, compressed_stages[i].ptr(), compressed_stages[i].size());

			uint32_t s = compressed_stages[i].size();
			if (s % 4 != 0) {
				s += 4 - (s % 4);
			}
			offset += s;
		}

		memcpy(binptr + offset, root_sig_blob->GetBufferPointer(), root_sig_blob->GetBufferSize());
		offset += root_sig_blob->GetBufferSize();

		ERR_FAIL_COND_V(offset != (uint32_t)ret.size(), Vector<uint8_t>());
	}

	return ret;
}

RID RenderingDeviceD3D12::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, RID p_placeholder) {
	const uint8_t *binptr = p_shader_binary.ptr();
	uint32_t binsize = p_shader_binary.size();

	uint32_t read_offset = 0;

	// Consistency check.
	ERR_FAIL_COND_V(binsize < sizeof(uint32_t) * 3 + sizeof(RenderingDeviceD3D12ShaderBinaryData), RID());
	ERR_FAIL_COND_V(binptr[0] != 'G' || binptr[1] != 'S' || binptr[2] != 'B' || binptr[3] != 'D', RID());

	uint32_t bin_version = decode_uint32(binptr + 4);
	ERR_FAIL_COND_V(bin_version != SHADER_BINARY_VERSION, RID());

	uint32_t bin_data_size = decode_uint32(binptr + 8);

	const RenderingDeviceD3D12ShaderBinaryData &binary_data = *(reinterpret_cast<const RenderingDeviceD3D12ShaderBinaryData *>(binptr + 12));

	uint64_t vertex_input_mask = binary_data.vertex_input_mask;
	uint32_t fragment_output_mask = binary_data.fragment_output_mask;
	bool is_compute = binary_data.is_compute;
	const uint32_t compute_local_size[3] = { binary_data.compute_local_size[0], binary_data.compute_local_size[1], binary_data.compute_local_size[2] };

	read_offset += sizeof(uint32_t) * 3 + bin_data_size;

	String name;
	if (binary_data.shader_name_len) {
		name.parse_utf8((const char *)(binptr + read_offset), binary_data.shader_name_len);
		read_offset += binary_data.shader_name_len;
		if ((binary_data.shader_name_len % 4) != 0) { // Alignment rules are really strange.
			read_offset += 4 - (binary_data.shader_name_len % 4);
		}
	}

	Vector<Shader::Set> set_info;
	set_info.resize(binary_data.set_count);

	for (uint32_t i = 0; i < binary_data.set_count; i++) {
		ERR_FAIL_COND_V(read_offset + sizeof(uint32_t) >= binsize, RID());

		uint32_t set_count = decode_uint32(binptr + read_offset);
		read_offset += sizeof(uint32_t);

		const RenderingDeviceD3D12ShaderBinaryDataBinding *set_ptr = reinterpret_cast<const RenderingDeviceD3D12ShaderBinaryDataBinding *>(binptr + read_offset);
		uint32_t set_size = set_count * sizeof(RenderingDeviceD3D12ShaderBinaryDataBinding);
		ERR_FAIL_COND_V(read_offset + set_size >= binsize, RID());

		for (uint32_t j = 0; j < set_count; j++) {
			Shader::ShaderUniformInfo sui;

			sui.info.type = UniformType(set_ptr[j].type);
			sui.info.writable = set_ptr[j].writable;
			sui.info.length = set_ptr[j].length;
			sui.info.binding = set_ptr[j].binding;

			sui.binding.stages = set_ptr[j].dxil_stages;
			sui.binding.res_class = (ResourceClass)set_ptr[j].res_class;
			static_assert(sizeof(UniformBindingInfo::root_sig_locations) == sizeof(RenderingDeviceD3D12ShaderBinaryDataBinding::root_sig_locations));
			memcpy(&sui.binding.root_sig_locations, &set_ptr[j].root_sig_locations, sizeof(UniformBindingInfo::root_sig_locations));

			set_info.write[i].uniforms.push_back(sui);

			if (sui.binding.root_sig_locations.resource.root_param_idx != UINT32_MAX) {
				set_info.write[i].num_root_params.resources++;
			}
			if (sui.binding.root_sig_locations.sampler.root_param_idx != UINT32_MAX) {
				set_info.write[i].num_root_params.samplers++;
			}
		}

		read_offset += set_size;
	}

	ERR_FAIL_COND_V(read_offset + binary_data.specialization_constants_count * sizeof(RenderingDeviceD3D12ShaderBinarySpecializationConstant) >= binsize, RID());

	Vector<Shader::SpecializationConstant> specialization_constants;

	for (uint32_t i = 0; i < binary_data.specialization_constants_count; i++) {
		const RenderingDeviceD3D12ShaderBinarySpecializationConstant &src_sc = *(reinterpret_cast<const RenderingDeviceD3D12ShaderBinarySpecializationConstant *>(binptr + read_offset));
		Shader::SpecializationConstant sc;
		sc.constant.int_value = src_sc.int_value;
		sc.constant.type = PipelineSpecializationConstantType(src_sc.type);
		sc.constant.constant_id = src_sc.constant_id;
		memcpy(sc.stages_bit_offsets, src_sc.stages_bit_offsets, sizeof(sc.stages_bit_offsets));
		specialization_constants.push_back(sc);

		read_offset += sizeof(RenderingDeviceD3D12ShaderBinarySpecializationConstant);
	}

	HashMap<ShaderStage, Vector<uint8_t>> stages_bytecode;

	for (uint32_t i = 0; i < binary_data.stage_count; i++) {
		ERR_FAIL_COND_V(read_offset + sizeof(uint32_t) * 3 >= binsize, RID());

		uint32_t stage = decode_uint32(binptr + read_offset);
		read_offset += sizeof(uint32_t);
		uint32_t dxil_size = decode_uint32(binptr + read_offset);
		read_offset += sizeof(uint32_t);
		uint32_t zstd_size = decode_uint32(binptr + read_offset);
		read_offset += sizeof(uint32_t);

		// Decompress.
		Vector<uint8_t> dxil;
		dxil.resize(dxil_size);
		int dec_dxil_size = Compression::decompress(dxil.ptrw(), dxil.size(), binptr + read_offset, zstd_size, Compression::MODE_ZSTD);
		ERR_FAIL_COND_V(dec_dxil_size != (int32_t)dxil_size, RID());
		stages_bytecode[ShaderStage(stage)] = dxil;

		if (zstd_size % 4 != 0) {
			zstd_size += 4 - (zstd_size % 4);
		}

		ERR_FAIL_COND_V(read_offset + zstd_size > binsize, RID());

		read_offset += zstd_size;
	}

	const uint8_t *root_sig_data_ptr = binptr + read_offset;

	ComPtr<ID3D12RootSignatureDeserializer> root_sig_deserializer;
	HRESULT res = D3D12CreateRootSignatureDeserializer(root_sig_data_ptr, binary_data.root_signature_len, IID_PPV_ARGS(root_sig_deserializer.GetAddressOf()));
	ERR_FAIL_COND_V_MSG(res, RID(), "D3D12CreateRootSignatureDeserializer failed with error " + vformat("0x%08x", res) + ".");
	read_offset += binary_data.root_signature_len;

	ERR_FAIL_COND_V(read_offset != binsize, RID());

	// TODO: Need to lock?
	_THREAD_SAFE_METHOD_

	ComPtr<ID3D12RootSignature> root_signature;
	res = device->CreateRootSignature(0, root_sig_data_ptr, binary_data.root_signature_len, IID_PPV_ARGS(root_signature.GetAddressOf()));
	ERR_FAIL_COND_V_MSG(res, RID(), "CreateRootSignature failed with error " + vformat("0x%08x", res) + ".");

	RID id;
	if (p_placeholder.is_null()) {
		id = shader_owner.make_rid();
	} else {
		id = p_placeholder;
	}

	Shader *shader = shader_owner.get_or_null(id);
	ERR_FAIL_NULL_V(shader, RID());

	shader->vertex_input_mask = vertex_input_mask;
	shader->fragment_output_mask = fragment_output_mask;
	shader->spirv_push_constant_size = binary_data.push_constant_size;
	shader->dxil_push_constant_size = binary_data.dxil_push_constant_stages ? binary_data.push_constant_size : 0;
	shader->nir_runtime_data_root_param_idx = binary_data.nir_runtime_data_root_param_idx;
	shader->is_compute = is_compute;
	shader->compute_local_size[0] = compute_local_size[0];
	shader->compute_local_size[1] = compute_local_size[1];
	shader->compute_local_size[2] = compute_local_size[2];
	shader->specialization_constants = specialization_constants;
	shader->spirv_specialization_constants_ids_mask = binary_data.spirv_specialization_constants_ids_mask;
	shader->name = name;
	shader->root_signature = root_signature;
	shader->root_signature_deserializer = root_sig_deserializer;
	shader->root_signature_desc = root_sig_deserializer->GetRootSignatureDesc();
	shader->root_signature_crc = binary_data.root_signature_crc;
	shader->stages_bytecode = stages_bytecode;

	// Proceed to create descriptor sets.
	for (uint32_t i = 0; i < binary_data.set_count; i++) {
		uint32_t format = 0; // No format, default.

		Shader::Set &set = set_info.write[i];
		if (set.uniforms.size()) {
			// Has data, needs an actual format.
			UniformSetFormat usformat;
			usformat.uniform_info.resize(set.uniforms.size());
			for (int j = 0; j < set.uniforms.size(); j++) {
				usformat.uniform_info.write[j] = set.uniforms[j].info;
			}

			RBMap<UniformSetFormat, uint32_t>::Element *E = uniform_set_format_cache.find(usformat);
			if (E) {
				format = E->get();
			} else {
				format = uniform_set_format_cache.size() + 1;
				E = uniform_set_format_cache.insert(usformat, format);
				uniform_set_format_cache_reverse.push_back(E);
				DEV_ASSERT(uniform_set_format_cache_reverse.size() == uniform_set_format_cache.size());
			}
		}

		shader->sets.push_back(set);
		shader->set_formats.push_back(format);
	}

#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

RID RenderingDeviceD3D12::shader_create_placeholder() {
	Shader shader;
	return shader_owner.make_rid(shader);
}

uint64_t RenderingDeviceD3D12::shader_get_vertex_input_attribute_mask(RID p_shader) {
	_THREAD_SAFE_METHOD_

	const Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, 0);
	return shader->vertex_input_mask;
}
  4584. /******************/
  4585. /**** UNIFORMS ****/
  4586. /******************/
  4587. RID RenderingDeviceD3D12::uniform_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data) {
  4588. _THREAD_SAFE_METHOD_
  4589. ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());
  4590. Buffer buffer;
  4591. Error err = _buffer_allocate(&buffer, p_size_bytes, D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER, D3D12_HEAP_TYPE_DEFAULT);
  4592. ERR_FAIL_COND_V(err != OK, RID());
  4593. if (p_data.size()) {
  4594. uint64_t data_size = p_data.size();
  4595. const uint8_t *r = p_data.ptr();
  4596. _buffer_update(&buffer, 0, r, data_size);
  4597. }
  4598. RID id = uniform_buffer_owner.make_rid(buffer);
  4599. #ifdef DEV_ENABLED
  4600. set_resource_name(id, "RID:" + itos(id.get_id()));
  4601. #endif
  4602. return id;
  4603. }
  4604. RID RenderingDeviceD3D12::storage_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, BitField<StorageBufferUsage> p_usage) {
  4605. _THREAD_SAFE_METHOD_
  4606. ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());
  4607. Buffer buffer;
  4608. D3D12_RESOURCE_STATES states = D3D12_RESOURCE_STATE_COPY_SOURCE | D3D12_RESOURCE_STATE_COPY_DEST | D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
  4609. if (p_usage.has_flag(STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT)) {
  4610. states |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
  4611. }
  4612. Error err = _buffer_allocate(&buffer, p_size_bytes, states, D3D12_HEAP_TYPE_DEFAULT);
  4613. ERR_FAIL_COND_V(err != OK, RID());
  4614. if (p_data.size()) {
  4615. uint64_t data_size = p_data.size();
  4616. const uint8_t *r = p_data.ptr();
  4617. _buffer_update(&buffer, 0, r, data_size);
  4618. }
  4619. return storage_buffer_owner.make_rid(buffer);
  4620. }
  4621. RID RenderingDeviceD3D12::texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, const Vector<uint8_t> &p_data) {
  4622. _THREAD_SAFE_METHOD_
  4623. uint32_t element_size = get_format_vertex_size(p_format);
  4624. ERR_FAIL_COND_V_MSG(element_size == 0, RID(), "Format requested is not supported for texture buffers");
  4625. uint64_t size_bytes = uint64_t(element_size) * p_size_elements;
  4626. ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != size_bytes, RID());
  4627. TextureBuffer texture_buffer;
  4628. Error err = _buffer_allocate(&texture_buffer.buffer, size_bytes, D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE, D3D12_HEAP_TYPE_DEFAULT);
  4629. ERR_FAIL_COND_V(err != OK, RID());
  4630. if (p_data.size()) {
  4631. uint64_t data_size = p_data.size();
  4632. const uint8_t *r = p_data.ptr();
  4633. _buffer_update(&texture_buffer.buffer, 0, r, data_size);
  4634. }
  4635. // Allocate the view.
  4636. RID id = texture_buffer_owner.make_rid(texture_buffer);
  4637. #ifdef DEV_ENABLED
  4638. set_resource_name(id, "RID:" + itos(id.get_id()));
  4639. #endif
  4640. return id;
  4641. }
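
// DescriptorsHeap is a thin wrapper over an ID3D12DescriptorHeap; its Walker acts as a
// bump-pointer cursor that hands out consecutive CPU (and, for shader-visible heaps, GPU)
// handles, advancing handle_size bytes per descriptor.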
Error RenderingDeviceD3D12::DescriptorsHeap::allocate(ID3D12Device *p_device, D3D12_DESCRIPTOR_HEAP_TYPE p_type, uint32_t p_descriptor_count, bool p_for_gpu) {
	ERR_FAIL_COND_V(heap, ERR_ALREADY_EXISTS);
	ERR_FAIL_COND_V(p_descriptor_count == 0, ERR_INVALID_PARAMETER);

	handle_size = p_device->GetDescriptorHandleIncrementSize(p_type);

	desc.Type = p_type;
	desc.NumDescriptors = p_descriptor_count;
	desc.Flags = p_for_gpu ? D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE : D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
	HRESULT res = p_device->CreateDescriptorHeap(&desc, IID_PPV_ARGS(heap.GetAddressOf()));
	ERR_FAIL_COND_V_MSG(res, ERR_CANT_CREATE, "CreateDescriptorHeap failed with error " + vformat("0x%08x", res) + ".");

	return OK;
}

RenderingDeviceD3D12::DescriptorsHeap::Walker RenderingDeviceD3D12::DescriptorsHeap::make_walker() const {
	Walker walker;
	walker.handle_size = handle_size;
	walker.handle_count = desc.NumDescriptors;
	if (heap) {
		walker.first_cpu_handle = heap->GetCPUDescriptorHandleForHeapStart();
		if ((desc.Flags & D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE)) {
			walker.first_gpu_handle = heap->GetGPUDescriptorHandleForHeapStart();
		}
	}
	return walker;
}

void RenderingDeviceD3D12::DescriptorsHeap::Walker::advance(uint32_t p_count) {
	ERR_FAIL_COND_MSG(handle_index + p_count > handle_count, "Would advance past EOF.");
	handle_index += p_count;
}

D3D12_CPU_DESCRIPTOR_HANDLE RenderingDeviceD3D12::DescriptorsHeap::Walker::get_curr_cpu_handle() {
	ERR_FAIL_COND_V_MSG(is_at_eof(), D3D12_CPU_DESCRIPTOR_HANDLE(), "Heap walker is at EOF.");
	return D3D12_CPU_DESCRIPTOR_HANDLE{ first_cpu_handle.ptr + handle_index * handle_size };
}

D3D12_GPU_DESCRIPTOR_HANDLE RenderingDeviceD3D12::DescriptorsHeap::Walker::get_curr_gpu_handle() {
	ERR_FAIL_COND_V_MSG(!first_gpu_handle.ptr, D3D12_GPU_DESCRIPTOR_HANDLE(), "Can't provide a GPU handle from a non-GPU descriptors heap.");
	ERR_FAIL_COND_V_MSG(is_at_eof(), D3D12_GPU_DESCRIPTOR_HANDLE(), "Heap walker is at EOF.");
	return D3D12_GPU_DESCRIPTOR_HANDLE{ first_gpu_handle.ptr + handle_index * handle_size };
}

static void _add_descriptor_count_for_uniform(RenderingDevice::UniformType p_type, uint32_t p_binding_length, bool p_double_srv_uav_ambiguous, uint32_t &r_num_resources, uint32_t &r_num_samplers, bool &r_srv_uav_ambiguity) {
	r_srv_uav_ambiguity = false;

	// Some resource types can be SRV or UAV, depending on what NIR-DXIL decided for a specific shader variant.
	// The goal is to generate both SRV and UAV for the descriptor sets' heaps and copy only the relevant one
	// to the frame descriptor heap at binding time.
	// [[SRV_UAV_AMBIGUITY]]
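	// For example, NIR-DXIL may lower a storage buffer the shader only reads to an SRV
	// (t# register) rather than a UAV (u# register), and that choice can differ per variant.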
	switch (p_type) {
		case RenderingDevice::UNIFORM_TYPE_SAMPLER: {
			r_num_samplers += p_binding_length;
		} break;
		case RenderingDevice::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE:
		case RenderingDevice::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
			r_num_resources += p_binding_length;
			r_num_samplers += p_binding_length;
		} break;
		case RenderingDevice::UNIFORM_TYPE_UNIFORM_BUFFER: {
			r_num_resources += 1;
		} break;
		case RenderingDevice::UNIFORM_TYPE_STORAGE_BUFFER: {
			r_num_resources += p_double_srv_uav_ambiguous ? 2 : 1;
			r_srv_uav_ambiguity = true;
		} break;
		case RenderingDevice::UNIFORM_TYPE_IMAGE: {
			r_num_resources += p_binding_length * (p_double_srv_uav_ambiguous ? 2 : 1);
			r_srv_uav_ambiguity = true;
		} break;
		default: {
			r_num_resources += p_binding_length;
		}
	}
}

RID RenderingDeviceD3D12::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p_shader, uint32_t p_shader_set) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V(p_uniforms.size() == 0, RID());

	Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, RID());

	ERR_FAIL_COND_V_MSG(p_shader_set >= (uint32_t)shader->sets.size() || shader->sets[p_shader_set].uniforms.size() == 0, RID(),
			"Desired set (" + itos(p_shader_set) + ") not used by shader.");
	// See that all sets in shader are satisfied.

	const Shader::Set &set = shader->sets[p_shader_set];

	uint32_t uniform_count = p_uniforms.size();
	const Uniform *uniforms = p_uniforms.ptr();

	uint32_t set_uniform_count = set.uniforms.size();
	const Shader::ShaderUniformInfo *set_uniforms = set.uniforms.ptr();

	// Do a first pass to count resources and samplers, and do error checking.
	uint32_t num_resource_descs = 0;
	uint32_t num_sampler_descs = 0;
	LocalVector<int> uniform_indices;
	uniform_indices.resize(set_uniform_count);
	for (uint32_t i = 0; i < set_uniform_count; i++) {
		const UniformInfo &set_uniform = set_uniforms[i].info;
		int uniform_idx = -1;
		for (int j = 0; j < (int)uniform_count; j++) {
			if (uniforms[j].binding == set_uniform.binding) {
				uniform_idx = j;
			}
		}
		ERR_FAIL_COND_V_MSG(uniform_idx == -1, RID(),
				"All the shader bindings for the given set must be covered by the uniforms provided. Binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + ") was not provided.");
		uniform_indices[i] = uniform_idx;

		const Uniform &uniform = uniforms[uniform_idx];
		ERR_FAIL_COND_V_MSG(uniform.uniform_type != set_uniform.type, RID(),
				"Mismatch uniform type for binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + "). Expected '" + shader_uniform_names[set_uniform.type] + "', supplied: '" + shader_uniform_names[uniform.uniform_type] + "'.");

		// Since the uniform set may be created for a shader different from the one that will actually be bound,
		// which may have a different set of uniforms optimized out, the stages mask we can check now is not reliable.
		// Therefore, we can't make any assumptions here about descriptors that we may not need to create,
		// pixel or vertex-only shader resource states, etc.

		bool srv_uav_ambiguity = false;
		_add_descriptor_count_for_uniform(uniform.uniform_type, set_uniform.length, true, num_resource_descs, num_sampler_descs, srv_uav_ambiguity);
	}

	struct {
		DescriptorsHeap resources;
		DescriptorsHeap samplers;
	} desc_heaps;
#ifdef DEV_ENABLED
	LocalVector<UniformSet::ResourceDescInfo> resources_desc_info;
#endif

	if (num_resource_descs) {
		Error err = desc_heaps.resources.allocate(device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, num_resource_descs, false);
		ERR_FAIL_COND_V(err, RID());
	}
	if (num_sampler_descs) {
		Error err = desc_heaps.samplers.allocate(device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER, num_sampler_descs, false);
		ERR_FAIL_COND_V(err, RID());
	}
	struct {
		DescriptorsHeap::Walker resources;
		DescriptorsHeap::Walker samplers;
	} desc_heap_walkers;
	desc_heap_walkers.resources = desc_heaps.resources.make_walker();
	desc_heap_walkers.samplers = desc_heaps.samplers.make_walker();

	// Used for verification to make sure a uniform set does not use a framebuffer-bound texture.
	LocalVector<UniformSet::AttachableTexture> attachable_textures;
	struct RIDState {
		bool is_buffer = false;
		uint64_t shader_uniform_idx_mask = 0;
		ResourceState state;
	};
	HashMap<Resource *, RIDState> resource_states;

	for (uint32_t i = 0; i < set_uniform_count; i++) {
		const Shader::ShaderUniformInfo &set_uniform = set_uniforms[i];
		const Uniform &uniform = uniforms[uniform_indices[i]];

		// Stages defined in the shader may be missing for a uniform due to the optimizer,
		// but the opposite (extraneous stages present in the uniform's stages mask) would be an error.
		DEV_ASSERT(!(shader->is_compute && (set_uniform.binding.stages & (SHADER_STAGE_VERTEX_BIT | SHADER_STAGE_FRAGMENT_BIT))));
		DEV_ASSERT(!(!shader->is_compute && (set_uniform.binding.stages & SHADER_STAGE_COMPUTE_BIT)));

		switch (uniform.uniform_type) {
			case UNIFORM_TYPE_SAMPLER: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.info.length) {
					if (set_uniform.info.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Sampler (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.info.length) + ") sampler elements, so it should be provided an equal number of sampler IDs to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Sampler (binding: " + itos(uniform.binding) + ") should provide one ID referencing a sampler (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					D3D12_SAMPLER_DESC *sampler_desc = sampler_owner.get_or_null(uniform.get_id(j));
					ERR_FAIL_COND_V_MSG(!sampler_desc, RID(), "Sampler (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");

					device->CreateSampler(sampler_desc, desc_heap_walkers.samplers.get_curr_cpu_handle());
					desc_heap_walkers.samplers.advance();
				}
			} break;
			case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.info.length * 2) {
					if (set_uniform.info.length > 1) {
						ERR_FAIL_V_MSG(RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.info.length) + ") sampler&texture elements, so it should be provided twice the number of IDs (sampler, texture pairs) to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ") should provide two IDs referencing a sampler and then a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j += 2) {
					D3D12_SAMPLER_DESC *sampler_desc = sampler_owner.get_or_null(uniform.get_id(j));
					ERR_FAIL_COND_V_MSG(!sampler_desc, RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");

					RID rid = uniform.get_id(j + 1);
					Texture *texture = texture_owner.get_or_null(rid);
					ERR_FAIL_COND_V_MSG(!texture, RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
							"Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as a uniform.");

					device->CreateSampler(sampler_desc, desc_heap_walkers.samplers.get_curr_cpu_handle());
					desc_heap_walkers.samplers.advance();
					device->CreateShaderResourceView(texture->resource, &texture->srv_desc, desc_heap_walkers.resources.get_curr_cpu_handle());
#ifdef DEV_ENABLED
					resources_desc_info.push_back({ D3D12_DESCRIPTOR_RANGE_TYPE_SRV, texture->srv_desc.ViewDimension });
#endif
					desc_heap_walkers.resources.advance();

					RIDState &rs = resource_states[texture];
					rs.shader_uniform_idx_mask |= ((uint64_t)1 << i);
					rs.state.extend(D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE);

					if (texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_INPUT_ATTACHMENT_BIT)) {
						UniformSet::AttachableTexture attachable_texture;
						attachable_texture.bind = set_uniform.info.binding;
						attachable_texture.texture = texture->owner.is_valid() ? texture->owner : uniform.get_id(j + 1);
						attachable_textures.push_back(attachable_texture);
					}

					DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
				}
			} break;
			case UNIFORM_TYPE_TEXTURE: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.info.length) {
					if (set_uniform.info.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Texture (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.info.length) + ") textures, so it should be provided an equal number of texture IDs to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Texture (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID rid = uniform.get_id(j);
					Texture *texture = texture_owner.get_or_null(rid);
					ERR_FAIL_COND_V_MSG(!texture, RID(), "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
							"Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as a uniform.");

					device->CreateShaderResourceView(texture->resource, &texture->srv_desc, desc_heap_walkers.resources.get_curr_cpu_handle());
#ifdef DEV_ENABLED
					resources_desc_info.push_back({ D3D12_DESCRIPTOR_RANGE_TYPE_SRV, texture->srv_desc.ViewDimension });
#endif
					desc_heap_walkers.resources.advance();

					RIDState &rs = resource_states[texture];
					rs.shader_uniform_idx_mask |= ((uint64_t)1 << i);
					rs.state.extend(D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE);

					if ((texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_INPUT_ATTACHMENT_BIT))) {
						UniformSet::AttachableTexture attachable_texture;
						attachable_texture.bind = set_uniform.info.binding;
						attachable_texture.texture = texture->owner.is_valid() ? texture->owner : uniform.get_id(j);
						attachable_textures.push_back(attachable_texture);
					}

					DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
				}
			} break;
			case UNIFORM_TYPE_IMAGE: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.info.length) {
					if (set_uniform.info.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Image (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.info.length) + ") textures, so it should be provided an equal number of texture IDs to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Image (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID rid = uniform.get_id(j);
					Texture *texture = texture_owner.get_or_null(rid);
					ERR_FAIL_COND_V_MSG(!texture, RID(),
							"Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT), RID(),
							"Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_STORAGE_BIT usage flag set in order to be used as a uniform.");

					RIDState &rs = resource_states[texture];
					rs.shader_uniform_idx_mask |= ((uint64_t)1 << i);
					rs.state.extend(D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE | D3D12_RESOURCE_STATE_UNORDERED_ACCESS);
				}

				// SRVs first. [[SRV_UAV_AMBIGUITY]]
				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID rid = uniform.get_id(j);
					Texture *texture = texture_owner.get_or_null(rid);
					ERR_FAIL_COND_V_MSG(!texture, RID(),
							"Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT), RID(),
							"Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_STORAGE_BIT usage flag set in order to be used as a uniform.");

					device->CreateShaderResourceView(texture->resource, &texture->srv_desc, desc_heap_walkers.resources.get_curr_cpu_handle());
#ifdef DEV_ENABLED
					resources_desc_info.push_back({ D3D12_DESCRIPTOR_RANGE_TYPE_SRV, texture->srv_desc.ViewDimension });
#endif
					desc_heap_walkers.resources.advance();

					DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
				}

				// UAVs then. [[SRV_UAV_AMBIGUITY]]
				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID rid = uniform.get_id(j);
					Texture *texture = texture_owner.get_or_null(rid);

					device->CreateUnorderedAccessView(texture->resource, nullptr, &texture->uav_desc, desc_heap_walkers.resources.get_curr_cpu_handle());
#ifdef DEV_ENABLED
					resources_desc_info.push_back({ D3D12_DESCRIPTOR_RANGE_TYPE_UAV, {} });
#endif
					desc_heap_walkers.resources.advance();
				}
			} break;
			case UNIFORM_TYPE_TEXTURE_BUFFER: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.info.length) {
					if (set_uniform.info.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Buffer (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.info.length) + ") texture buffer elements, so it should be provided an equal number of texture buffer IDs to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Buffer (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture buffer (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					TextureBuffer *buffer = texture_buffer_owner.get_or_null(uniform.get_id(j));
					ERR_FAIL_COND_V_MSG(!buffer, RID(), "Texture Buffer (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture buffer.");
					CRASH_NOW_MSG("Unimplemented!");
				}
			} break;
			case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
				CRASH_NOW();
				if (uniform.get_id_count() != (uint32_t)set_uniform.info.length * 2) {
					if (set_uniform.info.length > 1) {
						ERR_FAIL_V_MSG(RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.info.length) + ") sampler buffer elements, so it should be provided twice the number of IDs (sampler, buffer pairs) to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ") should provide two IDs referencing a sampler and then a texture buffer (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j += 2) {
					D3D12_SAMPLER_DESC *sampler_desc = sampler_owner.get_or_null(uniform.get_id(j));
					ERR_FAIL_COND_V_MSG(!sampler_desc, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");

					TextureBuffer *buffer = texture_buffer_owner.get_or_null(uniform.get_id(j + 1));
					ERR_FAIL_COND_V_MSG(!buffer, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid texture buffer.");

					device->CreateSampler(sampler_desc, desc_heap_walkers.samplers.get_curr_cpu_handle());
					desc_heap_walkers.samplers.advance();
					CRASH_NOW_MSG("Unimplemented!");
				}
			} break;
			case UNIFORM_TYPE_IMAGE_BUFFER: {
				// TODO.
			} break;
			case UNIFORM_TYPE_UNIFORM_BUFFER: {
				ERR_FAIL_COND_V_MSG(uniform.get_id_count() != 1, RID(),
						"Uniform buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.get_id_count()) + " provided).");

				RID rid = uniform.get_id(0);
				Buffer *buffer = uniform_buffer_owner.get_or_null(rid);
				ERR_FAIL_COND_V_MSG(!buffer, RID(), "Uniform buffer supplied (binding: " + itos(uniform.binding) + ") is invalid.");

				ERR_FAIL_COND_V_MSG(buffer->size < (uint32_t)set_uniform.info.length, RID(),
						"Uniform buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + ") is smaller than the size of the shader uniform (" + itos(set_uniform.info.length) + ").");

				D3D12_CONSTANT_BUFFER_VIEW_DESC cbv_desc = {};
				cbv_desc.BufferLocation = buffer->resource->GetGPUVirtualAddress();
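				// D3D12 requires constant buffer view sizes to be multiples of 256 bytes
				// (D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT), hence the rounding up.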
				cbv_desc.SizeInBytes = ALIGN(buffer->size, 256);
				device->CreateConstantBufferView(&cbv_desc, desc_heap_walkers.resources.get_curr_cpu_handle());
				desc_heap_walkers.resources.advance();
#ifdef DEV_ENABLED
				resources_desc_info.push_back({ D3D12_DESCRIPTOR_RANGE_TYPE_CBV, {} });
#endif

				RIDState &rs = resource_states[buffer];
				rs.is_buffer = true;
				rs.shader_uniform_idx_mask |= ((uint64_t)1 << i);
				rs.state.extend(D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER);
			} break;
			case UNIFORM_TYPE_STORAGE_BUFFER: {
				ERR_FAIL_COND_V_MSG(uniform.get_id_count() != 1, RID(),
						"Storage buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.get_id_count()) + " provided).");

				RID rid = uniform.get_id(0);
				Buffer *buffer = nullptr;

				if (storage_buffer_owner.owns(rid)) {
					buffer = storage_buffer_owner.get_or_null(rid);
				} else if (vertex_buffer_owner.owns(rid)) {
					buffer = vertex_buffer_owner.get_or_null(rid);
					// Due to [[SRV_UAV_AMBIGUITY]] we can't make this check, because it wouldn't make sense in the case of an SRV (r/o storage buffer).
					//ERR_FAIL_COND_V_MSG(!(buffer->usage & D3D12_RESOURCE_STATE_UNORDERED_ACCESS), RID(), "Vertex buffer supplied (binding: " + itos(uniform.binding) + ") was not created with storage flag.");
				}
				ERR_FAIL_COND_V_MSG(!buffer, RID(), "Storage buffer supplied (binding: " + itos(uniform.binding) + ") is invalid.");

				// If 0, then it's sized at link time.
				ERR_FAIL_COND_V_MSG(set_uniform.info.length > 0 && buffer->size != (uint32_t)set_uniform.info.length, RID(),
						"Storage buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + ") does not match the size of the shader uniform (" + itos(set_uniform.info.length) + ").");

				RIDState &rs = resource_states[buffer];
				rs.shader_uniform_idx_mask |= ((uint64_t)1 << i);
				rs.is_buffer = true;
				rs.state.extend(D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE | D3D12_RESOURCE_STATE_UNORDERED_ACCESS);

				// SRV first. [[SRV_UAV_AMBIGUITY]]
				{
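					// Raw (ByteAddressBuffer) views must use DXGI_FORMAT_R32_TYPELESS with the
					// RAW flag; elements are 4 bytes each, so the element count rounds the buffer
					// size up to whole 32-bit words.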
					D3D12_SHADER_RESOURCE_VIEW_DESC srv_desc = {};
					srv_desc.Format = DXGI_FORMAT_R32_TYPELESS;
					srv_desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
					srv_desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
					srv_desc.Buffer.FirstElement = 0;
					srv_desc.Buffer.NumElements = (buffer->size + 3) / 4;
					srv_desc.Buffer.StructureByteStride = 0;
					srv_desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
					device->CreateShaderResourceView(buffer->resource, &srv_desc, desc_heap_walkers.resources.get_curr_cpu_handle());
#ifdef DEV_ENABLED
					resources_desc_info.push_back({ D3D12_DESCRIPTOR_RANGE_TYPE_SRV, srv_desc.ViewDimension });
#endif
					desc_heap_walkers.resources.advance();
				}

				// UAV then. [[SRV_UAV_AMBIGUITY]]
				{
					if ((buffer->usage & D3D12_RESOURCE_STATE_UNORDERED_ACCESS)) {
						D3D12_UNORDERED_ACCESS_VIEW_DESC uav_desc = {};
						uav_desc.Format = DXGI_FORMAT_R32_TYPELESS;
						uav_desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
						uav_desc.Buffer.FirstElement = 0;
						uav_desc.Buffer.NumElements = (buffer->size + 3) / 4;
						uav_desc.Buffer.StructureByteStride = 0;
						uav_desc.Buffer.CounterOffsetInBytes = 0;
						uav_desc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_RAW;
						device->CreateUnorderedAccessView(buffer->resource, nullptr, &uav_desc, desc_heap_walkers.resources.get_curr_cpu_handle());
#ifdef DEV_ENABLED
						resources_desc_info.push_back({ D3D12_DESCRIPTOR_RANGE_TYPE_UAV, {} });
#endif
					} else {
						// If the buffer can't transition to UAV, leave this slot empty since it won't be
						// used, and trying to create a UAV view would trigger a validation error.
					}
					desc_heap_walkers.resources.advance();
				}
			} break;
			case UNIFORM_TYPE_INPUT_ATTACHMENT: {
				ERR_FAIL_COND_V_MSG(shader->is_compute, RID(), "InputAttachment (binding: " + itos(uniform.binding) + ") supplied for compute shader (this is not allowed).");

				if (uniform.get_id_count() != (uint32_t)set_uniform.info.length) {
					if (set_uniform.info.length > 1) {
						ERR_FAIL_V_MSG(RID(), "InputAttachment (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.info.length) + ") textures, so it should be provided an equal number of texture IDs to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "InputAttachment (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID rid = uniform.get_id(j);
					Texture *texture = texture_owner.get_or_null(rid);
					ERR_FAIL_COND_V_MSG(!texture, RID(),
							"InputAttachment (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
							"InputAttachment (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as a uniform.");

					device->CreateShaderResourceView(texture->resource, &texture->srv_desc, desc_heap_walkers.resources.get_curr_cpu_handle());
#ifdef DEV_ENABLED
					resources_desc_info.push_back({ D3D12_DESCRIPTOR_RANGE_TYPE_SRV, texture->srv_desc.ViewDimension });
#endif
					desc_heap_walkers.resources.advance();

					RIDState &rs = resource_states[texture];
					rs.shader_uniform_idx_mask |= ((uint64_t)1 << i);
					rs.state.extend(D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);

					DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
				}
			} break;
			default: {
			}
		}
	}

	DEV_ASSERT(desc_heap_walkers.resources.is_at_eof());
	DEV_ASSERT(desc_heap_walkers.samplers.is_at_eof());

	UniformSet uniform_set;
	uniform_set.desc_heaps.resources = desc_heaps.resources;
	uniform_set.desc_heaps.samplers = desc_heaps.samplers;
	uniform_set.format = shader->set_formats[p_shader_set];
	uniform_set.attachable_textures = attachable_textures;
	uniform_set.shader_set = p_shader_set;
	uniform_set.shader_id = p_shader;
#ifdef DEV_ENABLED
	uniform_set._resources_desc_info = resources_desc_info;
	uniform_set._shader = shader;
#endif

	{
		uniform_set.resource_states.resize(resource_states.size());
		uint32_t i = 0;
		for (const KeyValue<Resource *, RIDState> &E : resource_states) {
			UniformSet::StateRequirement sr;
			sr.resource = E.key;
			sr.is_buffer = E.value.is_buffer;
			sr.states = E.value.state.get_state_mask();
			sr.shader_uniform_idx_mask = E.value.shader_uniform_idx_mask;
			uniform_set.resource_states.write[i] = sr;
			i++;
		}
	}

	RID id = uniform_set_owner.make_rid(uniform_set);
	// Add dependencies.
	_add_dependency(id, p_shader);
	for (uint32_t i = 0; i < uniform_count; i++) {
		const Uniform &uniform = uniforms[i];
		int id_count = uniform.get_id_count();
		for (int j = 0; j < id_count; j++) {
			_add_dependency(id, uniform.get_id(j));
		}
	}

	return id;
}

bool RenderingDeviceD3D12::uniform_set_is_valid(RID p_uniform_set) {
	return uniform_set_owner.owns(p_uniform_set);
}

void RenderingDeviceD3D12::uniform_set_set_invalidation_callback(RID p_uniform_set, InvalidationCallback p_callback, void *p_userdata) {
	UniformSet *us = uniform_set_owner.get_or_null(p_uniform_set);
	ERR_FAIL_NULL(us);
	us->invalidated_callback = p_callback;
	us->invalidated_callback_userdata = p_userdata;
}

Error RenderingDeviceD3D12::buffer_copy(RID p_src_buffer, RID p_dst_buffer, uint32_t p_src_offset, uint32_t p_dst_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
			"Copying buffers is forbidden during creation of a draw list.");
	ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
			"Copying buffers is forbidden during creation of a compute list.");

	Buffer *src_buffer = _get_buffer_from_owner(p_src_buffer);
	if (!src_buffer) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Source buffer argument is not a valid buffer of any type.");
	}

	Buffer *dst_buffer = _get_buffer_from_owner(p_dst_buffer);
	if (!dst_buffer) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Destination buffer argument is not a valid buffer of any type.");
	}

	// Validate the copy's dimensions for both buffers.
	ERR_FAIL_COND_V_MSG((p_size + p_src_offset) > src_buffer->size, ERR_INVALID_PARAMETER, "Size is larger than the source buffer.");
	ERR_FAIL_COND_V_MSG((p_size + p_dst_offset) > dst_buffer->size, ERR_INVALID_PARAMETER, "Size is larger than the destination buffer.");

	// Perform the copy.
	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();

	_resource_transition_batch(src_buffer, 0, 1, D3D12_RESOURCE_STATE_COPY_SOURCE);
	_resource_transition_batch(dst_buffer, 0, 1, D3D12_RESOURCE_STATE_COPY_DEST);
	_resource_transitions_flush(command_list);

	command_list->CopyBufferRegion(dst_buffer->resource, p_dst_offset, src_buffer->resource, p_src_offset, p_size);

	return OK;
}

Error RenderingDeviceD3D12::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, BitField<BarrierMask> p_post_barrier) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a draw list.");
	ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a compute list.");

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	if (!buffer) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
	}

	ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
			"Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");

	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();

	_resource_transition_batch(buffer, 0, 1, D3D12_RESOURCE_STATE_COPY_DEST);
	_resource_transitions_flush(command_list);

	Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_post_barrier);
	if (err) {
		return err;
	}

	return OK;
}

Error RenderingDeviceD3D12::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V_MSG((p_size % 4) != 0, ERR_INVALID_PARAMETER,
			"Size must be a multiple of four.");
	ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a draw list.");
	ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a compute list.");

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	if (!buffer) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
	}

	ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
			"Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");

	if (frames[frame].desc_heap_walkers.resources.is_at_eof()) {
		if (!frames[frame].desc_heaps_exhausted_reported.resources) {
			frames[frame].desc_heaps_exhausted_reported.resources = true;
			ERR_FAIL_V_MSG(ERR_BUSY,
					"Cannot clear buffer because there's not enough room in the current frame's RESOURCE descriptors heap.\n"
					"Please increase the value of the rendering/rendering_device/d3d12/max_resource_descriptors_per_frame project setting.");
		} else {
			return ERR_BUSY;
		}
	}
	if (frames[frame].desc_heap_walkers.aux.is_at_eof()) {
		if (!frames[frame].desc_heaps_exhausted_reported.aux) {
			frames[frame].desc_heaps_exhausted_reported.aux = true;
			ERR_FAIL_V_MSG(ERR_BUSY,
					"Cannot clear buffer because there's not enough room in the current frame's AUX descriptors heap.\n"
					"Please increase the value of the rendering/rendering_device/d3d12/max_misc_descriptors_per_frame project setting.");
		} else {
			return ERR_BUSY;
		}
	}

	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();

	_resource_transition_batch(buffer, 0, 1, D3D12_RESOURCE_STATE_UNORDERED_ACCESS);
	_resource_transitions_flush(command_list);
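	// ClearUnorderedAccessViewUint() needs the same UAV described twice: a GPU handle living in a
	// shader-visible heap and a CPU handle living in a non-shader-visible one. The view is created
	// in the CPU-only AUX heap and then copied into the frame's shader-visible RESOURCE heap.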
	D3D12_UNORDERED_ACCESS_VIEW_DESC uav_desc = {};
	uav_desc.Format = DXGI_FORMAT_R32_TYPELESS;
	uav_desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
	uav_desc.Buffer.FirstElement = 0;
	uav_desc.Buffer.NumElements = (buffer->size + 3) / 4;
	uav_desc.Buffer.StructureByteStride = 0;
	uav_desc.Buffer.CounterOffsetInBytes = 0;
	uav_desc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_RAW;
	device->CreateUnorderedAccessView(
			buffer->resource,
			nullptr,
			&uav_desc,
			frames[frame].desc_heap_walkers.aux.get_curr_cpu_handle());

	device->CopyDescriptorsSimple(
			1,
			frames[frame].desc_heap_walkers.resources.get_curr_cpu_handle(),
			frames[frame].desc_heap_walkers.aux.get_curr_cpu_handle(),
			D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);

	// Four zeroed components; the buffer region is cleared to zero.
	static const UINT values[4] = {};
	command_list->ClearUnorderedAccessViewUint(
			frames[frame].desc_heap_walkers.resources.get_curr_gpu_handle(),
			frames[frame].desc_heap_walkers.aux.get_curr_cpu_handle(),
			buffer->resource,
			values,
			0,
			nullptr);

	frames[frame].desc_heap_walkers.resources.advance();
	frames[frame].desc_heap_walkers.aux.advance();

	return OK;
}

Vector<uint8_t> RenderingDeviceD3D12::buffer_get_data(RID p_buffer, uint32_t p_offset, uint32_t p_size) {
	_THREAD_SAFE_METHOD_

	// Get the buffer, whatever its type.
	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	if (!buffer) {
		ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. Only Index and Vertex buffers allow retrieving.");
	}

	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();

	// Size of buffer to retrieve.
	if (!p_size) {
		p_size = buffer->size;
	} else {
		ERR_FAIL_COND_V_MSG(p_size + p_offset > buffer->size, Vector<uint8_t>(),
				"Size is larger than the buffer.");
	}

	_resource_transition_batch(buffer, 0, 1, D3D12_RESOURCE_STATE_COPY_SOURCE);
	_resource_transitions_flush(command_list);
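	// Resources in a DEFAULT heap aren't CPU-mappable, so the data is first copied
	// into a READBACK staging buffer and mapped from there.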
	Buffer tmp_buffer;
	Error err = _buffer_allocate(&tmp_buffer, p_size, D3D12_RESOURCE_STATE_COPY_DEST, D3D12_HEAP_TYPE_READBACK);
	ERR_FAIL_COND_V(err != OK, Vector<uint8_t>());

	command_list->CopyBufferRegion(tmp_buffer.resource, 0, buffer->resource, p_offset, p_size);

	// Flush everything so memory can be safely mapped.
	_flush(true);

	void *buffer_mem;
	HRESULT res = tmp_buffer.resource->Map(0, &VOID_RANGE, &buffer_mem);
	ERR_FAIL_COND_V_MSG(res, Vector<uint8_t>(), "Map failed with error " + vformat("0x%08x", res) + ".");

	Vector<uint8_t> buffer_data;
	{
		buffer_data.resize(p_size);
		uint8_t *w = buffer_data.ptrw();
		memcpy(w, buffer_mem, p_size);
	}

	tmp_buffer.resource->Unmap(0, &VOID_RANGE);

	_buffer_free(&tmp_buffer);

	return buffer_data;
}

/*******************/
/**** PIPELINES ****/
/*******************/

Error RenderingDeviceD3D12::_apply_specialization_constants(
		const Shader *p_shader,
		const Vector<PipelineSpecializationConstant> &p_specialization_constants,
		HashMap<ShaderStage, Vector<uint8_t>> &r_final_stages_bytecode) {
	// If something needs to be patched, COW will do the trick.
	r_final_stages_bytecode = p_shader->stages_bytecode;

	uint32_t stages_re_sign_mask = 0;
	for (const PipelineSpecializationConstant &psc : p_specialization_constants) {
		if (!(p_shader->spirv_specialization_constants_ids_mask & (1 << psc.constant_id))) {
			// This SC wasn't even in the original SPIR-V shader.
			continue;
		}
		for (const Shader::SpecializationConstant &sc : p_shader->specialization_constants) {
			if (psc.constant_id == sc.constant.constant_id) {
				ERR_FAIL_COND_V_MSG(psc.type != sc.constant.type, ERR_INVALID_PARAMETER, "Specialization constant provided for id (" + itos(sc.constant.constant_id) + ") is of the wrong type.");
				if (psc.int_value != sc.constant.int_value) {
					stages_re_sign_mask |= _shader_patch_dxil_specialization_constant(psc.type, &psc.int_value, sc.stages_bit_offsets, r_final_stages_bytecode, false);
				}
				break;
			}
		}
	}

	// Re-sign patched stages.
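	// DXIL containers embed a content hash, so once the bytecode has been patched each affected
	// stage must be re-signed or the D3D12 runtime would reject it at pipeline creation.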
	for (KeyValue<ShaderStage, Vector<uint8_t>> &E : r_final_stages_bytecode) {
		ShaderStage stage = E.key;
		if ((stages_re_sign_mask & (1 << stage))) {
			Vector<uint8_t> &bytecode = E.value;
			bool sign_ok = _shader_sign_dxil_bytecode(stage, bytecode);
			ERR_FAIL_COND_V(!sign_ok, ERR_QUERY_FAILED);
		}
	}

	return OK;
}

#ifdef DEV_ENABLED
String RenderingDeviceD3D12::_build_pipeline_blob_filename(
		const Vector<uint8_t> &p_blob,
		const Shader *p_shader,
		const Vector<PipelineSpecializationConstant> &p_specialization_constants,
		const String &p_extra_name_suffix,
		const String &p_forced_id) {
	String id;
	if (p_forced_id == "") {
		HashingContext hc;
		hc.start(HashingContext::HASH_MD5);
		hc.update(p_blob);
		Vector<uint8_t> hash_bin = hc.finish();
		id = String::hex_encode_buffer(hash_bin.ptr(), hash_bin.size());
	} else {
		id = p_forced_id;
	}

	Vector<String> sc_str_pieces;
	for (const Shader::SpecializationConstant &sc : p_shader->specialization_constants) {
		uint32_t int_value = sc.constant.int_value;
		for (const PipelineSpecializationConstant &psc : p_specialization_constants) {
			if (psc.constant_id == sc.constant.constant_id) {
				int_value = psc.int_value;
				break;
			}
		}
		sc_str_pieces.push_back(itos(sc.constant.constant_id) + "=" + itos(int_value));
	}

	String res = p_shader->name.replace(":", "-");
	res += "." + id;
	res += "." + String("_").join(sc_str_pieces);
	if (p_extra_name_suffix != "") {
		res += "." + p_extra_name_suffix;
	}
	return res;
}

void RenderingDeviceD3D12::_save_pso_blob(
		ID3D12PipelineState *p_pso,
		const Shader *p_shader,
		const Vector<PipelineSpecializationConstant> &p_specialization_constants) {
	ComPtr<ID3DBlob> pso_blob;
	p_pso->GetCachedBlob(pso_blob.GetAddressOf());
	Vector<uint8_t> pso_vector;
	pso_vector.resize(pso_blob->GetBufferSize());
	memcpy(pso_vector.ptrw(), pso_blob->GetBufferPointer(), pso_blob->GetBufferSize());
	String base_filename = _build_pipeline_blob_filename(pso_vector, p_shader, p_specialization_constants);
	Ref<FileAccess> fa = FileAccess::open("pso." + base_filename + ".bin", FileAccess::WRITE);
	fa->store_buffer((const uint8_t *)pso_blob->GetBufferPointer(), pso_blob->GetBufferSize());
}

void RenderingDeviceD3D12::_save_stages_bytecode(
		const HashMap<ShaderStage, Vector<uint8_t>> &p_stages_bytecode,
		const Shader *p_shader,
		const RID p_shader_rid,
		const Vector<PipelineSpecializationConstant> &p_specialization_constants) {
	for (const KeyValue<ShaderStage, Vector<uint8_t>> &E : p_stages_bytecode) {
		ShaderStage stage = E.key;
		const Vector<uint8_t> &bytecode = E.value;
		String base_filename = _build_pipeline_blob_filename(bytecode, p_shader, p_specialization_constants, shader_stage_names[stage], itos(p_shader_rid.get_id()));
		Ref<FileAccess> fa = FileAccess::open("dxil." + base_filename + ".bin", FileAccess::WRITE);
		fa->store_buffer(bytecode.ptr(), bytecode.size());
	}
}
#endif

/*************************/
/**** RENDER PIPELINE ****/
/*************************/

RID RenderingDeviceD3D12::render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, BitField<PipelineDynamicStateFlags> p_dynamic_state_flags, uint32_t p_for_render_pass, const Vector<PipelineSpecializationConstant> &p_specialization_constants) {
#ifdef DEV_ENABLED
//#define DEBUG_CREATE_DEBUG_PSO
//#define DEBUG_SAVE_PSO_BLOBS
//#define DEBUG_SAVE_DXIL_BLOBS
#endif
	_THREAD_SAFE_METHOD_

	// Needs a shader.
	Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, RID());

	ERR_FAIL_COND_V_MSG(shader->is_compute, RID(),
			"Compute shaders can't be used in render pipelines.");

	if (p_framebuffer_format == INVALID_ID) {
		// If nothing provided, use an empty one (no attachments).
		p_framebuffer_format = framebuffer_format_create(Vector<AttachmentFormat>());
	}
	ERR_FAIL_COND_V(!framebuffer_formats.has(p_framebuffer_format), RID());
	const FramebufferFormat &fb_format = framebuffer_formats[p_framebuffer_format];
	ERR_FAIL_COND_V_MSG(p_for_render_pass >= uint32_t(fb_format.passes.size()), RID(), "Render pass requested for pipeline creation (" + itos(p_for_render_pass) + ") is out of bounds.");
	const FramebufferPass &pass = fb_format.passes[p_for_render_pass];

	{ // Validate shader vs. framebuffer.
		uint32_t output_mask = 0;
		for (int i = 0; i < pass.color_attachments.size(); i++) {
			if (pass.color_attachments[i] != FramebufferPass::ATTACHMENT_UNUSED) {
				output_mask |= 1 << i;
			}
		}
		ERR_FAIL_COND_V_MSG(shader->fragment_output_mask != output_mask, RID(),
				"Mismatch fragment shader output mask (" + itos(shader->fragment_output_mask) + ") and framebuffer color output mask (" + itos(output_mask) + ") when binding both in render pipeline.");
	}

	CD3DX12_PIPELINE_STATE_STREAM pipeline_desc;
	RenderPipeline::DynamicParams dyn_params;
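	// The CD3DX12_PIPELINE_STATE_STREAM subobject wrappers (d3dx12.h) overload operator& to
	// expose the descriptor they wrap, which is what the `(&pipeline_desc.X)->member` pattern
	// below relies on.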
	// Attachment formats.
	{
		for (int i = 0; i < pass.color_attachments.size(); i++) {
			int32_t attachment = pass.color_attachments[i];
			if (attachment == FramebufferPass::ATTACHMENT_UNUSED) {
				(&pipeline_desc.RTVFormats)->RTFormats[i] = DXGI_FORMAT_UNKNOWN;
			} else {
				(&pipeline_desc.RTVFormats)->RTFormats[i] = d3d12_formats[fb_format.attachments[attachment].format].general_format;
			}
		}
		(&pipeline_desc.RTVFormats)->NumRenderTargets = pass.color_attachments.size();
		if (pass.depth_attachment == FramebufferPass::ATTACHMENT_UNUSED) {
			pipeline_desc.DSVFormat = DXGI_FORMAT_UNKNOWN;
		} else {
			pipeline_desc.DSVFormat = d3d12_formats[fb_format.attachments[pass.depth_attachment].format].dsv_format;
		}
	}

	// Vertex.
	if (p_vertex_format != INVALID_ID) {
		// Uses vertices, else it does not.
		ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
		const VertexDescriptionCache &vd = vertex_formats[p_vertex_format];

		(&pipeline_desc.InputLayout)->pInputElementDescs = vd.elements_desc.ptr();
		(&pipeline_desc.InputLayout)->NumElements = vd.elements_desc.size();

		// Validate with inputs.
		for (uint32_t i = 0; i < 64; i++) {
			if (!(shader->vertex_input_mask & (1ULL << i))) {
				continue;
			}
			bool found = false;
			for (int j = 0; j < vd.vertex_formats.size(); j++) {
				if (vd.vertex_formats[j].location == i) {
					found = true;
				}
			}
			ERR_FAIL_COND_V_MSG(!found, RID(),
					"Shader vertex input location (" + itos(i) + ") not provided in vertex input description for pipeline creation.");
		}
	} else {
		// Does not use vertices.
		ERR_FAIL_COND_V_MSG(shader->vertex_input_mask != 0, RID(),
				"Shader contains vertex inputs, but no vertex input description was provided for pipeline creation.");
	}

	// Input assembly & tessellation.
	ERR_FAIL_INDEX_V(p_render_primitive, RENDER_PRIMITIVE_MAX, RID());

	static const D3D12_PRIMITIVE_TOPOLOGY_TYPE topology_types[RENDER_PRIMITIVE_MAX] = {
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT,
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE,
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE,
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE,
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE,
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE,
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE,
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE,
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE,
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE,
		D3D12_PRIMITIVE_TOPOLOGY_TYPE_PATCH,
	};
	static const D3D12_PRIMITIVE_TOPOLOGY topologies[RENDER_PRIMITIVE_MAX] = {
		D3D_PRIMITIVE_TOPOLOGY_POINTLIST,
		D3D_PRIMITIVE_TOPOLOGY_LINELIST,
		D3D_PRIMITIVE_TOPOLOGY_LINELIST_ADJ,
		D3D_PRIMITIVE_TOPOLOGY_LINESTRIP,
		D3D_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ,
		D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST,
		D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ,
		D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP,
		D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ,
		D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP,
		D3D_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST,
	};
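	// The D3D patch-list topologies are consecutive enum values, one per control-point count
	// (1 to 32), so the topology for N control points can be derived arithmetically from
	// D3D_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST.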
	pipeline_desc.PrimitiveTopologyType = topology_types[p_render_primitive];
	if (p_render_primitive == RENDER_PRIMITIVE_TESSELATION_PATCH) {
		ERR_FAIL_COND_V(p_rasterization_state.patch_control_points < 1 || p_rasterization_state.patch_control_points > 32, RID()); // Is there any way to get the true point count limit?
		dyn_params.primitive_topology = (D3D12_PRIMITIVE_TOPOLOGY)((int)D3D_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST + (p_rasterization_state.patch_control_points - 1));
	} else {
		dyn_params.primitive_topology = topologies[p_render_primitive];
	}
	if (p_render_primitive == RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX) {
		// TODO: This is right for 16-bit indices; for 32-bit there's a different enum value to set, but we don't know at this point.
		pipeline_desc.IBStripCutValue = D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFF;
	} else {
		pipeline_desc.IBStripCutValue = D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
	}

	// Rasterization.
	(&pipeline_desc.RasterizerState)->DepthClipEnable = !p_rasterization_state.enable_depth_clamp;
	// In D3D12, discard can be supported with some extra effort (empty pixel shader + disabled depth/stencil test); that said, it's unsupported for now.
	ERR_FAIL_COND_V(p_rasterization_state.discard_primitives, RID());
	(&pipeline_desc.RasterizerState)->FillMode = p_rasterization_state.wireframe ? D3D12_FILL_MODE_WIREFRAME : D3D12_FILL_MODE_SOLID;

	static const D3D12_CULL_MODE cull_mode[3] = {
		D3D12_CULL_MODE_NONE,
		D3D12_CULL_MODE_FRONT,
		D3D12_CULL_MODE_BACK,
	};
	ERR_FAIL_INDEX_V(p_rasterization_state.cull_mode, 3, RID());
	(&pipeline_desc.RasterizerState)->CullMode = cull_mode[p_rasterization_state.cull_mode];
	(&pipeline_desc.RasterizerState)->FrontCounterClockwise = p_rasterization_state.front_face == POLYGON_FRONT_FACE_COUNTER_CLOCKWISE;

	// In D3D12, there's still a point in setting up depth bias with no depth buffer, but just zeroing (disabling) it all in such a case is closer to Vulkan.
	if (p_rasterization_state.depth_bias_enabled && pass.depth_attachment != FramebufferPass::ATTACHMENT_UNUSED) {
		(&pipeline_desc.RasterizerState)->DepthBias = p_rasterization_state.depth_bias_constant_factor;
		(&pipeline_desc.RasterizerState)->DepthBiasClamp = p_rasterization_state.depth_bias_clamp;
		(&pipeline_desc.RasterizerState)->SlopeScaledDepthBias = p_rasterization_state.depth_bias_slope_factor;
	} else {
		(&pipeline_desc.RasterizerState)->DepthBias = 0;
		(&pipeline_desc.RasterizerState)->DepthBiasClamp = 0.0f;
		(&pipeline_desc.RasterizerState)->SlopeScaledDepthBias = 0.0f;
	}
	(&pipeline_desc.RasterizerState)->ForcedSampleCount = 0;
	(&pipeline_desc.RasterizerState)->ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF;
	(&pipeline_desc.RasterizerState)->MultisampleEnable = rasterization_sample_count[p_multisample_state.sample_count] != 1;
	(&pipeline_desc.RasterizerState)->AntialiasedLineEnable = true;

	// In D3D12, there's no line width.
	ERR_FAIL_COND_V(!Math::is_equal_approx(p_rasterization_state.line_width, 1.0f), RID());

	// Multisample.
	ERR_FAIL_COND_V(p_multisample_state.enable_sample_shading, RID()); // TODO: Unclear how to enable this in D3D12.
	if ((&pipeline_desc.RTVFormats)->NumRenderTargets || pipeline_desc.DSVFormat != DXGI_FORMAT_UNKNOWN) {
		uint32_t sample_count = MIN(
				fb_format.max_supported_sample_count,
				rasterization_sample_count[p_multisample_state.sample_count]);
		(&pipeline_desc.SampleDesc)->Count = sample_count;
	} else {
		(&pipeline_desc.SampleDesc)->Count = 1;
	}
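	// With MSAA, DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN requests the standard sample
	// positions for the given count instead of a driver-specific numbered quality level.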
  5510. if ((&pipeline_desc.SampleDesc)->Count > 1) {
  5511. (&pipeline_desc.SampleDesc)->Quality = DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN;
  5512. } else {
  5513. (&pipeline_desc.SampleDesc)->Quality = 0;
  5514. }
  5515. if (p_multisample_state.sample_mask.size()) {
  5516. // Use sample mask.
  5517. ERR_FAIL_COND_V(rasterization_sample_count[p_multisample_state.sample_count] != (uint32_t)p_multisample_state.sample_mask.size(), RID());
  5518. for (int i = 1; i < p_multisample_state.sample_mask.size(); i++) {
  5519. // In D3D12 there's a single sample mask for every pixel.
  5520. ERR_FAIL_COND_V(p_multisample_state.sample_mask[i] != p_multisample_state.sample_mask[0], RID());
  5521. }
  5522. pipeline_desc.SampleMask = p_multisample_state.sample_mask[0];
  5523. } else {
  5524. pipeline_desc.SampleMask = 0xffffffff;
  5525. }
	// Depth stencil.
	if (pass.depth_attachment == FramebufferPass::ATTACHMENT_UNUSED) {
		(&pipeline_desc.DepthStencilState)->DepthEnable = false;
		(&pipeline_desc.DepthStencilState)->StencilEnable = false;
	} else {
		(&pipeline_desc.DepthStencilState)->DepthEnable = p_depth_stencil_state.enable_depth_test;
		(&pipeline_desc.DepthStencilState)->DepthWriteMask = p_depth_stencil_state.enable_depth_write ? D3D12_DEPTH_WRITE_MASK_ALL : D3D12_DEPTH_WRITE_MASK_ZERO;
		ERR_FAIL_INDEX_V(p_depth_stencil_state.depth_compare_operator, COMPARE_OP_MAX, RID());
		(&pipeline_desc.DepthStencilState)->DepthFunc = compare_operators[p_depth_stencil_state.depth_compare_operator];
		(&pipeline_desc.DepthStencilState)->DepthBoundsTestEnable = p_depth_stencil_state.enable_depth_range;
		(&pipeline_desc.DepthStencilState)->StencilEnable = p_depth_stencil_state.enable_stencil;
		// In D3D12 some elements can't be different across front and back.
		ERR_FAIL_COND_V(p_depth_stencil_state.front_op.compare_mask != p_depth_stencil_state.back_op.compare_mask, RID());
		ERR_FAIL_COND_V(p_depth_stencil_state.front_op.write_mask != p_depth_stencil_state.back_op.write_mask, RID());
		ERR_FAIL_COND_V(p_depth_stencil_state.front_op.reference != p_depth_stencil_state.back_op.reference, RID());
		(&pipeline_desc.DepthStencilState)->StencilReadMask = p_depth_stencil_state.front_op.compare_mask;
		(&pipeline_desc.DepthStencilState)->StencilWriteMask = p_depth_stencil_state.front_op.write_mask;
		ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.fail, STENCIL_OP_MAX, RID());
		(&pipeline_desc.DepthStencilState)->FrontFace.StencilFailOp = stencil_operations[p_depth_stencil_state.front_op.fail];
		ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.pass, STENCIL_OP_MAX, RID());
		(&pipeline_desc.DepthStencilState)->FrontFace.StencilPassOp = stencil_operations[p_depth_stencil_state.front_op.pass];
		ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.depth_fail, STENCIL_OP_MAX, RID());
		(&pipeline_desc.DepthStencilState)->FrontFace.StencilDepthFailOp = stencil_operations[p_depth_stencil_state.front_op.depth_fail];
		ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.compare, COMPARE_OP_MAX, RID());
		(&pipeline_desc.DepthStencilState)->FrontFace.StencilFunc = compare_operators[p_depth_stencil_state.front_op.compare];
		ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.fail, STENCIL_OP_MAX, RID());
		(&pipeline_desc.DepthStencilState)->BackFace.StencilFailOp = stencil_operations[p_depth_stencil_state.back_op.fail];
		ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.pass, STENCIL_OP_MAX, RID());
		(&pipeline_desc.DepthStencilState)->BackFace.StencilPassOp = stencil_operations[p_depth_stencil_state.back_op.pass];
		ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.depth_fail, STENCIL_OP_MAX, RID());
		(&pipeline_desc.DepthStencilState)->BackFace.StencilDepthFailOp = stencil_operations[p_depth_stencil_state.back_op.depth_fail];
		ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.compare, COMPARE_OP_MAX, RID());
		(&pipeline_desc.DepthStencilState)->BackFace.StencilFunc = compare_operators[p_depth_stencil_state.back_op.compare];
		dyn_params.depth_bounds_min = p_depth_stencil_state.enable_depth_range ? p_depth_stencil_state.depth_range_min : 0.0f;
		dyn_params.depth_bounds_max = p_depth_stencil_state.enable_depth_range ? p_depth_stencil_state.depth_range_max : 1.0f;
		dyn_params.stencil_reference = p_depth_stencil_state.front_op.reference;
	}
	// Blend state.
	(&pipeline_desc.BlendState)->AlphaToCoverageEnable = p_multisample_state.enable_alpha_to_coverage;
	{
		ERR_FAIL_COND_V(p_blend_state.attachments.size() < pass.color_attachments.size(), RID());
		bool all_attachments_same_blend = true;
		for (int i = 0; i < pass.color_attachments.size(); i++) {
			const PipelineColorBlendState::Attachment &bs = p_blend_state.attachments[i];
			D3D12_RENDER_TARGET_BLEND_DESC &bd = (&pipeline_desc.BlendState)->RenderTarget[i];
			bd.BlendEnable = bs.enable_blend;
			bd.LogicOpEnable = p_blend_state.enable_logic_op;
			bd.LogicOp = logic_operations[p_blend_state.logic_op];
			ERR_FAIL_INDEX_V(bs.src_color_blend_factor, BLEND_FACTOR_MAX, RID());
			bd.SrcBlend = blend_factors[bs.src_color_blend_factor];
			ERR_FAIL_INDEX_V(bs.dst_color_blend_factor, BLEND_FACTOR_MAX, RID());
			bd.DestBlend = blend_factors[bs.dst_color_blend_factor];
			ERR_FAIL_INDEX_V(bs.color_blend_op, BLEND_OP_MAX, RID());
			bd.BlendOp = blend_operations[bs.color_blend_op];
			ERR_FAIL_INDEX_V(bs.src_alpha_blend_factor, BLEND_FACTOR_MAX, RID());
			bd.SrcBlendAlpha = blend_factors[bs.src_alpha_blend_factor];
			ERR_FAIL_INDEX_V(bs.dst_alpha_blend_factor, BLEND_FACTOR_MAX, RID());
			bd.DestBlendAlpha = blend_factors[bs.dst_alpha_blend_factor];
			ERR_FAIL_INDEX_V(bs.alpha_blend_op, BLEND_OP_MAX, RID());
			bd.BlendOpAlpha = blend_operations[bs.alpha_blend_op];
			if (bs.write_r) {
				bd.RenderTargetWriteMask |= D3D12_COLOR_WRITE_ENABLE_RED;
			}
			if (bs.write_g) {
				bd.RenderTargetWriteMask |= D3D12_COLOR_WRITE_ENABLE_GREEN;
			}
			if (bs.write_b) {
				bd.RenderTargetWriteMask |= D3D12_COLOR_WRITE_ENABLE_BLUE;
			}
			if (bs.write_a) {
				bd.RenderTargetWriteMask |= D3D12_COLOR_WRITE_ENABLE_ALPHA;
			}
			if (i > 0 && all_attachments_same_blend) {
				// Compare the blend descriptions by value; comparing the addresses of two different array elements is always false.
				all_attachments_same_blend = memcmp(&(&pipeline_desc.BlendState)->RenderTarget[i], &(&pipeline_desc.BlendState)->RenderTarget[0], sizeof(D3D12_RENDER_TARGET_BLEND_DESC)) == 0;
			}
		}
		// Per D3D12 docs, if logic op used, independent blending is not supported.
		ERR_FAIL_COND_V(p_blend_state.enable_logic_op && !all_attachments_same_blend, RID());
		(&pipeline_desc.BlendState)->IndependentBlendEnable = !all_attachments_same_blend;
	}
	dyn_params.blend_constant = p_blend_state.blend_constant;
	// Stages bytecodes + specialization constants.
	pipeline_desc.pRootSignature = shader->root_signature.Get();
#ifdef DEBUG_CREATE_DEBUG_PSO
	pipeline_desc.Flags = D3D12_PIPELINE_STATE_FLAG_TOOL_DEBUG;
#endif
	HashMap<ShaderStage, Vector<uint8_t>> final_stages_bytecode;
	Error err = _apply_specialization_constants(shader, p_specialization_constants, final_stages_bytecode);
	ERR_FAIL_COND_V(err, RID());
#ifdef DEV_ENABLED
	// Ensure signing worked.
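	// (The check below assumes the DXIL container layout: a 16-byte digest stored right after
	// the four-character code, i.e. at byte offset 4; an all-zero digest means the blob was never signed.)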
	for (KeyValue<ShaderStage, Vector<uint8_t>> &E : final_stages_bytecode) {
		bool any_non_zero = false;
		for (int j = 0; j < 16; j++) {
			if (E.value.ptr()[4 + j]) {
				any_non_zero = true;
				break;
			}
		}
		DEV_ASSERT(any_non_zero);
	}
#endif
	if (shader->stages_bytecode.has(SHADER_STAGE_VERTEX)) {
		pipeline_desc.VS = D3D12_SHADER_BYTECODE{
			final_stages_bytecode[SHADER_STAGE_VERTEX].ptr(),
			(SIZE_T)final_stages_bytecode[SHADER_STAGE_VERTEX].size()
		};
	}
	if (shader->stages_bytecode.has(SHADER_STAGE_FRAGMENT)) {
		pipeline_desc.PS = D3D12_SHADER_BYTECODE{
			final_stages_bytecode[SHADER_STAGE_FRAGMENT].ptr(),
			(SIZE_T)final_stages_bytecode[SHADER_STAGE_FRAGMENT].size()
		};
	}
	RenderPipeline pipeline;
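	// Prefer the pipeline state stream API from ID3D12Device2, which understands the newer
	// subobjects (e.g., depth bounds); older runtimes fall back to the classic V0 descriptor.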
	{
		ComPtr<ID3D12Device2> device2;
		device.As(&device2);
		HRESULT res = {};
		if (device2) {
			D3D12_PIPELINE_STATE_STREAM_DESC pssd = {};
			pssd.pPipelineStateSubobjectStream = &pipeline_desc;
			pssd.SizeInBytes = sizeof(pipeline_desc);
			res = device2->CreatePipelineState(&pssd, IID_PPV_ARGS(pipeline.pso.GetAddressOf()));
		} else {
			// Some features won't be available (like depth bounds).
			// TODO: Check and/or report error then?
			D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = pipeline_desc.GraphicsDescV0();
			res = device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(pipeline.pso.GetAddressOf()));
		}
		ERR_FAIL_COND_V_MSG(res, RID(), "CreateGraphicsPipelineState failed with error " + vformat("0x%08x", res) + " for shader '" + shader->name + "'.");
#ifdef DEBUG_SAVE_PSO_BLOBS
		_save_pso_blob(pipeline.pso.Get(), shader, p_specialization_constants);
#endif
#ifdef DEBUG_SAVE_DXIL_BLOBS
		_save_stages_bytecode(final_stages_bytecode, shader, p_shader, p_specialization_constants);
#endif
	}
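	// Snapshot the shader's per-set binding info under a fresh ID, so draw-time binding can
	// look it up without going through the Shader object again.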
	{
		Vector<Vector<UniformBindingInfo>> bindings;
		bindings.resize(shader->sets.size());
		for (int i = 0; i < shader->sets.size(); i++) {
			bindings.write[i].resize(shader->sets[i].uniforms.size());
			for (int j = 0; j < shader->sets[i].uniforms.size(); j++) {
				bindings.write[i].write[j] = shader->sets[i].uniforms[j].binding;
			}
		}
		pipeline_bindings[next_pipeline_binding_id] = bindings;
		pipeline.bindings_id = next_pipeline_binding_id;
		next_pipeline_binding_id++;
	}
	pipeline.root_signature_crc = shader->root_signature_crc;
	pipeline.set_formats = shader->set_formats;
	pipeline.shader = p_shader;
	pipeline.spirv_push_constant_size = shader->spirv_push_constant_size;
	pipeline.dxil_push_constant_size = shader->dxil_push_constant_size;
	pipeline.nir_runtime_data_root_param_idx = shader->nir_runtime_data_root_param_idx;
	pipeline.dyn_params = dyn_params;
#ifdef DEBUG_ENABLED
	pipeline.validation.dynamic_state = p_dynamic_state_flags;
	pipeline.validation.framebuffer_format = p_framebuffer_format;
	pipeline.validation.render_pass = p_for_render_pass;
	pipeline.validation.vertex_format = p_vertex_format;
	pipeline.validation.uses_restart_indices = pipeline_desc.IBStripCutValue != D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
	static const uint32_t primitive_divisor[RENDER_PRIMITIVE_MAX] = {
		1, 2, 1, 1, 1, 3, 1, 1, 1, 1, 1
	};
	pipeline.validation.primitive_divisor = primitive_divisor[p_render_primitive];
	static const uint32_t primitive_minimum[RENDER_PRIMITIVE_MAX] = {
		1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 1
	};
	pipeline.validation.primitive_minimum = primitive_minimum[p_render_primitive];
#endif
	// Create ID to associate with this pipeline.
	RID id = render_pipeline_owner.make_rid(pipeline);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	// Now add all the dependencies.
	_add_dependency(id, p_shader);
	return id;
}

bool RenderingDeviceD3D12::render_pipeline_is_valid(RID p_pipeline) {
	_THREAD_SAFE_METHOD_
	return render_pipeline_owner.owns(p_pipeline);
}

/**************************/
/**** COMPUTE PIPELINE ****/
/**************************/

RID RenderingDeviceD3D12::compute_pipeline_create(RID p_shader, const Vector<PipelineSpecializationConstant> &p_specialization_constants) {
#ifdef DEV_ENABLED
	//#define DEBUG_CREATE_DEBUG_PSO
	//#define DEBUG_SAVE_PSO_BLOBS
	//#define DEBUG_SAVE_DXIL_BLOBS
#endif
	_THREAD_SAFE_METHOD_
	// Needs a shader.
	Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, RID());
	ERR_FAIL_COND_V_MSG(!shader->is_compute, RID(),
			"Non-compute shaders can't be used in compute pipelines");
	CD3DX12_PIPELINE_STATE_STREAM pipeline_desc = {};
	// Stages bytecodes + specialization constants.
	pipeline_desc.pRootSignature = shader->root_signature.Get();
#ifdef DEBUG_CREATE_DEBUG_PSO
	pipeline_desc.Flags = D3D12_PIPELINE_STATE_FLAG_TOOL_DEBUG;
#endif
	HashMap<ShaderStage, Vector<uint8_t>> final_stages_bytecode;
	Error err = _apply_specialization_constants(shader, p_specialization_constants, final_stages_bytecode);
	ERR_FAIL_COND_V(err, RID());
	pipeline_desc.CS = D3D12_SHADER_BYTECODE{
		final_stages_bytecode[SHADER_STAGE_COMPUTE].ptr(),
		(SIZE_T)final_stages_bytecode[SHADER_STAGE_COMPUTE].size()
	};
	ComputePipeline pipeline;
	{
		ComPtr<ID3D12Device2> device2;
		device.As(&device2);
		HRESULT res = {};
		if (device2) {
			D3D12_PIPELINE_STATE_STREAM_DESC pssd = {};
			pssd.pPipelineStateSubobjectStream = &pipeline_desc;
			pssd.SizeInBytes = sizeof(pipeline_desc);
			res = device2->CreatePipelineState(&pssd, IID_PPV_ARGS(pipeline.pso.GetAddressOf()));
		} else {
			D3D12_COMPUTE_PIPELINE_STATE_DESC desc = pipeline_desc.ComputeDescV0();
			res = device->CreateComputePipelineState(&desc, IID_PPV_ARGS(pipeline.pso.GetAddressOf()));
		}
		ERR_FAIL_COND_V_MSG(res, RID(), "CreateComputePipelineState failed with error " + vformat("0x%08x", res) + " for shader '" + shader->name + "'.");
#ifdef DEBUG_SAVE_PSO_BLOBS
		_save_pso_blob(pipeline.pso.Get(), shader, p_specialization_constants);
#endif
#ifdef DEBUG_SAVE_DXIL_BLOBS
		_save_stages_bytecode(final_stages_bytecode, shader, p_shader, p_specialization_constants);
#endif
	}
	{
		Vector<Vector<UniformBindingInfo>> bindings;
		bindings.resize(shader->sets.size());
		for (int i = 0; i < shader->sets.size(); i++) {
			bindings.write[i].resize(shader->sets[i].uniforms.size());
			for (int j = 0; j < shader->sets[i].uniforms.size(); j++) {
				bindings.write[i].write[j] = shader->sets[i].uniforms[j].binding;
			}
		}
		pipeline_bindings[next_pipeline_binding_id] = bindings;
		pipeline.bindings_id = next_pipeline_binding_id;
		next_pipeline_binding_id++;
	}
	pipeline.root_signature_crc = shader->root_signature_crc;
	pipeline.set_formats = shader->set_formats;
	pipeline.shader = p_shader;
	pipeline.spirv_push_constant_size = shader->spirv_push_constant_size;
	pipeline.dxil_push_constant_size = shader->dxil_push_constant_size;
	pipeline.local_group_size[0] = shader->compute_local_size[0];
	pipeline.local_group_size[1] = shader->compute_local_size[1];
	pipeline.local_group_size[2] = shader->compute_local_size[2];
	// Create ID to associate with this pipeline.
	RID id = compute_pipeline_owner.make_rid(pipeline);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	// Now add all the dependencies.
	_add_dependency(id, p_shader);
	return id;
}

bool RenderingDeviceD3D12::compute_pipeline_is_valid(RID p_pipeline) {
	return compute_pipeline_owner.owns(p_pipeline);
}

/****************/
/**** SCREEN ****/
/****************/

int RenderingDeviceD3D12::screen_get_width(DisplayServer::WindowID p_screen) const {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_V_MSG(local_device.is_valid(), -1, "Local devices have no screen");
	return context->window_get_width(p_screen);
}

int RenderingDeviceD3D12::screen_get_height(DisplayServer::WindowID p_screen) const {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_V_MSG(local_device.is_valid(), -1, "Local devices have no screen");
	return context->window_get_height(p_screen);
}

RenderingDevice::FramebufferFormatID RenderingDeviceD3D12::screen_get_framebuffer_format() const {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_V_MSG(local_device.is_valid(), INVALID_ID, "Local devices have no screen");
	// Very hacky, but not used often per frame so I guess ok.
	DXGI_FORMAT d3d12_format = context->get_screen_format();
	DataFormat format = DATA_FORMAT_MAX;
	for (int i = 0; i < DATA_FORMAT_MAX; i++) {
		if (d3d12_format == d3d12_formats[i].general_format) {
			format = DataFormat(i);
			break;
		}
	}
	ERR_FAIL_COND_V(format == DATA_FORMAT_MAX, INVALID_ID);
	AttachmentFormat attachment;
	attachment.format = format;
	attachment.samples = TEXTURE_SAMPLES_1;
	attachment.usage_flags = TEXTURE_USAGE_COLOR_ATTACHMENT_BIT;
	Vector<AttachmentFormat> screen_attachment;
	screen_attachment.push_back(attachment);
	return const_cast<RenderingDeviceD3D12 *>(this)->framebuffer_format_create(screen_attachment);
}

/*******************/
/**** DRAW LIST ****/
/*******************/

RenderingDevice::DrawListID RenderingDeviceD3D12::draw_list_begin_for_screen(DisplayServer::WindowID p_screen, const Color &p_clear_color) {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_V_MSG(local_device.is_valid(), INVALID_ID, "Local devices have no screen");
	ERR_FAIL_COND_V_MSG(draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");
	ERR_FAIL_COND_V_MSG(compute_list != nullptr, INVALID_ID, "Only one draw/compute list can be active at the same time.");
	if (!context->window_is_valid_swapchain(p_screen)) {
		return INVALID_ID;
	}
	Size2i size = Size2i(context->window_get_width(p_screen), context->window_get_height(p_screen));
	_draw_list_allocate(Rect2i(Vector2i(), size), 0, 0);
	Vector<Color> clear_colors;
	clear_colors.push_back(p_clear_color);
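	// Build a transient framebuffer around the swapchain's RTV; it only lives for the duration of this draw list.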
	curr_screen_framebuffer = Framebuffer();
	curr_screen_framebuffer.window_id = p_screen;
	curr_screen_framebuffer.format_id = screen_get_framebuffer_format();
	curr_screen_framebuffer.size = size;
	curr_screen_framebuffer.screen_rtv_handle = context->window_get_framebuffer_rtv_handle(p_screen);
	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();
	Error err = _draw_list_render_pass_begin(&curr_screen_framebuffer, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, INITIAL_ACTION_DROP, FINAL_ACTION_DISCARD, clear_colors, 0.0f, 0, Rect2i(), Point2i(), size, command_list, Vector<RID>());
	if (err != OK) {
		return INVALID_ID;
	}
	return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}

Error RenderingDeviceD3D12::_draw_list_render_pass_begin(Framebuffer *framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_colors, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, Point2i viewport_offset, Point2i viewport_size, ID3D12GraphicsCommandList *command_list, const Vector<RID> &p_storage_textures) {
	const FramebufferFormat &fb_format = framebuffer_formats[framebuffer->format_id];
	bool is_screen = framebuffer->window_id != DisplayServer::INVALID_WINDOW_ID;
	if (!is_screen) {
		ERR_FAIL_COND_V(fb_format.attachments.size() != framebuffer->texture_ids.size(), ERR_BUG);
	}
	CD3DX12_RECT region_rect(0, 0, framebuffer->size.x, framebuffer->size.y);
	if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { // Check custom region.
		Rect2i viewport(viewport_offset, viewport_size);
		Rect2i regioni = p_region;
		if (!viewport.encloses(regioni)) {
			ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "When supplying a custom region, it must be contained within the framebuffer rectangle");
		}
		viewport_offset = regioni.position;
		viewport_size = regioni.size;
		region_rect = CD3DX12_RECT(
				p_region.position.x,
				p_region.position.y,
				p_region.position.x + p_region.size.x,
				p_region.position.y + p_region.size.y);
	}
	if (p_initial_color_action == INITIAL_ACTION_CLEAR) { // Check clear values.
		int color_count = 0;
		if (is_screen) {
			color_count = 1;
		} else {
			for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
				Texture *texture = texture_owner.get_or_null(framebuffer->texture_ids[i]);
				if (!texture || (!(texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !(i != 0 && texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT))) {
					if (!texture || !texture->is_resolve_buffer) {
						color_count++;
					}
				}
			}
		}
		ERR_FAIL_COND_V_MSG(p_clear_colors.size() != color_count, ERR_INVALID_PARAMETER,
				"Clear color values supplied (" + itos(p_clear_colors.size()) + ") differ from the amount required for framebuffer color attachments (" + itos(color_count) + ").");
	}
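	// Translate the requested initial actions into what D3D12 can express directly:
	// a clear (optionally limited to the custom region) or a DiscardResource() call;
	// KEEP and CONTINUE need no work, since attachment contents are preserved by default.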
	struct SetupInfo {
		enum {
			ACTION_NONE,
			ACTION_DISCARD,
			ACTION_CLEAR,
		} action = ACTION_NONE;
		UINT num_rects = 0;
		D3D12_RECT *rect_ptr = nullptr;
		D3D12_RESOURCE_STATES new_state = {};

		SetupInfo(InitialAction p_action, D3D12_RECT *p_region_rect, bool p_is_color) {
			switch (p_action) {
				case INITIAL_ACTION_CLEAR: {
					action = ACTION_CLEAR;
				} break;
				case INITIAL_ACTION_CLEAR_REGION: {
					action = ACTION_CLEAR;
					num_rects = 1;
					rect_ptr = p_region_rect;
				} break;
				case INITIAL_ACTION_CLEAR_REGION_CONTINUE: {
					action = ACTION_CLEAR;
					num_rects = 1;
					rect_ptr = p_region_rect;
				} break;
				case INITIAL_ACTION_KEEP: {
				} break;
				case INITIAL_ACTION_DROP: {
					action = ACTION_DISCARD; // TODO: Are we really intended to do a resource Discard() as the initial action, when the final action can already do it?
				} break;
				case INITIAL_ACTION_CONTINUE: {
				} break;
			}
		}
	};
	SetupInfo setup_color(p_initial_color_action, &region_rect, true);
	SetupInfo setup_depth(p_initial_depth_action, &region_rect, false);
	draw_list_bound_textures.clear();
	draw_list_unbind_color_textures = p_final_color_action != FINAL_ACTION_CONTINUE;
	draw_list_unbind_depth_textures = p_final_depth_action != FINAL_ACTION_CONTINUE;
	ID3D12Resource **discards = (ID3D12Resource **)alloca(sizeof(ID3D12Resource *) * fb_format.attachments.size());
	uint32_t num_discards = 0;
	struct RTVClear {
		D3D12_CPU_DESCRIPTOR_HANDLE handle;
		Color color;
	};
	RTVClear *rtv_clears = (RTVClear *)alloca(sizeof(RTVClear) * fb_format.attachments.size());
	uint32_t num_rtv_clears = 0;
	bool dsv_clear = false;
	DescriptorsHeap::Walker rtv_heap_walker = framebuffer->rtv_heap.make_walker();
	int color_index = 0;
	for (int i = 0; i < fb_format.attachments.size(); i++) {
		RID texture_rid;
		Texture *texture = nullptr;
		if (!is_screen) {
			texture_rid = framebuffer->texture_ids[i];
			if (texture_rid.is_null()) {
				color_index++;
				continue;
			}
			texture = texture_owner.get_or_null(texture_rid);
			ERR_FAIL_NULL_V(texture, ERR_BUG);
			texture->bound = true;
			draw_list_bound_textures.push_back(texture_rid);
		}
		// We can set up a framebuffer that writes to our VRS texture in order to initialize it.
		// We make the assumption here that if our texture is actually used as our VRS attachment,
		// it is used as such for each subpass. This is fairly safe to assume, given the restrictions on subpasses (in Vulkan).
		// [[VRS_EVERY_SUBPASS_OR_NONE]]
		bool is_vrs = fb_format.attachments[i].usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT && i == fb_format.passes[0].vrs_attachment;
		if (is_vrs) {
			DEV_ASSERT(!is_screen);
			DEV_ASSERT(texture->owner_mipmaps == 1);
			DEV_ASSERT(texture->owner_layers == 1);
			_resource_transition_batch(texture, 0, texture->planes, D3D12_RESOURCE_STATE_SHADING_RATE_SOURCE);
		} else {
			if ((fb_format.attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
				if (!is_screen) { // Screen backbuffers are transitioned in prepare_buffers().
					for (uint32_t j = 0; j < texture->layers; j++) {
						for (uint32_t k = 0; k < texture->mipmaps; k++) {
							uint32_t subresource = D3D12CalcSubresource(texture->base_mipmap + k, texture->base_layer + j, 0, texture->owner_mipmaps, texture->owner_layers);
							_resource_transition_batch(texture, subresource, texture->planes, D3D12_RESOURCE_STATE_RENDER_TARGET);
						}
					}
				}
				if (setup_color.action == SetupInfo::ACTION_DISCARD) {
					ID3D12Resource *resource = is_screen ? context->window_get_framebuffer_texture(framebuffer->window_id) : texture->resource;
					discards[num_discards++] = resource;
				} else if (setup_color.action == SetupInfo::ACTION_CLEAR) {
					D3D12_CPU_DESCRIPTOR_HANDLE handle = is_screen ? framebuffer->screen_rtv_handle : rtv_heap_walker.get_curr_cpu_handle();
					Color clear_color = color_index < p_clear_colors.size() ? p_clear_colors[color_index] : Color();
					rtv_clears[num_rtv_clears++] = RTVClear{ handle, clear_color };
				}
				color_index++;
				if (!is_screen) {
					rtv_heap_walker.advance();
				}
			} else if ((fb_format.attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
				DEV_ASSERT(!is_screen);
				for (uint32_t j = 0; j < texture->layers; j++) {
					for (uint32_t k = 0; k < texture->mipmaps; k++) {
						uint32_t subresource = D3D12CalcSubresource(texture->base_mipmap + k, texture->base_layer + j, 0, texture->owner_mipmaps, texture->owner_layers);
						_resource_transition_batch(texture, subresource, texture->planes, D3D12_RESOURCE_STATE_DEPTH_WRITE);
					}
				}
				if (setup_depth.action == SetupInfo::ACTION_DISCARD) {
					discards[num_discards++] = texture->resource;
				} else if (setup_depth.action == SetupInfo::ACTION_CLEAR) {
					dsv_clear = true;
				}
			}
		}
	}
	for (int i = 0; i < p_storage_textures.size(); i++) {
		Texture *texture = texture_owner.get_or_null(p_storage_textures[i]);
		if (!texture) {
			continue;
		}
		ERR_CONTINUE_MSG(!(texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT), "Supplied storage texture " + itos(i) + " for draw list is not set to be used for storage.");
	}
	_resource_transitions_flush(command_list); // Flush on the same list this render pass records into.
	for (uint32_t i = 0; i < num_discards; i++) {
		command_list->DiscardResource(discards[i], nullptr);
	}
	for (uint32_t i = 0; i < num_rtv_clears; i++) {
		command_list->ClearRenderTargetView(
				rtv_clears[i].handle,
				rtv_clears[i].color.components,
				setup_color.num_rects,
				setup_color.rect_ptr);
	}
	if (dsv_clear) {
		command_list->ClearDepthStencilView(
				framebuffer->dsv_heap.get_heap()->GetCPUDescriptorHandleForHeapStart(),
				D3D12_CLEAR_FLAG_DEPTH | D3D12_CLEAR_FLAG_STENCIL,
				p_clear_depth,
				p_clear_stencil,
				setup_depth.num_rects,
				setup_depth.rect_ptr);
	}
	{
		CD3DX12_VIEWPORT viewport(
				viewport_offset.x,
				viewport_offset.y,
				viewport_size.x,
				viewport_size.y,
				0.0f,
				1.0f);
		command_list->RSSetViewports(1, &viewport);
		CD3DX12_RECT scissor(
				viewport_offset.x,
				viewport_offset.y,
				viewport_offset.x + viewport_size.x,
				viewport_offset.y + viewport_size.y);
		command_list->RSSetScissorRects(1, &scissor);
	}
	draw_list_subpass_count = fb_format.passes.size();
	draw_list_current_subpass = 0;
	draw_list_final_color_action = p_final_color_action;
	draw_list_final_depth_action = p_final_depth_action;
	draw_list_framebuffer = framebuffer;
	draw_list_viewport_size = viewport_size;
	_draw_list_subpass_begin();
	return OK;
}

RenderingDevice::DrawListID RenderingDeviceD3D12::draw_list_begin(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const Vector<RID> &p_storage_textures) {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_V_MSG(draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");
	ERR_FAIL_COND_V_MSG(compute_list != nullptr && !compute_list->state.allow_draw_overlap, INVALID_ID, "Only one draw/compute list can be active at the same time.");
	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL_V(framebuffer, INVALID_ID);
	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();
	Error err = _draw_list_render_pass_begin(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, p_clear_color_values, p_clear_depth, p_clear_stencil, p_region, Point2i(), framebuffer->size, command_list, p_storage_textures);
	if (err != OK) {
		return INVALID_ID;
	}
	_draw_list_allocate(Rect2i(Point2i(), framebuffer->size), 0, 0);
	return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}

Error RenderingDeviceD3D12::draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const Vector<RID> &p_storage_textures) {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_V_MSG(draw_list != nullptr, ERR_BUSY, "Only one draw list can be active at the same time.");
	ERR_FAIL_COND_V_MSG(compute_list != nullptr && !compute_list->state.allow_draw_overlap, ERR_BUSY, "Only one draw/compute list can be active at the same time.");
	ERR_FAIL_COND_V(p_splits < 1, ERR_INVALID_DECLARATION);
	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL_V(framebuffer, ERR_INVALID_DECLARATION);
	ID3D12GraphicsCommandList *frame_command_list = frames[frame].draw_command_list.Get();
	Error err = _draw_list_render_pass_begin(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, p_clear_color_values, p_clear_depth, p_clear_stencil, p_region, Point2i(), framebuffer->size, frame_command_list, p_storage_textures);
	if (err != OK) {
		return ERR_CANT_CREATE;
	}
	err = _draw_list_allocate(Rect2i(Point2i(), framebuffer->size), p_splits, 0);
	if (err != OK) {
		return err;
	}
	for (uint32_t i = 0; i < p_splits; i++) {
		// In Vulkan, we'd be setting viewports and scissors for each split here;
		// D3D12 doesn't need it (it's even forbidden, for that matter).
		r_split_ids[i] = (int64_t(ID_TYPE_SPLIT_DRAW_LIST) << ID_BASE_SHIFT) + i;
	}
	return OK;
}
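
// Draw list IDs encode the list type in the bits above ID_BASE_SHIFT; for split lists,
// the low bits carry the split index, which is validated against draw_list_count below.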
RenderingDeviceD3D12::DrawList *RenderingDeviceD3D12::_get_draw_list_ptr(DrawListID p_id) {
	if (p_id < 0) {
		return nullptr;
	}
	if (!draw_list) {
		return nullptr;
	} else if (p_id == (int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT)) {
		if (draw_list_split) {
			return nullptr;
		}
		return draw_list;
	} else if (p_id >> DrawListID(ID_BASE_SHIFT) == ID_TYPE_SPLIT_DRAW_LIST) {
		if (!draw_list_split) {
			return nullptr;
		}
		uint64_t index = p_id & ((DrawListID(1) << DrawListID(ID_BASE_SHIFT)) - 1); // Mask.
		if (index >= draw_list_count) {
			return nullptr;
		}
		return &draw_list[index];
	} else {
		return nullptr;
	}
}

void RenderingDeviceD3D12::draw_list_set_blend_constants(DrawListID p_list, const Color &p_color) {
	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
	dl->command_list->OMSetBlendFactor(p_color.components);
}

void RenderingDeviceD3D12::draw_list_bind_render_pipeline(DrawListID p_list, RID p_render_pipeline) {
	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
	const RenderPipeline *pipeline = render_pipeline_owner.get_or_null(p_render_pipeline);
	ERR_FAIL_NULL(pipeline);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND(pipeline->validation.framebuffer_format != draw_list_framebuffer->format_id && pipeline->validation.render_pass != draw_list_current_subpass);
#endif
	if (p_render_pipeline == dl->state.pipeline) {
		return; // Redundant state, return.
	}
	dl->state.pipeline = p_render_pipeline;
	dl->state.pso = pipeline->pso.Get();
	dl->command_list->IASetPrimitiveTopology(pipeline->dyn_params.primitive_topology);
	dl->command_list->OMSetBlendFactor(pipeline->dyn_params.blend_constant.components);
	dl->command_list->OMSetStencilRef(pipeline->dyn_params.stencil_reference);
	ID3D12GraphicsCommandList1 *command_list_1 = nullptr;
	dl->command_list->QueryInterface<ID3D12GraphicsCommandList1>(&command_list_1);
	if (command_list_1) {
		command_list_1->OMSetDepthBounds(pipeline->dyn_params.depth_bounds_min, pipeline->dyn_params.depth_bounds_max);
		command_list_1->Release();
	}
	Shader *shader = shader_owner.get_or_null(pipeline->shader);
	if (dl->state.pipeline_shader != pipeline->shader) {
		if (dl->state.root_signature_crc != pipeline->root_signature_crc) {
			dl->command_list->SetGraphicsRootSignature(shader->root_signature.Get());
			dl->state.root_signature_crc = pipeline->root_signature_crc;
			// Root signature changed, so current descriptor set bindings become invalid.
			for (uint32_t i = 0; i < dl->state.set_count; i++) {
				dl->state.sets[i].bound = false;
			}
			if (pipeline->nir_runtime_data_root_param_idx != UINT32_MAX) {
				// Set the viewport size part of the DXIL-NIR runtime data, which is the only part we currently know we need.
				constexpr dxil_spirv_vertex_runtime_data dummy_data = {};
				uint32_t offset = (uint32_t)((char *)&dummy_data.viewport_width - (char *)&dummy_data) / 4;
				dl->command_list->SetGraphicsRoot32BitConstants(pipeline->nir_runtime_data_root_param_idx, 2, &draw_list_viewport_size, offset);
			}
		}
		const uint32_t *pformats = pipeline->set_formats.ptr(); // Pipeline set formats.
		dl->state.set_count = pipeline->set_formats.size(); // Update set count.
		for (uint32_t i = 0; i < dl->state.set_count; i++) {
			dl->state.sets[i].pipeline_expected_format = pformats[i];
#ifdef DEV_ENABLED
			dl->state.sets[i]._pipeline_expected_format = pformats[i] ? &uniform_set_format_cache_reverse[pformats[i] - 1]->key().uniform_info : nullptr;
#endif
		}
		if (pipeline->spirv_push_constant_size) {
#ifdef DEBUG_ENABLED
			dl->validation.pipeline_push_constant_supplied = false;
#endif
		}
		dl->state.pipeline_shader = pipeline->shader;
		dl->state.pipeline_dxil_push_constant_size = pipeline->dxil_push_constant_size;
		dl->state.pipeline_bindings_id = pipeline->bindings_id;
#ifdef DEV_ENABLED
		dl->state._shader = shader;
#endif
	}
#ifdef DEBUG_ENABLED
	// Update render pass pipeline info.
	dl->validation.pipeline_active = true;
	dl->validation.pipeline_dynamic_state = pipeline->validation.dynamic_state;
	dl->validation.pipeline_vertex_format = pipeline->validation.vertex_format;
	dl->validation.pipeline_uses_restart_indices = pipeline->validation.uses_restart_indices;
	dl->validation.pipeline_primitive_divisor = pipeline->validation.primitive_divisor;
	dl->validation.pipeline_primitive_minimum = pipeline->validation.primitive_minimum;
	dl->validation.pipeline_spirv_push_constant_size = pipeline->spirv_push_constant_size;
#endif
}

void RenderingDeviceD3D12::draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index) {
	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
	const UniformSet *uniform_set = uniform_set_owner.get_or_null(p_uniform_set);
	ERR_FAIL_NULL(uniform_set);
	if (p_index > dl->state.set_count) {
		dl->state.set_count = p_index;
	}
	dl->state.sets[p_index].bound = false; // Needs rebind.
	dl->state.sets[p_index].uniform_set_format = uniform_set->format;
	dl->state.sets[p_index].uniform_set = p_uniform_set;
#ifdef DEV_ENABLED
	dl->state.sets[p_index]._uniform_set = uniform_set_owner.get_or_null(p_uniform_set);
#endif
#ifdef DEBUG_ENABLED
	{ // Validate that textures bound are not attached as framebuffer bindings.
		uint32_t attachable_count = uniform_set->attachable_textures.size();
		const UniformSet::AttachableTexture *attachable_ptr = uniform_set->attachable_textures.ptr();
		uint32_t bound_count = draw_list_bound_textures.size();
		const RID *bound_ptr = draw_list_bound_textures.ptr();
		for (uint32_t i = 0; i < attachable_count; i++) {
			for (uint32_t j = 0; j < bound_count; j++) {
				ERR_FAIL_COND_MSG(attachable_ptr[i].texture == bound_ptr[j],
						"Attempted to use the same texture in framebuffer attachment and a uniform (set: " + itos(p_index) + ", binding: " + itos(attachable_ptr[i].bind) + "), this is not allowed.");
			}
		}
	}
#endif
}

void RenderingDeviceD3D12::draw_list_bind_vertex_array(DrawListID p_list, RID p_vertex_array) {
	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
	const VertexArray *vertex_array = vertex_array_owner.get_or_null(p_vertex_array);
	ERR_FAIL_NULL(vertex_array);
	if (dl->state.vertex_array == p_vertex_array) {
		return; // Already set.
	}
	dl->state.vertex_array = p_vertex_array;
#ifdef DEBUG_ENABLED
	dl->validation.vertex_format = vertex_array->description;
	dl->validation.vertex_max_instances_allowed = vertex_array->max_instances_allowed;
#endif
	dl->validation.vertex_array_size = vertex_array->vertex_count;
	for (Buffer *buffer : vertex_array->unique_buffers) {
		_resource_transition_batch(buffer, 0, 1, D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER);
	}
	_resource_transitions_flush(dl->command_list);
	dl->command_list->IASetVertexBuffers(0, vertex_array->views.size(), vertex_array->views.ptr());
}

void RenderingDeviceD3D12::draw_list_bind_index_array(DrawListID p_list, RID p_index_array) {
	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
	const IndexArray *index_array = index_array_owner.get_or_null(p_index_array);
	ERR_FAIL_NULL(index_array);
	if (dl->state.index_array == p_index_array) {
		return; // Already set.
	}
	dl->state.index_array = p_index_array;
#ifdef DEBUG_ENABLED
	dl->validation.index_array_max_index = index_array->max_index;
#endif
	dl->validation.index_array_size = index_array->indices;
	dl->validation.index_array_offset = index_array->offset;
	_resource_transition_batch(index_array->buffer, 0, 1, D3D12_RESOURCE_STATE_INDEX_BUFFER);
	_resource_transitions_flush(dl->command_list);
	dl->command_list->IASetIndexBuffer(&index_array->view);
}

void RenderingDeviceD3D12::draw_list_set_line_width(DrawListID p_list, float p_width) {
	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
	if (!Math::is_equal_approx(p_width, 1.0f)) {
		ERR_FAIL_MSG("Setting line widths other than 1.0 is not supported by the Direct3D 12 rendering driver.");
	}
}
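
// D3D12 keeps CBV/SRV/UAV descriptors and sampler descriptors in separate heap types, so a
// uniform set is walked with two walkers (resources and samplers), each feeding its own set
// of root descriptor tables.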
void RenderingDeviceD3D12::_bind_uniform_set(UniformSet *p_uniform_set, const Shader::Set &p_shader_set, const Vector<UniformBindingInfo> &p_bindings, ID3D12GraphicsCommandList *p_command_list, bool p_for_compute) {
	using SetRootDescriptorTableFn = void (STDMETHODCALLTYPE ID3D12GraphicsCommandList::*)(UINT, D3D12_GPU_DESCRIPTOR_HANDLE);
	SetRootDescriptorTableFn set_root_desc_table_fn = p_for_compute ? &ID3D12GraphicsCommandList::SetComputeRootDescriptorTable : &ID3D12GraphicsCommandList::SetGraphicsRootDescriptorTable;
	// If this set's descriptors have already been set for the current execution and a compatible root signature, reuse!
	uint32_t root_sig_crc = p_for_compute ? compute_list->state.root_signature_crc : draw_list->state.root_signature_crc;
	UniformSet::RecentBind *last_bind = nullptr;
	for (int i = 0; i < ARRAY_SIZE(p_uniform_set->recent_binds); i++) {
		if (p_uniform_set->recent_binds[i].execution_index == frames[frame].execution_index) {
			if (p_uniform_set->recent_binds[i].root_signature_crc == root_sig_crc) {
				for (const RootDescriptorTable &table : p_uniform_set->recent_binds[i].root_tables.resources) {
					(p_command_list->*set_root_desc_table_fn)(table.root_param_idx, table.start_gpu_handle);
				}
				for (const RootDescriptorTable &table : p_uniform_set->recent_binds[i].root_tables.samplers) {
					(p_command_list->*set_root_desc_table_fn)(table.root_param_idx, table.start_gpu_handle);
				}
#ifdef DEV_ENABLED
				p_uniform_set->recent_binds[i].uses++;
				frames[frame].uniform_set_reused++;
#endif
				return;
			} else {
				if (!last_bind || p_uniform_set->recent_binds[i].uses < last_bind->uses) {
					// Prefer this one, since it's been used less, or we don't have a better option yet.
					last_bind = &p_uniform_set->recent_binds[i];
				}
			}
		} else {
			// Prefer this one since it's unused.
			last_bind = &p_uniform_set->recent_binds[i];
			last_bind->uses = 0;
		}
	}
	struct {
		DescriptorsHeap::Walker *resources = nullptr;
		DescriptorsHeap::Walker *samplers = nullptr;
	} frame_heap_walkers;
	frame_heap_walkers.resources = &frames[frame].desc_heap_walkers.resources;
	frame_heap_walkers.samplers = &frames[frame].desc_heap_walkers.samplers;
	struct {
		DescriptorsHeap::Walker resources;
		DescriptorsHeap::Walker samplers;
	} set_heap_walkers;
	set_heap_walkers.resources = p_uniform_set->desc_heaps.resources.make_walker();
	set_heap_walkers.samplers = p_uniform_set->desc_heaps.samplers.make_walker();
#ifdef DEV_ENABLED
	// Whether we have stages where the uniform is actually used should match
	// whether we have any root signature locations for it.
	for (int i = 0; i < p_shader_set.uniforms.size(); i++) {
		bool has_rs_locations = false;
		if (p_bindings[i].root_sig_locations.resource.root_param_idx != UINT32_MAX ||
				p_bindings[i].root_sig_locations.sampler.root_param_idx != UINT32_MAX) {
			has_rs_locations = true;
			break;
		}
		bool has_stages = p_bindings[i].stages;
		DEV_ASSERT(has_rs_locations == has_stages);
	}
#endif
	last_bind->root_tables.resources.reserve(p_shader_set.num_root_params.resources);
	last_bind->root_tables.resources.clear();
	last_bind->root_tables.samplers.reserve(p_shader_set.num_root_params.samplers);
	last_bind->root_tables.samplers.clear();
	last_bind->uses++;
	struct {
		RootDescriptorTable *resources = nullptr;
		RootDescriptorTable *samplers = nullptr;
	} tables;
	for (int i = 0; i < p_shader_set.uniforms.size(); i++) {
		const Shader::ShaderUniformInfo &uniform_info = p_shader_set.uniforms[i];
		uint32_t num_resource_descs = 0;
		uint32_t num_sampler_descs = 0;
		bool srv_uav_ambiguity = false;
		_add_descriptor_count_for_uniform(uniform_info.info.type, uniform_info.info.length, false, num_resource_descs, num_sampler_descs, srv_uav_ambiguity);
		bool resource_used = false;
		if (p_bindings[i].stages) {
			{
				const UniformBindingInfo::RootSignatureLocation &rs_loc_resource = p_bindings[i].root_sig_locations.resource;
				if (rs_loc_resource.root_param_idx != UINT32_MAX) { // Location used?
					DEV_ASSERT(num_resource_descs);
					DEV_ASSERT(!(srv_uav_ambiguity && (p_bindings[i].res_class != RES_CLASS_SRV && p_bindings[i].res_class != RES_CLASS_UAV))); // [[SRV_UAV_AMBIGUITY]]
					bool must_flush_table = tables.resources && rs_loc_resource.root_param_idx != tables.resources->root_param_idx;
					if (must_flush_table) {
						// Check the root signature data has been filled ordered.
						DEV_ASSERT(rs_loc_resource.root_param_idx > tables.resources->root_param_idx);
						(p_command_list->*set_root_desc_table_fn)(tables.resources->root_param_idx, tables.resources->start_gpu_handle);
						tables.resources = nullptr;
					}
					if (unlikely(frame_heap_walkers.resources->get_free_handles() < num_resource_descs)) {
						if (!frames[frame].desc_heaps_exhausted_reported.resources) {
							frames[frame].desc_heaps_exhausted_reported.resources = true;
							ERR_FAIL_MSG("Cannot bind uniform set because there's not enough room in the current frame's RESOURCES descriptor heap.\n"
									"Please increase the value of the rendering/rendering_device/d3d12/max_resource_descriptors_per_frame project setting.");
						} else {
							return;
						}
					}
					if (!tables.resources) {
						DEV_ASSERT(last_bind->root_tables.resources.size() < last_bind->root_tables.resources.get_capacity());
						last_bind->root_tables.resources.resize(last_bind->root_tables.resources.size() + 1);
						tables.resources = &last_bind->root_tables.resources[last_bind->root_tables.resources.size() - 1];
						tables.resources->root_param_idx = rs_loc_resource.root_param_idx;
						tables.resources->start_gpu_handle = frame_heap_walkers.resources->get_curr_gpu_handle();
					}
					// If there is ambiguity and it didn't clarify as SRVs, skip them, which come first. [[SRV_UAV_AMBIGUITY]]
					if (srv_uav_ambiguity && p_bindings[i].res_class != RES_CLASS_SRV) {
						set_heap_walkers.resources.advance(num_resource_descs);
					}
					// TODO: Batch to avoid multiple calls where possible (in any case, flush before setting root descriptor tables, or even batch that as well).
					device->CopyDescriptorsSimple(
							num_resource_descs,
							frame_heap_walkers.resources->get_curr_cpu_handle(),
							set_heap_walkers.resources.get_curr_cpu_handle(),
							D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
					frame_heap_walkers.resources->advance(num_resource_descs);
					// If there is ambiguity and it didn't clarify as UAVs, skip them, which come later. [[SRV_UAV_AMBIGUITY]]
					if (srv_uav_ambiguity && p_bindings[i].res_class != RES_CLASS_UAV) {
						set_heap_walkers.resources.advance(num_resource_descs);
					}
					resource_used = true;
				}
			}
			{
				const UniformBindingInfo::RootSignatureLocation &rs_loc_sampler = p_bindings[i].root_sig_locations.sampler;
				if (rs_loc_sampler.root_param_idx != UINT32_MAX) { // Location used?
					DEV_ASSERT(num_sampler_descs);
					DEV_ASSERT(!srv_uav_ambiguity); // [[SRV_UAV_AMBIGUITY]]
					bool must_flush_table = tables.samplers && rs_loc_sampler.root_param_idx != tables.samplers->root_param_idx;
					if (must_flush_table) {
						// Check the root signature data has been filled ordered.
						DEV_ASSERT(rs_loc_sampler.root_param_idx > tables.samplers->root_param_idx);
						(p_command_list->*set_root_desc_table_fn)(tables.samplers->root_param_idx, tables.samplers->start_gpu_handle);
						tables.samplers = nullptr;
					}
					if (unlikely(frame_heap_walkers.samplers->get_free_handles() < num_sampler_descs)) {
						if (!frames[frame].desc_heaps_exhausted_reported.samplers) {
							frames[frame].desc_heaps_exhausted_reported.samplers = true;
							ERR_FAIL_MSG("Cannot bind uniform set because there's not enough room in the current frame's SAMPLERS descriptor heap.\n"
									"Please increase the value of the rendering/rendering_device/d3d12/max_sampler_descriptors_per_frame project setting.");
						} else {
							return;
						}
					}
					if (!tables.samplers) {
						DEV_ASSERT(last_bind->root_tables.samplers.size() < last_bind->root_tables.samplers.get_capacity());
						last_bind->root_tables.samplers.resize(last_bind->root_tables.samplers.size() + 1);
						tables.samplers = &last_bind->root_tables.samplers[last_bind->root_tables.samplers.size() - 1];
						tables.samplers->root_param_idx = rs_loc_sampler.root_param_idx;
						tables.samplers->start_gpu_handle = frame_heap_walkers.samplers->get_curr_gpu_handle();
					}
					// TODO: Batch to avoid multiple calls where possible (in any case, flush before setting root descriptor tables, or even batch that as well).
					device->CopyDescriptorsSimple(
							num_sampler_descs,
							frame_heap_walkers.samplers->get_curr_cpu_handle(),
							set_heap_walkers.samplers.get_curr_cpu_handle(),
							D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
					frame_heap_walkers.samplers->advance(num_sampler_descs);
				}
			}
		}
		// Uniform set descriptor heaps are always full (descriptors are created for every uniform in them), even though
		// the shader variant a given set is created against may not need all of them, due to DXC optimizations.
		// Therefore, at this point we have to advance through the uniform set's descriptor heaps unconditionally.
		set_heap_walkers.resources.advance(num_resource_descs);
		if (srv_uav_ambiguity) {
			DEV_ASSERT(num_resource_descs);
			if (!resource_used) {
				set_heap_walkers.resources.advance(num_resource_descs); // Additional skip, since both SRVs and UAVs have to be bypassed.
			}
		}
		set_heap_walkers.samplers.advance(num_sampler_descs);
	}
	DEV_ASSERT(set_heap_walkers.resources.is_at_eof());
	DEV_ASSERT(set_heap_walkers.samplers.is_at_eof());
	{
		bool must_flush_table = tables.resources;
		if (must_flush_table) {
			(p_command_list->*set_root_desc_table_fn)(tables.resources->root_param_idx, tables.resources->start_gpu_handle);
		}
	}
	{
		bool must_flush_table = tables.samplers;
		if (must_flush_table) {
			(p_command_list->*set_root_desc_table_fn)(tables.samplers->root_param_idx, tables.samplers->start_gpu_handle);
		}
	}
	last_bind->root_signature_crc = root_sig_crc;
	last_bind->execution_index = frames[frame].execution_index;
}
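
// Batches the resource state transitions a uniform set requires: SRV/UAV ambiguity is
// resolved per binding, and once the stages actually using a resource are known, the
// unneeded PIXEL/NON_PIXEL shader resource state is dropped.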
void RenderingDeviceD3D12::_apply_uniform_set_resource_states(const UniformSet *p_uniform_set, const Shader::Set &p_shader_set) {
	for (const UniformSet::StateRequirement &sr : p_uniform_set->resource_states) {
#ifdef DEV_ENABLED
		{
			uint32_t stages = 0;
			D3D12_RESOURCE_STATES wanted_state = {};
			bool writable = false;
			// Doing the full loop for debugging since the real one below may break early,
			// but we want an exhaustive check
			uint64_t inv_uniforms_mask = ~sr.shader_uniform_idx_mask; // Inverting the mask saves operations.
			for (uint8_t bit = 0; inv_uniforms_mask != UINT64_MAX; bit++) {
				uint64_t bit_mask = ((uint64_t)1 << bit);
				if (likely((inv_uniforms_mask & bit_mask))) {
					continue;
				}
				inv_uniforms_mask |= bit_mask;
				const Shader::ShaderUniformInfo &info = p_shader_set.uniforms[bit];
				if (unlikely(!info.binding.stages)) {
					continue;
				}
				D3D12_RESOURCE_STATES required_states = sr.states;
				// Resolve a case of SRV/UAV ambiguity now. [[SRV_UAV_AMBIGUITY]]
				if ((required_states & D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE) && (required_states & D3D12_RESOURCE_STATE_UNORDERED_ACCESS)) {
					if (info.binding.res_class == RES_CLASS_SRV) {
						required_states &= ~D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
					} else {
						required_states = D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
					}
				}
				if (stages) { // Second occurrence at least?
					CRASH_COND_MSG(info.info.writable != writable, "A resource is used in the same uniform set both as R/O and R/W. That's not supported and shouldn't happen.");
					CRASH_COND_MSG(required_states != wanted_state, "A resource is used in the same uniform set with different resource states. The code needs to be enhanced to support that.");
				} else {
					wanted_state = required_states;
					stages |= info.binding.stages;
					writable = info.info.writable;
				}
				DEV_ASSERT((wanted_state == D3D12_RESOURCE_STATE_UNORDERED_ACCESS) == (bool)(wanted_state & D3D12_RESOURCE_STATE_UNORDERED_ACCESS));
				if (wanted_state == D3D12_RESOURCE_STATE_UNORDERED_ACCESS || wanted_state == D3D12_RESOURCE_STATE_RENDER_TARGET) {
					if (!sr.is_buffer) {
						Texture *texture = (Texture *)sr.resource;
						CRASH_COND_MSG(texture->resource != texture->owner_resource, "The texture format used for UAV or RTV must be the main one.");
					}
				}
			}
		}
#endif
		// We may have assumed D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE for a resource,
		// because at uniform set creation time we couldn't know for sure which stages
		// it would be used in (due to the fact that a set can be created against a different,
		// albeit compatible, shader, which may make a different usage in the end).
		// However, now we know and can exclude up to one unneeded state.
		// TODO: If the subresources involved are already in the needed state, or scheduled for it,
		// maybe it's more optimal not to do anything here.
		uint32_t stages = 0;
		D3D12_RESOURCE_STATES wanted_state = {};
		uint64_t inv_uniforms_mask = ~sr.shader_uniform_idx_mask; // Inverting the mask saves operations.
		for (uint8_t bit = 0; inv_uniforms_mask != UINT64_MAX; bit++) {
			uint64_t bit_mask = ((uint64_t)1 << bit);
			if (likely((inv_uniforms_mask & bit_mask))) {
				continue;
			}
			inv_uniforms_mask |= bit_mask;
			const Shader::ShaderUniformInfo &info = p_shader_set.uniforms[bit];
			if (unlikely(!info.binding.stages)) {
				continue;
			}
			if (!stages) {
				D3D12_RESOURCE_STATES required_states = sr.states;
				// Resolve a case of SRV/UAV ambiguity now. [[SRV_UAV_AMBIGUITY]]
				if ((required_states & D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE) && (required_states & D3D12_RESOURCE_STATE_UNORDERED_ACCESS)) {
					if (info.binding.res_class == RES_CLASS_SRV) {
						required_states &= ~D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
					} else {
						required_states = D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
					}
				}
				wanted_state = required_states;
				if (!(wanted_state & D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE)) {
					// By now, we already know the resource is used, with no PS/NON_PS disjunction; no need to check further.
					break;
				}
			}
			stages |= info.binding.stages;
			if (stages == (SHADER_STAGE_VERTEX_BIT | SHADER_STAGE_FRAGMENT_BIT) || stages == SHADER_STAGE_COMPUTE_BIT) {
				// By now, we already know the resource is used, and as both PS/NON_PS; no need to check further.
				break;
			}
		}
		if (likely(wanted_state)) {
			if ((wanted_state & D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE)) {
				if (stages == SHADER_STAGE_VERTEX_BIT || stages == SHADER_STAGE_COMPUTE_BIT) {
					D3D12_RESOURCE_STATES unneeded_states = D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE;
					wanted_state &= ~unneeded_states;
				} else if (stages == SHADER_STAGE_FRAGMENT_BIT) {
					D3D12_RESOURCE_STATES unneeded_states = D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
					wanted_state &= ~unneeded_states;
				}
			}
			if (likely(wanted_state)) {
				if (sr.is_buffer) {
					_resource_transition_batch(sr.resource, 0, 1, wanted_state);
				} else {
					Texture *texture = (Texture *)sr.resource;
					for (uint32_t i = 0; i < texture->layers; i++) {
						for (uint32_t j = 0; j < texture->mipmaps; j++) {
							uint32_t subresource = D3D12CalcSubresource(texture->base_mipmap + j, texture->base_layer + i, 0, texture->owner_mipmaps, texture->owner_layers);
							_resource_transition_batch(texture, subresource, texture->planes, wanted_state, texture->owner_resource);
						}
					}
				}
			}
		}
	}
}
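
// SPIR-V push constants are translated to D3D12 root constants bound at root parameter 0,
// hence the SetGraphicsRoot32BitConstants(0, ...) call below.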
void RenderingDeviceD3D12::draw_list_set_push_constant(DrawListID p_list, const void *p_data, uint32_t p_data_size) {
	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(p_data_size != dl->validation.pipeline_spirv_push_constant_size,
			"This render pipeline requires (" + itos(dl->validation.pipeline_spirv_push_constant_size) + ") bytes of push constant data, supplied: (" + itos(p_data_size) + ")");
#endif
	if (dl->state.pipeline_dxil_push_constant_size) {
		dl->command_list->SetGraphicsRoot32BitConstants(0, p_data_size / sizeof(uint32_t), p_data, 0);
	}
#ifdef DEBUG_ENABLED
	dl->validation.pipeline_push_constant_supplied = true;
#endif
}

void RenderingDeviceD3D12::draw_list_draw(DrawListID p_list, bool p_use_indices, uint32_t p_instances, uint32_t p_procedural_vertices) {
	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.pipeline_active,
			"No render pipeline was set before attempting to draw.");
	if (dl->validation.pipeline_vertex_format != INVALID_ID) {
		// Pipeline uses vertices, validate format.
		ERR_FAIL_COND_MSG(dl->validation.vertex_format == INVALID_ID,
				"No vertex array was bound, and render pipeline expects vertices.");
		// Make sure format is right.
		ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format != dl->validation.vertex_format,
				"The vertex format used to create the pipeline does not match the vertex format bound.");
		// Make sure number of instances is valid.
		ERR_FAIL_COND_MSG(p_instances > dl->validation.vertex_max_instances_allowed,
  6633. "Number of instances requested (" + itos(p_instances) + " is larger than the maximum number supported by the bound vertex array (" + itos(dl->validation.vertex_max_instances_allowed) + ").");
  6634. }
  6635. if (dl->validation.pipeline_spirv_push_constant_size) {
  6636. // Using push constants, check that they were supplied.
  6637. ERR_FAIL_COND_MSG(!dl->validation.pipeline_push_constant_supplied,
  6638. "The shader in this pipeline requires a push constant to be set before drawing, but it's not present.");
  6639. }
  6640. #endif
  6641. // Bind descriptor sets.
  6642. Shader *shader = shader_owner.get_or_null(dl->state.pipeline_shader);
  6643. struct SetToBind {
  6644. uint32_t set;
  6645. UniformSet *uniform_set;
  6646. const Shader::Set *shader_set;
  6647. };
  6648. SetToBind *sets_to_bind = (SetToBind *)alloca(sizeof(SetToBind) * dl->state.set_count);
  6649. uint32_t num_sets_to_bind = 0;
  6650. for (uint32_t i = 0; i < dl->state.set_count; i++) {
  6651. if (dl->state.sets[i].pipeline_expected_format == 0) {
  6652. continue; // Nothing expected by this pipeline.
  6653. }
  6654. #ifdef DEBUG_ENABLED
  6655. if (dl->state.sets[i].pipeline_expected_format != dl->state.sets[i].uniform_set_format) {
  6656. if (dl->state.sets[i].uniform_set_format == 0) {
  6657. ERR_FAIL_MSG("Uniforms were never supplied for set (" + itos(i) + ") at the time of drawing, which are required by the pipeline");
  6658. } else if (uniform_set_owner.owns(dl->state.sets[i].uniform_set)) {
  6659. UniformSet *us = uniform_set_owner.get_or_null(dl->state.sets[i].uniform_set);
  6660. ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(dl->state.pipeline_shader));
  6661. } else {
  6662. ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(dl->state.pipeline_shader));
  6663. }
  6664. }
  6665. #endif
  6666. UniformSet *uniform_set = uniform_set_owner.get_or_null(dl->state.sets[i].uniform_set);
  6667. const Shader::Set &shader_set = shader->sets[i];
  6668. _apply_uniform_set_resource_states(uniform_set, shader_set);
  6669. if (!dl->state.sets[i].bound) {
  6670. sets_to_bind[num_sets_to_bind].set = i;
  6671. sets_to_bind[num_sets_to_bind].uniform_set = uniform_set;
  6672. sets_to_bind[num_sets_to_bind].shader_set = &shader_set;
  6673. num_sets_to_bind++;
  6674. dl->state.sets[i].bound = true;
  6675. }
  6676. }
  6677. _resource_transitions_flush(dl->command_list);
  6678. for (uint32_t i = 0; i < num_sets_to_bind; i++) {
  6679. _bind_uniform_set(sets_to_bind[i].uniform_set, *sets_to_bind[i].shader_set, pipeline_bindings[dl->state.pipeline_bindings_id][sets_to_bind[i].set], dl->command_list, false);
  6680. }
  6681. if (dl->state.bound_pso != dl->state.pso) {
  6682. dl->command_list->SetPipelineState(dl->state.pso);
  6683. dl->state.bound_pso = dl->state.pso;
  6684. }
  6685. if (p_use_indices) {
  6686. #ifdef DEBUG_ENABLED
  6687. ERR_FAIL_COND_MSG(p_procedural_vertices > 0,
  6688. "Procedural vertices can't be used together with indices.");
  6689. ERR_FAIL_COND_MSG(!dl->validation.index_array_size,
  6690. "Draw command requested indices, but no index buffer was set.");
  6691. ERR_FAIL_COND_MSG(dl->validation.pipeline_uses_restart_indices != dl->validation.index_buffer_uses_restart_indices,
  6692. "The usage of restart indices in index buffer does not match the render primitive in the pipeline.");
  6693. #endif
  6694. uint32_t to_draw = dl->validation.index_array_size;
  6695. #ifdef DEBUG_ENABLED
  6696. ERR_FAIL_COND_MSG(to_draw < dl->validation.pipeline_primitive_minimum,
  6697. "Too few indices (" + itos(to_draw) + ") for the render primitive set in the render pipeline (" + itos(dl->validation.pipeline_primitive_minimum) + ").");
  6698. ERR_FAIL_COND_MSG((to_draw % dl->validation.pipeline_primitive_divisor) != 0,
  6699. "Index amount (" + itos(to_draw) + ") must be a multiple of the amount of indices required by the render primitive (" + itos(dl->validation.pipeline_primitive_divisor) + ").");
  6700. #endif
  6701. dl->command_list->DrawIndexedInstanced(to_draw, p_instances, dl->validation.index_array_offset, 0, 0);
  6702. } else {
  6703. uint32_t to_draw;
  6704. if (p_procedural_vertices > 0) {
  6705. #ifdef DEBUG_ENABLED
  6706. ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format != INVALID_ID,
  6707. "Procedural vertices requested, but pipeline expects a vertex array.");
  6708. #endif
  6709. to_draw = p_procedural_vertices;
  6710. } else {
  6711. #ifdef DEBUG_ENABLED
  6712. ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format == INVALID_ID,
  6713. "Draw command lacks indices, but pipeline format does not use vertices.");
  6714. #endif
  6715. to_draw = dl->validation.vertex_array_size;
  6716. }
  6717. #ifdef DEBUG_ENABLED
  6718. ERR_FAIL_COND_MSG(to_draw < dl->validation.pipeline_primitive_minimum,
  6719. "Too few vertices (" + itos(to_draw) + ") for the render primitive set in the render pipeline (" + itos(dl->validation.pipeline_primitive_minimum) + ").");
  6720. ERR_FAIL_COND_MSG((to_draw % dl->validation.pipeline_primitive_divisor) != 0,
  6721. "Vertex amount (" + itos(to_draw) + ") must be a multiple of the amount of vertices required by the render primitive (" + itos(dl->validation.pipeline_primitive_divisor) + ").");
  6722. #endif
  6723. dl->command_list->DrawInstanced(to_draw, p_instances, 0, 0);
  6724. }
  6725. }
  6726. void RenderingDeviceD3D12::draw_list_enable_scissor(DrawListID p_list, const Rect2 &p_rect) {
  6727. DrawList *dl = _get_draw_list_ptr(p_list);
  6728. ERR_FAIL_NULL(dl);
  6729. #ifdef DEBUG_ENABLED
  6730. ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
  6731. #endif
  6732. Rect2i rect = p_rect;
  6733. rect.position += dl->viewport.position;
  6734. rect = dl->viewport.intersection(rect);
  6735. if (rect.get_area() == 0) {
  6736. return;
  6737. }
  6738. CD3DX12_RECT scissor(
  6739. rect.position.x,
  6740. rect.position.y,
  6741. rect.position.x + rect.size.width,
  6742. rect.position.y + rect.size.height);
  6743. dl->command_list->RSSetScissorRects(1, &scissor);
  6744. }
  6745. void RenderingDeviceD3D12::draw_list_disable_scissor(DrawListID p_list) {
  6746. DrawList *dl = _get_draw_list_ptr(p_list);
  6747. ERR_FAIL_NULL(dl);
  6748. #ifdef DEBUG_ENABLED
  6749. ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
  6750. #endif
  6751. CD3DX12_RECT scissor(
  6752. dl->viewport.position.x,
  6753. dl->viewport.position.y,
  6754. dl->viewport.position.x + dl->viewport.size.width,
  6755. dl->viewport.position.y + dl->viewport.size.height);
  6756. dl->command_list->RSSetScissorRects(1, &scissor);
  6757. }
  6758. uint32_t RenderingDeviceD3D12::draw_list_get_current_pass() {
  6759. return draw_list_current_subpass;
  6760. }
  6761. void RenderingDeviceD3D12::_draw_list_subpass_begin() { // [[MANUAL_SUBPASSES]]
  6762. const FramebufferFormat &fb_format = framebuffer_formats[draw_list_framebuffer->format_id];
  6763. const FramebufferPass &pass = fb_format.passes[draw_list_current_subpass];
  6764. ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();
  6765. bool is_screen = draw_list_framebuffer->window_id != DisplayServer::INVALID_WINDOW_ID;
  6766. if (is_screen) {
  6767. DEV_ASSERT(!draw_list_framebuffer->dsv_heap.get_descriptor_count());
  6768. command_list->OMSetRenderTargets(1, &draw_list_framebuffer->screen_rtv_handle, true, nullptr);
  6769. } else {
  6770. D3D12_CPU_DESCRIPTOR_HANDLE *rtv_handles = (D3D12_CPU_DESCRIPTOR_HANDLE *)alloca(sizeof(D3D12_CPU_DESCRIPTOR_HANDLE) * pass.color_attachments.size());
  6771. DescriptorsHeap::Walker rtv_heap_walker = draw_list_framebuffer->rtv_heap.make_walker();
  6772. for (int i = 0; i < pass.color_attachments.size(); i++) {
  6773. uint32_t attachment = pass.color_attachments[i];
  6774. if (attachment == FramebufferPass::ATTACHMENT_UNUSED) {
  6775. if (!frames[frame].null_rtv_handle.ptr) {
  6776. // No null descriptor-handle created for this frame yet.
  6777. if (frames[frame].desc_heap_walkers.rtv.is_at_eof()) {
  6778. if (!frames[frame].desc_heaps_exhausted_reported.rtv) {
  6779. frames[frame].desc_heaps_exhausted_reported.rtv = true;
  6780. ERR_FAIL_MSG("Cannot begin subpass because there's no enough room in current frame's RENDER TARGET descriptors heap.\n"
  6781. "Please increase the value of the rendering/rendering_device/d3d12/max_misc_descriptors_per_frame project setting.");
  6782. } else {
  6783. return;
  6784. }
  6785. }
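					// An unused attachment slot still needs a valid descriptor; create a one-off null RTV (writes to it are discarded).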
					D3D12_RENDER_TARGET_VIEW_DESC rtv_desc_null = {};
					rtv_desc_null.Format = DXGI_FORMAT_R8_UINT;
					rtv_desc_null.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2D;
					frames[frame].null_rtv_handle = frames[frame].desc_heap_walkers.rtv.get_curr_cpu_handle();
					device->CreateRenderTargetView(nullptr, &rtv_desc_null, frames[frame].null_rtv_handle);
					frames[frame].desc_heap_walkers.rtv.advance();
				}
				rtv_handles[i] = frames[frame].null_rtv_handle;
			} else {
				uint32_t rt_index = draw_list_framebuffer->attachments_handle_inds[attachment];
				rtv_heap_walker.rewind();
				rtv_heap_walker.advance(rt_index);
				rtv_handles[i] = rtv_heap_walker.get_curr_cpu_handle();
			}
		}
		D3D12_CPU_DESCRIPTOR_HANDLE dsv_handle = {};
		{
			DescriptorsHeap::Walker dsv_heap_walker = draw_list_framebuffer->dsv_heap.make_walker();
			if (pass.depth_attachment != FramebufferPass::ATTACHMENT_UNUSED) {
				uint32_t ds_index = draw_list_framebuffer->attachments_handle_inds[pass.depth_attachment];
				dsv_heap_walker.rewind();
				dsv_heap_walker.advance(ds_index);
				dsv_handle = dsv_heap_walker.get_curr_cpu_handle();
			}
		}
		command_list->OMSetRenderTargets(pass.color_attachments.size(), rtv_handles, false, dsv_handle.ptr ? &dsv_handle : nullptr);
		// [[VRS_EVERY_SUBPASS_OR_NONE]]
		if (context->get_vrs_capabilities().ss_image_supported && draw_list_current_subpass == 0) {
			if (execution_index != vrs_state_execution_index) {
				vrs_state = {};
			}
			Texture *vrs_texture = nullptr;
			RID vrs_texture_id;
			if (pass.vrs_attachment != FramebufferPass::ATTACHMENT_UNUSED) {
				vrs_texture_id = draw_list_framebuffer->texture_ids[pass.vrs_attachment];
				vrs_texture = texture_owner.get_or_null(vrs_texture_id);
				if (!vrs_texture) {
					vrs_texture_id = RID();
				}
			}
			if (vrs_texture_id != vrs_state.texture_bound) {
				ID3D12GraphicsCommandList5 *command_list_5 = nullptr;
				command_list->QueryInterface<ID3D12GraphicsCommandList5>(&command_list_5);
				DEV_ASSERT(command_list_5);
				if (vrs_texture_id.is_valid()) {
					if (!vrs_state.configured) {
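						// PASSTHROUGH keeps the base 1x1 rate through the per-primitive combiner; OVERRIDE lets the shading rate image take precedence.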
						static const D3D12_SHADING_RATE_COMBINER combiners[D3D12_RS_SET_SHADING_RATE_COMBINER_COUNT] = {
							D3D12_SHADING_RATE_COMBINER_PASSTHROUGH,
							D3D12_SHADING_RATE_COMBINER_OVERRIDE,
						};
						command_list_5->RSSetShadingRate(D3D12_SHADING_RATE_1X1, combiners);
						vrs_state.configured = true;
					}
					// Bind the image outside the one-time setup; otherwise a change of VRS texture after the first configuration would never be applied.
					command_list_5->RSSetShadingRateImage(vrs_texture->resource);
					vrs_state.texture_bound = vrs_texture_id;
				} else {
					command_list_5->RSSetShadingRateImage(nullptr);
					vrs_state.texture_bound = RID();
				}
				command_list_5->Release();
			}
			vrs_state_execution_index = execution_index;
		}
	}
}

void RenderingDeviceD3D12::_draw_list_subpass_end() { // [[MANUAL_SUBPASSES]]
	const FramebufferFormat &fb_format = framebuffer_formats[draw_list_framebuffer->format_id];
	const FramebufferPass &pass = fb_format.passes[draw_list_current_subpass];
	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();
	struct Resolve {
		ID3D12Resource *src_res;
		uint32_t src_subres;
		ID3D12Resource *dst_res;
		uint32_t dst_subres;
		DXGI_FORMAT format;
	};
	Resolve *resolves = (Resolve *)alloca(sizeof(Resolve) * pass.resolve_attachments.size());
	uint32_t num_resolves = 0;
	for (int i = 0; i < pass.resolve_attachments.size(); i++) {
		int32_t color_index = pass.color_attachments[i];
		int32_t resolve_index = pass.resolve_attachments[i];
		DEV_ASSERT((color_index == FramebufferPass::ATTACHMENT_UNUSED) == (resolve_index == FramebufferPass::ATTACHMENT_UNUSED));
		if (color_index == FramebufferPass::ATTACHMENT_UNUSED || draw_list_framebuffer->texture_ids[color_index].is_null()) {
			continue;
		}
		Texture *src_tex = texture_owner.get_or_null(draw_list_framebuffer->texture_ids[color_index]);
		uint32_t src_subresource = D3D12CalcSubresource(src_tex->base_mipmap, src_tex->base_layer, 0, src_tex->owner_mipmaps, src_tex->owner_layers);
		_resource_transition_batch(src_tex, src_subresource, src_tex->planes, D3D12_RESOURCE_STATE_RESOLVE_SOURCE);
		Texture *dst_tex = texture_owner.get_or_null(draw_list_framebuffer->texture_ids[resolve_index]);
		uint32_t dst_subresource = D3D12CalcSubresource(dst_tex->base_mipmap, dst_tex->base_layer, 0, dst_tex->owner_mipmaps, dst_tex->owner_layers);
		_resource_transition_batch(dst_tex, dst_subresource, dst_tex->planes, D3D12_RESOURCE_STATE_RESOLVE_DEST);
		resolves[num_resolves].src_res = src_tex->resource;
		resolves[num_resolves].src_subres = src_subresource;
		resolves[num_resolves].dst_res = dst_tex->resource;
		resolves[num_resolves].dst_subres = dst_subresource;
		resolves[num_resolves].format = d3d12_formats[src_tex->format].general_format;
		num_resolves++;
	}
	_resource_transitions_flush(command_list);
	for (uint32_t i = 0; i < num_resolves; i++) {
		command_list->ResolveSubresource(resolves[i].dst_res, resolves[i].dst_subres, resolves[i].src_res, resolves[i].src_subres, resolves[i].format);
	}
}

RenderingDevice::DrawListID RenderingDeviceD3D12::draw_list_switch_to_next_pass() {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_V(draw_list == nullptr, INVALID_ID);
	ERR_FAIL_COND_V(draw_list_current_subpass >= draw_list_subpass_count - 1, INVALID_ID);
	_draw_list_subpass_end();
	draw_list_current_subpass++;
	_draw_list_subpass_begin();
	Rect2i viewport;
	_draw_list_free(&viewport);
	_draw_list_allocate(viewport, 0, draw_list_current_subpass);
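	// The returned ID encodes the list type in its upper bits (see ID_BASE_SHIFT); split lists additionally add the split index.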
	return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}

Error RenderingDeviceD3D12::draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids) {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_V(draw_list == nullptr, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(draw_list_current_subpass >= draw_list_subpass_count - 1, ERR_INVALID_PARAMETER);
	_draw_list_subpass_end();
	draw_list_current_subpass++;
	_draw_list_subpass_begin();
	Rect2i viewport;
	_draw_list_free(&viewport);
	_draw_list_allocate(viewport, p_splits, draw_list_current_subpass);
	for (uint32_t i = 0; i < p_splits; i++) {
		r_split_ids[i] = (int64_t(ID_TYPE_SPLIT_DRAW_LIST) << ID_BASE_SHIFT) + i;
	}
	return OK;
}

Error RenderingDeviceD3D12::_draw_list_allocate(const Rect2i &p_viewport, uint32_t p_splits, uint32_t p_subpass) {
	if (p_splits == 0) {
		draw_list = memnew(DrawList);
		draw_list->command_list = frames[frame].draw_command_list.Get();
		draw_list->viewport = p_viewport;
		draw_list_count = 0;
		draw_list_split = false;
	} else {
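		// Split draw lists are recorded as bundles and later replayed into the frame's main command list via ExecuteBundle() (see _draw_list_free()).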
		if (p_splits > (uint32_t)split_draw_list_allocators.size()) {
			uint32_t from = split_draw_list_allocators.size();
			split_draw_list_allocators.resize(p_splits);
			for (uint32_t i = from; i < p_splits; i++) {
				HRESULT res = device->CreateCommandAllocator(D3D12_COMMAND_LIST_TYPE_BUNDLE, IID_PPV_ARGS(&split_draw_list_allocators.write[i].command_allocator));
				ERR_FAIL_COND_V_MSG(res, ERR_CANT_CREATE, "CreateCommandAllocator failed with error " + vformat("0x%08ux", res) + ".");
				for (int j = 0; j < frame_count; j++) {
					ID3D12GraphicsCommandList *command_list = nullptr;
					res = device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_BUNDLE, split_draw_list_allocators[i].command_allocator, nullptr, IID_PPV_ARGS(&command_list));
					ERR_FAIL_COND_V_MSG(res, ERR_CANT_CREATE, "CreateCommandList failed with error " + vformat("0x%08ux", res) + ".");
					split_draw_list_allocators.write[i].command_lists.push_back(command_list);
				}
			}
		}
		draw_list = memnew_arr(DrawList, p_splits);
		draw_list_count = p_splits;
		draw_list_split = true;
		for (uint32_t i = 0; i < p_splits; i++) {
			ID3D12GraphicsCommandList *command_list = split_draw_list_allocators[i].command_lists[frame];
			HRESULT res = split_draw_list_allocators[i].command_allocator->Reset();
			ERR_FAIL_COND_V_MSG(res, ERR_CANT_CREATE, "Command allocator Reset failed with error " + vformat("0x%08ux", res) + ".");
			res = command_list->Reset(split_draw_list_allocators[i].command_allocator, nullptr);
			if (res) {
				memdelete_arr(draw_list);
				draw_list = nullptr;
				ERR_FAIL_V_MSG(ERR_CANT_CREATE, "Command list Reset failed with error " + vformat("0x%08ux", res) + ".");
			}
			draw_list[i].command_list = command_list;
			draw_list[i].viewport = p_viewport;
		}
	}
	return OK;
}

void RenderingDeviceD3D12::_draw_list_free(Rect2i *r_last_viewport) {
	if (draw_list_split) {
		// Send all command buffers.
		for (uint32_t i = 0; i < draw_list_count; i++) {
			draw_list[i].command_list->Close();
			frames[frame].draw_command_list->ExecuteBundle(draw_list[i].command_list);
			if (r_last_viewport) {
				if (i == 0 || draw_list[i].viewport_set) {
					*r_last_viewport = draw_list[i].viewport;
				}
			}
		}
		memdelete_arr(draw_list);
		draw_list = nullptr;
	} else {
		if (r_last_viewport) {
			*r_last_viewport = draw_list->viewport;
		}
		// Just end the list.
		memdelete(draw_list);
		draw_list = nullptr;
	}
	draw_list_count = 0;
}

void RenderingDeviceD3D12::draw_list_end(BitField<BarrierMask> p_post_barrier) {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_MSG(!draw_list, "Immediate draw list is already inactive.");
	_draw_list_subpass_end();
	const FramebufferFormat &fb_format = framebuffer_formats[draw_list_framebuffer->format_id];
	bool is_screen = draw_list_framebuffer->window_id != DisplayServer::INVALID_WINDOW_ID;
	ID3D12GraphicsCommandList *command_list = frames[frame].draw_command_list.Get();
	for (int i = 0; i < fb_format.attachments.size(); i++) {
		Texture *texture = nullptr;
		if (!is_screen) {
			texture = texture_owner.get_or_null(draw_list_framebuffer->texture_ids[i]);
		}
		if ((fb_format.attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
			switch (draw_list_final_color_action) {
				case FINAL_ACTION_READ: {
					// Nothing to do now.
				} break;
				case FINAL_ACTION_DISCARD: {
					ID3D12Resource *resource = is_screen ? context->window_get_framebuffer_texture(draw_list_framebuffer->window_id) : texture->resource;
					command_list->DiscardResource(resource, nullptr);
				} break;
				case FINAL_ACTION_CONTINUE: {
					ERR_FAIL_COND(draw_list_unbind_color_textures); // Bug!
				} break;
			}
		} else if ((fb_format.attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
			ERR_FAIL_COND(is_screen); // Bug!
			switch (draw_list_final_depth_action) {
				case FINAL_ACTION_READ: {
					// Nothing to do now.
				} break;
				case FINAL_ACTION_DISCARD: {
					ID3D12Resource *resource = is_screen ? context->window_get_framebuffer_texture(draw_list_framebuffer->window_id) : texture->resource;
					command_list->DiscardResource(resource, nullptr);
				} break;
				case FINAL_ACTION_CONTINUE: {
					ERR_FAIL_COND(draw_list_unbind_depth_textures); // Bug!
				} break;
			}
		}
	}
	draw_list_subpass_count = 0;
	draw_list_current_subpass = 0;
	draw_list_framebuffer = nullptr;
	_draw_list_free();
	for (int i = 0; i < draw_list_bound_textures.size(); i++) {
		Texture *texture = texture_owner.get_or_null(draw_list_bound_textures[i]);
		ERR_CONTINUE(!texture); // Wtf.
		if (draw_list_unbind_color_textures && (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
			texture->bound = false;
		}
		if (draw_list_unbind_depth_textures && (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
			texture->bound = false;
		}
	}
	draw_list_bound_textures.clear();
}

/***********************/
/**** COMPUTE LISTS ****/
/***********************/

RenderingDevice::ComputeListID RenderingDeviceD3D12::compute_list_begin(bool p_allow_draw_overlap) {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_V_MSG(!p_allow_draw_overlap && draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");
	ERR_FAIL_COND_V_MSG(compute_list != nullptr, INVALID_ID, "Only one draw/compute list can be active at the same time.");
	compute_list = memnew(ComputeList);
	compute_list->command_list = frames[frame].draw_command_list.Get();
	compute_list->state.allow_draw_overlap = p_allow_draw_overlap;
	return ID_TYPE_COMPUTE_LIST;
}

void RenderingDeviceD3D12::compute_list_bind_compute_pipeline(ComputeListID p_list, RID p_compute_pipeline) {
	// Must be called within a compute list; the class mutex is locked during that time.
	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);
	ComputeList *cl = compute_list;
	const ComputePipeline *pipeline = compute_pipeline_owner.get_or_null(p_compute_pipeline);
	ERR_FAIL_NULL(pipeline);
	if (p_compute_pipeline == cl->state.pipeline) {
		return; // Redundant state, return.
	}
	cl->state.pipeline = p_compute_pipeline;
	cl->state.pso = pipeline->pso.Get();
	Shader *shader = shader_owner.get_or_null(pipeline->shader);
	if (cl->state.pipeline_shader != pipeline->shader) {
		if (cl->state.root_signature_crc != pipeline->root_signature_crc) {
			cl->command_list->SetComputeRootSignature(shader->root_signature.Get());
			cl->state.root_signature_crc = pipeline->root_signature_crc;
			// Root signature changed, so current descriptor set bindings become invalid.
			for (uint32_t i = 0; i < cl->state.set_count; i++) {
				cl->state.sets[i].bound = false;
			}
		}
		const uint32_t *pformats = pipeline->set_formats.ptr(); // Pipeline set formats.
		cl->state.set_count = pipeline->set_formats.size(); // Update set count.
		for (uint32_t i = 0; i < cl->state.set_count; i++) {
			cl->state.sets[i].pipeline_expected_format = pformats[i];
#ifdef DEV_ENABLED
			cl->state.sets[i]._pipeline_expected_format = pformats[i] ? &uniform_set_format_cache_reverse[pformats[i] - 1]->key().uniform_info : nullptr;
#endif
		}
		if (pipeline->spirv_push_constant_size) {
#ifdef DEBUG_ENABLED
			cl->validation.pipeline_push_constant_supplied = false;
#endif
		}
		cl->state.pipeline_shader = pipeline->shader;
		cl->state.pipeline_dxil_push_constant_size = pipeline->dxil_push_constant_size;
		cl->state.pipeline_bindings_id = pipeline->bindings_id;
		cl->state.local_group_size[0] = pipeline->local_group_size[0];
		cl->state.local_group_size[1] = pipeline->local_group_size[1];
		cl->state.local_group_size[2] = pipeline->local_group_size[2];
#ifdef DEV_ENABLED
		cl->state._shader = shader;
#endif
	}
#ifdef DEBUG_ENABLED
	// Update compute pass pipeline info.
	cl->validation.pipeline_active = true;
	cl->validation.pipeline_spirv_push_constant_size = pipeline->spirv_push_constant_size;
#endif
}

void RenderingDeviceD3D12::compute_list_bind_uniform_set(ComputeListID p_list, RID p_uniform_set, uint32_t p_index) {
	// Must be called within a compute list; the class mutex is locked during that time.
	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);
	ComputeList *cl = compute_list;
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.active, "Submitted Compute Lists can no longer be modified.");
#endif
	UniformSet *uniform_set = uniform_set_owner.get_or_null(p_uniform_set);
	ERR_FAIL_NULL(uniform_set);
	if (p_index > cl->state.set_count) {
		cl->state.set_count = p_index;
	}
	cl->state.sets[p_index].bound = false; // Needs rebind.
	cl->state.sets[p_index].uniform_set_format = uniform_set->format;
	cl->state.sets[p_index].uniform_set = p_uniform_set;
#ifdef DEV_ENABLED
	cl->state.sets[p_index]._uniform_set = uniform_set_owner.get_or_null(p_uniform_set);
#endif
}

void RenderingDeviceD3D12::compute_list_set_push_constant(ComputeListID p_list, const void *p_data, uint32_t p_data_size) {
	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);
	ComputeList *cl = compute_list;
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.active, "Submitted Compute Lists can no longer be modified.");
#endif
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(p_data_size != cl->validation.pipeline_spirv_push_constant_size,
			"This compute pipeline requires (" + itos(cl->validation.pipeline_spirv_push_constant_size) + ") bytes of push constant data, supplied: (" + itos(p_data_size) + ")");
#endif
	if (cl->state.pipeline_dxil_push_constant_size) {
		cl->command_list->SetComputeRoot32BitConstants(0, p_data_size / sizeof(uint32_t), p_data, 0);
	}
#ifdef DEBUG_ENABLED
	cl->validation.pipeline_push_constant_supplied = true;
#endif
}

void RenderingDeviceD3D12::compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
	// Must be called within a compute list; the class mutex is locked during that time.
	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);
	ComputeList *cl = compute_list;
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(p_x_groups == 0, "Dispatch amount of X compute groups (" + itos(p_x_groups) + ") is zero.");
	ERR_FAIL_COND_MSG(p_y_groups == 0, "Dispatch amount of Y compute groups (" + itos(p_y_groups) + ") is zero.");
	ERR_FAIL_COND_MSG(p_z_groups == 0, "Dispatch amount of Z compute groups (" + itos(p_z_groups) + ") is zero.");
	ERR_FAIL_COND_MSG(p_x_groups > D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION,
			"Dispatch amount of X compute groups (" + itos(p_x_groups) + ") is larger than device limit (" + itos(D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION) + ")");
	ERR_FAIL_COND_MSG(p_y_groups > D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION,
			"Dispatch amount of Y compute groups (" + itos(p_y_groups) + ") is larger than device limit (" + itos(D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION) + ")");
	ERR_FAIL_COND_MSG(p_z_groups > D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION,
			"Dispatch amount of Z compute groups (" + itos(p_z_groups) + ") is larger than device limit (" + itos(D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION) + ")");
	ERR_FAIL_COND_MSG(!cl->validation.active, "Submitted Compute Lists can no longer be modified.");
#endif
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.pipeline_active, "No compute pipeline was set before attempting to dispatch.");
	if (cl->validation.pipeline_spirv_push_constant_size) {
		// Using push constants, check that they were supplied.
		ERR_FAIL_COND_MSG(!cl->validation.pipeline_push_constant_supplied,
				"The shader in this pipeline requires a push constant to be set before dispatching, but it's not present.");
	}
#endif
	// Bind descriptor sets.
	Shader *shader = shader_owner.get_or_null(cl->state.pipeline_shader);
	struct SetToBind {
		uint32_t set;
		UniformSet *uniform_set;
		const Shader::Set *shader_set;
	};
	SetToBind *sets_to_bind = (SetToBind *)alloca(sizeof(SetToBind) * cl->state.set_count);
	uint32_t num_sets_to_bind = 0;
	for (uint32_t i = 0; i < cl->state.set_count; i++) {
		if (cl->state.sets[i].pipeline_expected_format == 0) {
			continue; // Nothing expected by this pipeline.
		}
#ifdef DEBUG_ENABLED
		if (cl->state.sets[i].pipeline_expected_format != cl->state.sets[i].uniform_set_format) {
			if (cl->state.sets[i].uniform_set_format == 0) {
				ERR_FAIL_MSG("Uniforms were never supplied for set (" + itos(i) + ") at the time of dispatching, which are required by the pipeline.");
			} else if (uniform_set_owner.owns(cl->state.sets[i].uniform_set)) {
				UniformSet *us = uniform_set_owner.get_or_null(cl->state.sets[i].uniform_set);
				ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(cl->state.pipeline_shader));
			} else {
				ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(cl->state.pipeline_shader));
			}
		}
#endif
		UniformSet *uniform_set = uniform_set_owner.get_or_null(cl->state.sets[i].uniform_set);
		const Shader::Set &shader_set = shader->sets[i];
		_apply_uniform_set_resource_states(uniform_set, shader_set);
		if (!cl->state.sets[i].bound) {
			sets_to_bind[num_sets_to_bind].set = i;
			sets_to_bind[num_sets_to_bind].uniform_set = uniform_set;
			sets_to_bind[num_sets_to_bind].shader_set = &shader_set;
			num_sets_to_bind++;
			cl->state.sets[i].bound = true;
		}
	}
	_resource_transitions_flush(cl->command_list);
	for (uint32_t i = 0; i < num_sets_to_bind; i++) {
		_bind_uniform_set(sets_to_bind[i].uniform_set, *sets_to_bind[i].shader_set, pipeline_bindings[cl->state.pipeline_bindings_id][sets_to_bind[i].set], cl->command_list, true);
	}
	if (cl->state.bound_pso != cl->state.pso) {
		cl->command_list->SetPipelineState(cl->state.pso);
		cl->state.bound_pso = cl->state.pso;
	}
	cl->command_list->Dispatch(p_x_groups, p_y_groups, p_z_groups);
}

void RenderingDeviceD3D12::compute_list_dispatch_threads(ComputeListID p_list, uint32_t p_x_threads, uint32_t p_y_threads, uint32_t p_z_threads) {
	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(p_x_threads == 0, "Dispatch amount of X compute threads (" + itos(p_x_threads) + ") is zero.");
	ERR_FAIL_COND_MSG(p_y_threads == 0, "Dispatch amount of Y compute threads (" + itos(p_y_threads) + ") is zero.");
	ERR_FAIL_COND_MSG(p_z_threads == 0, "Dispatch amount of Z compute threads (" + itos(p_z_threads) + ") is zero.");
#endif
	ComputeList *cl = compute_list;
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.pipeline_active, "No compute pipeline was set before attempting to dispatch.");
	if (cl->validation.pipeline_spirv_push_constant_size) {
		// Using push constants, check that they were supplied.
		ERR_FAIL_COND_MSG(!cl->validation.pipeline_push_constant_supplied,
				"The shader in this pipeline requires a push constant to be set before dispatching, but it's not present.");
	}
#endif
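	// Convert thread counts to group counts, rounding up to whole local groups (ceiling division).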
	compute_list_dispatch(p_list, (p_x_threads - 1) / cl->state.local_group_size[0] + 1, (p_y_threads - 1) / cl->state.local_group_size[1] + 1, (p_z_threads - 1) / cl->state.local_group_size[2] + 1);
}

void RenderingDeviceD3D12::compute_list_dispatch_indirect(ComputeListID p_list, RID p_buffer, uint32_t p_offset) {
	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);
	ComputeList *cl = compute_list;
	Buffer *buffer = storage_buffer_owner.get_or_null(p_buffer);
	ERR_FAIL_NULL(buffer);
	ERR_FAIL_COND_MSG(!(buffer->usage & D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT), "Buffer provided was not created to do indirect dispatch.");
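	// An indirect dispatch reads 12 bytes at p_offset: three consecutive uint32 thread-group counts (D3D12_DISPATCH_ARGUMENTS).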
	ERR_FAIL_COND_MSG(p_offset + 12 > buffer->size, "Offset provided (+12) is past the end of buffer.");
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.pipeline_active, "No compute pipeline was set before attempting to dispatch.");
	if (cl->validation.pipeline_spirv_push_constant_size) {
		// Using push constants, check that they were supplied.
		ERR_FAIL_COND_MSG(!cl->validation.pipeline_push_constant_supplied,
				"The shader in this pipeline requires a push constant to be set before dispatching, but it's not present.");
	}
#endif
	// Bind descriptor sets.
	Shader *shader = shader_owner.get_or_null(cl->state.pipeline_shader);
	struct SetToBind {
		uint32_t set;
		UniformSet *uniform_set;
		const Shader::Set *shader_set;
	};
	SetToBind *sets_to_bind = (SetToBind *)alloca(sizeof(SetToBind) * cl->state.set_count);
	uint32_t num_sets_to_bind = 0;
	for (uint32_t i = 0; i < cl->state.set_count; i++) {
		if (cl->state.sets[i].pipeline_expected_format == 0) {
			continue; // Nothing expected by this pipeline.
		}
#ifdef DEBUG_ENABLED
		if (cl->state.sets[i].pipeline_expected_format != cl->state.sets[i].uniform_set_format) {
			if (cl->state.sets[i].uniform_set_format == 0) {
				ERR_FAIL_MSG("Uniforms were never supplied for set (" + itos(i) + ") at the time of dispatching, which are required by the pipeline.");
			} else if (uniform_set_owner.owns(cl->state.sets[i].uniform_set)) {
				UniformSet *us = uniform_set_owner.get_or_null(cl->state.sets[i].uniform_set);
				ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(cl->state.pipeline_shader));
			} else {
				ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(cl->state.pipeline_shader));
			}
		}
#endif
		UniformSet *uniform_set = uniform_set_owner.get_or_null(cl->state.sets[i].uniform_set);
		const Shader::Set &shader_set = shader->sets[i];
		_apply_uniform_set_resource_states(uniform_set, shader_set);
		if (!cl->state.sets[i].bound) {
			sets_to_bind[num_sets_to_bind].set = i;
			sets_to_bind[num_sets_to_bind].uniform_set = uniform_set;
			sets_to_bind[num_sets_to_bind].shader_set = &shader_set;
			num_sets_to_bind++;
			cl->state.sets[i].bound = true;
		}
	}
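	// The indirect arguments buffer itself must be readable by the command processor, so transition it to INDIRECT_ARGUMENT as well.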
	_resource_transition_batch(buffer, 0, 1, D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT);
	_resource_transitions_flush(cl->command_list);
	for (uint32_t i = 0; i < num_sets_to_bind; i++) {
		_bind_uniform_set(sets_to_bind[i].uniform_set, *sets_to_bind[i].shader_set, pipeline_bindings[cl->state.pipeline_bindings_id][sets_to_bind[i].set], cl->command_list, true);
	}
	if (cl->state.bound_pso != cl->state.pso) {
		cl->command_list->SetPipelineState(cl->state.pso);
		cl->state.bound_pso = cl->state.pso;
	}
	cl->command_list->ExecuteIndirect(indirect_dispatch_cmd_sig.Get(), 1, buffer->resource, p_offset, nullptr, 0);
}

void RenderingDeviceD3D12::compute_list_add_barrier(ComputeListID p_list) {
	// Must be called within a compute list; the class mutex is locked during that time.
#ifdef FORCE_FULL_BARRIER
	full_barrier();
#else
	// Due to D3D12 resource-wise barriers, this is a no-op.
#endif
}

void RenderingDeviceD3D12::compute_list_end(BitField<BarrierMask> p_post_barrier) {
	ERR_FAIL_NULL(compute_list);
#ifdef FORCE_FULL_BARRIER
	full_barrier();
#endif
	memdelete(compute_list);
	compute_list = nullptr;
}

void RenderingDeviceD3D12::barrier(BitField<BarrierMask> p_from, BitField<BarrierMask> p_to) {
	// Due to D3D12 resource-wise barriers, this is a no-op.
}

void RenderingDeviceD3D12::full_barrier() {
#ifndef DEBUG_ENABLED
	ERR_PRINT("Full barrier is debug-only, should not be used in production.");
#endif
	// In the resource barriers world, we can force a full barrier by discarding some resource, as per
	// https://microsoft.github.io/DirectX-Specs/d3d/D3D12EnhancedBarriers.html#synchronous-copy-discard-and-resolve.
	frames[frame].draw_command_list->DiscardResource(texture_owner.get_or_null(aux_resource)->resource, nullptr);
}

void RenderingDeviceD3D12::_free_internal(RID p_id) {
#ifdef DEV_ENABLED
	String resource_name;
	if (resource_names.has(p_id)) {
		resource_name = resource_names[p_id];
		resource_names.erase(p_id);
	}
#endif
	// Push everything so it's disposed of the next time this frame index is processed (which means it's safe to do so).
	if (texture_owner.owns(p_id)) {
		Texture *texture = texture_owner.get_or_null(p_id);
		frames[frame].textures_to_dispose_of.push_back(*texture);
		texture_owner.free(p_id);
	} else if (framebuffer_owner.owns(p_id)) {
		Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_id);
		frames[frame].framebuffers_to_dispose_of.push_back(*framebuffer);
		if (framebuffer->invalidated_callback != nullptr) {
			framebuffer->invalidated_callback(framebuffer->invalidated_callback_userdata);
		}
		framebuffer_owner.free(p_id);
	} else if (sampler_owner.owns(p_id)) {
		sampler_owner.free(p_id);
	} else if (vertex_buffer_owner.owns(p_id)) {
		Buffer *vertex_buffer = vertex_buffer_owner.get_or_null(p_id);
		frames[frame].buffers_to_dispose_of.push_back(*vertex_buffer);
		vertex_buffer_owner.free(p_id);
	} else if (vertex_array_owner.owns(p_id)) {
		vertex_array_owner.free(p_id);
	} else if (index_buffer_owner.owns(p_id)) {
		IndexBuffer *index_buffer = index_buffer_owner.get_or_null(p_id);
		frames[frame].buffers_to_dispose_of.push_back(*index_buffer);
		index_buffer_owner.free(p_id);
	} else if (index_array_owner.owns(p_id)) {
		index_array_owner.free(p_id);
	} else if (shader_owner.owns(p_id)) {
		Shader *shader = shader_owner.get_or_null(p_id);
		frames[frame].shaders_to_dispose_of.push_back(*shader);
		shader_owner.free(p_id);
	} else if (uniform_buffer_owner.owns(p_id)) {
		Buffer *uniform_buffer = uniform_buffer_owner.get_or_null(p_id);
		frames[frame].buffers_to_dispose_of.push_back(*uniform_buffer);
		uniform_buffer_owner.free(p_id);
	} else if (texture_buffer_owner.owns(p_id)) {
		TextureBuffer *texture_buffer = texture_buffer_owner.get_or_null(p_id);
		frames[frame].buffers_to_dispose_of.push_back(texture_buffer->buffer);
		texture_buffer_owner.free(p_id);
	} else if (storage_buffer_owner.owns(p_id)) {
		Buffer *storage_buffer = storage_buffer_owner.get_or_null(p_id);
		frames[frame].buffers_to_dispose_of.push_back(*storage_buffer);
		storage_buffer_owner.free(p_id);
	} else if (uniform_set_owner.owns(p_id)) {
		UniformSet *uniform_set = uniform_set_owner.get_or_null(p_id);
		uniform_set_owner.free(p_id);
		if (uniform_set->invalidated_callback != nullptr) {
			uniform_set->invalidated_callback(uniform_set->invalidated_callback_userdata);
		}
	} else if (render_pipeline_owner.owns(p_id)) {
		RenderPipeline *pipeline = render_pipeline_owner.get_or_null(p_id);
		frames[frame].render_pipelines_to_dispose_of.push_back(*pipeline);
		render_pipeline_owner.free(p_id);
	} else if (compute_pipeline_owner.owns(p_id)) {
		ComputePipeline *pipeline = compute_pipeline_owner.get_or_null(p_id);
		frames[frame].compute_pipelines_to_dispose_of.push_back(*pipeline);
		compute_pipeline_owner.free(p_id);
	} else {
#ifdef DEV_ENABLED
		ERR_PRINT("Attempted to free invalid ID: " + itos(p_id.get_id()) + " " + resource_name);
#else
		ERR_PRINT("Attempted to free invalid ID: " + itos(p_id.get_id()));
#endif
	}
}

void RenderingDeviceD3D12::free(RID p_id) {
	_THREAD_SAFE_METHOD_
	_free_dependencies(p_id); // Recursively erase dependencies first, to avoid potential API problems.
	_free_internal(p_id);
}

void RenderingDeviceD3D12::set_resource_name(RID p_id, const String p_name) {
	if (texture_owner.owns(p_id)) {
		Texture *texture = texture_owner.get_or_null(p_id);
		// Don't set the source texture's name when calling on a texture view.
		if (texture->owner.is_null()) {
			context->set_object_name(texture->resource, p_name);
		}
	} else if (framebuffer_owner.owns(p_id)) {
		// No D3D12 object to name.
	} else if (sampler_owner.owns(p_id)) {
		// No D3D12 object to name.
	} else if (shader_owner.owns(p_id)) {
		Shader *shader = shader_owner.get_or_null(p_id);
		context->set_object_name(shader->root_signature.Get(), p_name + " Root Signature");
	} else if (uniform_set_owner.owns(p_id)) {
		// No D3D12 object to name.
	} else if (render_pipeline_owner.owns(p_id)) {
		RenderPipeline *pipeline = render_pipeline_owner.get_or_null(p_id);
		context->set_object_name(pipeline->pso.Get(), p_name);
	} else if (compute_pipeline_owner.owns(p_id)) {
		ComputePipeline *pipeline = compute_pipeline_owner.get_or_null(p_id);
		context->set_object_name(pipeline->pso.Get(), p_name);
	} else {
		Buffer *buffer = _get_buffer_from_owner(p_id);
		if (buffer) {
			context->set_object_name(buffer->resource, p_name);
		} else {
			ERR_PRINT("Attempted to name invalid ID: " + itos(p_id.get_id()));
			return;
		}
	}
#ifdef DEV_ENABLED
	resource_names[p_id] = p_name;
#endif
}

void RenderingDeviceD3D12::draw_command_begin_label(String p_label_name, const Color p_color) {
	_THREAD_SAFE_METHOD_
	context->command_begin_label(frames[frame].draw_command_list.Get(), p_label_name, p_color);
}

void RenderingDeviceD3D12::draw_command_insert_label(String p_label_name, const Color p_color) {
	_THREAD_SAFE_METHOD_
	context->command_insert_label(frames[frame].draw_command_list.Get(), p_label_name, p_color);
}

void RenderingDeviceD3D12::draw_command_end_label() {
	_THREAD_SAFE_METHOD_
	context->command_end_label(frames[frame].draw_command_list.Get());
}

String RenderingDeviceD3D12::get_device_vendor_name() const {
	return context->get_device_vendor_name();
}

String RenderingDeviceD3D12::get_device_name() const {
	return context->get_device_name();
}

RenderingDevice::DeviceType RenderingDeviceD3D12::get_device_type() const {
	return context->get_device_type();
}

String RenderingDeviceD3D12::get_device_api_version() const {
	return context->get_device_api_version();
}

String RenderingDeviceD3D12::get_device_pipeline_cache_uuid() const {
	return context->get_device_pipeline_cache_uuid();
}

void RenderingDeviceD3D12::_finalize_command_bufers() {
	if (draw_list) {
		ERR_PRINT("Found open draw list at the end of the frame, this should never happen (further drawing will likely not work).");
	}
	if (compute_list) {
		ERR_PRINT("Found open compute list at the end of the frame, this should never happen (further compute will likely not work).");
	}
	{ // Close the command lists (the setup list needs to be processed before anything else).
		frames[frame].setup_command_list->Close();
		frames[frame].draw_command_list->Close();
	}
}

void RenderingDeviceD3D12::_begin_frame() {
	// Erase pending resources.
	_free_pending_resources(frame);
	HRESULT res = frames[frame].setup_command_allocator->Reset();
	ERR_FAIL_COND_MSG(res, "Command allocator Reset failed with error " + vformat("0x%08ux", res) + ".");
	res = frames[frame].setup_command_list->Reset(frames[frame].setup_command_allocator.Get(), nullptr);
	ERR_FAIL_COND_MSG(res, "Command list Reset failed with error " + vformat("0x%08ux", res) + ".");
	res = frames[frame].draw_command_allocator->Reset();
	ERR_FAIL_COND_MSG(res, "Command allocator Reset failed with error " + vformat("0x%08ux", res) + ".");
	res = frames[frame].draw_command_list->Reset(frames[frame].draw_command_allocator.Get(), nullptr);
	ERR_FAIL_COND_MSG(res, "Command list Reset failed with error " + vformat("0x%08ux", res) + ".");
	ID3D12DescriptorHeap *heaps[] = {
		frames[frame].desc_heaps.resources.get_heap(),
		frames[frame].desc_heaps.samplers.get_heap(),
	};
	frames[frame].draw_command_list->SetDescriptorHeaps(2, heaps);
	frames[frame].desc_heap_walkers.resources.rewind();
	frames[frame].desc_heap_walkers.samplers.rewind();
	frames[frame].desc_heap_walkers.aux.rewind();
	frames[frame].desc_heap_walkers.rtv.rewind();
	frames[frame].desc_heaps_exhausted_reported = {};
	frames[frame].null_rtv_handle = {};
#ifdef DEBUG_COUNT_BARRIERS
	print_verbose(vformat("Last frame: %d barriers (%d batches); %.1f ms", frame_barriers_count, frame_barriers_batches_count, frame_barriers_cpu_time * 0.001f));
	frame_barriers_count = 0;
	frame_barriers_batches_count = 0;
	frame_barriers_cpu_time = 0;
#endif
	if (local_device.is_null()) {
		context->append_command_list(frames[frame].draw_command_list.Get());
		context->set_setup_list(frames[frame].setup_command_list.Get()); // Append now so it's added before everything else.
	}
	// Advance current frame.
	frames_drawn++;
	// Advance staging buffer if used.
	if (staging_buffer_used) {
		staging_buffer_current = (staging_buffer_current + 1) % staging_buffer_blocks.size();
		staging_buffer_used = false;
	}
	context->get_allocator()->SetCurrentFrameIndex(Engine::get_singleton()->get_frames_drawn());
	if (frames[frame].timestamp_count) {
		frames[frame].setup_command_list->ResolveQueryData(frames[frame].timestamp_heap.Get(), D3D12_QUERY_TYPE_TIMESTAMP, 0, frames[frame].timestamp_count, frames[frame].timestamp_result_values_buffer.resource, 0);
		uint64_t *gpu_timestamps = nullptr;
		res = frames[frame].timestamp_result_values_buffer.resource->Map(0, nullptr, (void **)&gpu_timestamps);
		if (SUCCEEDED(res)) {
			memcpy(frames[frame].timestamp_result_values.ptr(), gpu_timestamps, sizeof(uint64_t) * frames[frame].timestamp_count);
			frames[frame].timestamp_result_values_buffer.resource->Unmap(0, nullptr);
		}
		SWAP(frames[frame].timestamp_names, frames[frame].timestamp_result_names);
		SWAP(frames[frame].timestamp_cpu_values, frames[frame].timestamp_cpu_result_values);
	}
	frames[frame].timestamp_result_count = frames[frame].timestamp_count;
	frames[frame].timestamp_count = 0;
	frames[frame].index = Engine::get_singleton()->get_frames_drawn();
	frames[frame].execution_index = execution_index;
#ifdef DEV_ENABLED
	frames[frame].uniform_set_reused = 0;
#endif
}

void RenderingDeviceD3D12::swap_buffers() {
	ERR_FAIL_COND_MSG(local_device.is_valid(), "Local devices can't swap buffers.");
	_THREAD_SAFE_METHOD_
	context->postpare_buffers(frames[frame].draw_command_list.Get());
	screen_prepared = false;
	_finalize_command_bufers();
	context->swap_buffers();
	execution_index++;
	frame = (frame + 1) % frame_count;
	_begin_frame();
}

void RenderingDeviceD3D12::submit() {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_MSG(local_device.is_null(), "Only local devices can submit and sync.");
	ERR_FAIL_COND_MSG(local_device_processing, "Device already submitted; call sync to wait until done.");
	_finalize_command_bufers();
	ID3D12CommandList *command_lists[2] = { frames[frame].setup_command_list.Get(), frames[frame].draw_command_list.Get() };
	context->local_device_push_command_lists(local_device, command_lists, 2);
	execution_index++;
	local_device_processing = true;
}

void RenderingDeviceD3D12::sync() {
	_THREAD_SAFE_METHOD_
	ERR_FAIL_COND_MSG(local_device.is_null(), "Only local devices can submit and sync.");
	ERR_FAIL_COND_MSG(!local_device_processing, "Sync can only be called after a submit.");
	context->local_device_sync(local_device);
	_begin_frame();
	local_device_processing = false;
}

#ifdef USE_SMALL_ALLOCS_POOL
D3D12MA::Pool *RenderingDeviceD3D12::_find_or_create_small_allocs_pool(D3D12_HEAP_TYPE p_heap_type, D3D12_HEAP_FLAGS p_heap_flags) {
	D3D12_HEAP_FLAGS effective_heap_flags = p_heap_flags;
	if (context->get_allocator()->GetD3D12Options().ResourceHeapTier != D3D12_RESOURCE_HEAP_TIER_1) {
		// Heap tier 2 allows mixing resource types liberally.
		effective_heap_flags &= ~(D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS | D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES | D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES);
	}
	AllocPoolKey pool_key;
	pool_key.heap_type = p_heap_type;
	pool_key.heap_flags = effective_heap_flags;
	if (small_allocs_pools.has(pool_key.key)) {
		return small_allocs_pools[pool_key.key].Get();
	}
#ifdef DEV_ENABLED
	print_verbose("Creating D3D12MA small objects pool for heap type " + itos(p_heap_type) + " and heap flags " + itos(p_heap_flags));
#endif
	D3D12MA::POOL_DESC poolDesc = {};
	poolDesc.HeapProperties.Type = p_heap_type;
	poolDesc.HeapFlags = effective_heap_flags;
	ComPtr<D3D12MA::Pool> pool;
	HRESULT res = context->get_allocator()->CreatePool(&poolDesc, pool.GetAddressOf());
	small_allocs_pools[pool_key.key] = pool; // Don't try to create it again if it failed the first time.
	ERR_FAIL_COND_V_MSG(res, nullptr, "CreatePool failed with error " + vformat("0x%08ux", res) + ".");
	return pool.Get();
}
#endif

void RenderingDeviceD3D12::_free_pending_resources(int p_frame) {
	// Free in dependency usage order, so nothing weird happens.
	// Pipelines.
	while (frames[p_frame].render_pipelines_to_dispose_of.front()) {
		RenderPipeline *rp = &frames[p_frame].render_pipelines_to_dispose_of.front()->get();
		pipeline_bindings.erase(rp->bindings_id);
		frames[p_frame].render_pipelines_to_dispose_of.pop_front();
	}
	while (frames[p_frame].compute_pipelines_to_dispose_of.front()) {
		ComputePipeline *cp = &frames[p_frame].compute_pipelines_to_dispose_of.front()->get();
		pipeline_bindings.erase(cp->bindings_id);
		frames[p_frame].compute_pipelines_to_dispose_of.pop_front();
	}
	// Shaders.
	frames[p_frame].shaders_to_dispose_of.clear();
	// Framebuffers.
	frames[p_frame].framebuffers_to_dispose_of.clear();
	// Textures.
	while (frames[p_frame].textures_to_dispose_of.front()) {
		Texture *texture = &frames[p_frame].textures_to_dispose_of.front()->get();
		if (texture->bound) {
			WARN_PRINT("Deleted a texture while it was bound.");
		}
		if (texture->owner.is_null()) {
			// Actually owns the image and the allocation too.
			image_memory -= texture->allocation->GetSize();
			for (uint32_t i = 0; i < texture->aliases.size(); i++) {
				if (texture->aliases[i]) {
					texture->aliases[i]->Release();
				}
			}
			texture->resource->Release();
			texture->resource = nullptr;
			texture->allocation->Release();
			texture->allocation = nullptr;
		}
		frames[p_frame].textures_to_dispose_of.pop_front();
	}
	// Buffers.
	while (frames[p_frame].buffers_to_dispose_of.front()) {
		_buffer_free(&frames[p_frame].buffers_to_dispose_of.front()->get());
		frames[p_frame].buffers_to_dispose_of.pop_front();
	}
}

void RenderingDeviceD3D12::prepare_screen_for_drawing() {
	_THREAD_SAFE_METHOD_
	context->prepare_buffers(frames[frame].draw_command_list.Get());
	screen_prepared = true;
}

uint32_t RenderingDeviceD3D12::get_frame_delay() const {
	return frame_count;
}

uint64_t RenderingDeviceD3D12::get_memory_usage(MemoryType p_type) const {
	if (p_type == MEMORY_BUFFERS) {
		return buffer_memory;
	} else if (p_type == MEMORY_TEXTURES) {
		return image_memory;
	} else {
		D3D12MA::TotalStatistics stats;
		context->get_allocator()->CalculateStatistics(&stats);
		return stats.Total.Stats.BlockBytes;
	}
}

  7647. void RenderingDeviceD3D12::_flush(bool p_flush_current_frame) {
  7648. if (local_device.is_valid() && !p_flush_current_frame) {
  7649. return; // Flushing previous frames has no effect with local device.
  7650. }
  7651. if (p_flush_current_frame) {
  7652. frames[frame].setup_command_list->Close();
  7653. frames[frame].draw_command_list->Close();
  7654. }
  7655. if (local_device.is_valid()) {
  7656. ID3D12CommandList *command_lists[2] = { frames[frame].setup_command_list.Get(), frames[frame].draw_command_list.Get() };
  7657. context->local_device_push_command_lists(local_device, command_lists, 2);
  7658. execution_index++;
  7659. context->local_device_sync(local_device);
		HRESULT res = frames[frame].setup_command_allocator->Reset();
		ERR_FAIL_COND_MSG(res, "Command allocator Reset failed with error " + vformat("0x%08ux", res) + ".");
		res = frames[frame].setup_command_list->Reset(frames[frame].setup_command_allocator.Get(), nullptr);
		ERR_FAIL_COND_MSG(res, "Command list Reset failed with error " + vformat("0x%08ux", res) + ".");
		res = frames[frame].draw_command_allocator->Reset();
		ERR_FAIL_COND_MSG(res, "Command allocator Reset failed with error " + vformat("0x%08ux", res) + ".");
		res = frames[frame].draw_command_list->Reset(frames[frame].draw_command_allocator.Get(), nullptr);
		ERR_FAIL_COND_MSG(res, "Command list Reset failed with error " + vformat("0x%08ux", res) + ".");
		ID3D12DescriptorHeap *heaps[] = {
			frames[frame].desc_heaps.resources.get_heap(),
			frames[frame].desc_heaps.samplers.get_heap(),
		};
		frames[frame].draw_command_list->SetDescriptorHeaps(2, heaps);
		frames[frame].desc_heap_walkers.resources.rewind();
		frames[frame].desc_heap_walkers.samplers.rewind();
		frames[frame].desc_heap_walkers.aux.rewind();
		frames[frame].desc_heap_walkers.rtv.rewind();
		frames[frame].desc_heaps_exhausted_reported = {};
		frames[frame].null_rtv_handle = {};
		frames[frame].execution_index = execution_index;
	} else {
		context->flush(p_flush_current_frame, p_flush_current_frame);
		// Re-create the setup command.
		if (p_flush_current_frame) {
			execution_index++;
			HRESULT res = frames[frame].setup_command_allocator->Reset();
			ERR_FAIL_COND_MSG(res, "Command allocator Reset failed with error " + vformat("0x%08ux", res) + ".");
			res = frames[frame].draw_command_allocator->Reset();
			ERR_FAIL_COND_MSG(res, "Command allocator Reset failed with error " + vformat("0x%08ux", res) + ".");
			res = frames[frame].setup_command_list->Reset(frames[frame].setup_command_allocator.Get(), nullptr);
			ERR_FAIL_COND_MSG(res, "Command list Reset failed with error " + vformat("0x%08ux", res) + ".");
			res = frames[frame].draw_command_list->Reset(frames[frame].draw_command_allocator.Get(), nullptr);
			ERR_FAIL_COND_MSG(res, "Command list Reset failed with error " + vformat("0x%08ux", res) + ".");
			ID3D12DescriptorHeap *heaps[] = {
				frames[frame].desc_heaps.resources.get_heap(),
				frames[frame].desc_heaps.samplers.get_heap(),
			};
			frames[frame].draw_command_list->SetDescriptorHeaps(2, heaps);
			frames[frame].desc_heap_walkers.resources.rewind();
			frames[frame].desc_heap_walkers.samplers.rewind();
			frames[frame].desc_heap_walkers.aux.rewind();
			frames[frame].desc_heap_walkers.rtv.rewind();
			frames[frame].desc_heaps_exhausted_reported = {};
			frames[frame].null_rtv_handle = {};
			frames[frame].execution_index = execution_index;
			context->set_setup_list(frames[frame].setup_command_list.Get()); // Append now so it's added before everything else.
			context->append_command_list(frames[frame].draw_command_list.Get());
		}
	}
}
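// Creates all per-frame state: descriptor heaps and their walkers, command allocators and lists,
// timestamp query heaps, and the staging buffer blocks.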
void RenderingDeviceD3D12::initialize(D3D12Context *p_context, bool p_local_device) {
	// Get our device capabilities.
	{
		device_capabilities.version_major = p_context->get_feat_level_major();
		device_capabilities.version_minor = p_context->get_feat_level_minor();
	}
	context = p_context;
	device = p_context->get_device();
	if (p_local_device) {
		frame_count = 1;
		local_device = p_context->local_device_create();
		device = p_context->local_device_get_d3d12_device(local_device);
	} else {
		frame_count = p_context->get_swapchain_image_count() + 1;
	}
	limits = p_context->get_device_limits();
	max_timestamp_query_elements = 256;
	{ // Create command signature for indirect dispatch.
		D3D12_INDIRECT_ARGUMENT_DESC iarg_desc = {};
		iarg_desc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
		D3D12_COMMAND_SIGNATURE_DESC cs_desc = {};
		cs_desc.ByteStride = sizeof(D3D12_DISPATCH_ARGUMENTS);
		cs_desc.NumArgumentDescs = 1;
		cs_desc.pArgumentDescs = &iarg_desc;
		cs_desc.NodeMask = 0;
		HRESULT res = device->CreateCommandSignature(&cs_desc, nullptr, IID_PPV_ARGS(indirect_dispatch_cmd_sig.GetAddressOf()));
		ERR_FAIL_COND_MSG(res, "CreateCommandSignature failed with error " + vformat("0x%08ux", res) + ".");
	}
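	// This signature lets ExecuteIndirect() consume a D3D12_DISPATCH_ARGUMENTS record straight
	// from a GPU buffer for indirect compute dispatches.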
	uint32_t resource_descriptors_per_frame = GLOBAL_DEF("rendering/rendering_device/d3d12/max_resource_descriptors_per_frame", 16384);
	uint32_t sampler_descriptors_per_frame = GLOBAL_DEF("rendering/rendering_device/d3d12/max_sampler_descriptors_per_frame", 1024);
	uint32_t misc_descriptors_per_frame = GLOBAL_DEF("rendering/rendering_device/d3d12/max_misc_descriptors_per_frame", 512);
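	// Keep in mind these are per-frame hard caps: D3D12 limits a shader-visible CBV/SRV/UAV heap
	// to 1,000,000 descriptors (on binding tiers 1 and 2) and a shader-visible sampler heap to 2,048.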
	frames.resize(frame_count);
	frame = 0;
	// Create setup and frame buffers.
	for (int i = 0; i < frame_count; i++) {
		frames[i].index = 0;
		{ // Create descriptor heaps.
			Error err = frames[i].desc_heaps.resources.allocate(device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, resource_descriptors_per_frame, true);
			ERR_FAIL_COND_MSG(err, "Creating the frame's RESOURCE descriptors heap failed.");
			err = frames[i].desc_heaps.samplers.allocate(device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER, sampler_descriptors_per_frame, true);
			ERR_FAIL_COND_MSG(err, "Creating the frame's SAMPLER descriptors heap failed.");
			err = frames[i].desc_heaps.aux.allocate(device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, misc_descriptors_per_frame, false);
			ERR_FAIL_COND_MSG(err, "Creating the frame's AUX descriptors heap failed.");
			err = frames[i].desc_heaps.rtv.allocate(device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_RTV, misc_descriptors_per_frame, false);
			ERR_FAIL_COND_MSG(err, "Creating the frame's RENDER TARGET descriptors heap failed.");
			frames[i].desc_heap_walkers.resources = frames[i].desc_heaps.resources.make_walker();
			frames[i].desc_heap_walkers.samplers = frames[i].desc_heaps.samplers.make_walker();
			frames[i].desc_heap_walkers.aux = frames[i].desc_heaps.aux.make_walker();
			frames[i].desc_heap_walkers.rtv = frames[i].desc_heaps.rtv.make_walker();
		}
		{ // Create command allocators.
			HRESULT res = device->CreateCommandAllocator(D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS(frames[i].setup_command_allocator.GetAddressOf()));
			ERR_CONTINUE_MSG(res, "CreateCommandAllocator failed with error " + vformat("0x%08ux", res) + ".");
			res = device->CreateCommandAllocator(D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS(frames[i].draw_command_allocator.GetAddressOf()));
			ERR_CONTINUE_MSG(res, "CreateCommandAllocator failed with error " + vformat("0x%08ux", res) + ".");
		}
		{ // Create command lists.
			HRESULT res = device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, frames[i].setup_command_allocator.Get(), nullptr, IID_PPV_ARGS(frames[i].setup_command_list.GetAddressOf()));
			ERR_CONTINUE_MSG(res, "CreateCommandList failed with error " + vformat("0x%08ux", res) + ".");
			res = device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, frames[i].draw_command_allocator.Get(), nullptr, IID_PPV_ARGS(frames[i].draw_command_list.GetAddressOf()));
			ERR_CONTINUE_MSG(res, "CreateCommandList failed with error " + vformat("0x%08ux", res) + ".");
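			// CreateCommandList returns the list already in the recording state; close every
			// frame's lists except the first, since only the current frame records commands.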
			if (i > 0) {
				frames[i].setup_command_list->Close();
				frames[i].draw_command_list->Close();
			}
		}
		if (i == 0) {
			ID3D12DescriptorHeap *heaps[] = {
				frames[frame].desc_heaps.resources.get_heap(),
				frames[frame].desc_heaps.samplers.get_heap(),
			};
			frames[frame].draw_command_list->SetDescriptorHeaps(2, heaps);
		}
		{
			// Create query heap.
			D3D12_QUERY_HEAP_DESC qh_desc = {};
			qh_desc.Type = D3D12_QUERY_HEAP_TYPE_TIMESTAMP;
			qh_desc.Count = max_timestamp_query_elements;
			qh_desc.NodeMask = 0;
			HRESULT res = device->CreateQueryHeap(&qh_desc, IID_PPV_ARGS(frames[i].timestamp_heap.GetAddressOf()));
			ERR_CONTINUE_MSG(res, "CreateQueryHeap failed with error " + vformat("0x%08ux", res) + ".");
			frames[i].timestamp_names.resize(max_timestamp_query_elements);
			frames[i].timestamp_cpu_values.resize(max_timestamp_query_elements);
			frames[i].timestamp_count = 0;
			frames[i].timestamp_result_names.resize(max_timestamp_query_elements);
			frames[i].timestamp_cpu_result_values.resize(max_timestamp_query_elements);
			frames[i].timestamp_result_values.resize(max_timestamp_query_elements);
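			// Readback heap, so the CPU can map and read the resolved timestamps once the frame's work completes.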
			Error err = _buffer_allocate(&frames[i].timestamp_result_values_buffer, sizeof(uint64_t) * max_timestamp_query_elements, D3D12_RESOURCE_STATE_COMMON, D3D12_HEAP_TYPE_READBACK);
			ERR_CONTINUE(err);
			frames[i].timestamp_result_count = 0;
		}
	}
	if (local_device.is_null()) {
		context->set_setup_list(frames[0].setup_command_list.Get()); // Append now so it's added before everything else.
		context->append_command_list(frames[0].draw_command_list.Get());
	}
	staging_buffer_block_size = GLOBAL_GET("rendering/rendering_device/staging_buffer/block_size_kb");
	staging_buffer_block_size = MAX(4u, staging_buffer_block_size);
	staging_buffer_block_size *= 1024; // KiB -> bytes.
	staging_buffer_max_size = GLOBAL_GET("rendering/rendering_device/staging_buffer/max_size_mb");
	staging_buffer_max_size = MAX(1u, staging_buffer_max_size);
	staging_buffer_max_size *= 1024 * 1024;
	if (staging_buffer_max_size < staging_buffer_block_size * 4) {
		// Make sure the maximum size can hold at least four blocks.
		staging_buffer_max_size = staging_buffer_block_size * 4;
	}
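	// E.g., sizing with 256 KiB blocks and a 128 MiB cap allows the staging pool to grow to at
	// most 512 blocks.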
	texture_upload_region_size_px = GLOBAL_GET("rendering/rendering_device/staging_buffer/texture_upload_region_size_px");
	texture_upload_region_size_px = nearest_power_of_2_templated(texture_upload_region_size_px);
	frames_drawn = frame_count; // Start from frame count, so everything else is immediately old.
	execution_index = 1;
	// Ensure current staging block is valid and at least one per frame exists.
	staging_buffer_current = 0;
	staging_buffer_used = false;
	for (int i = 0; i < frame_count; i++) {
		// Staging was never used, create a block.
		Error err = _insert_staging_block();
		ERR_CONTINUE(err != OK);
	}
	{
		aux_resource = texture_create(TextureFormat(), TextureView());
		ERR_FAIL_COND(!aux_resource.is_valid());
	}
	draw_list = nullptr;
	draw_list_count = 0;
	draw_list_split = false;
	vrs_state_execution_index = 0;
	vrs_state = {};
	compute_list = nullptr;
	glsl_type_singleton_init_or_ref();
}
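// DXIL validation happens on worker threads during shader compilation; validator instances are
// assumed not to be shareable across threads, so one is created and cached per worker thread index.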
dxil_validator *RenderingDeviceD3D12::get_dxil_validator_for_current_thread() {
	MutexLock lock(dxil_mutex);
	int thread_idx = WorkerThreadPool::get_singleton()->get_thread_index();
	if (dxil_validators.has(thread_idx)) {
		return dxil_validators[thread_idx];
	}
#ifdef DEV_ENABLED
	print_verbose("Creating DXIL validator for worker thread index " + itos(thread_idx));
#endif
	dxil_validator *dxil_validator = dxil_create_validator(nullptr);
	CRASH_COND(!dxil_validator);
	dxil_validators.insert(thread_idx, dxil_validator);
	return dxil_validator;
}
template <class T>
void RenderingDeviceD3D12::_free_rids(T &p_owner, const char *p_type) {
	List<RID> owned;
	p_owner.get_owned_list(&owned);
	if (owned.size()) {
		if (owned.size() == 1) {
			WARN_PRINT(vformat("1 RID of type \"%s\" was leaked.", p_type));
		} else {
			WARN_PRINT(vformat("%d RIDs of type \"%s\" were leaked.", owned.size(), p_type));
		}
		for (const RID &E : owned) {
#ifdef DEV_ENABLED
			if (resource_names.has(E)) {
				print_line(String(" - ") + resource_names[E]);
			}
#endif
			free(E);
		}
	}
}
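// Records a GPU timestamp query alongside a CPU tick, so both timelines can be correlated when
// the results are read back.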
void RenderingDeviceD3D12::capture_timestamp(const String &p_name) {
	ERR_FAIL_COND_MSG(draw_list != nullptr, "Capturing timestamps during draw list creation is not allowed. Offending timestamp was: " + p_name);
	ERR_FAIL_COND(frames[frame].timestamp_count >= max_timestamp_query_elements);
	// This full barrier should be made optional for profiling; as it is, it slows things down.
	full_barrier();
	frames[frame].draw_command_list->EndQuery(frames[frame].timestamp_heap.Get(), D3D12_QUERY_TYPE_TIMESTAMP, frames[frame].timestamp_count);
	frames[frame].timestamp_names[frames[frame].timestamp_count] = p_name;
	frames[frame].timestamp_cpu_values[frames[frame].timestamp_count] = OS::get_singleton()->get_ticks_usec();
	frames[frame].timestamp_count++;
}
uint64_t RenderingDeviceD3D12::get_driver_resource(DriverResource p_resource, RID p_rid, uint64_t p_index) {
	_THREAD_SAFE_METHOD_
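	// Not implemented for D3D12 yet; no driver-internal handles are exposed, so this always returns 0.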
	return 0;
}
uint32_t RenderingDeviceD3D12::get_captured_timestamps_count() const {
	return frames[frame].timestamp_result_count;
}
uint64_t RenderingDeviceD3D12::get_captured_timestamps_frame() const {
	return frames[frame].index;
}
uint64_t RenderingDeviceD3D12::get_captured_timestamp_gpu_time(uint32_t p_index) const {
	ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, 0);
	return frames[frame].timestamp_result_values[p_index] / (double)limits.timestamp_frequency * 1000000000.0;
}
uint64_t RenderingDeviceD3D12::get_captured_timestamp_cpu_time(uint32_t p_index) const {
	ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, 0);
	return frames[frame].timestamp_cpu_result_values[p_index];
}
String RenderingDeviceD3D12::get_captured_timestamp_name(uint32_t p_index) const {
	ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, String());
	return frames[frame].timestamp_result_names[p_index];
}
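// Maps generic RenderingDevice limits onto D3D12 constants; values with no direct D3D12
// equivalent are conservative approximations.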
uint64_t RenderingDeviceD3D12::limit_get(Limit p_limit) const {
	switch (p_limit) {
		case LIMIT_MAX_TEXTURES_PER_SHADER_STAGE:
			return limits.max_srvs_per_shader_stage;
		case LIMIT_MAX_UNIFORM_BUFFER_SIZE:
			return 65536;
		case LIMIT_MAX_VIEWPORT_DIMENSIONS_X:
		case LIMIT_MAX_VIEWPORT_DIMENSIONS_Y:
			return 16384; // Based on the maximum texture size; may not be exact.
		case LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_X:
			return D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION;
		case LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Y:
			return D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION;
		case LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Z:
			return D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION;
		case LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_X:
			return D3D12_CS_THREAD_GROUP_MAX_X;
		case LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_Y:
			return D3D12_CS_THREAD_GROUP_MAX_Y;
		case LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_Z:
			return D3D12_CS_THREAD_GROUP_MAX_Z;
		case LIMIT_SUBGROUP_SIZE:
			// Note: there's no distinct min/max. Shader Model 6.6 can constrain the wave size (see https://microsoft.github.io/DirectX-Specs/d3d/HLSL_SM_6_6_WaveSize.html),
			// but at this time the implications for the transpilation to DXIL, etc., are unclear.
		case LIMIT_SUBGROUP_MIN_SIZE:
		case LIMIT_SUBGROUP_MAX_SIZE: {
			D3D12Context::SubgroupCapabilities subgroup_capabilities = context->get_subgroup_capabilities();
			return subgroup_capabilities.size;
		}
		case LIMIT_SUBGROUP_IN_SHADERS: {
			D3D12Context::SubgroupCapabilities subgroup_capabilities = context->get_subgroup_capabilities();
			return subgroup_capabilities.supported_stages_flags_rd();
		}
		case LIMIT_SUBGROUP_OPERATIONS: {
			D3D12Context::SubgroupCapabilities subgroup_capabilities = context->get_subgroup_capabilities();
			return subgroup_capabilities.supported_operations_flags_rd();
		}
		case LIMIT_VRS_TEXEL_WIDTH:
		case LIMIT_VRS_TEXEL_HEIGHT: {
			return context->get_vrs_capabilities().ss_image_tile_size;
		}
		default:
			// It's important to return a number that at least won't overflow any typical integer type.
#ifdef DEV_ENABLED
			WARN_PRINT("Returning maximum value for unknown limit " + itos(p_limit) + ".");
#endif
			return (uint64_t)1 << 30;
	}
}
bool RenderingDeviceD3D12::has_feature(const Features p_feature) const {
	switch (p_feature) {
		case SUPPORTS_MULTIVIEW: {
			D3D12Context::MultiviewCapabilities multiview_capabilities = context->get_multiview_capabilities();
			return multiview_capabilities.is_supported && multiview_capabilities.max_view_count > 1;
		} break;
		case SUPPORTS_FSR_HALF_FLOAT: {
			return context->get_shader_capabilities().native_16bit_ops && context->get_storage_buffer_capabilities().storage_buffer_16_bit_access_is_supported;
		} break;
		case SUPPORTS_ATTACHMENT_VRS: {
			D3D12Context::VRSCapabilities vrs_capabilities = context->get_vrs_capabilities();
			return vrs_capabilities.ss_image_supported;
		} break;
		case SUPPORTS_FRAGMENT_SHADER_WITH_ONLY_SIDE_EFFECTS: {
			return true;
		} break;
		default: {
			return false;
		}
	}
}
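// Tears down the device: any RIDs the user leaked are reported and freed first (shared textures
// before owning ones), then all pending and per-frame resources are released.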
void RenderingDeviceD3D12::finalize() {
	// Free all resources.
	_flush(false);
	free(aux_resource);
	_free_rids(render_pipeline_owner, "Pipeline");
	_free_rids(compute_pipeline_owner, "Compute");
	_free_rids(uniform_set_owner, "UniformSet");
	_free_rids(texture_buffer_owner, "TextureBuffer");
	_free_rids(storage_buffer_owner, "StorageBuffer");
	_free_rids(uniform_buffer_owner, "UniformBuffer");
	_free_rids(shader_owner, "Shader");
	_free_rids(index_array_owner, "IndexArray");
	_free_rids(index_buffer_owner, "IndexBuffer");
	_free_rids(vertex_array_owner, "VertexArray");
	_free_rids(vertex_buffer_owner, "VertexBuffer");
	_free_rids(framebuffer_owner, "Framebuffer");
	_free_rids(sampler_owner, "Sampler");
	{
		// For textures it's a bit more difficult because they may be shared.
		List<RID> owned;
		texture_owner.get_owned_list(&owned);
		if (owned.size()) {
			if (owned.size() == 1) {
				WARN_PRINT("1 RID of type \"Texture\" was leaked.");
			} else {
				WARN_PRINT(vformat("%d RIDs of type \"Texture\" were leaked.", owned.size()));
			}
			// Free shared first.
			for (List<RID>::Element *E = owned.front(); E;) {
				List<RID>::Element *N = E->next();
				if (texture_is_shared(E->get())) {
#ifdef DEV_ENABLED
					if (resource_names.has(E->get())) {
						print_line(String(" - ") + resource_names[E->get()]);
					}
#endif
					free(E->get());
					owned.erase(E);
				}
				E = N;
			}
			// Free non-shared textures second; this avoids errors from dependencies trying to free textures that no longer exist.
			for (const RID &E : owned) {
#ifdef DEV_ENABLED
				if (resource_names.has(E)) {
					print_line(String(" - ") + resource_names[E]);
				}
#endif
				free(E);
			}
		}
	}
	// Free everything pending.
	for (int i = 0; i < frame_count; i++) {
		int f = (frame + i) % frame_count;
		_free_pending_resources(f);
		frames[i].timestamp_result_values_buffer.allocation->Release();
		frames[i].timestamp_result_values_buffer.resource->Release();
	}
	frames.clear();
	pipeline_bindings.clear();
	next_pipeline_binding_id = 1;
	for (int i = 0; i < split_draw_list_allocators.size(); i++) {
		for (int j = 0; j < split_draw_list_allocators[i].command_lists.size(); j++) {
			split_draw_list_allocators[i].command_lists[j]->Release();
		}
		split_draw_list_allocators[i].command_allocator->Release();
	}
	res_barriers_requests.clear();
	res_barriers.clear();
	for (int i = 0; i < staging_buffer_blocks.size(); i++) {
		staging_buffer_blocks[i].allocation->Release();
		staging_buffer_blocks[i].resource->Release();
	}
#ifdef USE_SMALL_ALLOCS_POOL
	small_allocs_pools.clear();
#endif
	indirect_dispatch_cmd_sig.Reset();
	vertex_formats.clear();
	framebuffer_formats.clear();
	// All these should be clear at this point.
	ERR_FAIL_COND(dependency_map.size());
	ERR_FAIL_COND(reverse_dependency_map.size());
	{
		MutexLock lock(dxil_mutex);
		for (const KeyValue<int, dxil_validator *> &E : dxil_validators) {
			dxil_destroy_validator(E.value);
		}
	}
	glsl_type_singleton_decref();
}
RenderingDevice *RenderingDeviceD3D12::create_local_device() {
	RenderingDeviceD3D12 *rd = memnew(RenderingDeviceD3D12);
	rd->initialize(context, true);
	return rd;
}
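// A local device gets a single frame and is synced explicitly through _flush(); it lets callers
// run GPU work (e.g. one-off compute tasks) outside the main rendering loop.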
RenderingDeviceD3D12::RenderingDeviceD3D12() {
	device_capabilities.device_family = DEVICE_DIRECTX;
}
RenderingDeviceD3D12::~RenderingDeviceD3D12() {
	if (local_device.is_valid()) {
		finalize();
		context->local_device_free(local_device);
	}
}