{
    Copyright (c) 1998-2002 by Florian Klaempfl and Jonas Maebe

    This unit contains the peephole optimizer.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit aoptx86;

{$i fpcdefs.inc}

{ $define DEBUG_AOPTCPU}

{$ifdef EXTDEBUG}
{$define DEBUG_AOPTCPU}
{$endif EXTDEBUG}

  interface

    uses
      globtype,
      cpubase,
      aasmtai,aasmcpu,
      cgbase,cgutils,
      aopt,aoptobj;
    type
      TOptsToCheck = (
        aoc_MovAnd2Mov_3
      );

      TX86AsmOptimizer = class(TAsmOptimizer)
        { Some optimizations are very expensive to check, so the pre-opt pass
          can be used to set flags, based on the instructions found, that
          indicate whether a certain optimization is worth checking for. }
        OptsToCheck : set of TOptsToCheck;
        function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
        function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
        function RegReadByInstruction(reg : TRegister; hp : tai) : boolean;
        function RegInInstruction(Reg: TRegister; p1: tai): Boolean; override;
        function GetNextInstructionUsingReg(Current: tai; out Next: tai; reg: TRegister): Boolean;
        { This version of GetNextInstructionUsingReg will look across conditional jumps,
          potentially allowing further optimisation (although it might need to know if
          it crossed a conditional jump). }
        function GetNextInstructionUsingRegCond(Current: tai; out Next: tai; reg: TRegister; var CrossJump: Boolean): Boolean;
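        { Illustrative example (not part of the original source): the plain
          GetNextInstructionUsingReg stops its search at the conditional jump below,
          whereas GetNextInstructionUsingRegCond continues past it and sets CrossJump
          to True so the caller can be more conservative about what it rewrites:

              test %eax,%eax
              je   .Lskip
              mov  %eax,%edx     <- still found, with CrossJump = True
        }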
        {
          In comparison with GetNextInstructionUsingReg, GetNextInstructionUsingRegTrackingUse tracks
          the use of a register by allocs/dealloc, so it can ignore calls.

          In the following example, GetNextInstructionUsingReg will return the second movq,
          GetNextInstructionUsingRegTrackingUse won't.

              movq %rdi,%rax
              # Register rdi released
              # Register rdi allocated
              movq %rax,%rdi

          While in this example:

              movq %rdi,%rax
              call proc
              movq %rdi,%rax

          GetNextInstructionUsingRegTrackingUse will return the second instruction while GetNextInstructionUsingReg
          won't.
        }
        function GetNextInstructionUsingRegTrackingUse(Current: tai; out Next: tai; reg: TRegister): Boolean;
        function RegModifiedByInstruction(Reg: TRegister; p1: tai): boolean; override;

      private
        function SkipSimpleInstructions(var hp1: tai): Boolean;

      protected
        class function IsMOVZXAcceptable: Boolean; static; inline;
        { Attempts to allocate a volatile integer register for use between p and hp,
          using AUsedRegs for the current register usage information. Returns NR_NO
          if no free register could be found }
        function GetIntRegisterBetween(RegSize: TSubRegister; var AUsedRegs: TAllUsedRegs; p, hp: tai): TRegister;
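        { Illustrative usage sketch (assumed, not taken from the original source):

              NewReg := GetIntRegisterBetween(R_SUBD, TmpUsedRegs, p, hp1);
              if NewReg <> NR_NO then
                ... NewReg is free as a scratch register between p and hp1 ...

          R_SUBD selects a 32-bit subregister; TmpUsedRegs and hp1 are names the
          surrounding optimizer code commonly uses for the tracked register state
          and the instruction that ends the range. }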
        { Attempts to allocate a volatile MM register for use between p and hp,
          using AUsedRegs for the current register usage information. Returns NR_NO
          if no free register could be found }
        function GetMMRegisterBetween(RegSize: TSubRegister; var AUsedRegs: TAllUsedRegs; p, hp: tai): TRegister;

        { checks whether loading a new value in reg1 overwrites the entirety of reg2 }
        function Reg1WriteOverwritesReg2Entirely(reg1, reg2: tregister): boolean;
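        { Illustrative examples (not part of the original source): writing to EAX
          overwrites AX and AL entirely (and, on x86_64, the whole of RAX, since
          32-bit writes zero-extend), whereas writing to AX or AL leaves the upper
          bits of EAX untouched, so it does not overwrite EAX entirely. }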
        { checks whether reading the value in reg1 depends on the value of reg2. This
          is very similar to SuperRegisterEquals, except it takes into account that
          R_SUBH and R_SUBL are independent (e.g. reading from AL does not
          depend on the value in AH). }
        function Reg1ReadDependsOnReg2(reg1, reg2: tregister): boolean;
- { Replaces all references to AOldReg in a memory reference to ANewReg }
- class function ReplaceRegisterInRef(var ref: TReference; const AOldReg, ANewReg: TRegister): Boolean; static;
- { Replaces all occurrences of AOldReg in an operand with ANewReg }
- class function ReplaceRegisterInOper(const p: taicpu; const OperIdx: Integer; const AOldReg, ANewReg: TRegister): Boolean; static;
- { Replaces all occurrences of AOldReg in an instruction with ANewReg,
- except where the register is being written }
- class function ReplaceRegisterInInstruction(const p: taicpu; const AOldReg, ANewReg: TRegister): Boolean; static;
- { Returns true if the reference only refers to ESP or EBP (or their 64-bit equivalents),
- or is a RIP-relative reference to a global symbol }
- class function IsRefSafe(const ref: PReference): Boolean; static;
- { Returns true if the given MOV instruction can be safely converted to CMOV }
- class function CanBeCMOV(p : tai) : boolean; static;
- { Converts the LEA instruction to ADD/INC/SUB/DEC. Returns True if the
- conversion was successful }
- function ConvertLEA(const p : taicpu): Boolean;
- function DeepMOVOpt(const p_mov: taicpu; const hp: taicpu): Boolean;
- procedure DebugMsg(const s : string; p : tai);inline;
- class function IsExitCode(p : tai) : boolean; static;
- class function isFoldableArithOp(hp1 : taicpu; reg : tregister) : boolean; static;
- procedure RemoveLastDeallocForFuncRes(p : tai);
- function DoSubAddOpt(var p : tai) : Boolean;
- function DoMovCmpMemOpt(var p : tai; const hp1: tai; UpdateTmpUsedRegs: Boolean) : Boolean;
- function PrePeepholeOptSxx(var p : tai) : boolean;
- function PrePeepholeOptIMUL(var p : tai) : boolean;
- function PrePeepholeOptAND(var p : tai) : boolean;
- function OptPass1Test(var p: tai): boolean;
- function OptPass1Add(var p: tai): boolean;
- function OptPass1AND(var p : tai) : boolean;
- function OptPass1_V_MOVAP(var p : tai) : boolean;
- function OptPass1VOP(var p : tai) : boolean;
- function OptPass1MOV(var p : tai) : boolean;
- function OptPass1Movx(var p : tai) : boolean;
- function OptPass1MOVXX(var p : tai) : boolean;
- function OptPass1OP(var p : tai) : boolean;
- function OptPass1LEA(var p : tai) : boolean;
- function OptPass1Sub(var p : tai) : boolean;
- function OptPass1SHLSAL(var p : tai) : boolean;
- function OptPass1FSTP(var p : tai) : boolean;
- function OptPass1FLD(var p : tai) : boolean;
- function OptPass1Cmp(var p : tai) : boolean;
- function OptPass1PXor(var p : tai) : boolean;
- function OptPass1VPXor(var p: tai): boolean;
- function OptPass1Imul(var p : tai) : boolean;
- function OptPass1Jcc(var p : tai) : boolean;
- function OptPass1SHXX(var p: tai): boolean;
- function OptPass1VMOVDQ(var p: tai): Boolean;
- function OptPass1_V_Cvtss2sd(var p: tai): boolean;
- function OptPass2Movx(var p : tai): Boolean;
- function OptPass2MOV(var p : tai) : boolean;
- function OptPass2Imul(var p : tai) : boolean;
- function OptPass2Jmp(var p : tai) : boolean;
- function OptPass2Jcc(var p : tai) : boolean;
- function OptPass2Lea(var p: tai): Boolean;
- function OptPass2SUB(var p: tai): Boolean;
- function OptPass2ADD(var p : tai): Boolean;
- function OptPass2SETcc(var p : tai) : boolean;
- function CheckMemoryWrite(var first_mov, second_mov: taicpu): Boolean;
- function PostPeepholeOptMov(var p : tai) : Boolean;
- function PostPeepholeOptMovzx(var p : tai) : Boolean;
- {$ifdef x86_64} { These post-peephole optimisations only affect 64-bit registers. [Kit] }
- function PostPeepholeOptXor(var p : tai) : Boolean;
- {$endif x86_64}
- function PostPeepholeOptAnd(var p : tai) : boolean;
- function PostPeepholeOptMOVSX(var p : tai) : boolean;
- function PostPeepholeOptCmp(var p : tai) : Boolean;
- function PostPeepholeOptTestOr(var p : tai) : Boolean;
- function PostPeepholeOptCall(var p : tai) : Boolean;
- function PostPeepholeOptLea(var p : tai) : Boolean;
- function PostPeepholeOptPush(var p: tai): Boolean;
- function PostPeepholeOptShr(var p : tai) : boolean;
- function PostPeepholeOptADDSUB(var p : tai) : Boolean;
- function PostPeepholeOptVPXOR(var p: tai): Boolean;
- procedure ConvertJumpToRET(const p: tai; const ret_p: tai);
- function CheckJumpMovTransferOpt(var p: tai; hp1: tai; LoopCount: Integer; out Count: Integer): Boolean;
- function TrySwapMovCmp(var p, hp1: tai): Boolean;
- { Processor-dependent reference optimisation }
- class procedure OptimizeRefs(var p: taicpu); static;
- end;
- function MatchInstruction(const instr: tai; const op: TAsmOp; const opsize: topsizes): boolean;
- function MatchInstruction(const instr: tai; const op1,op2: TAsmOp; const opsize: topsizes): boolean;
- function MatchInstruction(const instr: tai; const op1,op2,op3: TAsmOp; const opsize: topsizes): boolean;
- function MatchInstruction(const instr: tai; const ops: array of TAsmOp; const opsize: topsizes): boolean;
- function MatchOperand(const oper: TOper; const reg: TRegister): boolean; inline;
- function MatchOperand(const oper: TOper; const a: tcgint): boolean; inline;
- function MatchOperand(const oper1: TOper; const oper2: TOper): boolean;
- {$if max_operands>2}
- function MatchOperand(const oper1: TOper; const oper2: TOper; const oper3: TOper): boolean;
- {$endif max_operands>2}
- function RefsEqual(const r1, r2: treference): boolean;
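- { Returns True if ref uses only the registers passed as base and index (NR_INVALID
- acts as a wildcard), with no offset, no scale factor above 1, and no segment
- override, symbol or volatility }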
- function MatchReference(const ref : treference;base,index : TRegister) : Boolean;
- { Returns True if ref is a reference using only the registers passed as base and index;
- unlike MatchReference, a non-zero offset is allowed }
- function MatchReferenceWithOffset(const ref : treference;base,index : TRegister) : Boolean;
- implementation
- uses
- cutils,verbose,
- systems,
- globals,
- cpuinfo,
- procinfo,
- paramgr,
- aasmbase,
- aoptbase,aoptutils,
- symconst,symsym,
- cgx86,
- itcpugas;
- {$ifdef DEBUG_AOPTCPU}
- const
- SPeepholeOptimization: shortstring = 'Peephole Optimization: ';
- {$else DEBUG_AOPTCPU}
- { Empty strings help the optimizer to remove string concatenations that won't
- ever appear to the user on release builds. [Kit] }
- const
- SPeepholeOptimization = '';
- {$endif DEBUG_AOPTCPU}
- LIST_STEP_SIZE = 4;
- function MatchInstruction(const instr: tai; const op: TAsmOp; const opsize: topsizes): boolean;
- begin
- result :=
- (instr.typ = ait_instruction) and
- (taicpu(instr).opcode = op) and
- ((opsize = []) or (taicpu(instr).opsize in opsize));
- end;
- function MatchInstruction(const instr: tai; const op1,op2: TAsmOp; const opsize: topsizes): boolean;
- begin
- result :=
- (instr.typ = ait_instruction) and
- ((taicpu(instr).opcode = op1) or
- (taicpu(instr).opcode = op2)
- ) and
- ((opsize = []) or (taicpu(instr).opsize in opsize));
- end;
- function MatchInstruction(const instr: tai; const op1,op2,op3: TAsmOp; const opsize: topsizes): boolean;
- begin
- result :=
- (instr.typ = ait_instruction) and
- ((taicpu(instr).opcode = op1) or
- (taicpu(instr).opcode = op2) or
- (taicpu(instr).opcode = op3)
- ) and
- ((opsize = []) or (taicpu(instr).opsize in opsize));
- end;
- function MatchInstruction(const instr : tai;const ops : array of TAsmOp;
- const opsize : topsizes) : boolean;
- var
- op : TAsmOp;
- begin
- result:=false;
- if (instr.typ <> ait_instruction) or
- ((opsize <> []) and not(taicpu(instr).opsize in opsize)) then
- exit;
- for op in ops do
- begin
- if taicpu(instr).opcode = op then
- begin
- result:=true;
- exit;
- end;
- end;
- end;
- function MatchOperand(const oper: TOper; const reg: TRegister): boolean; inline;
- begin
- result := (oper.typ = top_reg) and (oper.reg = reg);
- end;
- function MatchOperand(const oper: TOper; const a: tcgint): boolean; inline;
- begin
- result := (oper.typ = top_const) and (oper.val = a);
- end;
- function MatchOperand(const oper1: TOper; const oper2: TOper): boolean;
- begin
- result := oper1.typ = oper2.typ;
- if result then
- case oper1.typ of
- top_const:
- Result:=oper1.val = oper2.val;
- top_reg:
- Result:=oper1.reg = oper2.reg;
- top_ref:
- Result:=RefsEqual(oper1.ref^, oper2.ref^);
- else
- internalerror(2013102801);
- end
- end;
- function MatchOperand(const oper1: TOper; const oper2: TOper; const oper3: TOper): boolean;
- begin
- result := (oper1.typ = oper2.typ) and (oper1.typ = oper3.typ);
- if result then
- case oper1.typ of
- top_const:
- Result:=(oper1.val = oper2.val) and (oper1.val = oper3.val);
- top_reg:
- Result:=(oper1.reg = oper2.reg) and (oper1.reg = oper3.reg);
- top_ref:
- Result:=RefsEqual(oper1.ref^, oper2.ref^) and RefsEqual(oper1.ref^, oper3.ref^);
- else
- internalerror(2020052401);
- end
- end;
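- { Returns True if the two references are structurally identical; references
- carrying volatility information are never considered equal }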
- function RefsEqual(const r1, r2: treference): boolean;
- begin
- RefsEqual :=
- (r1.offset = r2.offset) and
- (r1.segment = r2.segment) and (r1.base = r2.base) and
- (r1.index = r2.index) and (r1.scalefactor = r2.scalefactor) and
- (r1.symbol=r2.symbol) and (r1.refaddr = r2.refaddr) and
- (r1.relsymbol = r2.relsymbol) and
- (r1.volatility=[]) and
- (r2.volatility=[]);
- end;
- function MatchReference(const ref : treference;base,index : TRegister) : Boolean;
- begin
- Result:=(ref.offset=0) and
- (ref.scalefactor in [0,1]) and
- (ref.segment=NR_NO) and
- (ref.symbol=nil) and
- (ref.relsymbol=nil) and
- ((base=NR_INVALID) or
- (ref.base=base)) and
- ((index=NR_INVALID) or
- (ref.index=index)) and
- (ref.volatility=[]);
- end;
- function MatchReferenceWithOffset(const ref : treference;base,index : TRegister) : Boolean;
- begin
- Result:=(ref.scalefactor in [0,1]) and
- (ref.segment=NR_NO) and
- (ref.symbol=nil) and
- (ref.relsymbol=nil) and
- ((base=NR_INVALID) or
- (ref.base=base)) and
- ((index=NR_INVALID) or
- (ref.index=index)) and
- (ref.volatility=[]);
- end;
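- { Returns True if p is an instruction that reads any of the CPU flags, or a
- label (treated conservatively, since the flags may be read after a jump to it) }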
- function InstrReadsFlags(p: tai): boolean;
- begin
- InstrReadsFlags := true;
- case p.typ of
- ait_instruction:
- if InsProp[taicpu(p).opcode].Ch*
- [Ch_RCarryFlag,Ch_RParityFlag,Ch_RAuxiliaryFlag,Ch_RZeroFlag,Ch_RSignFlag,Ch_ROverflowFlag,
- Ch_RWCarryFlag,Ch_RWParityFlag,Ch_RWAuxiliaryFlag,Ch_RWZeroFlag,Ch_RWSignFlag,Ch_RWOverflowFlag,
- Ch_RFlags,Ch_RWFlags,Ch_RFLAGScc,Ch_All]<>[] then
- exit;
- ait_label:
- exit;
- else
- ;
- end;
- InstrReadsFlags := false;
- end;
- function TX86AsmOptimizer.GetNextInstructionUsingReg(Current: tai; out Next: tai; reg: TRegister): Boolean;
- begin
- Next:=Current;
- repeat
- Result:=GetNextInstruction(Next,Next);
- until not (Result) or
- not(cs_opt_level3 in current_settings.optimizerswitches) or
- (Next.typ<>ait_instruction) or
- RegInInstruction(reg,Next) or
- is_calljmp(taicpu(Next).opcode);
- end;
- function TX86AsmOptimizer.GetNextInstructionUsingRegCond(Current: tai; out Next: tai; reg: TRegister; var CrossJump: Boolean): Boolean;
- begin
- { Note, CrossJump keeps its input value if a conditional jump is not found - it doesn't get set to False }
- Next := Current;
- repeat
- Result := GetNextInstruction(Next,Next);
- if Result and (Next.typ=ait_instruction) and is_calljmp(taicpu(Next).opcode) then
- if is_calljmpuncondret(taicpu(Next).opcode) then
- begin
- Result := False;
- Exit;
- end
- else
- CrossJump := True;
- until not Result or
- not (cs_opt_level3 in current_settings.optimizerswitches) or
- (Next.typ <> ait_instruction) or
- RegInInstruction(reg,Next);
- end;
- function TX86AsmOptimizer.GetNextInstructionUsingRegTrackingUse(Current: tai; out Next: tai; reg: TRegister): Boolean;
- begin
- if not(cs_opt_level3 in current_settings.optimizerswitches) then
- begin
- Result:=GetNextInstruction(Current,Next);
- exit;
- end;
- Next:=tai(Current.Next);
- Result:=false;
- while assigned(Next) do
- begin
- if ((Next.typ=ait_instruction) and is_calljmp(taicpu(Next).opcode) and not(taicpu(Next).opcode=A_CALL)) or
- ((Next.typ=ait_regalloc) and (getsupreg(tai_regalloc(Next).reg)=getsupreg(reg))) or
- ((Next.typ=ait_label) and not(labelCanBeSkipped(Tai_Label(Next)))) then
- exit
- else if (Next.typ=ait_instruction) and RegInInstruction(reg,Next) and not(taicpu(Next).opcode=A_CALL) then
- begin
- Result:=true;
- exit;
- end;
- Next:=tai(Next.Next);
- end;
- end;
- function TX86AsmOptimizer.InstructionLoadsFromReg(const reg: TRegister;const hp: tai): boolean;
- begin
- Result:=RegReadByInstruction(reg,hp);
- end;
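- { Returns True if the instruction hp reads the value of reg, including implicit
- reads such as EAX for single-operand MUL/IMUL/DIV, EDX for non-byte DIV/IDIV,
- and the flag bits tested by conditional instructions }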
- function TX86AsmOptimizer.RegReadByInstruction(reg: TRegister; hp: tai): boolean;
- var
- p: taicpu;
- opcount: longint;
- begin
- RegReadByInstruction := false;
- if hp.typ <> ait_instruction then
- exit;
- p := taicpu(hp);
- case p.opcode of
- A_CALL:
- regreadbyinstruction := true;
- A_IMUL:
- case p.ops of
- 1:
- regReadByInstruction := RegInOp(reg,p.oper[0]^) or
- (
- ((getregtype(reg)=R_INTREGISTER) and (getsupreg(reg)=RS_EAX)) and
- ((getsubreg(reg)<>R_SUBH) or (p.opsize<>S_B))
- );
- 2,3:
- regReadByInstruction :=
- reginop(reg,p.oper[0]^) or
- reginop(reg,p.oper[1]^);
- else
- InternalError(2019112801);
- end;
- A_MUL:
- begin
- regReadByInstruction := RegInOp(reg,p.oper[0]^) or
- (
- ((getregtype(reg)=R_INTREGISTER) and (getsupreg(reg)=RS_EAX)) and
- ((getsubreg(reg)<>R_SUBH) or (p.opsize<>S_B))
- );
- end;
- A_IDIV,A_DIV:
- begin
- regReadByInstruction := RegInOp(reg,p.oper[0]^) or
- (
- (getregtype(reg)=R_INTREGISTER) and
- (
- (getsupreg(reg)=RS_EAX) or ((getsupreg(reg)=RS_EDX) and (p.opsize<>S_B))
- )
- );
- end;
- else
- begin
- if (p.opcode=A_LEA) and is_segment_reg(reg) then
- begin
- RegReadByInstruction := false;
- exit;
- end;
- for opcount := 0 to p.ops-1 do
- if (p.oper[opCount]^.typ = top_ref) and
- RegInRef(reg,p.oper[opcount]^.ref^) then
- begin
- RegReadByInstruction := true;
- exit
- end;
- { special handling for SSE MOVSD }
- if (p.opcode=A_MOVSD) and (p.ops>0) then
- begin
- if p.ops<>2 then
- internalerror(2017042702);
- regReadByInstruction := reginop(reg,p.oper[0]^) or
- (
- (p.oper[1]^.typ=top_reg) and (p.oper[0]^.typ=top_reg) and reginop(reg, p.oper[1]^)
- );
- exit;
- end;
- with insprop[p.opcode] do
- begin
- case getregtype(reg) of
- R_INTREGISTER:
- begin
- case getsupreg(reg) of
- RS_EAX:
- if [Ch_REAX,Ch_RWEAX,Ch_MEAX]*Ch<>[] then
- begin
- RegReadByInstruction := true;
- exit
- end;
- RS_ECX:
- if [Ch_RECX,Ch_RWECX,Ch_MECX]*Ch<>[] then
- begin
- RegReadByInstruction := true;
- exit
- end;
- RS_EDX:
- if [Ch_REDX,Ch_RWEDX,Ch_MEDX]*Ch<>[] then
- begin
- RegReadByInstruction := true;
- exit
- end;
- RS_EBX:
- if [Ch_REBX,Ch_RWEBX,Ch_MEBX]*Ch<>[] then
- begin
- RegReadByInstruction := true;
- exit
- end;
- RS_ESP:
- if [Ch_RESP,Ch_RWESP,Ch_MESP]*Ch<>[] then
- begin
- RegReadByInstruction := true;
- exit
- end;
- RS_EBP:
- if [Ch_REBP,Ch_RWEBP,Ch_MEBP]*Ch<>[] then
- begin
- RegReadByInstruction := true;
- exit
- end;
- RS_ESI:
- if [Ch_RESI,Ch_RWESI,Ch_MESI]*Ch<>[] then
- begin
- RegReadByInstruction := true;
- exit
- end;
- RS_EDI:
- if [Ch_REDI,Ch_RWEDI,Ch_MEDI]*Ch<>[] then
- begin
- RegReadByInstruction := true;
- exit
- end;
- end;
- end;
- R_MMREGISTER:
- begin
- case getsupreg(reg) of
- RS_XMM0:
- if [Ch_RXMM0,Ch_RWXMM0,Ch_MXMM0]*Ch<>[] then
- begin
- RegReadByInstruction := true;
- exit
- end;
- end;
- end;
- else
- ;
- end;
- if SuperRegistersEqual(reg,NR_DEFAULTFLAGS) then
- begin
- if (Ch_RFLAGScc in Ch) and not(getsubreg(reg) in [R_SUBW,R_SUBD,R_SUBQ]) then
- begin
- case p.condition of
- C_A,C_NBE, { CF=0 and ZF=0 }
- C_BE,C_NA: { CF=1 or ZF=1 }
- RegReadByInstruction:=getsubreg(reg) in [R_SUBFLAGCARRY,R_SUBFLAGZERO];
- C_AE,C_NB,C_NC, { CF=0 }
- C_B,C_NAE,C_C: { CF=1 }
- RegReadByInstruction:=getsubreg(reg) in [R_SUBFLAGCARRY];
- C_NE,C_NZ, { ZF=0 }
- C_E,C_Z: { ZF=1 }
- RegReadByInstruction:=getsubreg(reg) in [R_SUBFLAGZERO];
- C_G,C_NLE, { ZF=0 and SF=OF }
- C_LE,C_NG: { ZF=1 or SF<>OF }
- RegReadByInstruction:=getsubreg(reg) in [R_SUBFLAGZERO,R_SUBFLAGSIGN,R_SUBFLAGOVERFLOW];
- C_GE,C_NL, { SF=OF }
- C_L,C_NGE: { SF<>OF }
- RegReadByInstruction:=getsubreg(reg) in [R_SUBFLAGSIGN,R_SUBFLAGOVERFLOW];
- C_NO, { OF=0 }
- C_O: { OF=1 }
- RegReadByInstruction:=getsubreg(reg) in [R_SUBFLAGOVERFLOW];
- C_NP,C_PO, { PF=0 }
- C_P,C_PE: { PF=1 }
- RegReadByInstruction:=getsubreg(reg) in [R_SUBFLAGPARITY];
- C_NS, { SF=0 }
- C_S: { SF=1 }
- RegReadByInstruction:=getsubreg(reg) in [R_SUBFLAGSIGN];
- else
- internalerror(2017042701);
- end;
- if RegReadByInstruction then
- exit;
- end;
- case getsubreg(reg) of
- R_SUBW,R_SUBD,R_SUBQ:
- RegReadByInstruction :=
- [Ch_RCarryFlag,Ch_RParityFlag,Ch_RAuxiliaryFlag,Ch_RZeroFlag,Ch_RSignFlag,Ch_ROverflowFlag,
- Ch_RWCarryFlag,Ch_RWParityFlag,Ch_RWAuxiliaryFlag,Ch_RWZeroFlag,Ch_RWSignFlag,Ch_RWOverflowFlag,
- Ch_RDirFlag,Ch_RFlags,Ch_RWFlags,Ch_RFLAGScc]*Ch<>[];
- R_SUBFLAGCARRY:
- RegReadByInstruction:=[Ch_RCarryFlag,Ch_RWCarryFlag,Ch_RFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGPARITY:
- RegReadByInstruction:=[Ch_RParityFlag,Ch_RWParityFlag,Ch_RFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGAUXILIARY:
- RegReadByInstruction:=[Ch_RAuxiliaryFlag,Ch_RWAuxiliaryFlag,Ch_RFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGZERO:
- RegReadByInstruction:=[Ch_RZeroFlag,Ch_RWZeroFlag,Ch_RFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGSIGN:
- RegReadByInstruction:=[Ch_RSignFlag,Ch_RWSignFlag,Ch_RFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGOVERFLOW:
- RegReadByInstruction:=[Ch_ROverflowFlag,Ch_RWOverflowFlag,Ch_RFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGINTERRUPT:
- RegReadByInstruction:=[Ch_RFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGDIRECTION:
- RegReadByInstruction:=[Ch_RDirFlag,Ch_RFlags,Ch_RWFlags]*Ch<>[];
- else
- internalerror(2017042601);
- end;
- exit;
- end;
- if (Ch_NoReadIfEqualRegs in Ch) and (p.ops=2) and
- (p.oper[0]^.typ=top_reg) and (p.oper[1]^.typ=top_reg) and
- (p.oper[0]^.reg=p.oper[1]^.reg) then
- exit;
- if ([CH_RWOP1,CH_ROP1,CH_MOP1]*Ch<>[]) and reginop(reg,p.oper[0]^) then
- begin
- RegReadByInstruction := true;
- exit
- end;
- if ([Ch_RWOP2,Ch_ROP2,Ch_MOP2]*Ch<>[]) and reginop(reg,p.oper[1]^) then
- begin
- RegReadByInstruction := true;
- exit
- end;
- if ([Ch_RWOP3,Ch_ROP3,Ch_MOP3]*Ch<>[]) and reginop(reg,p.oper[2]^) then
- begin
- RegReadByInstruction := true;
- exit
- end;
- if ([Ch_RWOP4,Ch_ROP4,Ch_MOP4]*Ch<>[]) and reginop(reg,p.oper[3]^) then
- begin
- RegReadByInstruction := true;
- exit
- end;
- end;
- end;
- end;
- end;
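- { Returns True if Reg is read and/or written by the instruction p1, based on the
- instruction change information (with special handling for the SSE form of MOVSD,
- whose change information is not reliable) }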
- function TX86AsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
- begin
- result:=false;
- if p1.typ<>ait_instruction then
- exit;
- if (Ch_All in insprop[taicpu(p1).opcode].Ch) then
- exit(true);
- if (getregtype(reg)=R_INTREGISTER) and
- { the change information for the xmm variant of movsd is not correct }
- ((taicpu(p1).opcode<>A_MOVSD) or (taicpu(p1).ops=0)) then
- begin
- case getsupreg(reg) of
- { RS_EAX = RS_RAX on x86-64 }
- RS_EAX:
- result:=([Ch_REAX,Ch_RRAX,Ch_WEAX,Ch_WRAX,Ch_RWEAX,Ch_RWRAX,Ch_MEAX,Ch_MRAX]*insprop[taicpu(p1).opcode].Ch)<>[];
- RS_ECX:
- result:=([Ch_RECX,Ch_RRCX,Ch_WECX,Ch_WRCX,Ch_RWECX,Ch_RWRCX,Ch_MECX,Ch_MRCX]*insprop[taicpu(p1).opcode].Ch)<>[];
- RS_EDX:
- result:=([Ch_REDX,Ch_RRDX,Ch_WEDX,Ch_WRDX,Ch_RWEDX,Ch_RWRDX,Ch_MEDX,Ch_MRDX]*insprop[taicpu(p1).opcode].Ch)<>[];
- RS_EBX:
- result:=([Ch_REBX,Ch_RRBX,Ch_WEBX,Ch_WRBX,Ch_RWEBX,Ch_RWRBX,Ch_MEBX,Ch_MRBX]*insprop[taicpu(p1).opcode].Ch)<>[];
- RS_ESP:
- result:=([Ch_RESP,Ch_RRSP,Ch_WESP,Ch_WRSP,Ch_RWESP,Ch_RWRSP,Ch_MESP,Ch_MRSP]*insprop[taicpu(p1).opcode].Ch)<>[];
- RS_EBP:
- result:=([Ch_REBP,Ch_RRBP,Ch_WEBP,Ch_WRBP,Ch_RWEBP,Ch_RWRBP,Ch_MEBP,Ch_MRBP]*insprop[taicpu(p1).opcode].Ch)<>[];
- RS_ESI:
- result:=([Ch_RESI,Ch_RRSI,Ch_WESI,Ch_WRSI,Ch_RWESI,Ch_RWRSI,Ch_MESI,Ch_MRSI,Ch_RMemEDI]*insprop[taicpu(p1).opcode].Ch)<>[];
- RS_EDI:
- result:=([Ch_REDI,Ch_RRDI,Ch_WEDI,Ch_WRDI,Ch_RWEDI,Ch_RWRDI,Ch_MEDI,Ch_MRDI,Ch_WMemEDI]*insprop[taicpu(p1).opcode].Ch)<>[];
- else
- ;
- end;
- if result then
- exit;
- end
- else if getregtype(reg)=R_MMREGISTER then
- begin
- case getsupreg(reg) of
- RS_XMM0:
- result:=([Ch_RXMM0,Ch_WXMM0,Ch_RWXMM0,Ch_MXMM0]*insprop[taicpu(p1).opcode].Ch)<>[];
- else
- ;
- end;
- if result then
- exit;
- end
- else if SuperRegistersEqual(reg,NR_DEFAULTFLAGS) then
- begin
- if ([Ch_RFlags,Ch_WFlags,Ch_RWFlags,Ch_RFLAGScc]*insprop[taicpu(p1).opcode].Ch)<>[] then
- exit(true);
- case getsubreg(reg) of
- R_SUBFLAGCARRY:
- Result:=([Ch_RCarryFlag,Ch_RWCarryFlag,Ch_W0CarryFlag,Ch_W1CarryFlag,Ch_WCarryFlag,Ch_WUCarryFlag]*insprop[taicpu(p1).opcode].Ch)<>[];
- R_SUBFLAGPARITY:
- Result:=([Ch_RParityFlag,Ch_RWParityFlag,Ch_W0ParityFlag,Ch_W1ParityFlag,Ch_WParityFlag,Ch_WUParityFlag]*insprop[taicpu(p1).opcode].Ch)<>[];
- R_SUBFLAGAUXILIARY:
- Result:=([Ch_RAuxiliaryFlag,Ch_RWAuxiliaryFlag,Ch_W0AuxiliaryFlag,Ch_W1AuxiliaryFlag,Ch_WAuxiliaryFlag,Ch_WUAuxiliaryFlag]*insprop[taicpu(p1).opcode].Ch)<>[];
- R_SUBFLAGZERO:
- Result:=([Ch_RZeroFlag,Ch_RWZeroFlag,Ch_W0ZeroFlag,Ch_W1ZeroFlag,Ch_WZeroFlag,Ch_WUZeroFlag]*insprop[taicpu(p1).opcode].Ch)<>[];
- R_SUBFLAGSIGN:
- Result:=([Ch_RSignFlag,Ch_RWSignFlag,Ch_W0SignFlag,Ch_W1SignFlag,Ch_WSignFlag,Ch_WUSignFlag]*insprop[taicpu(p1).opcode].Ch)<>[];
- R_SUBFLAGOVERFLOW:
- Result:=([Ch_ROverflowFlag,Ch_RWOverflowFlag,Ch_W0OverflowFlag,Ch_W1OverflowFlag,Ch_WOverflowFlag,Ch_WUOverflowFlag]*insprop[taicpu(p1).opcode].Ch)<>[];
- R_SUBFLAGINTERRUPT:
- Result:=([Ch_W0IntFlag,Ch_W1IntFlag,Ch_WFlags]*insprop[taicpu(p1).opcode].Ch)<>[];
- R_SUBFLAGDIRECTION:
- Result:=([Ch_RDirFlag,Ch_W0DirFlag,Ch_W1DirFlag,Ch_WFlags]*insprop[taicpu(p1).opcode].Ch)<>[];
- R_SUBW,R_SUBD,R_SUBQ:
- { Everything except the direction bits }
- Result:=
- ([Ch_RCarryFlag,Ch_RParityFlag,Ch_RAuxiliaryFlag,Ch_RZeroFlag,Ch_RSignFlag,Ch_ROverflowFlag,
- Ch_WCarryFlag,Ch_WParityFlag,Ch_WAuxiliaryFlag,Ch_WZeroFlag,Ch_WSignFlag,Ch_WOverflowFlag,
- Ch_W0CarryFlag,Ch_W0ParityFlag,Ch_W0AuxiliaryFlag,Ch_W0ZeroFlag,Ch_W0SignFlag,Ch_W0OverflowFlag,
- Ch_W1CarryFlag,Ch_W1ParityFlag,Ch_W1AuxiliaryFlag,Ch_W1ZeroFlag,Ch_W1SignFlag,Ch_W1OverflowFlag,
- Ch_WUCarryFlag,Ch_WUParityFlag,Ch_WUAuxiliaryFlag,Ch_WUZeroFlag,Ch_WUSignFlag,Ch_WUOverflowFlag,
- Ch_RWCarryFlag,Ch_RWParityFlag,Ch_RWAuxiliaryFlag,Ch_RWZeroFlag,Ch_RWSignFlag,Ch_RWOverflowFlag
- ]*insprop[taicpu(p1).opcode].Ch)<>[];
- else
- ;
- end;
- if result then
- exit;
- end
- else if (getregtype(reg)=R_FPUREGISTER) and (Ch_FPU in insprop[taicpu(p1).opcode].Ch) then
- exit(true);
- Result:=inherited RegInInstruction(Reg, p1);
- end;
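- { Returns True if the instruction p1 may modify (write to) Reg, based on the
- instruction change information and the instruction's explicit operands }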
- function TX86AsmOptimizer.RegModifiedByInstruction(Reg: TRegister; p1: tai): boolean;
- const
- WriteOps: array[0..3] of set of TInsChange =
- ([CH_RWOP1,CH_WOP1,CH_MOP1],
- [Ch_RWOP2,Ch_WOP2,Ch_MOP2],
- [Ch_RWOP3,Ch_WOP3,Ch_MOP3],
- [Ch_RWOP4,Ch_WOP4,Ch_MOP4]);
- var
- OperIdx: Integer;
- begin
- Result := False;
- if p1.typ <> ait_instruction then
- exit;
- with insprop[taicpu(p1).opcode] do
- if SuperRegistersEqual(reg,NR_DEFAULTFLAGS) then
- begin
- case getsubreg(reg) of
- R_SUBW,R_SUBD,R_SUBQ:
- Result :=
- [Ch_WCarryFlag,Ch_WParityFlag,Ch_WAuxiliaryFlag,Ch_WZeroFlag,Ch_WSignFlag,Ch_WOverflowFlag,
- Ch_RWCarryFlag,Ch_RWParityFlag,Ch_RWAuxiliaryFlag,Ch_RWZeroFlag,Ch_RWSignFlag,Ch_RWOverflowFlag,
- Ch_W0DirFlag,Ch_W1DirFlag,Ch_W0IntFlag,Ch_W1IntFlag,Ch_WFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGCARRY:
- Result:=[Ch_WCarryFlag,Ch_RWCarryFlag,Ch_WFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGPARITY:
- Result:=[Ch_WParityFlag,Ch_RWParityFlag,Ch_WFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGAUXILIARY:
- Result:=[Ch_WAuxiliaryFlag,Ch_RWAuxiliaryFlag,Ch_WFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGZERO:
- Result:=[Ch_WZeroFlag,Ch_RWZeroFlag,Ch_WFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGSIGN:
- Result:=[Ch_WSignFlag,Ch_RWSignFlag,Ch_WFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGOVERFLOW:
- Result:=[Ch_WOverflowFlag,Ch_RWOverflowFlag,Ch_WFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGINTERRUPT:
- Result:=[Ch_W0IntFlag,Ch_W1IntFlag,Ch_WFlags,Ch_RWFlags]*Ch<>[];
- R_SUBFLAGDIRECTION:
- Result:=[Ch_W0DirFlag,Ch_W1DirFlag,Ch_WFlags,Ch_RWFlags]*Ch<>[];
- else
- internalerror(2017042602);
- end;
- exit;
- end;
- case taicpu(p1).opcode of
- A_CALL:
- { We could potentially set Result to False if the register in
- question is non-volatile for the subroutine's calling convention,
- but this would require detecting the calling convention in use and
- also assuming that the routine doesn't contain malformed assembly
- language, for example... so it could only be done under -O4 as it
- would be considered a side-effect. [Kit] }
- Result := True;
- A_MOVSD:
- { special handling for SSE MOVSD }
- if (taicpu(p1).ops>0) then
- begin
- if taicpu(p1).ops<>2 then
- internalerror(2017042703);
- Result := (taicpu(p1).oper[1]^.typ=top_reg) and RegInOp(reg,taicpu(p1).oper[1]^);
- end;
- { VMOVSS and VMOVSD have two- and three-operand flavours; this cannot be modelled by x86ins.dat,
- so fix it here (FK)
- }
- A_VMOVSS,
- A_VMOVSD:
- begin
- Result := (taicpu(p1).ops=3) and (taicpu(p1).oper[2]^.typ=top_reg) and RegInOp(reg,taicpu(p1).oper[2]^);
- exit;
- end;
- A_IMUL:
- Result := (taicpu(p1).oper[taicpu(p1).ops-1]^.typ=top_reg) and RegInOp(reg,taicpu(p1).oper[taicpu(p1).ops-1]^);
- else
- ;
- end;
- if Result then
- exit;
- with insprop[taicpu(p1).opcode] do
- begin
- if getregtype(reg)=R_INTREGISTER then
- begin
- case getsupreg(reg) of
- RS_EAX:
- if [Ch_WEAX,Ch_RWEAX,Ch_MEAX]*Ch<>[] then
- begin
- Result := True;
- exit
- end;
- RS_ECX:
- if [Ch_WECX,Ch_RWECX,Ch_MECX]*Ch<>[] then
- begin
- Result := True;
- exit
- end;
- RS_EDX:
- if [Ch_WEDX,Ch_RWEDX,Ch_MEDX]*Ch<>[] then
- begin
- Result := True;
- exit
- end;
- RS_EBX:
- if [Ch_WEBX,Ch_RWEBX,Ch_MEBX]*Ch<>[] then
- begin
- Result := True;
- exit
- end;
- RS_ESP:
- if [Ch_WESP,Ch_RWESP,Ch_MESP]*Ch<>[] then
- begin
- Result := True;
- exit
- end;
- RS_EBP:
- if [Ch_WEBP,Ch_RWEBP,Ch_MEBP]*Ch<>[] then
- begin
- Result := True;
- exit
- end;
- RS_ESI:
- if [Ch_WESI,Ch_RWESI,Ch_MESI]*Ch<>[] then
- begin
- Result := True;
- exit
- end;
- RS_EDI:
- if [Ch_WEDI,Ch_RWEDI,Ch_MEDI]*Ch<>[] then
- begin
- Result := True;
- exit
- end;
- end;
- end;
- for OperIdx := 0 to taicpu(p1).ops - 1 do
- if (WriteOps[OperIdx]*Ch<>[]) and
- { The register doesn't get modified inside a reference }
- (taicpu(p1).oper[OperIdx]^.typ = top_reg) and
- SuperRegistersEqual(reg,taicpu(p1).oper[OperIdx]^.reg) then
- begin
- Result := true;
- exit
- end;
- end;
- end;
- {$ifdef DEBUG_AOPTCPU}
- procedure TX86AsmOptimizer.DebugMsg(const s: string;p : tai);
- begin
- asml.insertbefore(tai_comment.Create(strpnew(s)), p);
- end;
- function debug_tostr(i: tcgint): string; inline;
- begin
- Result := tostr(i);
- end;
- function debug_regname(r: TRegister): string; inline;
- begin
- Result := '%' + std_regname(r);
- end;
- { Debug output function - creates a string representation of an operand }
- function debug_operstr(oper: TOper): string;
- begin
- case oper.typ of
- top_const:
- Result := '$' + debug_tostr(oper.val);
- top_reg:
- Result := debug_regname(oper.reg);
- top_ref:
- begin
- if oper.ref^.offset <> 0 then
- Result := debug_tostr(oper.ref^.offset) + '('
- else
- Result := '(';
- if (oper.ref^.base <> NR_INVALID) and (oper.ref^.base <> NR_NO) then
- begin
- Result := Result + debug_regname(oper.ref^.base);
- if (oper.ref^.index <> NR_INVALID) and (oper.ref^.index <> NR_NO) then
- Result := Result + ',' + debug_regname(oper.ref^.index);
- end
- else
- if (oper.ref^.index <> NR_INVALID) and (oper.ref^.index <> NR_NO) then
- Result := Result + debug_regname(oper.ref^.index);
- if (oper.ref^.scalefactor > 1) then
- Result := Result + ',' + debug_tostr(oper.ref^.scalefactor) + ')'
- else
- Result := Result + ')';
- end;
- else
- Result := '[UNKNOWN]';
- end;
- end;
- function debug_op2str(opcode: tasmop): string; inline;
- begin
- Result := std_op2str[opcode];
- end;
- function debug_opsize2str(opsize: topsize): string; inline;
- begin
- Result := gas_opsize2str[opsize];
- end;
- {$else DEBUG_AOPTCPU}
- procedure TX86AsmOptimizer.DebugMsg(const s: string;p : tai);inline;
- begin
- end;
- function debug_tostr(i: tcgint): string; inline;
- begin
- Result := '';
- end;
- function debug_regname(r: TRegister): string; inline;
- begin
- Result := '';
- end;
- function debug_operstr(oper: TOper): string; inline;
- begin
- Result := '';
- end;
- function debug_op2str(opcode: tasmop): string; inline;
- begin
- Result := '';
- end;
- function debug_opsize2str(opsize: topsize): string; inline;
- begin
- Result := '';
- end;
- {$endif DEBUG_AOPTCPU}
- class function TX86AsmOptimizer.IsMOVZXAcceptable: Boolean; inline;
- begin
- {$ifdef x86_64}
- { Always fine on x86-64 }
- Result := True;
- {$else x86_64}
- Result :=
- {$ifdef i8086}
- (current_settings.cputype >= cpu_386) and
- {$endif i8086}
- (
- { Always accept if optimising for size }
- (cs_opt_size in current_settings.optimizerswitches) or
- { From the Pentium II onwards, MOVZX only takes 1 cycle. [Kit] }
- (current_settings.optimizecputype >= cpu_Pentium2)
- );
- {$endif x86_64}
- end;
- { Attempts to allocate a volatile integer register for use between p and hp,
- using AUsedRegs for the current register usage information. Returns NR_NO
- if no free register could be found }
- function TX86AsmOptimizer.GetIntRegisterBetween(RegSize: TSubRegister; var AUsedRegs: TAllUsedRegs; p, hp: tai): TRegister;
- var
- RegSet: TCPURegisterSet;
- CurrentSuperReg: Integer;
- CurrentReg: TRegister;
- Currentp: tai;
- Breakout: Boolean;
- begin
- { TODO: Currently, only the volatile registers are checked - can this be extended to use any register the procedure has preserved? }
- Result := NR_NO;
- RegSet :=
- paramanager.get_volatile_registers_int(current_procinfo.procdef.proccalloption) +
- current_procinfo.saved_regs_int;
- for CurrentSuperReg in RegSet do
- begin
- CurrentReg := newreg(R_INTREGISTER, TSuperRegister(CurrentSuperReg), RegSize);
- if not AUsedRegs[R_INTREGISTER].IsUsed(CurrentReg)
- {$if defined(i386) or defined(i8086)}
- { If the target size is 8-bit, make sure we can actually encode it }
- and (
- (RegSize >= R_SUBW) or { Not R_SUBL or R_SUBH }
- (GetSupReg(CurrentReg) in [RS_EAX,RS_EBX,RS_ECX,RS_EDX])
- )
- {$endif i386 or i8086}
- then
- begin
- Currentp := p;
- Breakout := False;
- while not Breakout and GetNextInstruction(Currentp, Currentp) and (Currentp <> hp) do
- begin
- case Currentp.typ of
- ait_instruction:
- begin
- if RegInInstruction(CurrentReg, Currentp) then
- begin
- Breakout := True;
- Break;
- end;
- { Cannot allocate across an unconditional jump }
- if is_calljmpuncondret(taicpu(Currentp).opcode) then
- Exit;
- end;
- ait_marker:
- { Don't try anything more if a marker is hit }
- Exit;
- ait_regalloc:
- if (tai_regalloc(Currentp).ratype <> ra_dealloc) and SuperRegistersEqual(CurrentReg, tai_regalloc(Currentp).reg) then
- begin
- Breakout := True;
- Break;
- end;
- else
- ;
- end;
- end;
- if Breakout then
- { Try the next register }
- Continue;
- { We have a free register available }
- Result := CurrentReg;
- AllocRegBetween(CurrentReg, p, hp, AUsedRegs);
- Exit;
- end;
- end;
- end;
- { Attempts to allocate a volatile MM register for use between p and hp,
- using AUsedRegs for the current register usage information. Returns NR_NO
- if no free register could be found }
- function TX86AsmOptimizer.GetMMRegisterBetween(RegSize: TSubRegister; var AUsedRegs: TAllUsedRegs; p, hp: tai): TRegister;
- var
- RegSet: TCPURegisterSet;
- CurrentSuperReg: Integer;
- CurrentReg: TRegister;
- Currentp: tai;
- Breakout: Boolean;
- begin
- { TODO: Currently, only the volatile registers are checked - can this be extended to use any register the procedure has preserved? }
- Result := NR_NO;
- RegSet :=
- paramanager.get_volatile_registers_mm(current_procinfo.procdef.proccalloption) +
- current_procinfo.saved_regs_mm;
- for CurrentSuperReg in RegSet do
- begin
- CurrentReg := newreg(R_MMREGISTER, TSuperRegister(CurrentSuperReg), RegSize);
- if not AUsedRegs[R_MMREGISTER].IsUsed(CurrentReg) then
- begin
- Currentp := p;
- Breakout := False;
- while not Breakout and GetNextInstruction(Currentp, Currentp) and (Currentp <> hp) do
- begin
- case Currentp.typ of
- ait_instruction:
- begin
- if RegInInstruction(CurrentReg, Currentp) then
- begin
- Breakout := True;
- Break;
- end;
- { Cannot allocate across an unconditional jump }
- if is_calljmpuncondret(taicpu(Currentp).opcode) then
- Exit;
- end;
- ait_marker:
- { Don't try anything more if a marker is hit }
- Exit;
- ait_regalloc:
- if (tai_regalloc(Currentp).ratype <> ra_dealloc) and SuperRegistersEqual(CurrentReg, tai_regalloc(Currentp).reg) then
- begin
- Breakout := True;
- Break;
- end;
- else
- ;
- end;
- end;
- if Breakout then
- { Try the next register }
- Continue;
- { We have a free register available }
- Result := CurrentReg;
- AllocRegBetween(CurrentReg, p, hp, AUsedRegs);
- Exit;
- end;
- end;
- end;
- function TX86AsmOptimizer.Reg1WriteOverwritesReg2Entirely(reg1, reg2: tregister): boolean;
- begin
- if not SuperRegistersEqual(reg1,reg2) then
- exit(false);
- if getregtype(reg1)<>R_INTREGISTER then
- exit(true); {because SuperRegistersEqual is true}
- case getsubreg(reg1) of
- { A write to R_SUBL doesn't change R_SUBH and if reg2 is R_SUBW or
- higher, it preserves the high bits, so the new value depends on
- reg2's previous value. In other words, it is equivalent to doing:
- reg2 := (reg2 and $ffffff00) or byte(reg1); }
- R_SUBL:
- exit(getsubreg(reg2)=R_SUBL);
- { A write to R_SUBH doesn't change R_SUBL and if reg2 is R_SUBW or
- higher, it actually does a:
- reg2 := (reg2 and $ffff00ff) or (reg1 and $ff00); }
- R_SUBH:
- exit(getsubreg(reg2)=R_SUBH);
- { If reg2 is R_SUBD or larger, a write to R_SUBW preserves the high 16
- bits of reg2:
- reg2 := (reg2 and $ffff0000) or word(reg1); }
- R_SUBW:
- exit(getsubreg(reg2) in [R_SUBL,R_SUBH,R_SUBW]);
- { a write to R_SUBD always overwrites every other subregister,
- because it clears the high 32 bits of R_SUBQ on x86_64 }
- R_SUBD,
- R_SUBQ:
- exit(true);
- else
- internalerror(2017042801);
- end;
- end;
- function TX86AsmOptimizer.Reg1ReadDependsOnReg2(reg1, reg2: tregister): boolean;
- begin
- if not SuperRegistersEqual(reg1,reg2) then
- exit(false);
- if getregtype(reg1)<>R_INTREGISTER then
- exit(true); {because SuperRegistersEqual is true}
- case getsubreg(reg1) of
- R_SUBL:
- exit(getsubreg(reg2)<>R_SUBH);
- R_SUBH:
- exit(getsubreg(reg2)<>R_SUBL);
- R_SUBW,
- R_SUBD,
- R_SUBQ:
- exit(true);
- else
- internalerror(2017042802);
- end;
- end;
- function TX86AsmOptimizer.PrePeepholeOptSxx(var p : tai) : boolean;
- var
- hp1 : tai;
- l : TCGInt;
- begin
- result:=false;
- { changes the code sequence
- shr/sar const1, x
- shl const2, x
- to
- either "shr/sar + and", "and + shl" or just "and", depending on const1 and const2 }
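- { for example, in the const1 = const2 case with a 32-bit operand:
- shrl $3,%eax
- shll $3,%eax
- becomes
- andl $0xFFFFFFF8,%eax }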
- if GetNextInstruction(p, hp1) and
- MatchInstruction(hp1,A_SHL,[]) and
- (taicpu(p).oper[0]^.typ = top_const) and
- (taicpu(hp1).oper[0]^.typ = top_const) and
- (taicpu(hp1).opsize = taicpu(p).opsize) and
- (taicpu(hp1).oper[1]^.typ = taicpu(p).oper[1]^.typ) and
- OpsEqual(taicpu(hp1).oper[1]^, taicpu(p).oper[1]^) then
- begin
- if (taicpu(p).oper[0]^.val > taicpu(hp1).oper[0]^.val) and
- not(cs_opt_size in current_settings.optimizerswitches) then
- begin
- { shr/sar const1, %reg
- shl const2, %reg
- with const1 > const2 }
- taicpu(p).loadConst(0,taicpu(p).oper[0]^.val-taicpu(hp1).oper[0]^.val);
- taicpu(hp1).opcode := A_AND;
- l := (1 shl (taicpu(hp1).oper[0]^.val)) - 1;
- case taicpu(p).opsize Of
- S_B: taicpu(hp1).loadConst(0,l Xor $ff);
- S_W: taicpu(hp1).loadConst(0,l Xor $ffff);
- S_L: taicpu(hp1).loadConst(0,l Xor tcgint($ffffffff));
- S_Q: taicpu(hp1).loadConst(0,l Xor tcgint($ffffffffffffffff));
- else
- Internalerror(2017050703)
- end;
- end
- else if (taicpu(p).oper[0]^.val<taicpu(hp1).oper[0]^.val) and
- not(cs_opt_size in current_settings.optimizerswitches) then
- begin
- { shr/sar const1, %reg
- shl const2, %reg
- with const1 < const2 }
- taicpu(hp1).loadConst(0,taicpu(hp1).oper[0]^.val-taicpu(p).oper[0]^.val);
- taicpu(p).opcode := A_AND;
- l := (1 shl (taicpu(p).oper[0]^.val))-1;
- case taicpu(p).opsize Of
- S_B: taicpu(p).loadConst(0,l Xor $ff);
- S_W: taicpu(p).loadConst(0,l Xor $ffff);
- S_L: taicpu(p).loadConst(0,l Xor tcgint($ffffffff));
- S_Q: taicpu(p).loadConst(0,l Xor tcgint($ffffffffffffffff));
- else
- Internalerror(2017050702)
- end;
- end
- else if (taicpu(p).oper[0]^.val = taicpu(hp1).oper[0]^.val) then
- begin
- { shr/sar const1, %reg
- shl const2, %reg
- with const1 = const2 }
- taicpu(p).opcode := A_AND;
- l := (1 shl (taicpu(p).oper[0]^.val))-1;
- case taicpu(p).opsize Of
- S_B: taicpu(p).loadConst(0,l Xor $ff);
- S_W: taicpu(p).loadConst(0,l Xor $ffff);
- S_L: taicpu(p).loadConst(0,l Xor tcgint($ffffffff));
- S_Q: taicpu(p).loadConst(0,l Xor tcgint($ffffffffffffffff));
- else
- Internalerror(2017050701)
- end;
- RemoveInstruction(hp1);
- end;
- end;
- end;
- function TX86AsmOptimizer.PrePeepholeOptIMUL(var p : tai) : boolean;
- var
- opsize : topsize;
- hp1 : tai;
- tmpref : treference;
- ShiftValue : Cardinal;
- BaseValue : TCGInt;
- begin
- result:=false;
- opsize:=taicpu(p).opsize;
- { changes certain "imul const, %reg"'s to lea sequences }
- if (MatchOpType(taicpu(p),top_const,top_reg) or
- MatchOpType(taicpu(p),top_const,top_reg,top_reg)) and
- (opsize in [S_L{$ifdef x86_64},S_Q{$endif x86_64}]) then
- if (taicpu(p).oper[0]^.val = 1) then
- if (taicpu(p).ops = 2) then
- { remove "imul $1, reg" }
- begin
- DebugMsg(SPeepholeOptimization + 'Imul2Nop done',p);
- Result := RemoveCurrentP(p);
- end
- else
- { change "imul $1, reg1, reg2" to "mov reg1, reg2" }
- begin
- hp1 := taicpu.Op_Reg_Reg(A_MOV, opsize, taicpu(p).oper[1]^.reg,taicpu(p).oper[2]^.reg);
- InsertLLItem(p.previous, p.next, hp1);
- DebugMsg(SPeepholeOptimization + 'Imul2Mov done',p);
- p.free;
- p := hp1;
- end
- else if ((taicpu(p).ops <= 2) or
- (taicpu(p).oper[2]^.typ = Top_Reg)) and
- not(cs_opt_size in current_settings.optimizerswitches) and
- (not(GetNextInstruction(p, hp1)) or
- not((tai(hp1).typ = ait_instruction) and
- ((taicpu(hp1).opcode=A_Jcc) and
- (taicpu(hp1).condition in [C_O,C_NO])))) then
- begin
- {
- imul X, reg1, reg2 to
- lea (reg1,reg1,Y), reg2
- shl ZZ,reg2
- imul XX, reg1 to
- lea (reg1,reg1,YY), reg1
- shl ZZ,reg1
- This optimization makes sense for pretty much every x86, except the VIA Nano3000: there IMUL has a latency of 2,
- the same as the lea/shl pair, but the Nano3000 does not exist as a separate optimization target in FPC anyway.
- This optimization can be applied as long as only two bits are set in the constant and those two bits are separated by
- at most two zeros.
- }
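- { for example, with a 32-bit operand:
- imull $12,%ebx,%ecx (12 = binary 1100, so BaseValue = 3, ShiftValue = 2)
- becomes
- leal (%ebx,%ebx,2),%ecx
- shll $2,%ecx }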
- reference_reset(tmpref,1,[]);
- if (PopCnt(QWord(taicpu(p).oper[0]^.val))=2) and (BsrQWord(taicpu(p).oper[0]^.val)-BsfQWord(taicpu(p).oper[0]^.val)<=3) then
- begin
- ShiftValue:=BsfQWord(taicpu(p).oper[0]^.val);
- BaseValue:=taicpu(p).oper[0]^.val shr ShiftValue;
- TmpRef.base := taicpu(p).oper[1]^.reg;
- TmpRef.index := taicpu(p).oper[1]^.reg;
- if not(BaseValue in [3,5,9]) then
- Internalerror(2018110101);
- TmpRef.ScaleFactor := BaseValue-1;
- if (taicpu(p).ops = 2) then
- hp1 := taicpu.op_ref_reg(A_LEA, opsize, TmpRef, taicpu(p).oper[1]^.reg)
- else
- hp1 := taicpu.op_ref_reg(A_LEA, opsize, TmpRef, taicpu(p).oper[2]^.reg);
- AsmL.InsertAfter(hp1,p);
- DebugMsg(SPeepholeOptimization + 'Imul2LeaShl done',p);
- taicpu(hp1).fileinfo:=taicpu(p).fileinfo;
- RemoveCurrentP(p, hp1);
- if ShiftValue>0 then
- AsmL.InsertAfter(taicpu.op_const_reg(A_SHL, opsize, ShiftValue, taicpu(hp1).oper[1]^.reg),hp1);
- end;
- end;
- end;
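- { Changes "and $0,<op>" into "mov $0,<op>" when the flags written by AND are not
- needed afterwards, e.g. andl $0,%eax -> movl $0,%eax; MOV neither reads the old
- value of the destination nor alters the flags }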
- function TX86AsmOptimizer.PrePeepholeOptAND(var p : tai) : boolean;
- begin
- Result := False;
- if MatchOperand(taicpu(p).oper[0]^, 0) and
- not RegInUsedRegs(NR_DEFAULTFLAGS, UsedRegs) then
- begin
- DebugMsg(SPeepholeOptimization + 'AND 0 -> MOV 0', p);
- taicpu(p).opcode := A_MOV;
- Result := True;
- end;
- end;
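- { Returns True if the instruction hp loads a completely new value into reg,
- i.e. the value written to reg does not depend on reg's previous contents }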
- function TX86AsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
- var
- p: taicpu absolute hp;
- i: Integer;
- begin
- Result := False;
- if not assigned(hp) or
- (hp.typ <> ait_instruction) then
- Exit;
- // p := taicpu(hp);
- Prefetch(insprop[p.opcode]);
- if SuperRegistersEqual(reg,NR_DEFAULTFLAGS) then
- with insprop[p.opcode] do
- begin
- case getsubreg(reg) of
- R_SUBW,R_SUBD,R_SUBQ:
- Result:=
- RegLoadedWithNewValue(NR_CARRYFLAG,hp) and
- RegLoadedWithNewValue(NR_PARITYFLAG,hp) and
- RegLoadedWithNewValue(NR_AUXILIARYFLAG,hp) and
- RegLoadedWithNewValue(NR_ZEROFLAG,hp) and
- RegLoadedWithNewValue(NR_SIGNFLAG,hp) and
- RegLoadedWithNewValue(NR_OVERFLOWFLAG,hp);
- R_SUBFLAGCARRY:
- Result:=[Ch_W0CarryFlag,Ch_W1CarryFlag,Ch_WCarryFlag,Ch_WUCarryFlag,Ch_WFlags]*Ch<>[];
- R_SUBFLAGPARITY:
- Result:=[Ch_W0ParityFlag,Ch_W1ParityFlag,Ch_WParityFlag,Ch_WUParityFlag,Ch_WFlags]*Ch<>[];
- R_SUBFLAGAUXILIARY:
- Result:=[Ch_W0AuxiliaryFlag,Ch_W1AuxiliaryFlag,Ch_WAuxiliaryFlag,Ch_WUAuxiliaryFlag,Ch_WFlags]*Ch<>[];
- R_SUBFLAGZERO:
- Result:=[Ch_W0ZeroFlag,Ch_W1ZeroFlag,Ch_WZeroFlag,Ch_WUZeroFlag,Ch_WFlags]*Ch<>[];
- R_SUBFLAGSIGN:
- Result:=[Ch_W0SignFlag,Ch_W1SignFlag,Ch_WSignFlag,Ch_WUSignFlag,Ch_WFlags]*Ch<>[];
- R_SUBFLAGOVERFLOW:
- Result:=[Ch_W0OverflowFlag,Ch_W1OverflowFlag,Ch_WOverflowFlag,Ch_WUOverflowFlag,Ch_WFlags]*Ch<>[];
- R_SUBFLAGINTERRUPT:
- Result:=[Ch_W0IntFlag,Ch_W1IntFlag,Ch_WFlags]*Ch<>[];
- R_SUBFLAGDIRECTION:
- Result:=[Ch_W0DirFlag,Ch_W1DirFlag,Ch_WFlags]*Ch<>[];
- else
- begin
- writeln(getsubreg(reg));
- internalerror(2017050501);
- end;
- end;
- exit;
- end;
- { Handle special cases first }
- case p.opcode of
- A_MOV, A_MOVZX, A_MOVSX, A_LEA, A_VMOVSS, A_VMOVSD, A_VMOVAPD,
- A_VMOVAPS, A_VMOVQ, A_MOVSS, A_MOVSD, A_MOVQ, A_MOVAPD, A_MOVAPS:
- begin
- Result :=
- (p.ops=2) and { A_MOVSD can have zero operands, so this check is needed }
- (p.oper[1]^.typ = top_reg) and
- (Reg1WriteOverwritesReg2Entirely(p.oper[1]^.reg,reg)) and
- (
- (p.oper[0]^.typ = top_const) or
- (
- (p.oper[0]^.typ = top_reg) and
- not(Reg1ReadDependsOnReg2(p.oper[0]^.reg,reg))
- ) or (
- (p.oper[0]^.typ = top_ref) and
- not RegInRef(reg,p.oper[0]^.ref^)
- )
- );
- end;
- A_MUL, A_IMUL:
- Result :=
- (
- (p.ops=3) and { IMUL only }
- (Reg1WriteOverwritesReg2Entirely(p.oper[2]^.reg,reg)) and
- (
- (
- (p.oper[1]^.typ=top_reg) and
- not Reg1ReadDependsOnReg2(p.oper[1]^.reg,reg)
- ) or (
- (p.oper[1]^.typ=top_ref) and
- not RegInRef(reg,p.oper[1]^.ref^)
- )
- )
- ) or (
- (
- (p.ops=1) and
- (
- (
- (
- (p.oper[0]^.typ=top_reg) and
- not Reg1ReadDependsOnReg2(p.oper[0]^.reg,reg)
- )
- ) or (
- (p.oper[0]^.typ=top_ref) and
- not RegInRef(reg,p.oper[0]^.ref^)
- )
- ) and (
- (
- (p.opsize=S_B) and
- Reg1WriteOverwritesReg2Entirely(NR_AX,reg) and
- not Reg1ReadDependsOnReg2(NR_AL,reg)
- ) or (
- (p.opsize=S_W) and
- Reg1WriteOverwritesReg2Entirely(NR_DX,reg)
- ) or (
- (p.opsize=S_L) and
- Reg1WriteOverwritesReg2Entirely(NR_EDX,reg)
- {$ifdef x86_64}
- ) or (
- (p.opsize=S_Q) and
- Reg1WriteOverwritesReg2Entirely(NR_RDX,reg)
- {$endif x86_64}
- )
- )
- )
- );
- A_CBW:
- Result := Reg1WriteOverwritesReg2Entirely(NR_AX,reg) and not(Reg1ReadDependsOnReg2(NR_AL,reg));
- {$ifndef x86_64}
- A_LDS:
- Result := (reg=NR_DS) and not(RegInRef(reg,p.oper[0]^.ref^));
- A_LES:
- Result := (reg=NR_ES) and not(RegInRef(reg,p.oper[0]^.ref^));
- {$endif not x86_64}
- A_LFS:
- Result := (reg=NR_FS) and not(RegInRef(reg,p.oper[0]^.ref^));
- A_LGS:
- Result := (reg=NR_GS) and not(RegInRef(reg,p.oper[0]^.ref^));
- A_LSS:
- Result := (reg=NR_SS) and not(RegInRef(reg,p.oper[0]^.ref^));
- A_LAHF{$ifndef x86_64}, A_AAM{$endif not x86_64}:
- Result := Reg1WriteOverwritesReg2Entirely(NR_AH,reg);
- A_LODSB:
- Result := Reg1WriteOverwritesReg2Entirely(NR_AL,reg);
- A_LODSW:
- Result := Reg1WriteOverwritesReg2Entirely(NR_AX,reg);
- {$ifdef x86_64}
- A_LODSQ:
- Result := Reg1WriteOverwritesReg2Entirely(NR_RAX,reg);
- {$endif x86_64}
- A_LODSD:
- Result := Reg1WriteOverwritesReg2Entirely(NR_EAX,reg);
- A_FSTSW, A_FNSTSW:
- Result := (p.oper[0]^.typ=top_reg) and Reg1WriteOverwritesReg2Entirely(p.oper[0]^.reg,reg);
- else
- begin
- with insprop[p.opcode] do
- begin
- if (
- { xor %reg,%reg etc. is classed as a new value }
- (([Ch_NoReadIfEqualRegs]*Ch)<>[]) and
- MatchOpType(p, top_reg, top_reg) and
- (p.oper[0]^.reg = p.oper[1]^.reg) and
- Reg1WriteOverwritesReg2Entirely(p.oper[1]^.reg,reg)
- ) then
- begin
- Result := True;
- Exit;
- end;
- { Make sure the entire register is overwritten }
- if (getregtype(reg) = R_INTREGISTER) then
- begin
- if (p.ops > 0) then
- begin
- if RegInOp(reg, p.oper[0]^) then
- begin
- if (p.oper[0]^.typ = top_ref) then
- begin
- if RegInRef(reg, p.oper[0]^.ref^) then
- begin
- Result := False;
- Exit;
- end;
- end
- else if (p.oper[0]^.typ = top_reg) then
- begin
- if ([Ch_ROp1, Ch_RWOp1, Ch_MOp1]*Ch<>[]) then
- begin
- Result := False;
- Exit;
- end
- else if ([Ch_WOp1]*Ch<>[]) then
- begin
- if Reg1WriteOverwritesReg2Entirely(p.oper[0]^.reg, reg) then
- Result := True
- else
- begin
- Result := False;
- Exit;
- end;
- end;
- end;
- end;
- if (p.ops > 1) then
- begin
- if RegInOp(reg, p.oper[1]^) then
- begin
- if (p.oper[1]^.typ = top_ref) then
- begin
- if RegInRef(reg, p.oper[1]^.ref^) then
- begin
- Result := False;
- Exit;
- end;
- end
- else if (p.oper[1]^.typ = top_reg) then
- begin
- if ([Ch_ROp2, Ch_RWOp2, Ch_MOp2]*Ch<>[]) then
- begin
- Result := False;
- Exit;
- end
- else if ([Ch_WOp2]*Ch<>[]) then
- begin
- if Reg1WriteOverwritesReg2Entirely(p.oper[1]^.reg, reg) then
- Result := True
- else
- begin
- Result := False;
- Exit;
- end;
- end;
- end;
- end;
- if (p.ops > 2) then
- begin
- if RegInOp(reg, p.oper[2]^) then
- begin
- if (p.oper[2]^.typ = top_ref) then
- begin
- if RegInRef(reg, p.oper[2]^.ref^) then
- begin
- Result := False;
- Exit;
- end;
- end
- else if (p.oper[2]^.typ = top_reg) then
- begin
- if ([Ch_ROp3, Ch_RWOp3, Ch_MOp3]*Ch<>[]) then
- begin
- Result := False;
- Exit;
- end
- else if ([Ch_WOp3]*Ch<>[]) then
- begin
- if Reg1WriteOverwritesReg2Entirely(p.oper[2]^.reg, reg) then
- Result := True
- else
- begin
- Result := False;
- Exit;
- end;
- end;
- end;
- end;
- if (p.ops > 3) and RegInOp(reg, p.oper[3]^) then
- begin
- if (p.oper[3]^.typ = top_ref) then
- begin
- if RegInRef(reg, p.oper[3]^.ref^) then
- begin
- Result := False;
- Exit;
- end;
- end
- else if (p.oper[3]^.typ = top_reg) then
- begin
- if ([Ch_ROp4, Ch_RWOp4, Ch_MOp4]*Ch<>[]) then
- begin
- Result := False;
- Exit;
- end
- else if ([Ch_WOp4]*Ch<>[]) then
- begin
- if Reg1WriteOverwritesReg2Entirely(p.oper[3]^.reg, reg) then
- Result := True
- else
- begin
- Result := False;
- Exit;
- end;
- end;
- end;
- end;
- end;
- end;
- end;
- { Don't check these first, in case an input operand is equal to an explicit output register }
- case getsupreg(reg) of
- RS_EAX:
- if ([Ch_WEAX{$ifdef x86_64},Ch_WRAX{$endif x86_64}]*Ch<>[]) and Reg1WriteOverwritesReg2Entirely(NR_EAX, reg) then
- begin
- Result := True;
- Exit;
- end;
- RS_ECX:
- if ([Ch_WECX{$ifdef x86_64},Ch_WRCX{$endif x86_64}]*Ch<>[]) and Reg1WriteOverwritesReg2Entirely(NR_ECX, reg) then
- begin
- Result := True;
- Exit;
- end;
- RS_EDX:
- if ([Ch_WEDX{$ifdef x86_64},Ch_WRDX{$endif x86_64}]*Ch<>[]) and Reg1WriteOverwritesReg2Entirely(NR_EDX, reg) then
- begin
- Result := True;
- Exit;
- end;
- RS_EBX:
- if ([Ch_WEBX{$ifdef x86_64},Ch_WRBX{$endif x86_64}]*Ch<>[]) and Reg1WriteOverwritesReg2Entirely(NR_EBX, reg) then
- begin
- Result := True;
- Exit;
- end;
- RS_ESP:
- if ([Ch_WESP{$ifdef x86_64},Ch_WRSP{$endif x86_64}]*Ch<>[]) and Reg1WriteOverwritesReg2Entirely(NR_ESP, reg) then
- begin
- Result := True;
- Exit;
- end;
- RS_EBP:
- if ([Ch_WEBP{$ifdef x86_64},Ch_WRBP{$endif x86_64}]*Ch<>[]) and Reg1WriteOverwritesReg2Entirely(NR_EBP, reg) then
- begin
- Result := True;
- Exit;
- end;
- RS_ESI:
- if ([Ch_WESI{$ifdef x86_64},Ch_WRSI{$endif x86_64}]*Ch<>[]) and Reg1WriteOverwritesReg2Entirely(NR_ESI, reg) then
- begin
- Result := True;
- Exit;
- end;
- RS_EDI:
- if ([Ch_WEDI{$ifdef x86_64},Ch_WRDI{$endif x86_64}]*Ch<>[]) and Reg1WriteOverwritesReg2Entirely(NR_EDI, reg) then
- begin
- Result := True;
- Exit;
- end;
- else
- ;
- end;
- end;
- end;
- end;
- end;
- end;
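- { Returns True if p is the start of typical procedure exit code: a RET, a LEAVE
- followed by RET, a stack-pointer restore via LEA followed by RET, or a frame
- pointer restore (MOV or LEA) followed by POP and RET; a leading NOP is skipped }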
- class function TX86AsmOptimizer.IsExitCode(p : tai) : boolean;
- var
- hp2,hp3 : tai;
- begin
- { some x86-64 targets issue a NOP before the real exit code }
- if MatchInstruction(p,A_NOP,[]) then
- GetNextInstruction(p,p);
- result:=assigned(p) and (p.typ=ait_instruction) and
- ((taicpu(p).opcode = A_RET) or
- ((taicpu(p).opcode=A_LEAVE) and
- GetNextInstruction(p,hp2) and
- MatchInstruction(hp2,A_RET,[S_NO])
- ) or
- (((taicpu(p).opcode=A_LEA) and
- MatchOpType(taicpu(p),top_ref,top_reg) and
- (taicpu(p).oper[0]^.ref^.base=NR_STACK_POINTER_REG) and
- (taicpu(p).oper[1]^.reg=NR_STACK_POINTER_REG)
- ) and
- GetNextInstruction(p,hp2) and
- MatchInstruction(hp2,A_RET,[S_NO])
- ) or
- ((((taicpu(p).opcode=A_MOV) and
- MatchOpType(taicpu(p),top_reg,top_reg) and
- (taicpu(p).oper[0]^.reg=current_procinfo.framepointer) and
- (taicpu(p).oper[1]^.reg=NR_STACK_POINTER_REG)) or
- ((taicpu(p).opcode=A_LEA) and
- MatchOpType(taicpu(p),top_ref,top_reg) and
- (taicpu(p).oper[0]^.ref^.base=current_procinfo.framepointer) and
- (taicpu(p).oper[1]^.reg=NR_STACK_POINTER_REG)
- )
- ) and
- GetNextInstruction(p,hp2) and
- MatchInstruction(hp2,A_POP,[reg2opsize(current_procinfo.framepointer)]) and
- MatchOpType(taicpu(hp2),top_reg) and
- (taicpu(hp2).oper[0]^.reg=current_procinfo.framepointer) and
- GetNextInstruction(hp2,hp3) and
- MatchInstruction(hp3,A_RET,[S_NO])
- )
- );
- end;
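- { Returns True if hp1 is an arithmetic or logical instruction (ADD, SUB, OR, XOR,
- AND, SHL, SHR, SAR, INC, DEC, NEG, NOT) whose destination is reg and whose
- source, if any, is a constant or a register other than reg }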
- class function TX86AsmOptimizer.isFoldableArithOp(hp1: taicpu; reg: tregister): boolean;
- begin
- isFoldableArithOp := False;
- case hp1.opcode of
- A_ADD,A_SUB,A_OR,A_XOR,A_AND,A_SHL,A_SHR,A_SAR:
- isFoldableArithOp :=
- ((taicpu(hp1).oper[0]^.typ = top_const) or
- ((taicpu(hp1).oper[0]^.typ = top_reg) and
- (taicpu(hp1).oper[0]^.reg <> reg))) and
- (taicpu(hp1).oper[1]^.typ = top_reg) and
- (taicpu(hp1).oper[1]^.reg = reg);
- A_INC,A_DEC,A_NEG,A_NOT:
- isFoldableArithOp :=
- (taicpu(hp1).oper[0]^.typ = top_reg) and
- (taicpu(hp1).oper[0]^.reg = reg);
- else
- ;
- end;
- end;
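- { Searches backwards from p and removes the last deallocation of the register(s)
- holding the function result: EAX, and additionally EDX for 8-byte ordinal
- results (int64/qword) }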
- procedure TX86AsmOptimizer.RemoveLastDeallocForFuncRes(p: tai);
- procedure DoRemoveLastDeallocForFuncRes( supreg: tsuperregister);
- var
- hp2: tai;
- begin
- hp2 := p;
- repeat
- hp2 := tai(hp2.previous);
- if assigned(hp2) and
- (hp2.typ = ait_regalloc) and
- (tai_regalloc(hp2).ratype=ra_dealloc) and
- (getregtype(tai_regalloc(hp2).reg) = R_INTREGISTER) and
- (getsupreg(tai_regalloc(hp2).reg) = supreg) then
- begin
- RemoveInstruction(hp2);
- break;
- end;
- until not(assigned(hp2)) or regInInstruction(newreg(R_INTREGISTER,supreg,R_SUBWHOLE),hp2);
- end;
- begin
- case current_procinfo.procdef.returndef.typ of
- arraydef,recorddef,pointerdef,
- stringdef,enumdef,procdef,objectdef,errordef,
- filedef,setdef,procvardef,
- classrefdef,forwarddef:
- DoRemoveLastDeallocForFuncRes(RS_EAX);
- orddef:
- if current_procinfo.procdef.returndef.size <> 0 then
- begin
- DoRemoveLastDeallocForFuncRes(RS_EAX);
- { for int64/qword }
- if current_procinfo.procdef.returndef.size = 8 then
- DoRemoveLastDeallocForFuncRes(RS_EDX);
- end;
- else
- ;
- end;
- end;
- function TX86AsmOptimizer.OptPass1_V_MOVAP(var p : tai) : boolean;
- var
- hp1,hp2 : tai;
- begin
- result:=false;
- if MatchOpType(taicpu(p),top_reg,top_reg) then
- begin
- { vmova* reg1,reg1
- =>
- <nop> }
- if MatchOperand(taicpu(p).oper[0]^,taicpu(p).oper[1]^) then
- begin
- RemoveCurrentP(p);
- result:=true;
- exit;
- end
- else if GetNextInstruction(p,hp1) then
- begin
- if MatchInstruction(hp1,[taicpu(p).opcode],[S_NO]) and
- MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[0]^) then
- begin
- { vmova* reg1,reg2
- vmova* reg2,reg3
- dealloc reg2
- =>
- vmova* reg1,reg3 }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if MatchOpType(taicpu(hp1),top_reg,top_reg) and
- not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,TmpUsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + '(V)MOVA*(V)MOVA*2(V)MOVA* 1',p);
- taicpu(p).loadoper(1,taicpu(hp1).oper[1]^);
- RemoveInstruction(hp1);
- result:=true;
- exit;
- end
- { special case:
- vmova* reg1,<op>
- vmova* <op>,reg1
- =>
- vmova* reg1,<op> }
- else if MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[1]^) and
- ((taicpu(p).oper[0]^.typ<>top_ref) or
- (not(vol_read in taicpu(p).oper[0]^.ref^.volatility))
- ) then
- begin
- DebugMsg(SPeepholeOptimization + '(V)MOVA*(V)MOVA*2(V)MOVA* 2',p);
- RemoveInstruction(hp1);
- result:=true;
- exit;
- end
- end
- else if ((MatchInstruction(p,[A_MOVAPS,A_VMOVAPS],[S_NO]) and
- MatchInstruction(hp1,[A_MOVSS,A_VMOVSS],[S_NO])) or
- ((MatchInstruction(p,[A_MOVAPD,A_VMOVAPD],[S_NO]) and
- MatchInstruction(hp1,[A_MOVSD,A_VMOVSD],[S_NO])))
- ) and
- MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[0]^) then
- begin
- { vmova* reg1,reg2
- vmovs* reg2,<op>
- dealloc reg2
- =>
- vmovs* reg1,<op> }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,TmpUsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + '(V)MOVA*(V)MOVS*2(V)MOVS* 1',p);
- taicpu(p).opcode:=taicpu(hp1).opcode;
- taicpu(p).loadoper(1,taicpu(hp1).oper[1]^);
- RemoveInstruction(hp1);
- result:=true;
- exit;
- end
- end;
- end;
- if GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[1]^.reg) then
- begin
- if MatchInstruction(hp1,[A_VFMADDPD,
- A_VFMADD132PD,
- A_VFMADD132PS,
- A_VFMADD132SD,
- A_VFMADD132SS,
- A_VFMADD213PD,
- A_VFMADD213PS,
- A_VFMADD213SD,
- A_VFMADD213SS,
- A_VFMADD231PD,
- A_VFMADD231PS,
- A_VFMADD231SD,
- A_VFMADD231SS,
- A_VFMADDSUB132PD,
- A_VFMADDSUB132PS,
- A_VFMADDSUB213PD,
- A_VFMADDSUB213PS,
- A_VFMADDSUB231PD,
- A_VFMADDSUB231PS,
- A_VFMSUB132PD,
- A_VFMSUB132PS,
- A_VFMSUB132SD,
- A_VFMSUB132SS,
- A_VFMSUB213PD,
- A_VFMSUB213PS,
- A_VFMSUB213SD,
- A_VFMSUB213SS,
- A_VFMSUB231PD,
- A_VFMSUB231PS,
- A_VFMSUB231SD,
- A_VFMSUB231SS,
- A_VFMSUBADD132PD,
- A_VFMSUBADD132PS,
- A_VFMSUBADD213PD,
- A_VFMSUBADD213PS,
- A_VFMSUBADD231PD,
- A_VFMSUBADD231PS,
- A_VFNMADD132PD,
- A_VFNMADD132PS,
- A_VFNMADD132SD,
- A_VFNMADD132SS,
- A_VFNMADD213PD,
- A_VFNMADD213PS,
- A_VFNMADD213SD,
- A_VFNMADD213SS,
- A_VFNMADD231PD,
- A_VFNMADD231PS,
- A_VFNMADD231SD,
- A_VFNMADD231SS,
- A_VFNMSUB132PD,
- A_VFNMSUB132PS,
- A_VFNMSUB132SD,
- A_VFNMSUB132SS,
- A_VFNMSUB213PD,
- A_VFNMSUB213PS,
- A_VFNMSUB213SD,
- A_VFNMSUB213SS,
- A_VFNMSUB231PD,
- A_VFNMSUB231PS,
- A_VFNMSUB231SD,
- A_VFNMSUB231SS],[S_NO]) and
- { we mix single and double operations here because we assume that the compiler
- generates vmovapd only after double operations and vmovaps only after single operations }
- MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[2]^) and
- GetNextInstruction(hp1,hp2) and
- MatchInstruction(hp2,[A_VMOVAPD,A_VMOVAPS,A_MOVAPD,A_MOVAPS],[S_NO]) and
- MatchOperand(taicpu(p).oper[0]^,taicpu(hp2).oper[1]^) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
- if not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp2,TmpUsedRegs)) then
- begin
- taicpu(hp1).loadoper(2,taicpu(p).oper[0]^);
- RemoveCurrentP(p, hp1); // <-- Is this actually safe? hp1 is not necessarily the next instruction. [Kit]
- RemoveInstruction(hp2);
- end;
- end
- else if (hp1.typ = ait_instruction) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2,taicpu(p).opcode,[]) and
- OpsEqual(taicpu(hp2).oper[1]^, taicpu(p).oper[0]^) and
- MatchOpType(taicpu(hp2),top_reg,top_reg) and
- MatchOperand(taicpu(hp2).oper[0]^,taicpu(p).oper[1]^) and
- (((taicpu(p).opcode=A_MOVAPS) and
- ((taicpu(hp1).opcode=A_ADDSS) or (taicpu(hp1).opcode=A_SUBSS) or
- (taicpu(hp1).opcode=A_MULSS) or (taicpu(hp1).opcode=A_DIVSS))) or
- ((taicpu(p).opcode=A_MOVAPD) and
- ((taicpu(hp1).opcode=A_ADDSD) or (taicpu(hp1).opcode=A_SUBSD) or
- (taicpu(hp1).opcode=A_MULSD) or (taicpu(hp1).opcode=A_DIVSD)))
- ) then
- { change
- movapX reg,reg2
- addsX/subsX/... reg3, reg2
- movapX reg2,reg
- to
- addsX/subsX/... reg3,reg
- }
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
- If not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp2,TmpUsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovapXOpMovapX2Op ('+
- debug_op2str(taicpu(p).opcode)+' '+
- debug_op2str(taicpu(hp1).opcode)+' '+
- debug_op2str(taicpu(hp2).opcode)+') done',p);
- { we cannot eliminate the first move if
- the operation uses the same register for source and dest }
- if not(OpsEqual(taicpu(hp1).oper[1]^,taicpu(hp1).oper[0]^)) then
- RemoveCurrentP(p, nil);
- p:=hp1;
- taicpu(hp1).loadoper(1, taicpu(hp2).oper[1]^);
- RemoveInstruction(hp2);
- result:=true;
- end;
- end;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1VOP(var p : tai) : boolean;
- var
- hp1 : tai;
- begin
- result:=false;
- { replace
- V<Op>X %mreg1,%mreg2,%mreg3
- VMovX %mreg3,%mreg4
- dealloc %mreg3
- by
- V<Op>X %mreg1,%mreg2,%mreg4
- ?
- }
- if GetNextInstruction(p,hp1) and
- { we mix single and double operations here because we assume that the compiler
- generates vmovapd only after double operations and vmovaps only after single operations }
- MatchInstruction(hp1,A_VMOVAPD,A_VMOVAPS,[S_NO]) and
- MatchOperand(taicpu(p).oper[2]^,taicpu(hp1).oper[0]^) and
- (taicpu(hp1).oper[1]^.typ=top_reg) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if not(RegUsedAfterInstruction(taicpu(hp1).oper[0]^.reg,hp1,TmpUsedRegs)) then
- begin
- taicpu(p).loadoper(2,taicpu(hp1).oper[1]^);
- DebugMsg(SPeepholeOptimization + 'VOpVmov2VOp done',p);
- RemoveInstruction(hp1);
- result:=true;
- end;
- end;
- end;
- { Replaces all occurrences of AOldReg in a memory reference with ANewReg }
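- { A minimal sketch (not part of the original source): with AOldReg = %eax and
- ANewReg = %edx, the reference 8(%eax,%ecx,4) becomes 8(%edx,%ecx,4); only exact
- matches of the base and index registers are rewritten. }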
- class function TX86AsmOptimizer.ReplaceRegisterInRef(var ref: TReference; const AOldReg, ANewReg: TRegister): Boolean;
- begin
- Result := False;
- { For safety reasons, only check for exact register matches }
- { Check base register }
- if (ref.base = AOldReg) then
- begin
- ref.base := ANewReg;
- Result := True;
- end;
- { Check index register }
- if (ref.index = AOldReg) then
- begin
- ref.index := ANewReg;
- Result := True;
- end;
- end;
- { Replaces all occurrences of AOldReg in an operand with ANewReg }
- class function TX86AsmOptimizer.ReplaceRegisterInOper(const p: taicpu; const OperIdx: Integer; const AOldReg, ANewReg: TRegister): Boolean;
- var
- OldSupReg, NewSupReg: TSuperRegister;
- OldSubReg, NewSubReg: TSubRegister;
- OldRegType: TRegisterType;
- ThisOper: POper;
- begin
- ThisOper := p.oper[OperIdx]; { Faster to access overall }
- Result := False;
- if (AOldReg = NR_NO) or (ANewReg = NR_NO) then
- InternalError(2020011801);
- OldSupReg := getsupreg(AOldReg);
- OldSubReg := getsubreg(AOldReg);
- OldRegType := getregtype(AOldReg);
- NewSupReg := getsupreg(ANewReg);
- NewSubReg := getsubreg(ANewReg);
- if OldRegType <> getregtype(ANewReg) then
- InternalError(2020011802);
- if OldSubReg <> NewSubReg then
- InternalError(2020011803);
- case ThisOper^.typ of
- top_reg:
- if (
- (ThisOper^.reg = AOldReg) or
- (
- (OldRegType = R_INTREGISTER) and
- (getsupreg(ThisOper^.reg) = OldSupReg) and
- (getregtype(ThisOper^.reg) = R_INTREGISTER) and
- (
- (getsubreg(ThisOper^.reg) <= OldSubReg)
- {$ifndef x86_64}
- and (
- { Under i386 and i8086, ESI, EDI, EBP and ESP
- don't have an 8-bit representation }
- (getsubreg(ThisOper^.reg) >= R_SUBW) or
- not (NewSupReg in [RS_ESI, RS_EDI, RS_EBP, RS_ESP])
- )
- {$endif x86_64}
- )
- )
- ) then
- begin
- ThisOper^.reg := newreg(getregtype(ANewReg), NewSupReg, getsubreg(p.oper[OperIdx]^.reg));
- Result := True;
- end;
- top_ref:
- if ReplaceRegisterInRef(ThisOper^.ref^, AOldReg, ANewReg) then
- Result := True;
- else
- ;
- end;
- end;
- { Replaces all occurrences of AOldReg in an instruction with ANewReg }
- class function TX86AsmOptimizer.ReplaceRegisterInInstruction(const p: taicpu; const AOldReg, ANewReg: TRegister): Boolean;
- const
- ReadFlag: array[0..3] of TInsChange = (Ch_Rop1, Ch_Rop2, Ch_Rop3, Ch_Rop4);
- var
- OperIdx: Integer;
- begin
- Result := False;
- for OperIdx := 0 to p.ops - 1 do
- if (ReadFlag[OperIdx] in InsProp[p.Opcode].Ch) then
- begin
- { The shift and rotate instructions can only use CL }
- if not (
- (OperIdx = 0) and
- { This second condition just helps to avoid unnecessarily
- calling MatchInstruction for 10 different opcodes }
- (p.oper[0]^.reg = NR_CL) and
- MatchInstruction(p, [A_RCL, A_RCR, A_ROL, A_ROR, A_SAL, A_SAR, A_SHL, A_SHLD, A_SHR, A_SHRD], [])
- ) then
- Result := ReplaceRegisterInOper(p, OperIdx, AOldReg, ANewReg) or Result;
- end
- else if p.oper[OperIdx]^.typ = top_ref then
- { It's okay to replace registers in references that get written to }
- Result := ReplaceRegisterInOper(p, OperIdx, AOldReg, ANewReg) or Result;
- end;
- class function TX86AsmOptimizer.IsRefSafe(const ref: PReference): Boolean;
- begin
- with ref^ do
- Result :=
- (index = NR_NO) and
- (
- {$ifdef x86_64}
- (
- (base = NR_RIP) and
- (refaddr in [addr_pic, addr_pic_no_got])
- ) or
- {$endif x86_64}
- (base = NR_STACK_POINTER_REG) or
- (base = current_procinfo.framepointer)
- );
- end;
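- { Illustrative examples of the conversions performed below (registers and
- constants are arbitrary, not taken from the original source):
- leal 1(%eax),%eax -> incl %eax (only when inc/dec are preferred)
- leal -1(%eax),%eax -> decl %eax
- leal 16(%eax),%eax -> addl $16,%eax
- leal -16(%eax),%eax -> subl $16,%eax
- The conversion only applies when the base register equals the destination and
- there is no index register or symbol. }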
- function TX86AsmOptimizer.ConvertLEA(const p: taicpu): Boolean;
- var
- l: asizeint;
- begin
- Result := False;
- { Should have been checked previously }
- if p.opcode <> A_LEA then
- InternalError(2020072501);
- { do not mess with the stack pointer, as adjusting it via lea is recommended, except if we optimize for size }
- if (p.oper[1]^.reg=NR_STACK_POINTER_REG) and
- not(cs_opt_size in current_settings.optimizerswitches) then
- exit;
- with p.oper[0]^.ref^ do
- begin
- if (base <> p.oper[1]^.reg) or
- (index <> NR_NO) or
- assigned(symbol) then
- exit;
- l:=offset;
- if (l=1) and UseIncDec then
- begin
- p.opcode:=A_INC;
- p.loadreg(0,p.oper[1]^.reg);
- p.ops:=1;
- DebugMsg(SPeepholeOptimization + 'Lea2Inc done',p);
- end
- else if (l=-1) and UseIncDec then
- begin
- p.opcode:=A_DEC;
- p.loadreg(0,p.oper[1]^.reg);
- p.ops:=1;
- DebugMsg(SPeepholeOptimization + 'Lea2Dec done',p);
- end
- else
- begin
- if (l<0) and (l<>-2147483648) then
- begin
- p.opcode:=A_SUB;
- p.loadConst(0,-l);
- DebugMsg(SPeepholeOptimization + 'Lea2Sub done',p);
- end
- else
- begin
- p.opcode:=A_ADD;
- p.loadConst(0,l);
- DebugMsg(SPeepholeOptimization + 'Lea2Add done',p);
- end;
- end;
- end;
- Result := True;
- end;
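- { Rough illustration of what DeepMOVOpt does (registers arbitrary, not from the
- original source): given
- movl %eax,%edx
- addl %edx,%ecx
- the ADD is rewritten to read %eax instead of %edx, i.e.
- addl %eax,%ecx
- which shortens the dependency chain; the MOV itself is only removed later by the
- caller if %edx turns out not to be used afterwards. }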
- function TX86AsmOptimizer.DeepMOVOpt(const p_mov: taicpu; const hp: taicpu): Boolean;
- var
- CurrentReg, ReplaceReg: TRegister;
- begin
- Result := False;
- ReplaceReg := taicpu(p_mov).oper[0]^.reg;
- CurrentReg := taicpu(p_mov).oper[1]^.reg;
- case hp.opcode of
- A_FSTSW, A_FNSTSW,
- A_IN, A_INS, A_OUT, A_OUTS,
- A_CMPS, A_LODS, A_MOVS, A_SCAS, A_STOS:
- { These routines have explicit operands, but they are restricted in
- what they can be (e.g. IN and OUT can only read from AL, AX or
- EAX). }
- Exit;
- A_IMUL:
- begin
- { The 1-operand version writes to implicit registers.
- The 2-operand version reads from the first operand, and reads
- from and writes to the second (equivalent to Ch_ROp1, Ch_RWOp2).
- The 3-operand version reads from a register that it doesn't write to.
- }
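- { For illustration (not in the original source): in the 2-operand form
- "imull %edx,%ecx" only the first operand (%edx) may be rewritten, and in the
- 3-operand form "imull $3,%edx,%ecx" only the second operand (%edx) may be
- rewritten; the remaining operand is a destination that gets written, so it must
- be left alone. }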
- case hp.ops of
- 1:
- if (
- (
- (hp.opsize = S_B) and (getsupreg(CurrentReg) <> RS_EAX)
- ) or
- not (getsupreg(CurrentReg) in [RS_EAX, RS_EDX])
- ) and ReplaceRegisterInOper(hp, 0, CurrentReg, ReplaceReg) then
- begin
- Result := True;
- DebugMsg(SPeepholeOptimization + debug_regname(CurrentReg) + ' = ' + debug_regname(ReplaceReg) + '; changed to minimise pipeline stall (MovIMul2MovIMul 1)', hp);
- AllocRegBetween(ReplaceReg, p_mov, hp, UsedRegs);
- end;
- 2:
- { Only modify the first parameter }
- if ReplaceRegisterInOper(hp, 0, CurrentReg, ReplaceReg) then
- begin
- Result := True;
- DebugMsg(SPeepholeOptimization + debug_regname(CurrentReg) + ' = ' + debug_regname(ReplaceReg) + '; changed to minimise pipeline stall (MovIMul2MovIMul 2)', hp);
- AllocRegBetween(ReplaceReg, p_mov, hp, UsedRegs);
- end;
- 3:
- { Only modify the second parameter }
- if ReplaceRegisterInOper(hp, 1, CurrentReg, ReplaceReg) then
- begin
- Result := True;
- DebugMsg(SPeepholeOptimization + debug_regname(CurrentReg) + ' = ' + debug_regname(ReplaceReg) + '; changed to minimise pipeline stall (MovIMul2MovIMul 3)', hp);
- AllocRegBetween(ReplaceReg, p_mov, hp, UsedRegs);
- end;
- else
- InternalError(2020012901);
- end;
- end;
- else
- if (hp.ops > 0) and
- ReplaceRegisterInInstruction(hp, CurrentReg, ReplaceReg) then
- begin
- Result := True;
- DebugMsg(SPeepholeOptimization + debug_regname(CurrentReg) + ' = ' + debug_regname(ReplaceReg) + '; changed to minimise pipeline stall (MovXXX2MovXXX)', hp);
- AllocRegBetween(ReplaceReg, p_mov, hp, UsedRegs);
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1MOV(var p : tai) : boolean;
- var
- hp1, hp2, hp3: tai;
- DoOptimisation, TempBool: Boolean;
- {$ifdef x86_64}
- NewConst: TCGInt;
- {$endif x86_64}
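- { Illustrative effect of convert_mov_value (values arbitrary, not from the
- original source): when folding "movb $200,%al; movzbl %al,%eax" the constant is
- trimmed to the unsigned value 200 and the pair becomes "movl $200,%eax", whereas
- with movsbl the value is converted to its signed equivalent -56, giving
- "movl $-56,%eax". }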
- procedure convert_mov_value(signed_movop: tasmop; max_value: tcgint); inline;
- begin
- if taicpu(hp1).opcode = signed_movop then
- begin
- if taicpu(p).oper[0]^.val > max_value shr 1 then
- taicpu(p).oper[0]^.val:=taicpu(p).oper[0]^.val - max_value - 1 { Convert to signed }
- end
- else
- taicpu(p).oper[0]^.val:=taicpu(p).oper[0]^.val and max_value; { Trim to unsigned }
- end;
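- { Worked example of the merging below (offsets and values arbitrary, not from the
- original source):
- movb $1,(%esp)
- movb $2,1(%esp)
- merges into
- movw $0x0201,(%esp) (513 decimal; little-endian, so the second byte lands in bits 8..15)
- provided the first write sits on a 2-byte boundary and both writes target the stack. }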
- function TryConstMerge(var p1, p2: tai): Boolean;
- var
- ThisRef: TReference;
- begin
- Result := False;
- ThisRef := taicpu(p2).oper[1]^.ref^;
- { Only permit writes to the stack, since we can guarantee alignment with that }
- if (ThisRef.index = NR_NO) and
- (
- (ThisRef.base = NR_STACK_POINTER_REG) or
- (ThisRef.base = current_procinfo.framepointer)
- ) then
- begin
- case taicpu(p).opsize of
- S_B:
- begin
- { Word writes must be on a 2-byte boundary }
- if (taicpu(p1).oper[1]^.ref^.offset mod 2) = 0 then
- begin
- { Reduce offset of second reference to see if it is sequential with the first }
- Dec(ThisRef.offset, 1);
- if RefsEqual(taicpu(p1).oper[1]^.ref^, ThisRef) then
- begin
- { Make sure the constants aren't represented as a
- negative number, as these won't merge properly }
- taicpu(p1).opsize := S_W;
- taicpu(p1).oper[0]^.val := (taicpu(p1).oper[0]^.val and $FF) or ((taicpu(p2).oper[0]^.val and $FF) shl 8);
- DebugMsg(SPeepholeOptimization + 'Merged two byte-sized constant writes to stack (MovMov2Mov 2a)', p1);
- RemoveInstruction(p2);
- Result := True;
- end;
- end;
- end;
- S_W:
- begin
- { Longword writes must be on a 4-byte boundary }
- if (taicpu(p1).oper[1]^.ref^.offset mod 4) = 0 then
- begin
- { Reduce offset of second reference to see if it is sequential with the first }
- Dec(ThisRef.offset, 2);
- if RefsEqual(taicpu(p1).oper[1]^.ref^, ThisRef) then
- begin
- { Make sure the constants aren't represented as a
- negative number, as these won't merge properly }
- taicpu(p1).opsize := S_L;
- taicpu(p1).oper[0]^.val := (taicpu(p1).oper[0]^.val and $FFFF) or ((taicpu(p2).oper[0]^.val and $FFFF) shl 16);
- DebugMsg(SPeepholeOptimization + 'Merged two word-sized constant writes to stack (MovMov2Mov 2b)', p1);
- RemoveInstruction(p2);
- Result := True;
- end;
- end;
- end;
- {$ifdef x86_64}
- S_L:
- begin
- { Only sign-extended 32-bit constants can be written to 64-bit memory directly, so check to
- see if the constants can be encoded this way. }
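- { Illustrative: "movl $1,(%rsp); movl $0,4(%rsp)" can merge into "movq $1,(%rsp)",
- but "movl $0,(%rsp); movl $1,4(%rsp)" would need the constant $100000000, which
- cannot be encoded as a sign-extended 32-bit immediate, so that pair is left alone. }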
- NewConst := (taicpu(p1).oper[0]^.val and $FFFFFFFF) or (taicpu(p2).oper[0]^.val shl 32);
- if (NewConst >= -2147483648) and (NewConst <= 2147483647) and
- { Quadword writes must be on an 8-byte boundary }
- ((taicpu(p1).oper[1]^.ref^.offset mod 8) = 0) then
- begin
- { Reduce offset of second reference to see if it is sequential with the first }
- Dec(ThisRef.offset, 4);
- if RefsEqual(taicpu(p1).oper[1]^.ref^, ThisRef) then
- begin
- { Make sure the constants aren't represented as a
- negative number, as these won't merge properly }
- taicpu(p1).opsize := S_Q;
- { Force a typecast into a 32-bit signed integer (that will then be sign-extended to 64-bit) }
- taicpu(p1).oper[0]^.val := NewConst;
- DebugMsg(SPeepholeOptimization + 'Merged two longword-sized constant writes to stack (MovMov2Mov 2c)', p1);
- RemoveInstruction(p2);
- Result := True;
- end;
- end;
- end;
- {$endif x86_64}
- else
- ;
- end;
- end;
- end;
- var
- GetNextInstruction_p, TempRegUsed, CrossJump: Boolean;
- PreMessage, RegName1, RegName2, InputVal, MaskNum: string;
- NewSize: topsize;
- CurrentReg, ActiveReg: TRegister;
- SourceRef, TargetRef: TReference;
- MovAligned, MovUnaligned: TAsmOp;
- begin
- Result:=false;
- GetNextInstruction_p:=GetNextInstruction(p, hp1);
- { remove mov reg1,reg1? }
- if MatchOperand(taicpu(p).oper[0]^,taicpu(p).oper[1]^)
- then
- begin
- DebugMsg(SPeepholeOptimization + 'Mov2Nop 1 done',p);
- { take care of the register (de)allocs following p }
- RemoveCurrentP(p, hp1);
- Result:=true;
- exit;
- end;
- { All the next optimisations require a next instruction }
- if not GetNextInstruction_p or (hp1.typ <> ait_instruction) then
- Exit;
- { Look for:
- mov %reg1,%reg2
- ??? %reg2,r/m
- Change to:
- mov %reg1,%reg2
- ??? %reg1,r/m
- }
- if MatchOpType(taicpu(p), top_reg, top_reg) then
- begin
- CurrentReg := taicpu(p).oper[1]^.reg;
- if RegReadByInstruction(CurrentReg, hp1) and
- DeepMOVOpt(taicpu(p), taicpu(hp1)) then
- begin
- { A change has occurred, just not in p }
- Result := True;
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- if not RegUsedAfterInstruction(CurrentReg, hp1, TmpUsedRegs) and
- { Just in case something didn't get modified (e.g. an
- implicit register) }
- not RegReadByInstruction(CurrentReg, hp1) then
- begin
- { We can remove the original MOV }
- DebugMsg(SPeepholeOptimization + 'Mov2Nop 3 done',p);
- RemoveCurrentp(p, hp1);
- { UsedRegs got updated by RemoveCurrentp }
- Result := True;
- Exit;
- end;
- { If we know a MOV instruction has become a null operation, we might as well
- get rid of it now to save time. }
- if (taicpu(hp1).opcode = A_MOV) and
- (taicpu(hp1).oper[1]^.typ = top_reg) and
- SuperRegistersEqual(taicpu(hp1).oper[1]^.reg, taicpu(p).oper[0]^.reg) and
- { Just being a register is enough to confirm it's a null operation }
- (taicpu(hp1).oper[0]^.typ = top_reg) then
- begin
- Result := True;
- { Speed-up to reduce a pipeline stall... if we had something like...
- movl %eax,%edx
- movw %dx,%ax
- ... the second instruction would change to movw %ax,%ax, but
- given that it is now %ax that's active rather than %eax,
- penalties might occur due to a partial register write, so instead,
- change it to a MOVZX instruction when optimising for speed.
- }
- if not (cs_opt_size in current_settings.optimizerswitches) and
- IsMOVZXAcceptable and
- (taicpu(hp1).opsize < taicpu(p).opsize)
- {$ifdef x86_64}
- { operations already implicitly set the upper 64 bits to zero }
- and not ((taicpu(hp1).opsize = S_L) and (taicpu(p).opsize = S_Q))
- {$endif x86_64}
- then
- begin
- CurrentReg := taicpu(hp1).oper[1]^.reg;
- DebugMsg(SPeepholeOptimization + 'Zero-extension to minimise pipeline stall (Mov2Movz)',hp1);
- case taicpu(p).opsize of
- S_W:
- if taicpu(hp1).opsize = S_B then
- taicpu(hp1).opsize := S_BL
- else
- InternalError(2020012911);
- S_L{$ifdef x86_64}, S_Q{$endif x86_64}:
- case taicpu(hp1).opsize of
- S_B:
- taicpu(hp1).opsize := S_BL;
- S_W:
- taicpu(hp1).opsize := S_WL;
- else
- InternalError(2020012912);
- end;
- else
- InternalError(2020012910);
- end;
- taicpu(hp1).opcode := A_MOVZX;
- taicpu(hp1).oper[1]^.reg := newreg(getregtype(CurrentReg), getsupreg(CurrentReg), R_SUBD)
- end
- else
- begin
- GetNextInstruction_p := GetNextInstruction(hp1, hp2);
- DebugMsg(SPeepholeOptimization + 'Mov2Nop 4 done',hp1);
- RemoveInstruction(hp1);
- { The instruction after what was hp1 is now the immediate next instruction,
- so we can continue to make optimisations if it's present }
- if not GetNextInstruction_p or (hp2.typ <> ait_instruction) then
- Exit;
- hp1 := hp2;
- end;
- end;
- end;
- end;
- { Depending on the DeepMOVOpt above, it may turn out that hp1 completely
- overwrites the original destination register. e.g.
- movl ###,%reg2d
- movslq ###,%reg2q (### doesn't have to be the same as the first one)
- In this case, we can remove the MOV (Go to "Mov2Nop 5" below)
- }
- if (taicpu(p).oper[1]^.typ = top_reg) and
- MatchInstruction(hp1, [A_LEA, A_MOV, A_MOVSX, A_MOVZX{$ifdef x86_64}, A_MOVSXD{$endif x86_64}], []) and
- (taicpu(hp1).oper[1]^.typ = top_reg) and
- Reg1WriteOverwritesReg2Entirely(taicpu(hp1).oper[1]^.reg, taicpu(p).oper[1]^.reg) then
- begin
- if RegInOp(taicpu(p).oper[1]^.reg, taicpu(hp1).oper[0]^) then
- begin
- if (taicpu(hp1).oper[0]^.typ = top_reg) then
- case taicpu(p).oper[0]^.typ of
- top_const:
- { We have something like:
- movb $x, %regb
- movzbl %regb,%regd
- Change to:
- movl $x, %regd
- }
- begin
- case taicpu(hp1).opsize of
- S_BW:
- begin
- convert_mov_value(A_MOVSX, $FF);
- setsubreg(taicpu(p).oper[1]^.reg, R_SUBW);
- taicpu(p).opsize := S_W;
- end;
- S_BL:
- begin
- convert_mov_value(A_MOVSX, $FF);
- setsubreg(taicpu(p).oper[1]^.reg, R_SUBD);
- taicpu(p).opsize := S_L;
- end;
- S_WL:
- begin
- convert_mov_value(A_MOVSX, $FFFF);
- setsubreg(taicpu(p).oper[1]^.reg, R_SUBD);
- taicpu(p).opsize := S_L;
- end;
- {$ifdef x86_64}
- S_BQ:
- begin
- convert_mov_value(A_MOVSX, $FF);
- setsubreg(taicpu(p).oper[1]^.reg, R_SUBQ);
- taicpu(p).opsize := S_Q;
- end;
- S_WQ:
- begin
- convert_mov_value(A_MOVSX, $FFFF);
- setsubreg(taicpu(p).oper[1]^.reg, R_SUBQ);
- taicpu(p).opsize := S_Q;
- end;
- S_LQ:
- begin
- convert_mov_value(A_MOVSXD, $FFFFFFFF); { Note it's MOVSXD, not MOVSX }
- setsubreg(taicpu(p).oper[1]^.reg, R_SUBQ);
- taicpu(p).opsize := S_Q;
- end;
- {$endif x86_64}
- else
- { If hp1 was a MOV instruction, it should have been
- optimised already }
- InternalError(2020021001);
- end;
- DebugMsg(SPeepholeOptimization + 'MovMovXX2MovXX 2 done',p);
- RemoveInstruction(hp1);
- Result := True;
- Exit;
- end;
- top_ref:
- { We have something like:
- movb mem, %regb
- movzbl %regb,%regd
- Change to:
- movzbl mem, %regd
- }
- if (taicpu(p).oper[0]^.ref^.refaddr<>addr_full) and (IsMOVZXAcceptable or (taicpu(hp1).opcode<>A_MOVZX)) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovMovXX2MovXX 1 done',p);
- taicpu(hp1).loadref(0,taicpu(p).oper[0]^.ref^);
- RemoveCurrentP(p, hp1);
- Result:=True;
- Exit;
- end;
- else
- if (taicpu(hp1).opcode <> A_MOV) and (taicpu(hp1).opcode <> A_LEA) then
- { Just to make a saving, since there are no more optimisations with MOVZX and MOVSX/D }
- Exit;
- end;
- end
- { The RegInOp check makes sure that "movl r/m,%reg1l; movzbl (%reg1l),%reg1l"
- and "movl r/m,%reg1; leal $1(%reg1,%reg2),%reg1" etc. are not incorrectly
- optimised }
- else
- begin
- DebugMsg(SPeepholeOptimization + 'Mov2Nop 5 done',p);
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end;
- end;
- if (taicpu(hp1).opcode = A_AND) and
- (taicpu(p).oper[1]^.typ = top_reg) and
- MatchOpType(taicpu(hp1),top_const,top_reg) then
- begin
- if MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[1]^) then
- begin
- case taicpu(p).opsize of
- S_L:
- if (taicpu(hp1).oper[0]^.val = $ffffffff) then
- begin
- { Optimize out:
- mov x, %reg
- and ffffffffh, %reg
- }
- DebugMsg(SPeepholeOptimization + 'MovAnd2Mov 1 done',p);
- RemoveInstruction(hp1);
- Result:=true;
- exit;
- end;
- S_Q: { TODO: Confirm if this is even possible }
- if (taicpu(hp1).oper[0]^.val = $ffffffffffffffff) then
- begin
- { Optimize out:
- mov x, %reg
- and ffffffffffffffffh, %reg
- }
- DebugMsg(SPeepholeOptimization + 'MovAnd2Mov 2 done',p);
- RemoveInstruction(hp1);
- Result:=true;
- exit;
- end;
- else
- ;
- end;
- if ((taicpu(p).oper[0]^.typ=top_reg) or
- ((taicpu(p).oper[0]^.typ=top_ref) and (taicpu(p).oper[0]^.ref^.refaddr<>addr_full))) and
- GetNextInstruction(hp1,hp2) and
- MatchInstruction(hp2,A_TEST,[taicpu(p).opsize]) and
- MatchOperand(taicpu(hp1).oper[1]^,taicpu(hp2).oper[1]^) and
- (MatchOperand(taicpu(hp2).oper[0]^,taicpu(hp2).oper[1]^) or
- MatchOperand(taicpu(hp2).oper[0]^,-1)) and
- GetNextInstruction(hp2,hp3) and
- MatchInstruction(hp3,A_Jcc,A_Setcc,[]) and
- (taicpu(hp3).condition in [C_E,C_NE]) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- if not(RegUsedAfterInstruction(taicpu(hp2).oper[1]^.reg, hp2, TmpUsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovAndTest2Test done',p);
- taicpu(hp1).loadoper(1,taicpu(p).oper[0]^);
- taicpu(hp1).opcode:=A_TEST;
- RemoveInstruction(hp2);
- RemoveCurrentP(p, hp1);
- Result:=true;
- exit;
- end;
- end;
- end
- else if IsMOVZXAcceptable and
- (taicpu(p).oper[1]^.typ = top_reg) and (taicpu(hp1).oper[1]^.typ = top_reg) and
- (taicpu(p).oper[0]^.typ <> top_const) and { MOVZX only supports registers and memory, not immediates (use MOV for that!) }
- (getsupreg(taicpu(p).oper[1]^.reg) = getsupreg(taicpu(hp1).oper[1]^.reg))
- then
- begin
- InputVal := debug_operstr(taicpu(p).oper[0]^);
- MaskNum := debug_tostr(taicpu(hp1).oper[0]^.val);
- case taicpu(p).opsize of
- S_B:
- if (taicpu(hp1).oper[0]^.val = $ff) then
- begin
- { Convert:
- movb x, %regl movb x, %regl
- andw ffh, %regw andl ffh, %regd
- To:
- movzbw x, %regd movzbl x, %regd
- (Identical registers, just different sizes)
- }
- RegName1 := debug_regname(taicpu(p).oper[1]^.reg); { 8-bit register name }
- RegName2 := debug_regname(taicpu(hp1).oper[1]^.reg); { 16/32-bit register name }
- case taicpu(hp1).opsize of
- S_W: NewSize := S_BW;
- S_L: NewSize := S_BL;
- {$ifdef x86_64}
- S_Q: NewSize := S_BQ;
- {$endif x86_64}
- else
- InternalError(2018011510);
- end;
- end
- else
- NewSize := S_NO;
- S_W:
- if (taicpu(hp1).oper[0]^.val = $ffff) then
- begin
- { Convert:
- movw x, %regw
- andl ffffh, %regd
- To:
- movzwl x, %regd
- (Identical registers, just different sizes)
- }
- RegName1 := debug_regname(taicpu(p).oper[1]^.reg); { 16-bit register name }
- RegName2 := debug_regname(taicpu(hp1).oper[1]^.reg); { 32-bit register name }
- case taicpu(hp1).opsize of
- S_L: NewSize := S_WL;
- {$ifdef x86_64}
- S_Q: NewSize := S_WQ;
- {$endif x86_64}
- else
- InternalError(2018011511);
- end;
- end
- else
- NewSize := S_NO;
- else
- NewSize := S_NO;
- end;
- if NewSize <> S_NO then
- begin
- PreMessage := 'mov' + debug_opsize2str(taicpu(p).opsize) + ' ' + InputVal + ',' + RegName1;
- { The actual optimization }
- taicpu(p).opcode := A_MOVZX;
- taicpu(p).changeopsize(NewSize);
- taicpu(p).oper[1]^ := taicpu(hp1).oper[1]^;
- { Safeguard if "and" is followed by a conditional command }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs,tai(p.next));
- if (RegUsedAfterInstruction(NR_DEFAULTFLAGS, hp1, TmpUsedRegs)) then
- begin
- { At this point, the "and" command is effectively equivalent to
- "test %reg,%reg". This will be handled separately by the
- Peephole Optimizer. [Kit] }
- DebugMsg(SPeepholeOptimization + PreMessage +
- ' -> movz' + debug_opsize2str(NewSize) + ' ' + InputVal + ',' + RegName2, p);
- end
- else
- begin
- DebugMsg(SPeepholeOptimization + PreMessage + '; and' + debug_opsize2str(taicpu(hp1).opsize) + ' $' + MaskNum + ',' + RegName2 +
- ' -> movz' + debug_opsize2str(NewSize) + ' ' + InputVal + ',' + RegName2, p);
- RemoveInstruction(hp1);
- end;
- Result := True;
- Exit;
- end;
- end;
- end;
- if (taicpu(hp1).opcode = A_OR) and
- (taicpu(p).oper[1]^.typ = top_reg) and
- MatchOperand(taicpu(p).oper[0]^, 0) and
- MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[1]^.reg) then
- begin
- { mov 0, %reg
- or ###,%reg
- Change to (only if the flags are not used):
- mov ###,%reg
- }
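- { A hedged illustration (not from the original source): after
- movl $0,%eax
- orl %eax,%eax
- the zero flag is guaranteed to be set and the carry, sign and overflow flags are
- clear, so a following "je .Lx" is always taken and a "jne .Lx" never is; the
- code below exploits exactly this when the flags are still live. }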
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- DoOptimisation := True;
- { Even if the flags are used, we might be able to do the optimisation
- if the conditions are predictable }
- if RegInUsedRegs(NR_DEFAULTFLAGS, TmpUsedRegs) then
- begin
- { Only perform if ### = %reg (the same register) or equal to 0,
- so %reg is guaranteed to still have a value of zero }
- if MatchOperand(taicpu(hp1).oper[0]^, 0) or
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(hp1).oper[1]^.reg) then
- begin
- hp2 := hp1;
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- while RegInUsedRegs(NR_DEFAULTFLAGS, TmpUsedRegs) and
- GetNextInstruction(hp2, hp3) do
- begin
- { Don't continue modifying if the flags state is getting changed }
- if RegModifiedByInstruction(NR_DEFAULTFLAGS, hp3) then
- Break;
- UpdateUsedRegs(TmpUsedRegs, tai(hp3.Next));
- if MatchInstruction(hp3, A_Jcc, A_SETcc, A_CMOVcc, []) then
- begin
- if condition_in(C_E, taicpu(hp3).condition) or (taicpu(hp3).condition in [C_NC, C_NS, C_NO]) then
- begin
- { Condition is always true }
- case taicpu(hp3).opcode of
- A_Jcc:
- begin
- DebugMsg(SPeepholeOptimization + 'Condition is always true (jump made unconditional)', hp3);
- { Check for jump shortcuts before we destroy the condition }
- DoJumpOptimizations(hp3, TempBool);
- MakeUnconditional(taicpu(hp3));
- Result := True;
- end;
- A_CMOVcc:
- begin
- DebugMsg(SPeepholeOptimization + 'Condition is always true (CMOVcc -> MOV)', hp3);
- taicpu(hp3).opcode := A_MOV;
- taicpu(hp3).condition := C_None;
- Result := True;
- end;
- A_SETcc:
- begin
- DebugMsg(SPeepholeOptimization + 'Condition is always true (changed to MOV 1)', hp3);
- { Convert "set(c) %reg" instruction to "movb 1,%reg" }
- taicpu(hp3).opcode := A_MOV;
- taicpu(hp3).ops := 2;
- taicpu(hp3).condition := C_None;
- taicpu(hp3).opsize := S_B;
- taicpu(hp3).loadreg(1,taicpu(hp3).oper[0]^.reg);
- taicpu(hp3).loadconst(0, 1);
- Result := True;
- end;
- else
- InternalError(2021090701);
- end;
- end
- else if (taicpu(hp3).condition in [C_A, C_B, C_C, C_G, C_L, C_NE, C_NZ, C_O, C_S]) then
- begin
- { Condition is always false }
- case taicpu(hp3).opcode of
- A_Jcc:
- begin
- DebugMsg(SPeepholeOptimization + 'Condition is always false (jump removed)', hp3);
- TAsmLabel(taicpu(hp3).oper[0]^.ref^.symbol).decrefs;
- RemoveInstruction(hp3);
- Result := True;
- { Since hp3 was deleted, hp2 must not be updated }
- Continue;
- end;
- A_CMOVcc:
- begin
- DebugMsg(SPeepholeOptimization + 'Condition is always false (conditional load removed)', hp3);
- RemoveInstruction(hp3);
- Result := True;
- { Since hp3 was deleted, hp2 must not be updated }
- Continue;
- end;
- A_SETcc:
- begin
- DebugMsg(SPeepholeOptimization + 'Condition is always false (changed to MOV 0)', hp3);
- { Convert "set(c) %reg" instruction to "movb 0,%reg" }
- taicpu(hp3).opcode := A_MOV;
- taicpu(hp3).ops := 2;
- taicpu(hp3).condition := C_None;
- taicpu(hp3).opsize := S_B;
- taicpu(hp3).loadreg(1,taicpu(hp3).oper[0]^.reg);
- taicpu(hp3).loadconst(0, 0);
- Result := True;
- end;
- else
- InternalError(2021090702);
- end;
- end
- else
- { Uncertain what to do - don't optimise (although optimise other conditional statements if present) }
- DoOptimisation := False;
- end;
- hp2 := hp3;
- end;
- { Flags are still in use - don't optimise }
- if DoOptimisation and RegInUsedRegs(NR_DEFAULTFLAGS, TmpUsedRegs) then
- DoOptimisation := False;
- end
- else
- DoOptimisation := False;
- end;
- if DoOptimisation then
- begin
- {$ifdef x86_64}
- { OR only supports 32-bit sign-extended constants for 64-bit
- instructions, so compensate for this if the constant is
- encoded as a value greater than or equal to 2^31 }
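- { For example (illustrative): "orq $0x80000000,%reg" actually ORs the
- sign-extended value $FFFFFFFF80000000 into the register, so when the OR is
- turned into a MOV below, the stored constant is widened to that sign-extended
- value to preserve the result. }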
- if (taicpu(hp1).opsize = S_Q) and
- (taicpu(hp1).oper[0]^.typ = top_const) and
- (taicpu(hp1).oper[0]^.val >= $80000000) then
- taicpu(hp1).oper[0]^.val := taicpu(hp1).oper[0]^.val or $FFFFFFFF00000000;
- {$endif x86_64}
- DebugMsg(SPeepholeOptimization + 'MOV 0 / OR -> MOV', p);
- taicpu(hp1).opcode := A_MOV;
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end;
- end;
- { Next instruction is also a MOV ? }
- if MatchInstruction(hp1,A_MOV,[taicpu(p).opsize]) then
- begin
- if MatchOpType(taicpu(p), top_const, top_ref) and
- MatchOpType(taicpu(hp1), top_const, top_ref) and
- TryConstMerge(p, hp1) then
- begin
- Result := True;
- { In case we have four byte writes in a row, check for 2 more
- right now so we don't have to wait for another iteration of
- pass 1
- }
- { If two byte-writes were merged, the opsize is now S_W, not S_B }
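- { Illustrative chain (values arbitrary, not from the original source): four byte
- writes
- movb $1,(%esp); movb $2,1(%esp); movb $3,2(%esp); movb $4,3(%esp)
- first become two word writes ($0201 and $0403) and, after a second merge, the
- single instruction
- movl $0x04030201,(%esp)
- assuming the required 2- and 4-byte alignment holds. }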
- case taicpu(p).opsize of
- S_W:
- begin
- if GetNextInstruction(p, hp1) and
- MatchInstruction(hp1, A_MOV, [S_B]) and
- MatchOpType(taicpu(hp1), top_const, top_ref) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_MOV, [S_B]) and
- MatchOpType(taicpu(hp2), top_const, top_ref) and
- { Try to merge the two bytes }
- TryConstMerge(hp1, hp2) then
- { Now try to merge the two words (hp2 will get deleted) }
- TryConstMerge(p, hp1);
- end;
- S_L:
- begin
- { Though this only really benefits x86_64 and not i386, it
- gets a potential optimisation done faster and hence
- reduces the number of times OptPass1MOV is entered }
- if GetNextInstruction(p, hp1) and
- MatchInstruction(hp1, A_MOV, [S_W]) and
- MatchOpType(taicpu(hp1), top_const, top_ref) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_MOV, [S_W]) and
- MatchOpType(taicpu(hp2), top_const, top_ref) and
- { Try to merge the two words }
- TryConstMerge(hp1, hp2) then
- { This will always fail on i386, so don't bother
- calling it unless we're doing x86_64 }
- {$ifdef x86_64}
- { Now try to merge the two longwords (hp2 will get deleted) }
- TryConstMerge(p, hp1)
- {$endif x86_64}
- ;
- end;
- else
- ;
- end;
- Exit;
- end;
- if (taicpu(p).oper[1]^.typ = top_reg) and
- MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[0]^) then
- begin
- CurrentReg := taicpu(p).oper[1]^.reg;
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- { we have
- mov x, %treg
- mov %treg, y
- }
- if not(RegInOp(CurrentReg, taicpu(hp1).oper[1]^)) then
- if not(RegUsedAfterInstruction(CurrentReg, hp1, TmpUsedRegs)) then
- { we've got
- mov x, %treg
- mov %treg, y
- where %treg is not used afterwards }
- case taicpu(p).oper[0]^.typ Of
- { top_reg is covered by DeepMOVOpt }
- top_const:
- begin
- { change
- mov const, %treg
- mov %treg, y
- to
- mov const, y
- }
- if (taicpu(hp1).oper[1]^.typ=top_reg) or
- ((taicpu(p).oper[0]^.val>=low(longint)) and (taicpu(p).oper[0]^.val<=high(longint))) then
- begin
- if taicpu(hp1).oper[1]^.typ=top_reg then
- AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,usedregs);
- taicpu(p).loadOper(1,taicpu(hp1).oper[1]^);
- DebugMsg(SPeepholeOptimization + 'MovMov2Mov 5 done',p);
- RemoveInstruction(hp1);
- Result:=true;
- Exit;
- end;
- end;
- top_ref:
- case taicpu(hp1).oper[1]^.typ of
- top_reg:
- begin
- { change
- mov mem, %treg
- mov %treg, %reg
- to
- mov mem, %reg
- }
- AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,usedregs);
- taicpu(p).loadreg(1, taicpu(hp1).oper[1]^.reg);
- DebugMsg(SPeepholeOptimization + 'MovMov2Mov 3 done',p);
- RemoveInstruction(hp1);
- Result:=true;
- Exit;
- end;
- top_ref:
- begin
- {$ifdef x86_64}
- { Look for the following to simplify:
- mov x(mem1), %reg
- mov %reg, y(mem2)
- mov x+8(mem1), %reg
- mov %reg, y+8(mem2)
- Change to:
- movdqu x(mem1), %xmmreg
- movdqu %xmmreg, y(mem2)
- }
- SourceRef := taicpu(p).oper[0]^.ref^;
- TargetRef := taicpu(hp1).oper[1]^.ref^;
- if (taicpu(p).opsize = S_Q) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_MOV, [taicpu(p).opsize]) and
- MatchOpType(taicpu(hp2), top_ref, top_reg) then
- begin
- { Delay calling GetNextInstruction(hp2, hp3) for as long as possible }
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- Inc(SourceRef.offset, 8);
- if UseAVX then
- begin
- MovAligned := A_VMOVDQA;
- MovUnaligned := A_VMOVDQU;
- end
- else
- begin
- MovAligned := A_MOVDQA;
- MovUnaligned := A_MOVDQU;
- end;
- if RefsEqual(SourceRef, taicpu(hp2).oper[0]^.ref^) then
- begin
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.Next));
- Inc(TargetRef.offset, 8);
- if GetNextInstruction(hp2, hp3) and
- MatchInstruction(hp3, A_MOV, [taicpu(p).opsize]) and
- MatchOpType(taicpu(hp3), top_reg, top_ref) and
- (taicpu(hp2).oper[1]^.reg = taicpu(hp3).oper[0]^.reg) and
- RefsEqual(TargetRef, taicpu(hp3).oper[1]^.ref^) and
- not RegUsedAfterInstruction(taicpu(hp2).oper[1]^.reg, hp3, TmpUsedRegs) then
- begin
- CurrentReg := GetMMRegisterBetween(R_SUBMMX, UsedRegs, p, hp3);
- if CurrentReg <> NR_NO then
- begin
- { Remember that the offsets are 8 ahead }
- if ((SourceRef.offset mod 16) = 8) and
- (
- { Base pointer is always aligned (stack pointer won't be if there's no stack frame) }
- (SourceRef.base = current_procinfo.framepointer) or
- ((SourceRef.alignment >= 16) and ((SourceRef.alignment mod 16) = 0))
- ) then
- taicpu(p).opcode := MovAligned
- else
- taicpu(p).opcode := MovUnaligned;
- taicpu(p).opsize := S_XMM;
- taicpu(p).oper[1]^.reg := CurrentReg;
- if ((TargetRef.offset mod 16) = 8) and
- (
- { Base pointer is always aligned (stack pointer won't be if there's no stack frame) }
- (TargetRef.base = current_procinfo.framepointer) or
- ((TargetRef.alignment >= 16) and ((TargetRef.alignment mod 16) = 0))
- ) then
- taicpu(hp1).opcode := MovAligned
- else
- taicpu(hp1).opcode := MovUnaligned;
- taicpu(hp1).opsize := S_XMM;
- taicpu(hp1).oper[0]^.reg := CurrentReg;
- DebugMsg(SPeepholeOptimization + 'Used ' + debug_regname(CurrentReg) + ' to merge a pair of memory moves (MovMovMovMov2MovdqMovdq 1)', p);
- RemoveInstruction(hp2);
- RemoveInstruction(hp3);
- Result := True;
- Exit;
- end;
- end;
- end
- else
- begin
- { See if the next references are 8 less rather than 8 greater }
- Dec(SourceRef.offset, 16); { -8 the other way }
- if RefsEqual(SourceRef, taicpu(hp2).oper[0]^.ref^) then
- begin
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.Next));
- Dec(TargetRef.offset, 8); { Only 8, not 16, as it wasn't incremented unlike SourceRef }
- if GetNextInstruction(hp2, hp3) and
- MatchInstruction(hp3, A_MOV, [taicpu(p).opsize]) and
- MatchOpType(taicpu(hp3), top_reg, top_ref) and
- (taicpu(hp2).oper[1]^.reg = taicpu(hp3).oper[0]^.reg) and
- RefsEqual(TargetRef, taicpu(hp3).oper[1]^.ref^) and
- not RegUsedAfterInstruction(taicpu(hp2).oper[1]^.reg, hp3, TmpUsedRegs) then
- begin
- CurrentReg := GetMMRegisterBetween(R_SUBMMX, UsedRegs, p, hp3);
- if CurrentReg <> NR_NO then
- begin
- { hp2 and hp3 are the starting offsets, so mod = 0 this time }
- if ((SourceRef.offset mod 16) = 0) and
- (
- { Base pointer is always aligned (stack pointer won't be if there's no stack frame) }
- (SourceRef.base = current_procinfo.framepointer) or
- ((SourceRef.alignment >= 16) and ((SourceRef.alignment mod 16) = 0))
- ) then
- taicpu(hp2).opcode := MovAligned
- else
- taicpu(hp2).opcode := MovUnaligned;
- taicpu(hp2).opsize := S_XMM;
- taicpu(hp2).oper[1]^.reg := CurrentReg;
- if ((TargetRef.offset mod 16) = 0) and
- (
- { Base pointer is always aligned (stack pointer won't be if there's no stack frame) }
- (TargetRef.base = current_procinfo.framepointer) or
- ((TargetRef.alignment >= 16) and ((TargetRef.alignment mod 16) = 0))
- ) then
- taicpu(hp3).opcode := MovAligned
- else
- taicpu(hp3).opcode := MovUnaligned;
- taicpu(hp3).opsize := S_XMM;
- taicpu(hp3).oper[0]^.reg := CurrentReg;
- DebugMsg(SPeepholeOptimization + 'Used ' + debug_regname(CurrentReg) + ' to merge a pair of memory moves (MovMovMovMov2MovdqMovdq 2)', p);
- RemoveInstruction(hp1);
- RemoveCurrentP(p, hp2);
- Result := True;
- Exit;
- end;
- end;
- end;
- end;
- end;
- {$endif x86_64}
- end;
- else
- { The write target should be a reg or a ref }
- InternalError(2021091601);
- end;
- else
- ;
- end
- else
- { %treg is used afterwards, but all eventualities
- other than the first MOV instruction being a constant
- are covered by DeepMOVOpt, so only check for that }
- if (taicpu(p).oper[0]^.typ = top_const) and
- (
- { For MOV operations, a size saving is only made if the register/const is byte-sized }
- not (cs_opt_size in current_settings.optimizerswitches) or
- (taicpu(hp1).opsize = S_B)
- ) and
- (
- (taicpu(hp1).oper[1]^.typ = top_reg) or
- ((taicpu(p).oper[0]^.val >= low(longint)) and (taicpu(p).oper[0]^.val <= high(longint)))
- ) then
- begin
- DebugMsg(SPeepholeOptimization + debug_operstr(taicpu(hp1).oper[0]^) + ' = $' + debug_tostr(taicpu(p).oper[0]^.val) + '; changed to minimise pipeline stall (MovMov2Mov 6b)',hp1);
- taicpu(hp1).loadconst(0, taicpu(p).oper[0]^.val);
- end;
- end;
- if (taicpu(hp1).oper[0]^.typ = taicpu(p).oper[1]^.typ) and
- (taicpu(hp1).oper[1]^.typ = taicpu(p).oper[0]^.typ) then
- { mov reg1, mem1 or mov mem1, reg1
- mov mem2, reg2 mov reg2, mem2}
- begin
- if OpsEqual(taicpu(hp1).oper[1]^,taicpu(p).oper[0]^) then
- { mov reg1, mem1 or mov mem1, reg1
- mov mem2, reg1 mov reg2, mem1}
- begin
- if OpsEqual(taicpu(hp1).oper[0]^,taicpu(p).oper[1]^) then
- { Removes the second statement from
- mov reg1, mem1/reg2
- mov mem1/reg2, reg1 }
- begin
- if taicpu(p).oper[0]^.typ=top_reg then
- AllocRegBetween(taicpu(p).oper[0]^.reg,p,hp1,usedregs);
- DebugMsg(SPeepholeOptimization + 'MovMov2Mov 1',p);
- RemoveInstruction(hp1);
- Result:=true;
- exit;
- end
- else
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
- if (taicpu(p).oper[1]^.typ = top_ref) and
- { mov reg1, mem1
- mov mem2, reg1 }
- (taicpu(hp1).oper[0]^.ref^.refaddr = addr_no) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2,A_CMP,[taicpu(p).opsize]) and
- OpsEqual(taicpu(p).oper[1]^,taicpu(hp2).oper[0]^) and
- OpsEqual(taicpu(p).oper[0]^,taicpu(hp2).oper[1]^) and
- not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg, hp2, TmpUsedRegs)) then
- { change to
- mov reg1, mem1 mov reg1, mem1
- mov mem2, reg1 cmp reg1, mem2
- cmp mem1, reg1
- }
- begin
- RemoveInstruction(hp2);
- taicpu(hp1).opcode := A_CMP;
- taicpu(hp1).loadref(1,taicpu(hp1).oper[0]^.ref^);
- taicpu(hp1).loadreg(0,taicpu(p).oper[0]^.reg);
- AllocRegBetween(taicpu(p).oper[0]^.reg,p,hp1,UsedRegs);
- DebugMsg(SPeepholeOptimization + 'MovMovCmp2MovCmp done',hp1);
- end;
- end;
- end
- else if (taicpu(p).oper[1]^.typ=top_ref) and
- OpsEqual(taicpu(hp1).oper[0]^,taicpu(p).oper[1]^) then
- begin
- AllocRegBetween(taicpu(p).oper[0]^.reg,p,hp1,UsedRegs);
- taicpu(hp1).loadreg(0,taicpu(p).oper[0]^.reg);
- DebugMsg(SPeepholeOptimization + 'MovMov2MovMov1 done',p);
- end
- else
- begin
- TransferUsedRegs(TmpUsedRegs);
- if GetNextInstruction(hp1, hp2) and
- MatchOpType(taicpu(p),top_ref,top_reg) and
- MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[0]^) and
- (taicpu(hp1).oper[1]^.typ = top_ref) and
- MatchInstruction(hp2,A_MOV,[taicpu(p).opsize]) and
- MatchOpType(taicpu(hp2),top_ref,top_reg) and
- RefsEqual(taicpu(hp2).oper[0]^.ref^, taicpu(hp1).oper[1]^.ref^) then
- if not RegInRef(taicpu(hp2).oper[1]^.reg,taicpu(hp2).oper[0]^.ref^) and
- not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,tmpUsedRegs)) then
- { mov mem1, %reg1
- mov %reg1, mem2
- mov mem2, reg2
- to:
- mov mem1, reg2
- mov reg2, mem2}
- begin
- AllocRegBetween(taicpu(hp2).oper[1]^.reg,p,hp2,usedregs);
- DebugMsg(SPeepholeOptimization + 'MovMovMov2MovMov 1 done',p);
- taicpu(p).loadoper(1,taicpu(hp2).oper[1]^);
- taicpu(hp1).loadoper(0,taicpu(hp2).oper[1]^);
- RemoveInstruction(hp2);
- Result := True;
- end
- {$ifdef i386}
- { this is enabled for i386 only, as the rules to create the reg sets below
- are too complicated for x86-64, which would make this code too error-prone
- on x86-64
- }
- else if (taicpu(p).oper[1]^.reg <> taicpu(hp2).oper[1]^.reg) and
- not(RegInRef(taicpu(p).oper[1]^.reg,taicpu(p).oper[0]^.ref^)) and
- not(RegInRef(taicpu(hp2).oper[1]^.reg,taicpu(hp2).oper[0]^.ref^)) then
- { mov mem1, reg1 mov mem1, reg1
- mov reg1, mem2 mov reg1, mem2
- mov mem2, reg2 mov mem2, reg1
- to: to:
- mov mem1, reg1 mov mem1, reg1
- mov mem1, reg2 mov reg1, mem2
- mov reg1, mem2
- or (if mem1 depends on reg1
- and/or if mem2 depends on reg2)
- to:
- mov mem1, reg1
- mov reg1, mem2
- mov reg1, reg2
- }
- begin
- taicpu(hp1).loadRef(0,taicpu(p).oper[0]^.ref^);
- taicpu(hp1).loadReg(1,taicpu(hp2).oper[1]^.reg);
- taicpu(hp2).loadRef(1,taicpu(hp2).oper[0]^.ref^);
- taicpu(hp2).loadReg(0,taicpu(p).oper[1]^.reg);
- AllocRegBetween(taicpu(p).oper[1]^.reg,p,hp2,usedregs);
- if (taicpu(p).oper[0]^.ref^.base <> NR_NO) and
- (getsupreg(taicpu(p).oper[0]^.ref^.base) in [RS_EAX,RS_EBX,RS_ECX,RS_EDX,RS_ESI,RS_EDI]) then
- AllocRegBetween(taicpu(p).oper[0]^.ref^.base,p,hp2,usedregs);
- if (taicpu(p).oper[0]^.ref^.index <> NR_NO) and
- (getsupreg(taicpu(p).oper[0]^.ref^.index) in [RS_EAX,RS_EBX,RS_ECX,RS_EDX,RS_ESI,RS_EDI]) then
- AllocRegBetween(taicpu(p).oper[0]^.ref^.index,p,hp2,usedregs);
- end
- else if (taicpu(hp1).Oper[0]^.reg <> taicpu(hp2).Oper[1]^.reg) then
- begin
- taicpu(hp2).loadReg(0,taicpu(hp1).Oper[0]^.reg);
- AllocRegBetween(taicpu(p).oper[1]^.reg,p,hp2,usedregs);
- end
- else
- begin
- RemoveInstruction(hp2);
- end
- {$endif i386}
- ;
- end;
- end
- { movl [mem1],reg1
- movl [mem1],reg2
- to
- movl [mem1],reg1
- movl reg1,reg2
- }
- else if MatchOpType(taicpu(p),top_ref,top_reg) and
- MatchOpType(taicpu(hp1),top_ref,top_reg) and
- (taicpu(p).opsize = taicpu(hp1).opsize) and
- RefsEqual(taicpu(p).oper[0]^.ref^,taicpu(hp1).oper[0]^.ref^) and
- (taicpu(p).oper[0]^.ref^.volatility=[]) and
- (taicpu(hp1).oper[0]^.ref^.volatility=[]) and
- not(SuperRegistersEqual(taicpu(p).oper[1]^.reg,taicpu(hp1).oper[0]^.ref^.base)) and
- not(SuperRegistersEqual(taicpu(p).oper[1]^.reg,taicpu(hp1).oper[0]^.ref^.index)) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovMov2MovMov 2',p);
- taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
- end;
- { movl const1,[mem1]
- movl [mem1],reg1
- to
- movl const1,reg1
- movl reg1,[mem1]
- }
- if MatchOpType(Taicpu(p),top_const,top_ref) and
- MatchOpType(Taicpu(hp1),top_ref,top_reg) and
- (taicpu(p).opsize = taicpu(hp1).opsize) and
- RefsEqual(taicpu(hp1).oper[0]^.ref^,taicpu(p).oper[1]^.ref^) and
- not(RegInRef(taicpu(hp1).oper[1]^.reg,taicpu(hp1).oper[0]^.ref^)) then
- begin
- AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,usedregs);
- taicpu(hp1).loadReg(0,taicpu(hp1).oper[1]^.reg);
- taicpu(hp1).loadRef(1,taicpu(p).oper[1]^.ref^);
- taicpu(p).loadReg(1,taicpu(hp1).oper[0]^.reg);
- taicpu(hp1).fileinfo := taicpu(p).fileinfo;
- DebugMsg(SPeepholeOptimization + 'MovMov2MovMov 1',p);
- Result:=true;
- exit;
- end;
- { mov x,reg1; mov y,reg1 -> mov y,reg1 is handled by the Mov2Nop 5 optimisation }
- end;
- { search further than the next instruction for a mov (as long as it's not a jump) }
- if not is_calljmpuncondret(taicpu(hp1).opcode) and
- { check as much as possible before the expensive GetNextInstructionUsingRegCond call }
- (taicpu(p).oper[1]^.typ = top_reg) and
- (taicpu(p).oper[0]^.typ in [top_reg,top_const]) and
- not RegModifiedByInstruction(taicpu(p).oper[1]^.reg, hp1) then
- begin
- { we work with hp2 here, so hp1 can be still used later on when
- checking for GetNextInstruction_p }
- hp3 := hp1;
- { Initialise CrossJump (if it becomes True at any point, it will remain True) }
- CrossJump := (taicpu(hp1).opcode = A_Jcc);
- { Saves on a large number of dereferences }
- ActiveReg := taicpu(p).oper[1]^.reg;
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- while GetNextInstructionUsingRegCond(hp3,hp2,ActiveReg,CrossJump) and
- { GetNextInstructionUsingRegCond only searches one instruction ahead unless -O3 is specified }
- (hp2.typ=ait_instruction) do
- begin
- case taicpu(hp2).opcode of
- A_POP:
- if MatchOperand(taicpu(hp2).oper[0]^,ActiveReg) then
- begin
- if not CrossJump and
- not RegUsedBetween(ActiveReg, p, hp2) then
- begin
- { We can remove the original MOV since the register
- wasn't used between it and its popping from the stack }
- DebugMsg(SPeepholeOptimization + 'Mov2Nop 3c done',p);
- RemoveCurrentp(p, hp1);
- Result := True;
- Exit;
- end;
- { Can't go any further }
- Break;
- end;
- A_MOV:
- if MatchOperand(taicpu(hp2).oper[0]^,ActiveReg) and
- ((taicpu(p).oper[0]^.typ=top_const) or
- ((taicpu(p).oper[0]^.typ=top_reg) and
- not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp2))
- )
- ) then
- begin
- { we have
- mov x, %treg
- mov %treg, y
- }
- { We don't need to call UpdateUsedRegs for every instruction between
- p and hp2 because the register we're concerned about will not
- become deallocated (otherwise GetNextInstructionUsingReg would
- have stopped at an earlier instruction). [Kit] }
- TempRegUsed :=
- CrossJump { Assume the register is in use if it crossed a conditional jump } or
- RegReadByInstruction(ActiveReg, hp3) or
- RegUsedAfterInstruction(ActiveReg, hp2, TmpUsedRegs);
- case taicpu(p).oper[0]^.typ Of
- top_reg:
- begin
- { change
- mov %reg, %treg
- mov %treg, y
- to
- mov %reg, y
- }
- CurrentReg := taicpu(p).oper[0]^.reg; { Saves on a handful of pointer dereferences }
- RegName1 := debug_regname(taicpu(hp2).oper[0]^.reg);
- if MatchOperand(taicpu(hp2).oper[1]^, CurrentReg) then
- begin
- { %reg = y - remove hp2 completely (doing it here instead of relying on
- the "mov %reg,%reg" optimisation might cut down on a pass iteration) }
- if TempRegUsed then
- begin
- DebugMsg(SPeepholeOptimization + debug_regname(CurrentReg) + ' = ' + RegName1 + '; removed unnecessary instruction (MovMov2MovNop 6b)',hp2);
- AllocRegBetween(CurrentReg, p, hp2, UsedRegs);
- { Set the start of the next GetNextInstructionUsingRegCond search
- to start at the entry right before hp2 (which is about to be removed) }
- hp3 := tai(hp2.Previous);
- RemoveInstruction(hp2);
- { See if there's more we can optimise }
- Continue;
- end
- else
- begin
- RemoveInstruction(hp2);
- { We can remove the original MOV too }
- DebugMsg(SPeepholeOptimization + 'MovMov2NopNop 6b done',p);
- RemoveCurrentP(p, hp1);
- Result:=true;
- Exit;
- end;
- end
- else
- begin
- AllocRegBetween(CurrentReg, p, hp2, UsedRegs);
- taicpu(hp2).loadReg(0, CurrentReg);
- DebugMsg(SPeepholeOptimization + RegName1 + ' = ' + debug_regname(CurrentReg) + '; changed to minimise pipeline stall (MovMov2Mov 6a)',hp2);
- { Check to see if the register also appears in the reference }
- if (taicpu(hp2).oper[1]^.typ = top_ref) then
- ReplaceRegisterInRef(taicpu(hp2).oper[1]^.ref^, ActiveReg, CurrentReg);
- { Don't remove the first instruction if the temporary register is in use }
- if not TempRegUsed and
- { ReplaceRegisterInRef won't actually replace the register if it's a different size }
- not RegInOp(ActiveReg, taicpu(hp2).oper[1]^) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovMov2Mov 6 done',p);
- RemoveCurrentP(p, hp1);
- Result:=true;
- Exit;
- end;
- { No need to set Result to True here. If there's another instruction later
- on that can be optimised, it will be detected when the main Pass 1 loop
- reaches what is now hp2 and passes it through OptPass1MOV. [Kit] }
- end;
- end;
- top_const:
- if not (cs_opt_size in current_settings.optimizerswitches) or (taicpu(hp2).opsize = S_B) then
- begin
- { change
- mov const, %treg
- mov %treg, y
- to
- mov const, y
- }
- if (taicpu(hp2).oper[1]^.typ=top_reg) or
- ((taicpu(p).oper[0]^.val>=low(longint)) and (taicpu(p).oper[0]^.val<=high(longint))) then
- begin
- RegName1 := debug_regname(taicpu(hp2).oper[0]^.reg);
- taicpu(hp2).loadOper(0,taicpu(p).oper[0]^);
- if TempRegUsed then
- begin
- { Don't remove the first instruction if the temporary register is in use }
- DebugMsg(SPeepholeOptimization + RegName1 + ' = ' + debug_tostr(taicpu(p).oper[0]^.val) + '; changed to minimise pipeline stall (MovMov2Mov 7a)',hp2);
- { No need to set Result to True. If there's another instruction later on
- that can be optimised, it will be detected when the main Pass 1 loop
- reaches what is now hp2 and passes it through OptPass1MOV. [Kit] };
- end
- else
- begin
- DebugMsg(SPeepholeOptimization + 'MovMov2Mov 7 done',p);
- RemoveCurrentP(p, hp1);
- Result:=true;
- Exit;
- end;
- end;
- end;
- else
- Internalerror(2019103001);
- end;
- end
- else
- if MatchOperand(taicpu(hp2).oper[1]^, ActiveReg) then
- begin
- if not CrossJump and
- not RegUsedBetween(ActiveReg, p, hp2) and
- not RegReadByInstruction(ActiveReg, hp2) then
- begin
- { Register is not used before it is overwritten }
- DebugMsg(SPeepholeOptimization + 'Mov2Nop 3a done',p);
- RemoveCurrentp(p, hp1);
- Result := True;
- Exit;
- end;
- if (taicpu(p).oper[0]^.typ = top_const) and
- (taicpu(hp2).oper[0]^.typ = top_const) then
- begin
- if taicpu(p).oper[0]^.val = taicpu(hp2).oper[0]^.val then
- begin
- { Same value - register hasn't changed }
- DebugMsg(SPeepholeOptimization + 'Mov2Nop 2 done', hp2);
- RemoveInstruction(hp2);
- Result := True;
- { See if there's more we can optimise }
- Continue;
- end;
- end;
- end;
- A_MOVZX, A_MOVSX{$ifdef x86_64}, A_MOVSXD{$endif x86_64}:
- if MatchOpType(taicpu(hp2), top_reg, top_reg) and
- MatchOperand(taicpu(hp2).oper[0]^, ActiveReg) and
- SuperRegistersEqual(taicpu(hp2).oper[1]^.reg, ActiveReg) then
- begin
- {
- Change from:
- mov ###, %reg
- ...
- movs/z %reg,%reg (Same register, just different sizes)
- To:
- movs/z ###, %reg (Longer version)
- ...
- (remove)
- }
- DebugMsg(SPeepholeOptimization + 'MovMovs/z2Mov/s/z done', p);
- taicpu(p).oper[1]^.reg := taicpu(hp2).oper[1]^.reg;
- { Keep the first instruction as mov if ### is a constant }
- if taicpu(p).oper[0]^.typ = top_const then
- taicpu(p).opsize := reg2opsize(taicpu(hp2).oper[1]^.reg)
- else
- begin
- taicpu(p).opcode := taicpu(hp2).opcode;
- taicpu(p).opsize := taicpu(hp2).opsize;
- end;
- DebugMsg(SPeepholeOptimization + 'Removed movs/z instruction and extended earlier write (MovMovs/z2Mov/s/z)', hp2);
- AllocRegBetween(taicpu(hp2).oper[1]^.reg, p, hp2, UsedRegs);
- RemoveInstruction(hp2);
- Result := True;
- Exit;
- end;
- else
- { Move down to the MatchOpType if-block below };
- end;
- { Also catches MOV/S/Z instructions that aren't modified }
- if taicpu(p).oper[0]^.typ = top_reg then
- begin
- CurrentReg := taicpu(p).oper[0]^.reg;
- if
- not RegModifiedByInstruction(CurrentReg, hp3) and
- not RegModifiedBetween(CurrentReg, hp3, hp2) and
- DeepMOVOpt(taicpu(p), taicpu(hp2)) then
- begin
- Result := True;
- { Just in case something didn't get modified (e.g. an
- implicit register). Also, if it does read from this
- register, then there's no longer an advantage to
- changing the register on subsequent instructions.}
- if not RegReadByInstruction(ActiveReg, hp2) then
- begin
- { If a conditional jump was crossed, do not delete
- the original MOV no matter what }
- if not CrossJump and
- { RegEndOfLife returns True if the register is
- deallocated before the next instruction or has
- been loaded with a new value }
- RegEndOfLife(ActiveReg, taicpu(hp2)) then
- begin
- { We can remove the original MOV }
- DebugMsg(SPeepholeOptimization + 'Mov2Nop 3b done',p);
- RemoveCurrentp(p, hp1);
- Exit;
- end;
- if not RegModifiedByInstruction(ActiveReg, hp2) then
- begin
- { See if there's more we can optimise }
- hp3 := hp2;
- Continue;
- end;
- end;
- end;
- end;
- { Break out of the while loop under normal circumstances }
- Break;
- end;
- end;
- if (aoc_MovAnd2Mov_3 in OptsToCheck) and
- (taicpu(p).oper[1]^.typ = top_reg) and
- (taicpu(p).opsize = S_L) and
- GetNextInstructionUsingRegTrackingUse(p,hp2,taicpu(p).oper[1]^.reg) and
- (taicpu(hp2).opcode = A_AND) and
- (MatchOpType(taicpu(hp2),top_const,top_reg) or
- (MatchOpType(taicpu(hp2),top_reg,top_reg) and
- MatchOperand(taicpu(hp2).oper[0]^,taicpu(hp2).oper[1]^))
- ) then
- begin
- if SuperRegistersEqual(taicpu(p).oper[1]^.reg,taicpu(hp2).oper[1]^.reg) then
- begin
- if ((taicpu(hp2).oper[0]^.typ=top_const) and (taicpu(hp2).oper[0]^.val = $ffffffff)) or
- ((taicpu(hp2).oper[0]^.typ=top_reg) and (taicpu(hp2).opsize=S_L)) then
- begin
- { Optimize out:
- mov x, %reg
- and ffffffffh, %reg
- }
- DebugMsg(SPeepholeOptimization + 'MovAnd2Mov 3 done',p);
- RemoveInstruction(hp2);
- Result:=true;
- exit;
- end;
- end;
- end;
- { leave out the mov from "mov reg, x(%frame_pointer); leave/ret" (with
- x >= RetOffset) as it doesn't do anything (it writes either to a
- parameter or to the temporary storage space for the function
- result)
- }
- if IsExitCode(hp1) and
- (taicpu(p).oper[1]^.typ = top_ref) and
- (taicpu(p).oper[1]^.ref^.index = NR_NO) and
- (
- (
- (taicpu(p).oper[1]^.ref^.base = current_procinfo.FramePointer) and
- not (
- assigned(current_procinfo.procdef.funcretsym) and
- (taicpu(p).oper[1]^.ref^.offset <= tabstractnormalvarsym(current_procinfo.procdef.funcretsym).localloc.reference.offset)
- )
- ) or
- { Also discard writes to the stack that are below the base pointer,
- as this is temporary storage rather than a function result on the
- stack, say. }
- (
- (taicpu(p).oper[1]^.ref^.base = NR_STACK_POINTER_REG) and
- (taicpu(p).oper[1]^.ref^.offset < current_procinfo.final_localsize)
- )
- ) then
- begin
- RemoveCurrentp(p, hp1);
- DebugMsg(SPeepholeOptimization + 'removed deadstore before leave/ret',p);
- RemoveLastDeallocForFuncRes(p);
- Result:=true;
- exit;
- end;
- if MatchInstruction(hp1,A_CMP,A_TEST,[taicpu(p).opsize]) then
- begin
- if MatchOpType(taicpu(p),top_reg,top_ref) and
- (taicpu(hp1).oper[1]^.typ = top_ref) and
- RefsEqual(taicpu(p).oper[1]^.ref^, taicpu(hp1).oper[1]^.ref^) then
- begin
- { change
- mov reg1, mem1
- test/cmp x, mem1
- to
- mov reg1, mem1
- test/cmp x, reg1
- }
- taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
- DebugMsg(SPeepholeOptimization + 'MovTestCmp2MovTestCmp 1',hp1);
- AllocRegBetween(taicpu(p).oper[0]^.reg,p,hp1,usedregs);
- Result := True;
- Exit;
- end;
- if DoMovCmpMemOpt(p, hp1, True) then
- begin
- Result := True;
- Exit;
- end;
- end;
- if MatchInstruction(hp1,A_LEA,[S_L{$ifdef x86_64},S_Q{$endif x86_64}]) and
- { If the flags register is in use, don't change the instruction to an
- ADD otherwise this will scramble the flags. [Kit] }
- not RegInUsedRegs(NR_DEFAULTFLAGS, UsedRegs) then
- begin
- if MatchOpType(Taicpu(p),top_ref,top_reg) and
- ((MatchReference(Taicpu(hp1).oper[0]^.ref^,Taicpu(hp1).oper[1]^.reg,Taicpu(p).oper[1]^.reg) and
- (Taicpu(hp1).oper[0]^.ref^.base<>Taicpu(p).oper[1]^.reg)
- ) or
- (MatchReference(Taicpu(hp1).oper[0]^.ref^,Taicpu(p).oper[1]^.reg,Taicpu(hp1).oper[1]^.reg) and
- (Taicpu(hp1).oper[0]^.ref^.index<>Taicpu(p).oper[1]^.reg)
- )
- ) then
- { mov reg1,ref
- lea reg2,[reg1,reg2]
- to
- add reg2,ref}
- begin
- TransferUsedRegs(TmpUsedRegs);
- { reg1 may not be used afterwards }
- if not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg, hp1, TmpUsedRegs)) then
- begin
- Taicpu(hp1).opcode:=A_ADD;
- Taicpu(hp1).oper[0]^.ref^:=Taicpu(p).oper[0]^.ref^;
- DebugMsg(SPeepholeOptimization + 'MovLea2Add done',hp1);
- RemoveCurrentp(p, hp1);
- result:=true;
- exit;
- end;
- end;
- { If the LEA instruction can be converted into an arithmetic instruction,
- it may be possible to then fold it in the next optimisation, otherwise
- there's nothing more that can be optimised here. }
- if not ConvertLEA(taicpu(hp1)) then
- Exit;
- end;
- if (taicpu(p).oper[1]^.typ = top_reg) and
- (hp1.typ = ait_instruction) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2,A_MOV,[]) and
- (SuperRegistersEqual(taicpu(hp2).oper[0]^.reg,taicpu(p).oper[1]^.reg)) and
- (topsize2memsize[taicpu(hp1).opsize]>=topsize2memsize[taicpu(hp2).opsize]) and
- (
- IsFoldableArithOp(taicpu(hp1), taicpu(p).oper[1]^.reg)
- {$ifdef x86_64}
- or
- (
- (taicpu(p).opsize=S_L) and (taicpu(hp1).opsize=S_Q) and (taicpu(hp2).opsize=S_L) and
- IsFoldableArithOp(taicpu(hp1), newreg(R_INTREGISTER,getsupreg(taicpu(p).oper[1]^.reg),R_SUBQ))
- )
- {$endif x86_64}
- ) then
- begin
- if OpsEqual(taicpu(hp2).oper[1]^, taicpu(p).oper[0]^) and
- (taicpu(hp2).oper[0]^.typ=top_reg) then
- { change movsX/movzX reg/ref, reg2
- add/sub/or/... reg3/$const, reg2
- mov reg2 reg/ref
- dealloc reg2
- to
- add/sub/or/... reg3/$const, reg/ref }
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
- If not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp2,TmpUsedRegs)) then
- begin
- { by example:
- movswl %si,%eax movswl %si,%eax p
- decl %eax addl %edx,%eax hp1
- movw %ax,%si movw %ax,%si hp2
- ->
- movswl %si,%eax movswl %si,%eax p
- decw %eax addw %edx,%eax hp1
- movw %ax,%si movw %ax,%si hp2
- }
- DebugMsg(SPeepholeOptimization + 'MovOpMov2Op ('+
- debug_op2str(taicpu(p).opcode)+debug_opsize2str(taicpu(p).opsize)+' '+
- debug_op2str(taicpu(hp1).opcode)+debug_opsize2str(taicpu(hp1).opsize)+' '+
- debug_op2str(taicpu(hp2).opcode)+debug_opsize2str(taicpu(hp2).opsize)+')',p);
- taicpu(hp1).changeopsize(taicpu(hp2).opsize);
- {
- ->
- movswl %si,%eax movswl %si,%eax p
- decw %si addw %dx,%si hp1
- movw %ax,%si movw %ax,%si hp2
- }
- case taicpu(hp1).ops of
- 1:
- begin
- taicpu(hp1).loadoper(0, taicpu(hp2).oper[1]^);
- if taicpu(hp1).oper[0]^.typ=top_reg then
- setsubreg(taicpu(hp1).oper[0]^.reg,getsubreg(taicpu(hp2).oper[0]^.reg));
- end;
- 2:
- begin
- taicpu(hp1).loadoper(1, taicpu(hp2).oper[1]^);
- if (taicpu(hp1).oper[0]^.typ=top_reg) and
- (taicpu(hp1).opcode<>A_SHL) and
- (taicpu(hp1).opcode<>A_SHR) and
- (taicpu(hp1).opcode<>A_SAR) then
- setsubreg(taicpu(hp1).oper[0]^.reg,getsubreg(taicpu(hp2).oper[0]^.reg));
- end;
- else
- internalerror(2008042701);
- end;
- {
- ->
- decw %si addw %dx,%si p
- }
- RemoveInstruction(hp2);
- RemoveCurrentP(p, hp1);
- Result:=True;
- Exit;
- end;
- end;
- if MatchOpType(taicpu(hp2),top_reg,top_reg) and
- not(SuperRegistersEqual(taicpu(hp1).oper[0]^.reg,taicpu(hp2).oper[1]^.reg)) and
- ((topsize2memsize[taicpu(hp1).opsize]<= topsize2memsize[taicpu(hp2).opsize]) or
- { opsize matters for these opcodes, we could probably work around this, but it is not worth the effort }
- ((taicpu(hp1).opcode<>A_SHL) and (taicpu(hp1).opcode<>A_SHR) and (taicpu(hp1).opcode<>A_SAR))
- )
- {$ifdef i386}
- { byte registers of esi, edi, ebp, esp are not available on i386 }
- and ((taicpu(hp2).opsize<>S_B) or not(getsupreg(taicpu(hp1).oper[0]^.reg) in [RS_ESI,RS_EDI,RS_EBP,RS_ESP]))
- and ((taicpu(hp2).opsize<>S_B) or not(getsupreg(taicpu(p).oper[0]^.reg) in [RS_ESI,RS_EDI,RS_EBP,RS_ESP]))
- {$endif i386}
- then
- { change movsX/movzX reg/ref, reg2
- add/sub/or/... regX/$const, reg2
- mov reg2, reg3
- dealloc reg2
- to
- movsX/movzX reg/ref, reg3
- add/sub/or/... reg3/$const, reg3
- }
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
- If not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp2,TmpUsedRegs)) then
- begin
- { by example:
- movswl %si,%eax movswl %si,%eax p
- decl %eax addl %edx,%eax hp1
- movw %ax,%si movw %ax,%si hp2
- ->
- movswl %si,%eax movswl %si,%eax p
- decw %eax addw %edx,%eax hp1
- movw %ax,%si movw %ax,%si hp2
- }
- DebugMsg(SPeepholeOptimization + 'MovOpMov2MovOp ('+
- debug_op2str(taicpu(p).opcode)+debug_opsize2str(taicpu(p).opsize)+' '+
- debug_op2str(taicpu(hp1).opcode)+debug_opsize2str(taicpu(hp1).opsize)+' '+
- debug_op2str(taicpu(hp2).opcode)+debug_opsize2str(taicpu(hp2).opsize)+')',p);
- { limit size of constants as well to avoid assembler errors, but
- check opsize to avoid overflow when left shifting the 1 }
- if (taicpu(p).oper[0]^.typ=top_const) and (topsize2memsize[taicpu(hp2).opsize]<=63) then
- taicpu(p).oper[0]^.val:=taicpu(p).oper[0]^.val and ((qword(1) shl topsize2memsize[taicpu(hp2).opsize])-1);
- {$ifdef x86_64}
- { Be careful of, for example:
- movl %reg1,%reg2
- addl %reg3,%reg2
- movq %reg2,%reg4
- This will cause problems if the upper 32-bits of %reg3 or %reg4 are non-zero
- }
- if (taicpu(hp1).opsize = S_L) and (taicpu(hp2).opsize = S_Q) then
- begin
- taicpu(hp2).changeopsize(S_L);
- setsubreg(taicpu(hp2).oper[0]^.reg, R_SUBD);
- setsubreg(taicpu(hp2).oper[1]^.reg, R_SUBD);
- end;
- {$endif x86_64}
- taicpu(hp1).changeopsize(taicpu(hp2).opsize);
- taicpu(p).changeopsize(taicpu(hp2).opsize);
- if taicpu(p).oper[0]^.typ=top_reg then
- setsubreg(taicpu(p).oper[0]^.reg,getsubreg(taicpu(hp2).oper[0]^.reg));
- taicpu(p).loadoper(1, taicpu(hp2).oper[1]^);
- AllocRegBetween(taicpu(p).oper[1]^.reg,p,hp1,usedregs);
- {
- ->
- movswl %si,%eax movswl %si,%eax p
- decw %si addw %dx,%si hp1
- movw %ax,%si movw %ax,%si hp2
- }
- case taicpu(hp1).ops of
- 1:
- begin
- taicpu(hp1).loadoper(0, taicpu(hp2).oper[1]^);
- if taicpu(hp1).oper[0]^.typ=top_reg then
- setsubreg(taicpu(hp1).oper[0]^.reg,getsubreg(taicpu(hp2).oper[0]^.reg));
- end;
- 2:
- begin
- taicpu(hp1).loadoper(1, taicpu(hp2).oper[1]^);
- if (taicpu(hp1).oper[0]^.typ=top_reg) and
- (taicpu(hp1).opcode<>A_SHL) and
- (taicpu(hp1).opcode<>A_SHR) and
- (taicpu(hp1).opcode<>A_SAR) then
- setsubreg(taicpu(hp1).oper[0]^.reg,getsubreg(taicpu(hp2).oper[0]^.reg));
- end;
- else
- internalerror(2018111801);
- end;
- {
- ->
- decw %si addw %dx,%si p
- }
- RemoveInstruction(hp2);
- end;
- end;
- end;
- if MatchInstruction(hp1,A_BTS,A_BTR,[Taicpu(p).opsize]) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2,A_OR,[Taicpu(p).opsize]) and
- MatchOperand(Taicpu(p).oper[0]^,0) and
- (Taicpu(p).oper[1]^.typ = top_reg) and
- MatchOperand(Taicpu(p).oper[1]^,Taicpu(hp1).oper[1]^) and
- MatchOperand(Taicpu(p).oper[1]^,Taicpu(hp2).oper[1]^) then
- { mov reg1,0
- bts reg1,operand1 --> mov reg1,operand2
- or reg1,operand2 bts reg1,operand1}
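- { e.g. (illustrative AT&T example, registers chosen arbitrarily):
- movl $0,%eax; btsl %ecx,%eax; orl %edx,%eax
- becomes
- movl %edx,%eax; btsl %ecx,%eax }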
- begin
- Taicpu(hp2).opcode:=A_MOV;
- DebugMsg(SPeepholeOptimization + 'MovBtsOr2MovBts done',hp1);
- asml.remove(hp1);
- insertllitem(hp2,hp2.next,hp1);
- RemoveCurrentp(p, hp1);
- Result:=true;
- exit;
- end;
- {
- mov ref,reg0
- <op> reg0,reg1
- dealloc reg0
- to
- <op> ref,reg1
- }
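- { e.g. (illustrative registers, %eax at the end of its life after the add):
- movl 4(%ebx),%eax; addl %eax,%ecx
- becomes
- addl 4(%ebx),%ecx }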
- if MatchOpType(taicpu(p),top_ref,top_reg) and
- MatchOpType(taicpu(hp1),top_reg,top_reg) and
- MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[0]^) and
- MatchInstruction(hp1,[A_AND,A_OR,A_XOR,A_ADD,A_SUB,A_CMP],[Taicpu(p).opsize]) and
- not(MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^)) and
- RegEndOfLife(taicpu(p).oper[1]^.reg,taicpu(hp1)) then
- begin
- taicpu(hp1).loadoper(0,taicpu(p).oper[0]^);
- DebugMsg(SPeepholeOptimization + 'MovOp2Op done',hp1);
- RemoveCurrentp(p, hp1);
- Result:=true;
- exit;
- end;
- {$ifdef x86_64}
- { Convert:
- movq x(ref),%reg64
- shrq y,%reg64
- To:
- movl x+4(ref),%reg32
- shrl y-32,%reg32 (Remove if y = 32)
- }
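- { e.g. for y = 32 (illustrative registers):
- movq 8(%rbx),%rax; shrq $32,%rax
- becomes
- movl 12(%rbx),%eax
- i.e. only the upper dword of the original qword is loaded }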
- if (taicpu(p).opsize = S_Q) and
- (taicpu(p).oper[0]^.typ = top_ref) and { Second operand will be a register }
- (taicpu(p).oper[0]^.ref^.offset <= $7FFFFFFB) and
- MatchInstruction(hp1, A_SHR, [taicpu(p).opsize]) and
- MatchOpType(taicpu(hp1), top_const, top_reg) and
- (taicpu(hp1).oper[0]^.val >= 32) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg) then
- begin
- RegName1 := debug_regname(taicpu(hp1).oper[1]^.reg);
- PreMessage := 'movq ' + debug_operstr(taicpu(p).oper[0]^) + ',' + RegName1 + '; ' +
- 'shrq $' + debug_tostr(taicpu(hp1).oper[0]^.val) + ',' + RegName1 + ' -> movl ';
- { Convert to 32-bit }
- setsubreg(taicpu(p).oper[1]^.reg, R_SUBD);
- taicpu(p).opsize := S_L;
- Inc(taicpu(p).oper[0]^.ref^.offset, 4);
- PreMessage := PreMessage + debug_operstr(taicpu(p).oper[0]^) + ',' + debug_regname(taicpu(p).oper[1]^.reg);
- if (taicpu(hp1).oper[0]^.val = 32) then
- begin
- DebugMsg(SPeepholeOptimization + PreMessage + ' (MovShr2Mov)', p);
- RemoveInstruction(hp1);
- end
- else
- begin
- { This will potentially open up more arithmetic operations since
- the peephole optimizer now has a big hint that only the lower
- 32 bits are currently in use (and opcodes are smaller in size) }
- setsubreg(taicpu(hp1).oper[1]^.reg, R_SUBD);
- taicpu(hp1).opsize := S_L;
- Dec(taicpu(hp1).oper[0]^.val, 32);
- DebugMsg(SPeepholeOptimization + PreMessage +
- '; shrl $' + debug_tostr(taicpu(hp1).oper[0]^.val) + ',' + debug_regname(taicpu(hp1).oper[1]^.reg) + ' (MovShr2MovShr)', p);
- end;
- Result := True;
- Exit;
- end;
- {$endif x86_64}
- { Backward optimisation. If we have:
- func. %reg1,%reg2
- mov %reg2,%reg3
- (dealloc %reg2)
- Change to:
- func. %reg1,%reg3 (see comment below for what a valid func. is)
- }
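- { e.g. (illustrative registers, assuming %eax is not used afterwards):
- popcntl %ecx,%eax; movl %eax,%edx
- becomes
- popcntl %ecx,%edx }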
- if MatchOpType(taicpu(p), top_reg, top_reg) then
- begin
- CurrentReg := taicpu(p).oper[0]^.reg;
- ActiveReg := taicpu(p).oper[1]^.reg;
- TransferUsedRegs(TmpUsedRegs);
- if not RegUsedAfterInstruction(CurrentReg, p, TmpUsedRegs) and
- GetLastInstruction(p, hp2) and
- (hp2.typ = ait_instruction) and
- { Have to make sure it's an instruction that only reads from
- operand 1 and only writes to (not reads or modifies) operand 2;
- in essence, a one-operand pure function such as BSR or POPCNT }
- (taicpu(hp2).ops = 2) and
- (insprop[taicpu(hp2).opcode].Ch * [Ch_Rop1, Ch_Wop2] = [Ch_Rop1, Ch_Wop2]) and
- (taicpu(hp2).oper[1]^.typ = top_reg) and
- (taicpu(hp2).oper[1]^.reg = CurrentReg) then
- begin
- case taicpu(hp2).opcode of
- A_FSTSW, A_FNSTSW,
- A_IN, A_INS, A_OUT, A_OUTS,
- A_CMPS, A_LODS, A_MOVS, A_SCAS, A_STOS,
- { These routines have explicit operands, but they are restricted in
- what they can be (e.g. IN and OUT can only read from AL, AX or
- EAX). }
- A_CMOVcc:
- { CMOV is not valid either because then CurrentReg will depend
- on an unknown value if the condition is False and hence is
- not a pure write }
- ;
- else
- begin
- DebugMsg(SPeepholeOptimization + 'Removed MOV and changed destination on previous instruction to optimise register usage (FuncMov2Func)', p);
- taicpu(hp2).oper[1]^.reg := ActiveReg;
- AllocRegBetween(ActiveReg, hp2, p, TmpUsedRegs);
- RemoveCurrentp(p, hp1);
- Result := True;
- Exit;
- end;
- end;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1MOVXX(var p : tai) : boolean;
- var
- hp1 : tai;
- begin
- Result:=false;
- if taicpu(p).ops <> 2 then
- exit;
- if (MatchOpType(taicpu(p),top_reg,top_reg) and GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[1]^.reg)) or
- GetNextInstruction(p,hp1) then
- begin
- if MatchInstruction(hp1,taicpu(p).opcode,[taicpu(p).opsize]) and
- (taicpu(hp1).ops = 2) then
- begin
- if (taicpu(hp1).oper[0]^.typ = taicpu(p).oper[1]^.typ) and
- (taicpu(hp1).oper[1]^.typ = taicpu(p).oper[0]^.typ) then
- { movXX reg1, mem1 or movXX mem1, reg1
- movXX mem2, reg2 movXX reg2, mem2}
- begin
- if OpsEqual(taicpu(hp1).oper[1]^,taicpu(p).oper[0]^) then
- { movXX reg1, mem1 or movXX mem1, reg1
- movXX mem2, reg1 movXX reg2, mem1}
- begin
- if OpsEqual(taicpu(hp1).oper[0]^,taicpu(p).oper[1]^) then
- begin
- { Removes the second statement from
- movXX reg1, mem1/reg2
- movXX mem1/reg2, reg1
- }
- if taicpu(p).oper[0]^.typ=top_reg then
- AllocRegBetween(taicpu(p).oper[0]^.reg,p,hp1,usedregs);
- { Removes the second statement from
- movXX mem1/reg1, reg2
- movXX reg2, mem1/reg1
- }
- if (taicpu(p).oper[1]^.typ=top_reg) and
- not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,UsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovXXMovXX2Nop 1 done',p);
- RemoveInstruction(hp1);
- RemoveCurrentp(p); { p will now be equal to the instruction that follows what was hp1 }
- Result:=true;
- exit;
- end
- else if ((taicpu(hp1).oper[1]^.typ<>top_ref) or (not(vol_write in taicpu(hp1).oper[1]^.ref^.volatility))) and
- ((taicpu(hp1).oper[0]^.typ<>top_ref) or (not(vol_read in taicpu(hp1).oper[0]^.ref^.volatility))) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovXXMovXX2MoVXX 1 done',p);
- RemoveInstruction(hp1);
- Result:=true;
- exit;
- end;
- end
- end;
- end;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1OP(var p : tai) : boolean;
- var
- hp1 : tai;
- begin
- result:=false;
- { replace
- <Op>X %mreg1,%mreg2 // Op in [ADD,MUL]
- MovX %mreg2,%mreg1
- dealloc %mreg2
- by
- <Op>X %mreg2,%mreg1
- ?
- }
- if GetNextInstruction(p,hp1) and
- { we mix single and double operations here because we assume that the compiler
- generates vmovapd only after double operations and vmovaps only after single operations }
- MatchInstruction(hp1,A_MOVAPD,A_MOVAPS,[S_NO]) and
- MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[0]^) and
- MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[1]^) and
- (taicpu(p).oper[0]^.typ=top_reg) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,TmpUsedRegs)) then
- begin
- taicpu(p).loadoper(0,taicpu(hp1).oper[0]^);
- taicpu(p).loadoper(1,taicpu(hp1).oper[1]^);
- DebugMsg(SPeepholeOptimization + 'OpMov2Op done',p);
- RemoveInstruction(hp1);
- result:=true;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1Test(var p: tai) : boolean;
- var
- hp1, p_label, p_dist, hp1_dist: tai;
- JumpLabel, JumpLabel_dist: TAsmLabel;
- FirstValue, SecondValue: TCGInt;
- begin
- Result := False;
- if (taicpu(p).oper[0]^.typ = top_const) and
- (taicpu(p).oper[0]^.val <> -1) then
- begin
- { Convert unsigned maximum constants to -1 to aid optimisation }
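- { e.g. testw $0xFFFF,%ax becomes testw $-1,%ax - the same bit pattern,
- but in the canonical form the checks below look for }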
- case taicpu(p).opsize of
- S_B:
- if (taicpu(p).oper[0]^.val and $FF) = $FF then
- begin
- taicpu(p).oper[0]^.val := -1;
- Result := True;
- Exit;
- end;
- S_W:
- if (taicpu(p).oper[0]^.val and $FFFF) = $FFFF then
- begin
- taicpu(p).oper[0]^.val := -1;
- Result := True;
- Exit;
- end;
- S_L:
- if (taicpu(p).oper[0]^.val and $FFFFFFFF) = $FFFFFFFF then
- begin
- taicpu(p).oper[0]^.val := -1;
- Result := True;
- Exit;
- end;
- {$ifdef x86_64}
- S_Q:
- { Storing anything greater than $7FFFFFFF is not possible so do
- nothing };
- {$endif x86_64}
- else
- InternalError(2021121001);
- end;
- end;
- if GetNextInstruction(p, hp1) and
- TrySwapMovCmp(p, hp1) then
- begin
- Result := True;
- Exit;
- end;
- { Search for:
- test $x,(reg/ref)
- jne @lbl1
- test $y,(reg/ref) (same register or reference)
- jne @lbl1
- Change to:
- test $(x or y),(reg/ref)
- jne @lbl1
- (Note, this doesn't work with je instead of jne)
- Also catch cases where "cmp $0,(reg/ref)" and "test %reg,%reg" are used.
- Also search for:
- test $x,(reg/ref)
- je @lbl1
- test $y,(reg/ref)
- je/jne @lbl2
- If (x or y) = x, then the second jump is deterministic
- }
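- { e.g. (illustrative operands, both jumps going to the same label):
- testb $1,(%eax); jne @lbl; testb $2,(%eax); jne @lbl
- becomes
- testb $3,(%eax); jne @lbl }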
- if (
- (
- (taicpu(p).oper[0]^.typ = top_const) or
- (
- { test %reg,%reg can be considered equivalent to test $-1,%reg }
- (taicpu(p).oper[0]^.typ = top_reg) and
- MatchOperand(taicpu(p).oper[1]^, taicpu(p).oper[0]^.reg)
- )
- ) and
- MatchInstruction(hp1, A_JCC, [])
- ) then
- begin
- if (taicpu(p).oper[0]^.typ = top_reg) and
- MatchOperand(taicpu(p).oper[1]^, taicpu(p).oper[0]^.reg) then
- FirstValue := -1
- else
- FirstValue := taicpu(p).oper[0]^.val;
- { If we have several test/jne's in a row, it might be the case that
- the second label doesn't go to the same location, but the one
- after it might (e.g. test; jne @lbl1; test; jne @lbl2; test; jne @lbl1),
- so accommodate for this with a while loop.
- }
- hp1_dist := hp1;
- if GetNextInstruction(hp1, p_dist) and
- (p_dist.typ = ait_instruction) and
- (
- (
- (taicpu(p_dist).opcode = A_TEST) and
- (
- (taicpu(p_dist).oper[0]^.typ = top_const) or
- { test %reg,%reg can be considered equivalent to test $-1,%reg }
- MatchOperand(taicpu(p_dist).oper[1]^, taicpu(p_dist).oper[0]^)
- )
- ) or
- (
- { cmp 0,%reg = test %reg,%reg }
- (taicpu(p_dist).opcode = A_CMP) and
- MatchOperand(taicpu(p_dist).oper[0]^, 0)
- )
- ) and
- { Make sure the destination operands are actually the same }
- MatchOperand(taicpu(p_dist).oper[1]^, taicpu(p).oper[1]^) and
- GetNextInstruction(p_dist, hp1_dist) and
- MatchInstruction(hp1_dist, A_JCC, []) then
- begin
- if
- (taicpu(p_dist).opcode = A_CMP) { constant will be zero } or
- (
- (taicpu(p_dist).oper[0]^.typ = top_reg) and
- MatchOperand(taicpu(p_dist).oper[1]^, taicpu(p_dist).oper[0]^.reg)
- ) then
- SecondValue := -1
- else
- SecondValue := taicpu(p_dist).oper[0]^.val;
- { If both of the TEST constants are identical, delete the second
- TEST that is unnecessary. }
- if (FirstValue = SecondValue) then
- begin
- DebugMsg(SPeepholeOptimization + 'TEST/Jcc/TEST; removed superfluous TEST', p_dist);
- RemoveInstruction(p_dist);
- { Don't let the flags register become deallocated and reallocated between the jumps }
- AllocRegBetween(NR_DEFAULTFLAGS, hp1, hp1_dist, UsedRegs);
- Result := True;
- if condition_in(taicpu(hp1_dist).condition, taicpu(hp1).condition) then
- begin
- { Since the second jump's condition is a subset of the first, we
- know it will never branch because the first jump dominates it.
- Get it out of the way now rather than wait for the jump
- optimisations for a speed boost. }
- if IsJumpToLabel(taicpu(hp1_dist)) then
- TAsmLabel(taicpu(hp1_dist).oper[0]^.ref^.symbol).DecRefs;
- DebugMsg(SPeepholeOptimization + 'Removed dominated jump (via TEST/Jcc/TEST)', hp1_dist);
- RemoveInstruction(hp1_dist);
- end
- else if condition_in(inverse_cond(taicpu(hp1).condition), taicpu(hp1_dist).condition) then
- begin
- { If the inverse of the first condition is a subset of the second,
- the second one will definitely branch if the first one doesn't }
- DebugMsg(SPeepholeOptimization + 'Conditional jump will always branch (via TEST/Jcc/TEST)', hp1_dist);
- MakeUnconditional(taicpu(hp1_dist));
- RemoveDeadCodeAfterJump(hp1_dist);
- end;
- Exit;
- end;
- if (taicpu(hp1).condition in [C_NE, C_NZ]) and
- (taicpu(hp1_dist).condition in [C_NE, C_NZ]) and
- { If the first instruction is test %reg,%reg or test $-1,%reg,
- then the second jump will never branch, so it can also be
- removed regardless of where it goes }
- (
- (FirstValue = -1) or
- (SecondValue = -1) or
- MatchOperand(taicpu(hp1_dist).oper[0]^, taicpu(hp1).oper[0]^)
- ) then
- begin
- { Same jump location... can be a register since nothing's changed }
- { If any of the entries are equivalent to test %reg,%reg, then the
- merged $(x or y) is also test %reg,%reg / test $-1,%reg }
- taicpu(p).loadconst(0, FirstValue or SecondValue);
- if IsJumpToLabel(taicpu(hp1_dist)) then
- TAsmLabel(taicpu(hp1_dist).oper[0]^.ref^.symbol).DecRefs;
- DebugMsg(SPeepholeOptimization + 'TEST/JNE/TEST/JNE merged', p);
- RemoveInstruction(hp1_dist);
- { Only remove the second test if no jumps or other conditional instructions follow }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- if not RegUsedAfterInstruction(NR_DEFAULTFLAGS, p_dist, TmpUsedRegs) then
- RemoveInstruction(p_dist);
- Result := True;
- Exit;
- end;
- end;
- end;
- { Search for:
- test %reg,%reg
- j(c1) @lbl1
- ...
- @lbl:
- test %reg,%reg (same register)
- j(c2) @lbl2
- If c2 is a subset of c1, change to:
- test %reg,%reg
- j(c1) @lbl2
- (@lbl1 may become a dead label as a result)
- }
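- { e.g. (illustrative; the "equal" condition is a subset of "less or equal"):
- testl %eax,%eax; je @lbl1; ... @lbl1: testl %eax,%eax; jle @lbl2
- the first jump can be redirected straight to @lbl2 }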
- if (taicpu(p).oper[1]^.typ = top_reg) and
- (taicpu(p).oper[0]^.typ = top_reg) and
- (taicpu(p).oper[0]^.reg = taicpu(p).oper[1]^.reg) and
- MatchInstruction(hp1, A_JCC, []) and
- IsJumpToLabel(taicpu(hp1)) then
- begin
- JumpLabel := TAsmLabel(taicpu(hp1).oper[0]^.ref^.symbol);
- p_label := nil;
- if Assigned(JumpLabel) then
- p_label := getlabelwithsym(JumpLabel);
- if Assigned(p_label) and
- GetNextInstruction(p_label, p_dist) and
- MatchInstruction(p_dist, A_TEST, []) and
- { It's fine if the second test uses smaller sub-registers }
- (taicpu(p_dist).opsize <= taicpu(p).opsize) and
- MatchOpType(taicpu(p_dist), top_reg, top_reg) and
- SuperRegistersEqual(taicpu(p_dist).oper[0]^.reg, taicpu(p).oper[0]^.reg) and
- SuperRegistersEqual(taicpu(p_dist).oper[1]^.reg, taicpu(p).oper[1]^.reg) and
- GetNextInstruction(p_dist, hp1_dist) and
- MatchInstruction(hp1_dist, A_JCC, []) then { This doesn't have to be an explicit label }
- begin
- JumpLabel_dist := TAsmLabel(taicpu(hp1_dist).oper[0]^.ref^.symbol);
- if JumpLabel = JumpLabel_dist then
- { This is an infinite loop }
- Exit;
- { Best optimisation when the first condition is a subset (or equal) of the second }
- if condition_in(taicpu(hp1).condition, taicpu(hp1_dist).condition) then
- begin
- { Any registers used here will already be allocated }
- if Assigned(JumpLabel_dist) then
- JumpLabel_dist.IncRefs;
- if Assigned(JumpLabel) then
- JumpLabel.DecRefs;
- DebugMsg(SPeepholeOptimization + 'TEST/Jcc/@Lbl/TEST/Jcc -> TEST/Jcc, redirecting first jump', hp1);
- taicpu(hp1).loadref(0, taicpu(hp1_dist).oper[0]^.ref^);
- Result := True;
- Exit;
- end;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1Add(var p : tai) : boolean;
- var
- hp1, hp2: tai;
- ActiveReg: TRegister;
- OldOffset: asizeint;
- ThisConst: TCGInt;
- function RegDeallocated: Boolean;
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- Result := not(RegUsedAfterInstruction(ActiveReg,hp1,TmpUsedRegs))
- end;
- begin
- result:=false;
- hp1 := nil;
- { replace
- addX const,%reg1
- leaX (%reg1,%reg1,Y),%reg2 // Base or index might not be equal to reg1
- dealloc %reg1
- by
- leaX const+const*Y(%reg1,%reg1,Y),%reg2
- }
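- { e.g. (illustrative registers, %eax deallocated after the lea):
- addl $4,%eax; leal 8(%eax,%eax,2),%edx
- becomes
- leal 20(%eax,%eax,2),%edx (8 + 4 for the base + 4*2 for the scaled index) }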
- if MatchOpType(taicpu(p),top_const,top_reg) then
- begin
- ActiveReg := taicpu(p).oper[1]^.reg;
- { Ensures the entire register was updated }
- if (taicpu(p).opsize >= S_L) and
- GetNextInstructionUsingReg(p,hp1, ActiveReg) and
- MatchInstruction(hp1,A_LEA,[]) and
- (SuperRegistersEqual(ActiveReg, taicpu(hp1).oper[0]^.ref^.base) or
- SuperRegistersEqual(ActiveReg, taicpu(hp1).oper[0]^.ref^.index)) and
- (
- { Cover the case where the register in the reference is also the destination register }
- Reg1WriteOverwritesReg2Entirely(taicpu(hp1).oper[1]^.reg, ActiveReg) or
- (
- { Try to avoid the expensive check of RegUsedAfterInstruction if we know it will return False }
- not SuperRegistersEqual(taicpu(hp1).oper[1]^.reg, ActiveReg) and
- RegDeallocated
- )
- ) then
- begin
- OldOffset := taicpu(hp1).oper[0]^.ref^.offset;
- {$push}
- {$R-}{$Q-}
- { Explicitly disable overflow checking for these offset calculations
- as they do not matter for the final result }
- if ActiveReg=taicpu(hp1).oper[0]^.ref^.base then
- inc(taicpu(hp1).oper[0]^.ref^.offset,taicpu(p).oper[0]^.val);
- if ActiveReg=taicpu(hp1).oper[0]^.ref^.index then
- inc(taicpu(hp1).oper[0]^.ref^.offset,taicpu(p).oper[0]^.val*max(taicpu(hp1).oper[0]^.ref^.scalefactor,1));
- {$pop}
- {$ifdef x86_64}
- if (taicpu(hp1).oper[0]^.ref^.offset > $7FFFFFFF) or (taicpu(hp1).oper[0]^.ref^.offset < -2147483648) then
- begin
- { Overflow; abort }
- taicpu(hp1).oper[0]^.ref^.offset := OldOffset;
- end
- else
- {$endif x86_64}
- begin
- DebugMsg(SPeepholeOptimization + 'AddLea2Lea done',p);
- if not (cs_opt_level3 in current_settings.optimizerswitches) then
- { hp1 is the immediate next instruction for sure - good for a quick speed boost }
- RemoveCurrentP(p, hp1)
- else
- RemoveCurrentP(p);
- result:=true;
- Exit;
- end;
- end;
- if (
- { Save calling GetNextInstructionUsingReg again }
- Assigned(hp1) or
- GetNextInstructionUsingReg(p,hp1, ActiveReg)
- ) and
- MatchInstruction(hp1,A_ADD,A_SUB,[taicpu(p).opsize]) and
- (taicpu(hp1).oper[1]^.reg = ActiveReg) then
- begin
- if taicpu(hp1).oper[0]^.typ = top_const then
- begin
- { Merge add const1,%reg; add/sub const2,%reg to add const1+/-const2,%reg }
- if taicpu(hp1).opcode = A_ADD then
- ThisConst := taicpu(p).oper[0]^.val + taicpu(hp1).oper[0]^.val
- else
- ThisConst := taicpu(p).oper[0]^.val - taicpu(hp1).oper[0]^.val;
- Result := True;
- { Handle any overflows }
- case taicpu(p).opsize of
- S_B:
- taicpu(p).oper[0]^.val := ThisConst and $FF;
- S_W:
- taicpu(p).oper[0]^.val := ThisConst and $FFFF;
- S_L:
- taicpu(p).oper[0]^.val := ThisConst and $FFFFFFFF;
- {$ifdef x86_64}
- S_Q:
- if (ThisConst > $7FFFFFFF) or (ThisConst < -2147483648) then
- { Overflow; abort }
- Result := False
- else
- taicpu(p).oper[0]^.val := ThisConst;
- {$endif x86_64}
- else
- InternalError(2021102610);
- end;
- { Result may get set to False again if the combined immediate overflows for S_Q sizes }
- if Result then
- begin
- if (taicpu(p).oper[0]^.val < 0) and
- (
- ((taicpu(p).opsize = S_B) and (taicpu(p).oper[0]^.val <> -128)) or
- ((taicpu(p).opsize = S_W) and (taicpu(p).oper[0]^.val <> -32768)) or
- ((taicpu(p).opsize in [S_L{$ifdef x86_64}, S_Q{$endif x86_64}]) and (taicpu(p).oper[0]^.val <> -2147483648))
- ) then
- begin
- DebugMsg(SPeepholeOptimization + 'ADD; ADD/SUB -> SUB',p);
- taicpu(p).opcode := A_SUB;
- taicpu(p).oper[0]^.val := -taicpu(p).oper[0]^.val;
- end
- else
- DebugMsg(SPeepholeOptimization + 'ADD; ADD/SUB -> ADD',p);
- RemoveInstruction(hp1);
- end;
- end
- else
- begin
- { Make doubly sure the flags aren't in use because the order of additions may affect them }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- hp2 := p;
- while not (cs_opt_level3 in current_settings.optimizerswitches) and
- GetNextInstruction(hp2, hp2) and (hp2 <> hp1) do
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.next));
- if not RegInUsedRegs(NR_DEFAULTFLAGS, TmpUsedRegs) then
- begin
- { Move the constant addition to after the reg/ref addition to improve optimisation }
- DebugMsg(SPeepholeOptimization + 'Add/sub swap 1a done',p);
- Asml.Remove(p);
- Asml.InsertAfter(p, hp1);
- p := hp1;
- Result := True;
- end;
- end;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1LEA(var p : tai) : boolean;
- var
- hp1: tai;
- ref: Integer;
- saveref: treference;
- TempReg: TRegister;
- Multiple: TCGInt;
- begin
- Result:=false;
- { removes seg register prefixes from LEA operations, as they
- don't do anything}
- taicpu(p).oper[0]^.ref^.Segment:=NR_NO;
- { changes "lea (%reg1), %reg2" into "mov %reg1, %reg2" }
- if (taicpu(p).oper[0]^.ref^.base <> NR_NO) and
- (taicpu(p).oper[0]^.ref^.index = NR_NO) and
- (
- { do not mess with leas accessing the stack pointer
- unless it's a null operation }
- (taicpu(p).oper[1]^.reg <> NR_STACK_POINTER_REG) or
- (
- (taicpu(p).oper[0]^.ref^.base = NR_STACK_POINTER_REG) and
- (taicpu(p).oper[0]^.ref^.offset = 0)
- )
- ) and
- (not(Assigned(taicpu(p).oper[0]^.ref^.Symbol))) then
- begin
- if (taicpu(p).oper[0]^.ref^.offset = 0) then
- begin
- if (taicpu(p).oper[0]^.ref^.base <> taicpu(p).oper[1]^.reg) then
- begin
- hp1:=taicpu.op_reg_reg(A_MOV,taicpu(p).opsize,taicpu(p).oper[0]^.ref^.base,
- taicpu(p).oper[1]^.reg);
- InsertLLItem(p.previous,p.next, hp1);
- DebugMsg(SPeepholeOptimization + 'Lea2Mov done',hp1);
- p.free;
- p:=hp1;
- end
- else
- begin
- DebugMsg(SPeepholeOptimization + 'Lea2Nop done',p);
- RemoveCurrentP(p);
- end;
- Result:=true;
- exit;
- end
- else if (
- { continue to use lea to adjust the stack pointer,
- it is the recommended way, but only if not optimizing for size }
- (taicpu(p).oper[1]^.reg<>NR_STACK_POINTER_REG) or
- (cs_opt_size in current_settings.optimizerswitches)
- ) and
- { If the flags register is in use, don't change the instruction
- to an ADD otherwise this will scramble the flags. [Kit] }
- not RegInUsedRegs(NR_DEFAULTFLAGS, UsedRegs) and
- ConvertLEA(taicpu(p)) then
- begin
- Result:=true;
- exit;
- end;
- end;
- if GetNextInstruction(p,hp1) and
- (hp1.typ=ait_instruction) then
- begin
- if MatchInstruction(hp1,A_MOV,[taicpu(p).opsize]) and
- MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[0]^) and
- MatchOpType(Taicpu(hp1),top_reg,top_reg) and
- (taicpu(p).oper[1]^.reg<>NR_STACK_POINTER_REG) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,TmpUsedRegs)) then
- begin
- taicpu(p).loadoper(1,taicpu(hp1).oper[1]^);
- DebugMsg(SPeepholeOptimization + 'LeaMov2Lea done',p);
- RemoveInstruction(hp1);
- result:=true;
- exit;
- end;
- end;
- { changes
- lea <ref1>, reg1
- <op> ...,<ref. with reg1>,...
- to
- <op> ...,<ref1>,... }
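- { e.g. (illustrative registers, %eax not used afterwards):
- leal 4(%ebx),%eax; movl (%eax,%ecx),%edx
- becomes
- movl 4(%ebx,%ecx),%edx }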
- if (taicpu(p).oper[1]^.reg<>current_procinfo.framepointer) and
- (taicpu(p).oper[1]^.reg<>NR_STACK_POINTER_REG) and
- not(MatchInstruction(hp1,A_LEA,[])) then
- begin
- { find a reference which uses reg1 }
- if (taicpu(hp1).ops>=1) and (taicpu(hp1).oper[0]^.typ=top_ref) and RegInOp(taicpu(p).oper[1]^.reg,taicpu(hp1).oper[0]^) then
- ref:=0
- else if (taicpu(hp1).ops>=2) and (taicpu(hp1).oper[1]^.typ=top_ref) and RegInOp(taicpu(p).oper[1]^.reg,taicpu(hp1).oper[1]^) then
- ref:=1
- else
- ref:=-1;
- if (ref<>-1) and
- { reg1 must be either the base or the index }
- ((taicpu(hp1).oper[ref]^.ref^.base=taicpu(p).oper[1]^.reg) xor (taicpu(hp1).oper[ref]^.ref^.index=taicpu(p).oper[1]^.reg)) then
- begin
- { reg1 can be removed from the reference }
- saveref:=taicpu(hp1).oper[ref]^.ref^;
- if taicpu(hp1).oper[ref]^.ref^.base=taicpu(p).oper[1]^.reg then
- taicpu(hp1).oper[ref]^.ref^.base:=NR_NO
- else if taicpu(hp1).oper[ref]^.ref^.index=taicpu(p).oper[1]^.reg then
- taicpu(hp1).oper[ref]^.ref^.index:=NR_NO
- else
- Internalerror(2019111201);
- { check if we can insert all data of the lea into the second instruction }
- if ((taicpu(hp1).oper[ref]^.ref^.base=taicpu(p).oper[1]^.reg) or (taicpu(hp1).oper[ref]^.ref^.scalefactor <= 1)) and
- ((taicpu(p).oper[0]^.ref^.base=NR_NO) or (taicpu(hp1).oper[ref]^.ref^.base=NR_NO)) and
- ((taicpu(p).oper[0]^.ref^.index=NR_NO) or (taicpu(hp1).oper[ref]^.ref^.index=NR_NO)) and
- ((taicpu(p).oper[0]^.ref^.symbol=nil) or (taicpu(hp1).oper[ref]^.ref^.symbol=nil)) and
- ((taicpu(p).oper[0]^.ref^.relsymbol=nil) or (taicpu(hp1).oper[ref]^.ref^.relsymbol=nil)) and
- ((taicpu(p).oper[0]^.ref^.scalefactor <= 1) or (taicpu(hp1).oper[ref]^.ref^.scalefactor <= 1)) and
- (taicpu(p).oper[0]^.ref^.segment=NR_NO) and (taicpu(hp1).oper[ref]^.ref^.segment=NR_NO)
- {$ifdef x86_64}
- and (abs(taicpu(hp1).oper[ref]^.ref^.offset+taicpu(p).oper[0]^.ref^.offset)<=$7fffffff)
- and (((taicpu(p).oper[0]^.ref^.base<>NR_RIP) and (taicpu(p).oper[0]^.ref^.index<>NR_RIP)) or
- ((taicpu(hp1).oper[ref]^.ref^.base=NR_NO) and (taicpu(hp1).oper[ref]^.ref^.index=NR_NO))
- )
- {$endif x86_64}
- then
- begin
- { reg1 might not be used by the second instruction after it is removed from the reference }
- if not(RegInInstruction(taicpu(p).oper[1]^.reg,taicpu(hp1))) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- { reg1 is not updated so it might not be used afterwards }
- if not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,TmpUsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + 'LeaOp2Op done',p);
- if taicpu(p).oper[0]^.ref^.base<>NR_NO then
- taicpu(hp1).oper[ref]^.ref^.base:=taicpu(p).oper[0]^.ref^.base;
- if taicpu(p).oper[0]^.ref^.index<>NR_NO then
- taicpu(hp1).oper[ref]^.ref^.index:=taicpu(p).oper[0]^.ref^.index;
- if taicpu(p).oper[0]^.ref^.symbol<>nil then
- taicpu(hp1).oper[ref]^.ref^.symbol:=taicpu(p).oper[0]^.ref^.symbol;
- if taicpu(p).oper[0]^.ref^.relsymbol<>nil then
- taicpu(hp1).oper[ref]^.ref^.relsymbol:=taicpu(p).oper[0]^.ref^.relsymbol;
- if taicpu(p).oper[0]^.ref^.scalefactor > 1 then
- taicpu(hp1).oper[ref]^.ref^.scalefactor:=taicpu(p).oper[0]^.ref^.scalefactor;
- inc(taicpu(hp1).oper[ref]^.ref^.offset,taicpu(p).oper[0]^.ref^.offset);
- RemoveCurrentP(p, hp1);
- result:=true;
- exit;
- end
- end;
- end;
- { recover }
- taicpu(hp1).oper[ref]^.ref^:=saveref;
- end;
- end;
- end;
- { for now, we do not mess with the stack pointer, though it might be useful to remove
- unneeded lea sequences on the stack pointer, it needs to be tested in detail }
- if (taicpu(p).oper[1]^.reg <> NR_STACK_POINTER_REG) and
- GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[1]^.reg) then
- begin
- { Check common LEA/LEA conditions }
- if MatchInstruction(hp1,A_LEA,[taicpu(p).opsize]) and
- (taicpu(p).oper[1]^.reg = taicpu(hp1).oper[1]^.reg) and
- (taicpu(p).oper[0]^.ref^.relsymbol = nil) and
- (taicpu(p).oper[0]^.ref^.segment = NR_NO) and
- (taicpu(p).oper[0]^.ref^.symbol = nil) and
- (taicpu(hp1).oper[0]^.ref^.relsymbol = nil) and
- (taicpu(hp1).oper[0]^.ref^.segment = NR_NO) and
- (taicpu(hp1).oper[0]^.ref^.symbol = nil) and
- (
- (taicpu(p).oper[0]^.ref^.base = NR_NO) or { Don't call RegModifiedBetween unnecessarily }
- not(RegModifiedBetween(taicpu(p).oper[0]^.ref^.base,p,hp1))
- ) and (
- (taicpu(p).oper[0]^.ref^.index = taicpu(p).oper[0]^.ref^.base) or { Don't call RegModifiedBetween unnecessarily }
- (taicpu(p).oper[0]^.ref^.index = NR_NO) or
- not(RegModifiedBetween(taicpu(p).oper[0]^.ref^.index,p,hp1))
- ) then
- begin
- { changes
- lea (regX,scale), reg1
- lea offset(reg1,reg1), reg1
- to
- lea offset(regX,scale*2), reg1
- and
- lea (regX,scale1), reg1
- lea offset(reg1,scale2), reg1
- to
- lea offset(regX,scale1*scale2), reg1
- ... so long as the final scale does not exceed 8
- (Similarly, allow the first instruction to be "lea (regX,regX),reg1")
- }
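- { e.g. (illustrative registers):
- leal (,%eax,2),%ebx; leal 8(%ebx,%ebx),%ebx
- becomes
- leal 8(,%eax,4),%ebx }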
- if (taicpu(p).oper[0]^.ref^.offset = 0) and
- (taicpu(hp1).oper[0]^.ref^.index = taicpu(p).oper[1]^.reg) and
- (
- (
- (taicpu(p).oper[0]^.ref^.base = NR_NO)
- ) or (
- (taicpu(p).oper[0]^.ref^.scalefactor <= 1) and
- (
- (taicpu(p).oper[0]^.ref^.base = taicpu(p).oper[0]^.ref^.index) and
- not(RegUsedBetween(taicpu(p).oper[0]^.ref^.index, p, hp1))
- )
- )
- ) and (
- (
- { lea (reg1,scale2), reg1 variant }
- (taicpu(hp1).oper[0]^.ref^.base = NR_NO) and
- (
- (
- (taicpu(p).oper[0]^.ref^.base = NR_NO) and
- (taicpu(hp1).oper[0]^.ref^.scalefactor * taicpu(p).oper[0]^.ref^.scalefactor <= 8)
- ) or (
- { lea (regX,regX), reg1 variant }
- (taicpu(p).oper[0]^.ref^.base <> NR_NO) and
- (taicpu(hp1).oper[0]^.ref^.scalefactor <= 4)
- )
- )
- ) or (
- { lea (reg1,reg1), reg1 variant }
- (taicpu(hp1).oper[0]^.ref^.base = taicpu(p).oper[1]^.reg) and
- (taicpu(hp1).oper[0]^.ref^.scalefactor <= 1)
- )
- ) then
- begin
- DebugMsg(SPeepholeOptimization + 'LeaLea2Lea 2 done',p);
- { Make everything homogeneous to make calculations easier }
- if (taicpu(p).oper[0]^.ref^.base <> NR_NO) then
- begin
- if taicpu(p).oper[0]^.ref^.index <> NR_NO then
- { Convert lea (regX,regX),reg1 to lea (regX,2),reg1 }
- taicpu(p).oper[0]^.ref^.scalefactor := 2
- else
- taicpu(p).oper[0]^.ref^.index := taicpu(p).oper[0]^.ref^.base;
- taicpu(p).oper[0]^.ref^.base := NR_NO;
- end;
- if (taicpu(hp1).oper[0]^.ref^.base = NR_NO) then
- begin
- { Just to prevent miscalculations }
- if (taicpu(hp1).oper[0]^.ref^.scalefactor = 0) then
- taicpu(hp1).oper[0]^.ref^.scalefactor := taicpu(p).oper[0]^.ref^.scalefactor
- else
- taicpu(hp1).oper[0]^.ref^.scalefactor := taicpu(hp1).oper[0]^.ref^.scalefactor * taicpu(p).oper[0]^.ref^.scalefactor;
- end
- else
- begin
- taicpu(hp1).oper[0]^.ref^.base := NR_NO;
- taicpu(hp1).oper[0]^.ref^.scalefactor := taicpu(p).oper[0]^.ref^.scalefactor * 2;
- end;
- taicpu(hp1).oper[0]^.ref^.index := taicpu(p).oper[0]^.ref^.index;
- RemoveCurrentP(p);
- result:=true;
- exit;
- end
- { changes
- lea offset1(regX), reg1
- lea offset2(reg1), reg1
- to
- lea offset1+offset2(regX), reg1 }
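- { e.g. (illustrative registers):
- leal 4(%ebx),%eax; leal 12(%eax),%eax
- becomes
- leal 16(%ebx),%eax }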
- else if
- (
- (taicpu(hp1).oper[0]^.ref^.index = taicpu(p).oper[1]^.reg) and
- (taicpu(p).oper[0]^.ref^.index = NR_NO)
- ) or (
- (taicpu(hp1).oper[0]^.ref^.base = taicpu(p).oper[1]^.reg) and
- (taicpu(hp1).oper[0]^.ref^.scalefactor <= 1) and
- (
- (
- (taicpu(p).oper[0]^.ref^.index = NR_NO) or
- (taicpu(p).oper[0]^.ref^.base = NR_NO)
- ) or (
- (taicpu(p).oper[0]^.ref^.scalefactor <= 1) and
- (
- (taicpu(p).oper[0]^.ref^.index = NR_NO) or
- (
- (taicpu(p).oper[0]^.ref^.index = taicpu(p).oper[0]^.ref^.base) and
- (
- (taicpu(hp1).oper[0]^.ref^.index = NR_NO) or
- (taicpu(hp1).oper[0]^.ref^.base = NR_NO)
- )
- )
- )
- )
- )
- ) then
- begin
- DebugMsg(SPeepholeOptimization + 'LeaLea2Lea 1 done',p);
- if taicpu(hp1).oper[0]^.ref^.index=taicpu(p).oper[1]^.reg then
- begin
- taicpu(hp1).oper[0]^.ref^.index:=taicpu(p).oper[0]^.ref^.base;
- inc(taicpu(hp1).oper[0]^.ref^.offset,taicpu(p).oper[0]^.ref^.offset*max(taicpu(hp1).oper[0]^.ref^.scalefactor,1));
- { if the register is used as both index and base, we have to increase the offset
- for the base as well and adapt the base }
- if taicpu(hp1).oper[0]^.ref^.base=taicpu(p).oper[1]^.reg then
- begin
- taicpu(hp1).oper[0]^.ref^.base:=taicpu(p).oper[0]^.ref^.base;
- inc(taicpu(hp1).oper[0]^.ref^.offset,taicpu(p).oper[0]^.ref^.offset);
- end;
- end
- else
- begin
- inc(taicpu(hp1).oper[0]^.ref^.offset,taicpu(p).oper[0]^.ref^.offset);
- taicpu(hp1).oper[0]^.ref^.base:=taicpu(p).oper[0]^.ref^.base;
- end;
- if taicpu(p).oper[0]^.ref^.index<>NR_NO then
- begin
- taicpu(hp1).oper[0]^.ref^.base:=taicpu(hp1).oper[0]^.ref^.index;
- taicpu(hp1).oper[0]^.ref^.index:=taicpu(p).oper[0]^.ref^.index;
- taicpu(hp1).oper[0]^.ref^.scalefactor:=taicpu(p).oper[0]^.ref^.scalefactor;
- end;
- RemoveCurrentP(p);
- result:=true;
- exit;
- end;
- end;
- { Change:
- leal/q $x(%reg1),%reg2
- ...
- shll/q $y,%reg2
- To:
- leal/q $(x*2^y)(,%reg1,2^y),%reg2 (if y <= 3)
- }
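- { e.g. (illustrative registers, assuming the flags set by the shl are not needed):
- leal 3(%ebx),%eax; shll $2,%eax
- becomes
- leal 12(,%ebx,4),%eax }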
- if MatchInstruction(hp1, A_SHL, [taicpu(p).opsize]) and
- MatchOpType(taicpu(hp1), top_const, top_reg) and
- (taicpu(hp1).oper[0]^.val <= 3) then
- begin
- Multiple := 1 shl taicpu(hp1).oper[0]^.val;
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- TempReg := taicpu(hp1).oper[1]^.reg; { Store locally to reduce the number of dereferences }
- if
- { This allows the optimisation in some circumstances even if the lea instruction already has a scale factor
- (this works even if scalefactor is zero) }
- ((Multiple * taicpu(p).oper[0]^.ref^.scalefactor) <= 8) and
- { Ensure offset doesn't go out of bounds }
- (abs(taicpu(p).oper[0]^.ref^.offset * Multiple) <= $7FFFFFFF) and
- not (RegInUsedRegs(NR_DEFAULTFLAGS,TmpUsedRegs)) and
- MatchOperand(taicpu(p).oper[1]^, TempReg) and
- (
- (
- not SuperRegistersEqual(taicpu(p).oper[0]^.ref^.base, TempReg) and
- (
- (taicpu(p).oper[0]^.ref^.index = NR_NO) or
- (taicpu(p).oper[0]^.ref^.index = NR_INVALID) or
- (
- { Check for lea $x(%reg1,%reg1),%reg2 and treat it as if it were lea $x(%reg1,2),%reg2 }
- (taicpu(p).oper[0]^.ref^.index = taicpu(p).oper[0]^.ref^.base) and
- (taicpu(p).oper[0]^.ref^.scalefactor <= 1)
- )
- )
- ) or (
- (
- (taicpu(p).oper[0]^.ref^.base = NR_NO) or
- (taicpu(p).oper[0]^.ref^.base = NR_INVALID)
- ) and
- not SuperRegistersEqual(taicpu(p).oper[0]^.ref^.index, TempReg)
- )
- ) then
- begin
- repeat
- with taicpu(p).oper[0]^.ref^ do
- begin
- { Convert lea $x(%reg1,%reg1),%reg2 to lea $x(%reg1,2),%reg2 }
- if index = base then
- begin
- if Multiple > 4 then
- { Optimisation will no longer work because resultant
- scale factor will exceed 8 }
- Break;
- base := NR_NO;
- scalefactor := 2;
- DebugMsg(SPeepholeOptimization + 'lea $x(%reg1,%reg1),%reg2 -> lea $x(%reg1,2),%reg2 for following optimisation', p);
- end
- else if (base <> NR_NO) and (base <> NR_INVALID) then
- begin
- { Scale factor only works on the index register }
- index := base;
- base := NR_NO;
- end;
- { For safety }
- if scalefactor <= 1 then
- begin
- DebugMsg(SPeepholeOptimization + 'LeaShl2Lea 1', p);
- scalefactor := Multiple;
- end
- else
- begin
- DebugMsg(SPeepholeOptimization + 'LeaShl2Lea 2', p);
- scalefactor := scalefactor * Multiple;
- end;
- offset := offset * Multiple;
- end;
- RemoveInstruction(hp1);
- Result := True;
- Exit;
- { This repeat..until loop exists for the benefit of Break }
- until True;
- end;
- end;
- end;
- end;
- function TX86AsmOptimizer.DoSubAddOpt(var p: tai): Boolean;
- var
- hp1 : tai;
- begin
- DoSubAddOpt := False;
- if taicpu(p).oper[0]^.typ <> top_const then
- { Should have been confirmed before calling }
- InternalError(2021102601);
- if GetLastInstruction(p, hp1) and
- (hp1.typ = ait_instruction) and
- (taicpu(hp1).opsize = taicpu(p).opsize) then
- case taicpu(hp1).opcode Of
- A_DEC:
- if MatchOperand(taicpu(hp1).oper[0]^,taicpu(p).oper[1]^) then
- begin
- taicpu(p).loadConst(0,taicpu(p).oper[0]^.val+1);
- RemoveInstruction(hp1);
- end;
- A_SUB:
- if (taicpu(hp1).oper[0]^.typ = top_const) and
- MatchOperand(taicpu(hp1).oper[1]^,taicpu(p).oper[1]^) then
- begin
- taicpu(p).loadConst(0,taicpu(p).oper[0]^.val+taicpu(hp1).oper[0]^.val);
- RemoveInstruction(hp1);
- end;
- A_ADD:
- begin
- if (taicpu(hp1).oper[0]^.typ = top_const) and
- MatchOperand(taicpu(hp1).oper[1]^,taicpu(p).oper[1]^) then
- begin
- taicpu(p).loadConst(0,taicpu(p).oper[0]^.val-taicpu(hp1).oper[0]^.val);
- RemoveInstruction(hp1);
- if (taicpu(p).oper[0]^.val = 0) then
- begin
- hp1 := tai(p.next);
- RemoveInstruction(p); { Note, the choice to not use RemoveCurrentp is deliberate }
- if not GetLastInstruction(hp1, p) then
- p := hp1;
- DoSubAddOpt := True;
- end
- end;
- end;
- else
- ;
- end;
- end;
- function TX86AsmOptimizer.DoMovCmpMemOpt(var p : tai; const hp1: tai; UpdateTmpUsedRegs: Boolean) : Boolean;
- begin
- Result := False;
- if UpdateTmpUsedRegs then
- TransferUsedRegs(TmpUsedRegs);
- if MatchOpType(taicpu(p),top_ref,top_reg) and
- { The x86 assemblers have difficulty comparing values against absolute addresses }
- (taicpu(p).oper[0]^.ref^.refaddr <> addr_full) and
- (taicpu(hp1).oper[0]^.typ <> top_ref) and
- MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[1]^.reg) and
- (
- (
- (taicpu(hp1).opcode = A_TEST)
- ) or (
- (taicpu(hp1).opcode = A_CMP) and
- { A sanity check more than anything }
- not MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^.reg)
- )
- ) then
- begin
- { change
- mov mem, %reg
- cmp/test x, %reg / test %reg,%reg
- (reg deallocated)
- to
- cmp/test x, mem / cmp 0, mem
- }
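- { e.g. (illustrative registers, %eax deallocated after the test):
- movl (%ebx),%eax; testl %eax,%eax
- becomes
- cmpl $0,(%ebx) }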
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- if not RegUsedAfterInstruction(taicpu(p).oper[1]^.reg, hp1, TmpUsedRegs) then
- begin
- { Convert test %reg,%reg or test $-1,%reg to cmp $0,mem }
- if (taicpu(hp1).opcode = A_TEST) and
- (
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^.reg) or
- MatchOperand(taicpu(hp1).oper[0]^, -1)
- ) then
- begin
- taicpu(hp1).opcode := A_CMP;
- taicpu(hp1).loadconst(0, 0);
- end;
- taicpu(hp1).loadref(1, taicpu(p).oper[0]^.ref^);
- DebugMsg(SPeepholeOptimization + 'MOV/CMP -> CMP (memory check)', p);
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1Sub(var p : tai) : boolean;
- var
- hp1, hp2: tai;
- ActiveReg: TRegister;
- OldOffset: asizeint;
- ThisConst: TCGInt;
- function RegDeallocated: Boolean;
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- Result := not(RegUsedAfterInstruction(ActiveReg,hp1,TmpUsedRegs))
- end;
- begin
- Result:=false;
- hp1 := nil;
- { replace
- subX const,%reg1
- leaX (%reg1,%reg1,Y),%reg2 // Base or index might not be equal to reg1
- dealloc %reg1
- by
- leaX -const-const*Y(%reg1,%reg1,Y),%reg2
- }
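- { e.g. (illustrative registers, %eax deallocated after the lea):
- subl $4,%eax; leal 8(%eax,%eax,2),%edx
- becomes
- leal -4(%eax,%eax,2),%edx (8 - 4 for the base - 4*2 for the scaled index) }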
- if MatchOpType(taicpu(p),top_const,top_reg) then
- begin
- ActiveReg := taicpu(p).oper[1]^.reg;
- { Ensures the entire register was updated }
- if (taicpu(p).opsize >= S_L) and
- GetNextInstructionUsingReg(p,hp1, ActiveReg) and
- MatchInstruction(hp1,A_LEA,[]) and
- (SuperRegistersEqual(ActiveReg, taicpu(hp1).oper[0]^.ref^.base) or
- SuperRegistersEqual(ActiveReg, taicpu(hp1).oper[0]^.ref^.index)) and
- (
- { Cover the case where the register in the reference is also the destination register }
- Reg1WriteOverwritesReg2Entirely(taicpu(hp1).oper[1]^.reg, ActiveReg) or
- (
- { Try to avoid the expensive check of RegUsedAfterInstruction if we know it will return False }
- not SuperRegistersEqual(taicpu(hp1).oper[1]^.reg, ActiveReg) and
- RegDeallocated
- )
- ) then
- begin
- OldOffset := taicpu(hp1).oper[0]^.ref^.offset;
- {$push}
- {$R-}{$Q-}
- { Explicitly disable overflow checking here, as in the ADD case above }
- if ActiveReg=taicpu(hp1).oper[0]^.ref^.base then
- Dec(taicpu(hp1).oper[0]^.ref^.offset,taicpu(p).oper[0]^.val);
- if ActiveReg=taicpu(hp1).oper[0]^.ref^.index then
- Dec(taicpu(hp1).oper[0]^.ref^.offset,taicpu(p).oper[0]^.val*max(taicpu(hp1).oper[0]^.ref^.scalefactor,1));
- {$pop}
- {$ifdef x86_64}
- if (taicpu(hp1).oper[0]^.ref^.offset > $7FFFFFFF) or (taicpu(hp1).oper[0]^.ref^.offset < -2147483648) then
- begin
- { Overflow; abort }
- taicpu(hp1).oper[0]^.ref^.offset := OldOffset;
- end
- else
- {$endif x86_64}
- begin
- DebugMsg(SPeepholeOptimization + 'SubLea2Lea done',p);
- if not (cs_opt_level3 in current_settings.optimizerswitches) then
- { hp1 is the immediate next instruction for sure - good for a quick speed boost }
- RemoveCurrentP(p, hp1)
- else
- RemoveCurrentP(p);
- result:=true;
- Exit;
- end;
- end;
- if (
- { Save calling GetNextInstructionUsingReg again }
- Assigned(hp1) or
- GetNextInstructionUsingReg(p,hp1, ActiveReg)
- ) and
- MatchInstruction(hp1,A_SUB,[taicpu(p).opsize]) and
- (taicpu(hp1).oper[1]^.reg = ActiveReg) then
- begin
- if taicpu(hp1).oper[0]^.typ = top_const then
- begin
- { Merge sub const1,%reg; sub const2,%reg to sub const1+const2,%reg }
- ThisConst := taicpu(p).oper[0]^.val + taicpu(hp1).oper[0]^.val;
- Result := True;
- { Handle any overflows }
- case taicpu(p).opsize of
- S_B:
- taicpu(p).oper[0]^.val := ThisConst and $FF;
- S_W:
- taicpu(p).oper[0]^.val := ThisConst and $FFFF;
- S_L:
- taicpu(p).oper[0]^.val := ThisConst and $FFFFFFFF;
- {$ifdef x86_64}
- S_Q:
- if (ThisConst > $7FFFFFFF) or (ThisConst < -2147483648) then
- { Overflow; abort }
- Result := False
- else
- taicpu(p).oper[0]^.val := ThisConst;
- {$endif x86_64}
- else
- InternalError(2021102610);
- end;
- { Result may get set to False again if the combined immediate overflows for S_Q sizes }
- if Result then
- begin
- if (taicpu(p).oper[0]^.val < 0) and
- (
- ((taicpu(p).opsize = S_B) and (taicpu(p).oper[0]^.val <> -128)) or
- ((taicpu(p).opsize = S_W) and (taicpu(p).oper[0]^.val <> -32768)) or
- ((taicpu(p).opsize in [S_L{$ifdef x86_64}, S_Q{$endif x86_64}]) and (taicpu(p).oper[0]^.val <> -2147483648))
- ) then
- begin
- DebugMsg(SPeepholeOptimization + 'SUB; ADD/SUB -> ADD',p);
- taicpu(p).opcode := A_ADD;
- taicpu(p).oper[0]^.val := -taicpu(p).oper[0]^.val;
- end
- else
- DebugMsg(SPeepholeOptimization + 'SUB; ADD/SUB -> SUB',p);
- RemoveInstruction(hp1);
- end;
- end
- else
- begin
- { Make doubly sure the flags aren't in use because the order of subtractions may affect them }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- hp2 := p;
- while not (cs_opt_level3 in current_settings.optimizerswitches) and
- GetNextInstruction(hp2, hp2) and (hp2 <> hp1) do
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.next));
- if not RegInUsedRegs(NR_DEFAULTFLAGS, TmpUsedRegs) then
- begin
- { Move the constant subtraction to after the reg/ref addition to improve optimisation }
- DebugMsg(SPeepholeOptimization + 'Add/sub swap 1b done',p);
- Asml.Remove(p);
- Asml.InsertAfter(p, hp1);
- p := hp1;
- Result := True;
- Exit;
- end;
- end;
- end;
- { * change "subl $2, %esp; pushw x" to "pushl x"}
- { * change "sub/add const1, reg" or "dec reg" followed by
- "sub const2, reg" to one "sub ..., reg" }
- {$ifdef i386}
- if (taicpu(p).oper[0]^.val = 2) and
- (ActiveReg = NR_ESP) and
- { Don't do the sub/push optimization if the sub }
- { comes from setting up the stack frame (JM) }
- (not(GetLastInstruction(p,hp1)) or
- not(MatchInstruction(hp1,A_MOV,[S_L]) and
- MatchOperand(taicpu(hp1).oper[0]^,NR_ESP) and
- MatchOperand(taicpu(hp1).oper[1]^,NR_EBP))) then
- begin
- hp1 := tai(p.next);
- while Assigned(hp1) and
- (tai(hp1).typ in [ait_instruction]+SkipInstr) and
- not RegReadByInstruction(NR_ESP,hp1) and
- not RegModifiedByInstruction(NR_ESP,hp1) do
- hp1 := tai(hp1.next);
- if Assigned(hp1) and
- MatchInstruction(hp1,A_PUSH,[S_W]) then
- begin
- taicpu(hp1).changeopsize(S_L);
- if taicpu(hp1).oper[0]^.typ=top_reg then
- setsubreg(taicpu(hp1).oper[0]^.reg,R_SUBWHOLE);
- hp1 := tai(p.next);
- RemoveCurrentp(p, hp1);
- Result:=true;
- exit;
- end;
- end;
- {$endif i386}
- if DoSubAddOpt(p) then
- Result:=true;
- end;
- end;
- function TX86AsmOptimizer.OptPass1SHLSAL(var p : tai) : boolean;
- var
- TmpBool1,TmpBool2 : Boolean;
- tmpref : treference;
- hp1,hp2: tai;
- mask: tcgint;
- begin
- Result:=false;
- { All these optimisations work on "shl/sal const,%reg" }
- if not MatchOpType(taicpu(p),top_const,top_reg) then
- Exit;
- if (taicpu(p).opsize in [S_L{$ifdef x86_64},S_Q{$endif x86_64}]) and
- (taicpu(p).oper[0]^.val <= 3) then
- { Changes "shl const, %reg32; add const/reg, %reg32" to one lea statement }
- begin
- { should we check the next instruction? }
- TmpBool1 := True;
- { have we found an add/sub which could be
- integrated in the lea? }
- TmpBool2 := False;
- reference_reset(tmpref,2,[]);
- TmpRef.index := taicpu(p).oper[1]^.reg;
- TmpRef.scalefactor := 1 shl taicpu(p).oper[0]^.val;
- while TmpBool1 and
- GetNextInstruction(p, hp1) and
- (tai(hp1).typ = ait_instruction) and
- ((((taicpu(hp1).opcode = A_ADD) or
- (taicpu(hp1).opcode = A_SUB)) and
- (taicpu(hp1).oper[1]^.typ = Top_Reg) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg)) or
- (((taicpu(hp1).opcode = A_INC) or
- (taicpu(hp1).opcode = A_DEC)) and
- (taicpu(hp1).oper[0]^.typ = Top_Reg) and
- (taicpu(hp1).oper[0]^.reg = taicpu(p).oper[1]^.reg)) or
- ((taicpu(hp1).opcode = A_LEA) and
- (taicpu(hp1).oper[0]^.ref^.index = taicpu(p).oper[1]^.reg) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg))) and
- (not GetNextInstruction(hp1,hp2) or
- not instrReadsFlags(hp2)) Do
- begin
- TmpBool1 := False;
- if taicpu(hp1).opcode=A_LEA then
- begin
- if (TmpRef.base = NR_NO) and
- (taicpu(hp1).oper[0]^.ref^.symbol=nil) and
- (taicpu(hp1).oper[0]^.ref^.relsymbol=nil) and
- (taicpu(hp1).oper[0]^.ref^.segment=NR_NO) and
- ((taicpu(hp1).oper[0]^.ref^.scalefactor=0) or
- (taicpu(hp1).oper[0]^.ref^.scalefactor*tmpref.scalefactor<=8)) then
- begin
- TmpBool1 := True;
- TmpBool2 := True;
- inc(TmpRef.offset, taicpu(hp1).oper[0]^.ref^.offset);
- if taicpu(hp1).oper[0]^.ref^.scalefactor<>0 then
- tmpref.scalefactor:=tmpref.scalefactor*taicpu(hp1).oper[0]^.ref^.scalefactor;
- TmpRef.base := taicpu(hp1).oper[0]^.ref^.base;
- RemoveInstruction(hp1);
- end
- end
- else if (taicpu(hp1).oper[0]^.typ = Top_Const) then
- begin
- TmpBool1 := True;
- TmpBool2 := True;
- case taicpu(hp1).opcode of
- A_ADD:
- inc(TmpRef.offset, longint(taicpu(hp1).oper[0]^.val));
- A_SUB:
- dec(TmpRef.offset, longint(taicpu(hp1).oper[0]^.val));
- else
- internalerror(2019050536);
- end;
- RemoveInstruction(hp1);
- end
- else
- if (taicpu(hp1).oper[0]^.typ = Top_Reg) and
- (((taicpu(hp1).opcode = A_ADD) and
- (TmpRef.base = NR_NO)) or
- (taicpu(hp1).opcode = A_INC) or
- (taicpu(hp1).opcode = A_DEC)) then
- begin
- TmpBool1 := True;
- TmpBool2 := True;
- case taicpu(hp1).opcode of
- A_ADD:
- TmpRef.base := taicpu(hp1).oper[0]^.reg;
- A_INC:
- inc(TmpRef.offset);
- A_DEC:
- dec(TmpRef.offset);
- else
- internalerror(2019050535);
- end;
- RemoveInstruction(hp1);
- end;
- end;
- if TmpBool2
- {$ifndef x86_64}
- or
- ((current_settings.optimizecputype < cpu_Pentium2) and
- (taicpu(p).oper[0]^.val <= 3) and
- not(cs_opt_size in current_settings.optimizerswitches))
- {$endif x86_64}
- then
- begin
- if not(TmpBool2) and
- (taicpu(p).oper[0]^.val=1) then
- begin
- hp1:=taicpu.Op_reg_reg(A_ADD,taicpu(p).opsize,
- taicpu(p).oper[1]^.reg, taicpu(p).oper[1]^.reg)
- end
- else
- hp1:=taicpu.op_ref_reg(A_LEA, taicpu(p).opsize, TmpRef,
- taicpu(p).oper[1]^.reg);
- DebugMsg(SPeepholeOptimization + 'ShlAddLeaSubIncDec2Lea',p);
- InsertLLItem(p.previous, p.next, hp1);
- p.free;
- p := hp1;
- end;
- end
- {$ifndef x86_64}
- else if (current_settings.optimizecputype < cpu_Pentium2) then
- begin
- { changes "shl $1, %reg" to "add %reg, %reg", which is the same on a 386,
- but faster on a 486, and pairable in both U and V pipes on the Pentium
- (unlike shl, which is only pairable in the U pipe) }
- if taicpu(p).oper[0]^.val=1 then
- begin
- hp1 := taicpu.Op_reg_reg(A_ADD,taicpu(p).opsize,
- taicpu(p).oper[1]^.reg, taicpu(p).oper[1]^.reg);
- InsertLLItem(p.previous, p.next, hp1);
- p.free;
- p := hp1;
- end
- { changes "shl $2, %reg" to "lea (,%reg,4), %reg"
- "shl $3, %reg" to "lea (,%reg,8), %reg }
- else if (taicpu(p).opsize = S_L) and
- (taicpu(p).oper[0]^.val<= 3) then
- begin
- reference_reset(tmpref,2,[]);
- TmpRef.index := taicpu(p).oper[1]^.reg;
- TmpRef.scalefactor := 1 shl taicpu(p).oper[0]^.val;
- hp1 := taicpu.Op_ref_reg(A_LEA,S_L,TmpRef, taicpu(p).oper[1]^.reg);
- InsertLLItem(p.previous, p.next, hp1);
- p.free;
- p := hp1;
- end;
- end
- {$endif x86_64}
- else if
- GetNextInstruction(p, hp1) and (hp1.typ = ait_instruction) and MatchOpType(taicpu(hp1), top_const, top_reg) and
- (
- (
- MatchInstruction(hp1, A_AND, [taicpu(p).opsize]) and
- SetAndTest(hp1, hp2)
- {$ifdef x86_64}
- ) or
- (
- MatchInstruction(hp1, A_MOV, [taicpu(p).opsize]) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_AND, [taicpu(p).opsize]) and
- MatchOpType(taicpu(hp2), top_reg, top_reg) and
- (taicpu(hp1).oper[1]^.reg = taicpu(hp2).oper[0]^.reg)
- {$endif x86_64}
- )
- ) and
- (taicpu(p).oper[1]^.reg = taicpu(hp2).oper[1]^.reg) then
- begin
- { Change:
- shl x, %reg1
- mov -(1<<x), %reg2
- and %reg2, %reg1
- Or:
- shl x, %reg1
- and -(1<<x), %reg1
- To just:
- shl x, %reg1
- Since the and operation only zeroes bits that are already zero from the shl operation
- }
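- { e.g. (illustrative, assuming the flags are not used afterwards):
- shlq $8,%rax; andq $-256,%rax
- becomes just
- shlq $8,%rax }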
- case taicpu(p).oper[0]^.val of
- 8:
- mask:=$FFFFFFFFFFFFFF00;
- 16:
- mask:=$FFFFFFFFFFFF0000;
- 32:
- mask:=$FFFFFFFF00000000;
- 63:
- { Constant pre-calculated to prevent overflow errors with Int64 }
- mask:=$8000000000000000;
- else
- begin
- if taicpu(p).oper[0]^.val >= 64 then
- { Shouldn't happen realistically, since the register
- is guaranteed to be set to zero at this point }
- mask := 0
- else
- mask := -(Int64(1 shl taicpu(p).oper[0]^.val));
- end;
- end;
- if taicpu(hp1).oper[0]^.val = mask then
- begin
- { Everything checks out, perform the optimisation, as long as
- the FLAGS register isn't being used}
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- {$ifdef x86_64}
- if (hp1 <> hp2) then
- begin
- { "shl/mov/and" version }
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
- { Don't do the optimisation if the FLAGS register is in use }
- if not(RegUsedAfterInstruction(NR_DEFAULTFLAGS, hp2, TmpUsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + 'ShlMovAnd2Shl', p);
- { Don't remove the 'mov' instruction if its register is used elsewhere }
- if not(RegUsedAfterInstruction(taicpu(hp1).oper[1]^.reg, hp2, TmpUsedRegs)) then
- begin
- RemoveInstruction(hp1);
- Result := True;
- end;
- { Only set Result to True if the 'mov' instruction was removed }
- RemoveInstruction(hp2);
- end;
- end
- else
- {$endif x86_64}
- begin
- { "shl/and" version }
- { Don't do the optimisation if the FLAGS register is in use }
- if not(RegUsedAfterInstruction(NR_DEFAULTFLAGS, hp1, TmpUsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + 'ShlAnd2Shl', p);
- RemoveInstruction(hp1);
- Result := True;
- end;
- end;
- Exit;
- end
- else {$ifdef x86_64}if (hp1 = hp2) then{$endif x86_64}
- begin
- { Even if the mask doesn't allow for its removal, we might be
- able to optimise the mask for the "shl/and" version, which
- may permit other peephole optimisations }
- {$ifdef DEBUG_AOPTCPU}
- mask := taicpu(hp1).oper[0]^.val and mask;
- if taicpu(hp1).oper[0]^.val <> mask then
- begin
- DebugMsg(
- SPeepholeOptimization +
- 'Changed mask from $' + debug_tostr(taicpu(hp1).oper[0]^.val) +
- ' to $' + debug_tostr(mask) +
- ' based on previous instruction (ShlAnd2ShlAnd)', hp1);
- taicpu(hp1).oper[0]^.val := mask;
- end;
- {$else DEBUG_AOPTCPU}
- { If debugging is off, just set the operand even if it's the same }
- taicpu(hp1).oper[0]^.val := taicpu(hp1).oper[0]^.val and mask;
- {$endif DEBUG_AOPTCPU}
- end;
- end;
- {
- change
- shl/sal const,reg
- <op> ...(...,reg,1),...
- into
- <op> ...(...,reg,1 shl const),...
- if const in 1..3
- }
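- { e.g. (illustrative registers, %ebx not used afterwards):
- shll $2,%ebx; movl (%eax,%ebx),%ecx
- becomes
- movl (%eax,%ebx,4),%ecx }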
- if MatchOpType(taicpu(p), top_const, top_reg) and
- (taicpu(p).oper[0]^.val in [1..3]) and
- GetNextInstruction(p, hp1) and
- MatchInstruction(hp1,A_MOV,A_LEA,[]) and
- MatchOpType(taicpu(hp1), top_ref, top_reg) and
- (taicpu(p).oper[1]^.reg=taicpu(hp1).oper[0]^.ref^.index) and
- (taicpu(p).oper[1]^.reg<>taicpu(hp1).oper[0]^.ref^.base) and
- (taicpu(hp1).oper[0]^.ref^.scalefactor in [0,1]) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg, hp1, TmpUsedRegs)) then
- begin
- taicpu(hp1).oper[0]^.ref^.scalefactor:=1 shl taicpu(p).oper[0]^.val;
- DebugMsg(SPeepholeOptimization + 'ShlOp2Op', p);
- RemoveCurrentP(p);
- Result:=true;
- end;
- end;
- end;
- function TX86AsmOptimizer.CheckMemoryWrite(var first_mov, second_mov: taicpu): Boolean;
- var
- CurrentRef: TReference;
- FullReg: TRegister;
- hp1, hp2: tai;
- begin
- Result := False;
- if (first_mov.opsize <> S_B) or (second_mov.opsize <> S_B) then
- Exit;
- { We assume you've checked if the operand is actually a reference by
- this point. If it isn't, you'll most likely get an access violation }
- CurrentRef := first_mov.oper[1]^.ref^;
- { Memory must be aligned }
- if (CurrentRef.offset mod 4) <> 0 then
- Exit;
- Inc(CurrentRef.offset);
- CurrentRef.alignment := 1; { Otherwise references_equal will return False }
- if MatchOperand(second_mov.oper[0]^, 0) and
- references_equal(second_mov.oper[1]^.ref^, CurrentRef) and
- GetNextInstruction(second_mov, hp1) and
- (hp1.typ = ait_instruction) and
- (taicpu(hp1).opcode = A_MOV) and
- MatchOpType(taicpu(hp1), top_const, top_ref) and
- (taicpu(hp1).oper[0]^.val = 0) then
- begin
- Inc(CurrentRef.offset);
- CurrentRef.alignment := taicpu(hp1).oper[1]^.ref^.alignment; { Otherwise references_equal might return False }
- FullReg := newreg(R_INTREGISTER,getsupreg(first_mov.oper[0]^.reg), R_SUBD);
- if references_equal(taicpu(hp1).oper[1]^.ref^, CurrentRef) then
- begin
- case taicpu(hp1).opsize of
- S_B:
- if GetNextInstruction(hp1, hp2) and
- MatchInstruction(taicpu(hp2), A_MOV, [S_B]) and
- MatchOpType(taicpu(hp2), top_const, top_ref) and
- (taicpu(hp2).oper[0]^.val = 0) then
- begin
- Inc(CurrentRef.offset);
- CurrentRef.alignment := 1; { Otherwise references_equal will return False }
- if references_equal(taicpu(hp2).oper[1]^.ref^, CurrentRef) and
- (taicpu(hp2).opsize = S_B) then
- begin
- RemoveInstruction(hp1);
- RemoveInstruction(hp2);
- first_mov.opsize := S_L;
- if first_mov.oper[0]^.typ = top_reg then
- begin
- DebugMsg(SPeepholeOptimization + 'MOVb/MOVb/MOVb/MOVb -> MOVZX/MOVl', first_mov);
- { Reuse second_mov as a MOVZX instruction }
- second_mov.opcode := A_MOVZX;
- second_mov.opsize := S_BL;
- second_mov.loadreg(0, first_mov.oper[0]^.reg);
- second_mov.loadreg(1, FullReg);
- first_mov.oper[0]^.reg := FullReg;
- asml.Remove(second_mov);
- asml.InsertBefore(second_mov, first_mov);
- end
- else
- { It's a value }
- begin
- DebugMsg(SPeepholeOptimization + 'MOVb/MOVb/MOVb/MOVb -> MOVl', first_mov);
- RemoveInstruction(second_mov);
- end;
- Result := True;
- Exit;
- end;
- end;
- S_W:
- begin
- RemoveInstruction(hp1);
- first_mov.opsize := S_L;
- if first_mov.oper[0]^.typ = top_reg then
- begin
- DebugMsg(SPeepholeOptimization + 'MOVb/MOVb/MOVw -> MOVZX/MOVl', first_mov);
- { Reuse second_mov as a MOVZX instruction }
- second_mov.opcode := A_MOVZX;
- second_mov.opsize := S_BL;
- second_mov.loadreg(0, first_mov.oper[0]^.reg);
- second_mov.loadreg(1, FullReg);
- first_mov.oper[0]^.reg := FullReg;
- asml.Remove(second_mov);
- asml.InsertBefore(second_mov, first_mov);
- end
- else
- { It's a value }
- begin
- DebugMsg(SPeepholeOptimization + 'MOVb/MOVb/MOVw -> MOVl', first_mov);
- RemoveInstruction(second_mov);
- end;
- Result := True;
- Exit;
- end;
- else
- ;
- end;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1FSTP(var p: tai): boolean;
- { returns true if a "continue" should be done after this optimization }
- var
- hp1, hp2: tai;
- begin
- Result := false;
- if MatchOpType(taicpu(p),top_ref) and
- GetNextInstruction(p, hp1) and
- (hp1.typ = ait_instruction) and
- (((taicpu(hp1).opcode = A_FLD) and
- (taicpu(p).opcode = A_FSTP)) or
- ((taicpu(p).opcode = A_FISTP) and
- (taicpu(hp1).opcode = A_FILD))) and
- MatchOpType(taicpu(hp1),top_ref) and
- (taicpu(hp1).opsize = taicpu(p).opsize) and
- RefsEqual(taicpu(p).oper[0]^.ref^, taicpu(hp1).oper[0]^.ref^) then
- begin
- { replacing fstp f;fld f by fst f is only valid for extended because of rounding or if fastmath is on }
- if ((taicpu(p).opsize=S_FX) or (cs_opt_fastmath in current_settings.optimizerswitches)) and
- GetNextInstruction(hp1, hp2) and
- (hp2.typ = ait_instruction) and
- IsExitCode(hp2) and
- (taicpu(p).oper[0]^.ref^.base = current_procinfo.FramePointer) and
- not(assigned(current_procinfo.procdef.funcretsym) and
- (taicpu(p).oper[0]^.ref^.offset < tabstractnormalvarsym(current_procinfo.procdef.funcretsym).localloc.reference.offset)) and
- (taicpu(p).oper[0]^.ref^.index = NR_NO) then
- begin
- RemoveInstruction(hp1);
- RemoveCurrentP(p, hp2);
- RemoveLastDeallocForFuncRes(p);
- Result := true;
- end
- else
- { we can do this only in fast math mode as fstp is rounding ...
- ... still disabled as it breaks the compiler and/or rtl }
- if ({ (cs_opt_fastmath in current_settings.optimizerswitches) or }
- { ... or if another fstp equal to the first one follows }
- (GetNextInstruction(hp1,hp2) and
- (hp2.typ = ait_instruction) and
- (taicpu(p).opcode=taicpu(hp2).opcode) and
- (taicpu(p).opsize=taicpu(hp2).opsize))
- ) and
- { fst can't store an extended/comp value }
- (taicpu(p).opsize <> S_FX) and
- (taicpu(p).opsize <> S_IQ) then
- begin
- if (taicpu(p).opcode = A_FSTP) then
- taicpu(p).opcode := A_FST
- else
- taicpu(p).opcode := A_FIST;
- DebugMsg(SPeepholeOptimization + 'FstpFld2Fst',p);
- RemoveInstruction(hp1);
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1FLD(var p : tai) : boolean;
- var
- hp1, hp2: tai;
- begin
- result:=false;
- if MatchOpType(taicpu(p),top_reg) and
- GetNextInstruction(p, hp1) and
- (hp1.typ = Ait_Instruction) and
- MatchOpType(taicpu(hp1),top_reg,top_reg) and
- (taicpu(hp1).oper[0]^.reg = NR_ST) and
- (taicpu(hp1).oper[1]^.reg = NR_ST1) then
- { change to
- fld reg fxxx reg,st
- fxxxp st, st1 (hp1)
- Remark: non-commutative operations must be reversed!
- }
- begin
- case taicpu(hp1).opcode Of
- A_FMULP,A_FADDP,
- A_FSUBP,A_FDIVP,A_FSUBRP,A_FDIVRP:
- begin
- case taicpu(hp1).opcode Of
- A_FADDP: taicpu(hp1).opcode := A_FADD;
- A_FMULP: taicpu(hp1).opcode := A_FMUL;
- A_FSUBP: taicpu(hp1).opcode := A_FSUBR;
- A_FSUBRP: taicpu(hp1).opcode := A_FSUB;
- A_FDIVP: taicpu(hp1).opcode := A_FDIVR;
- A_FDIVRP: taicpu(hp1).opcode := A_FDIV;
- else
- internalerror(2019050534);
- end;
- taicpu(hp1).oper[0]^.reg := taicpu(p).oper[0]^.reg;
- taicpu(hp1).oper[1]^.reg := NR_ST;
- RemoveCurrentP(p, hp1);
- Result:=true;
- exit;
- end;
- else
- ;
- end;
- end
- else
- if MatchOpType(taicpu(p),top_ref) and
- GetNextInstruction(p, hp2) and
- (hp2.typ = Ait_Instruction) and
- MatchOpType(taicpu(hp2),top_reg,top_reg) and
- (taicpu(p).opsize in [S_FS, S_FL]) and
- (taicpu(hp2).oper[0]^.reg = NR_ST) and
- (taicpu(hp2).oper[1]^.reg = NR_ST1) then
- if GetLastInstruction(p, hp1) and
- MatchInstruction(hp1,A_FLD,A_FST,[taicpu(p).opsize]) and
- MatchOpType(taicpu(hp1),top_ref) and
- RefsEqual(taicpu(p).oper[0]^.ref^, taicpu(hp1).oper[0]^.ref^) then
- if ((taicpu(hp2).opcode = A_FMULP) or
- (taicpu(hp2).opcode = A_FADDP)) then
- { change to
- fld/fst mem1 (hp1) fld/fst mem1
- fld mem1 (p) fadd/
- faddp/ fmul st, st
- fmulp st, st1 (hp2) }
- begin
- RemoveCurrentP(p, hp1);
- if (taicpu(hp2).opcode = A_FADDP) then
- taicpu(hp2).opcode := A_FADD
- else
- taicpu(hp2).opcode := A_FMUL;
- taicpu(hp2).oper[1]^.reg := NR_ST;
- end
- else
- { change to
- fld/fst mem1 (hp1) fld/fst mem1
- fld mem1 (p) fld st}
- begin
- taicpu(p).changeopsize(S_FL);
- taicpu(p).loadreg(0,NR_ST);
- end
- else
- begin
- case taicpu(hp2).opcode Of
- A_FMULP,A_FADDP,A_FSUBP,A_FDIVP,A_FSUBRP,A_FDIVRP:
- { change to
- fld/fst mem1 (hp1) fld/fst mem1
- fld mem2 (p) fxxx mem2
- fxxxp st, st1 (hp2) }
- begin
- case taicpu(hp2).opcode Of
- A_FADDP: taicpu(p).opcode := A_FADD;
- A_FMULP: taicpu(p).opcode := A_FMUL;
- A_FSUBP: taicpu(p).opcode := A_FSUBR;
- A_FSUBRP: taicpu(p).opcode := A_FSUB;
- A_FDIVP: taicpu(p).opcode := A_FDIVR;
- A_FDIVRP: taicpu(p).opcode := A_FDIV;
- else
- internalerror(2019050533);
- end;
- RemoveInstruction(hp2);
- end
- else
- ;
- end
- end
- end;
- function IsCmpSubset(cond1, cond2: TAsmCond): Boolean; inline;
- begin
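- { Worked example: after "cmp $5,%eax", ZF=1 means %eax = 5, which also
-   satisfies %eax >= 5 both unsigned and signed, so a taken JE/JZ implies that
-   JAE/JNB/JNC and JGE/JNL would also be taken - hence E/Z is treated as a
-   subset of those conditions below }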
- Result := condition_in(cond1, cond2) or
- { Not strictly subsets due to the actual flags checked, but because we're
- comparing integers, E is a subset of AE and GE and their aliases }
- ((cond1 in [C_E, C_Z]) and (cond2 in [C_AE, C_NB, C_NC, C_GE, C_NL]));
- end;
- function TX86AsmOptimizer.OptPass1Cmp(var p: tai): boolean;
- var
- v: TCGInt;
- hp1, hp2, p_dist, p_jump, hp1_dist, p_label, hp1_label: tai;
- FirstMatch: Boolean;
- NewReg: TRegister;
- JumpLabel, JumpLabel_dist, JumpLabel_far: TAsmLabel;
- begin
- Result:=false;
- { All these optimisations need a next instruction }
- if not GetNextInstruction(p, hp1) then
- Exit;
- { Search for:
- cmp ###,###
- j(c1) @lbl1
- ...
- @lbl1:
- cmp ###,### (same comparison as above)
- j(c2) @lbl2
- If c1 is a subset of c2, change to:
- cmp ###,###
- j(c2) @lbl2
- (@lbl1 may become a dead label as a result)
- }
- { Also handle cases where there are multiple jumps in a row }
- p_jump := hp1;
- while Assigned(p_jump) and MatchInstruction(p_jump, A_JCC, []) do
- begin
- if IsJumpToLabel(taicpu(p_jump)) then
- begin
- JumpLabel := TAsmLabel(taicpu(p_jump).oper[0]^.ref^.symbol);
- p_label := nil;
- if Assigned(JumpLabel) then
- p_label := getlabelwithsym(JumpLabel);
- if Assigned(p_label) and
- GetNextInstruction(p_label, p_dist) and
- MatchInstruction(p_dist, A_CMP, []) and
- MatchOperand(taicpu(p_dist).oper[0]^, taicpu(p).oper[0]^) and
- MatchOperand(taicpu(p_dist).oper[1]^, taicpu(p).oper[1]^) and
- GetNextInstruction(p_dist, hp1_dist) and
- MatchInstruction(hp1_dist, A_JCC, []) then { This doesn't have to be an explicit label }
- begin
- JumpLabel_dist := TAsmLabel(taicpu(hp1_dist).oper[0]^.ref^.symbol);
- if JumpLabel = JumpLabel_dist then
- { This is an infinite loop }
- Exit;
- { Best optimisation when the first condition is a subset (or equal) of the second }
- if IsCmpSubset(taicpu(p_jump).condition, taicpu(hp1_dist).condition) then
- begin
- { Any registers used here will already be allocated }
- if Assigned(JumpLabel_dist) then
- JumpLabel_dist.IncRefs;
- if Assigned(JumpLabel) then
- JumpLabel.DecRefs;
- DebugMsg(SPeepholeOptimization + 'CMP/Jcc/@Lbl/CMP/Jcc -> CMP/Jcc, redirecting first jump', p_jump);
- taicpu(p_jump).condition := taicpu(hp1_dist).condition;
- taicpu(p_jump).loadref(0, taicpu(hp1_dist).oper[0]^.ref^);
- Result := True;
- { Don't exit yet. Since p and p_jump haven't actually been
- removed, we can check for more on this iteration }
- end
- else if IsCmpSubset(taicpu(hp1_dist).condition, inverse_cond(taicpu(p_jump).condition)) and
- GetNextInstruction(hp1_dist, hp1_label) and
- SkipAligns(hp1_label, hp1_label) and
- (hp1_label.typ = ait_label) then
- begin
- JumpLabel_far := tai_label(hp1_label).labsym;
- if (JumpLabel_far = JumpLabel_dist) or (JumpLabel_far = JumpLabel) then
- { This is an infinite loop }
- Exit;
- if Assigned(JumpLabel_far) then
- begin
- { In this situation, if the first jump branches, the second one will never
- branch, so change the destination label to after the second jump }
- DebugMsg(SPeepholeOptimization + 'CMP/Jcc/@Lbl/CMP/Jcc/@Lbl -> CMP/Jcc, redirecting first jump to 2nd label', p_jump);
- if Assigned(JumpLabel) then
- JumpLabel.DecRefs;
- JumpLabel_far.IncRefs;
- taicpu(p_jump).oper[0]^.ref^.symbol := JumpLabel_far;
- Result := True;
- { Don't exit yet. Since p and p_jump haven't actually been
- removed, we can check for more on this iteration }
- Continue;
- end;
- end;
- end;
- end;
- { Search for:
- cmp ###,###
- j(c1) @lbl1
- cmp ###,### (same as first)
- Remove second cmp
- }
- if GetNextInstruction(p_jump, hp2) and
- (
- (
- MatchInstruction(hp2, A_CMP, [taicpu(p).opsize]) and
- (
- (
- MatchOpType(taicpu(p), top_const, top_reg) and
- MatchOpType(taicpu(hp2), top_const, top_reg) and
- (taicpu(hp2).oper[0]^.val = taicpu(p).oper[0]^.val) and
- Reg1WriteOverwritesReg2Entirely(taicpu(hp2).oper[1]^.reg, taicpu(p).oper[1]^.reg)
- ) or (
- MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
- MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^)
- )
- )
- ) or (
- { Also match cmp $0,%reg; jcc @lbl; test %reg,%reg }
- MatchOperand(taicpu(p).oper[0]^, 0) and
- (taicpu(p).oper[1]^.typ = top_reg) and
- MatchInstruction(hp2, A_TEST, []) and
- MatchOpType(taicpu(hp2), top_reg, top_reg) and
- (taicpu(hp2).oper[0]^.reg = taicpu(hp2).oper[1]^.reg) and
- Reg1WriteOverwritesReg2Entirely(taicpu(hp2).oper[1]^.reg, taicpu(p).oper[1]^.reg)
- )
- ) then
- begin
- DebugMsg(SPeepholeOptimization + 'CMP/Jcc/CMP; removed superfluous CMP', hp2);
- RemoveInstruction(hp2);
- Result := True;
- { Continue the while loop in case "Jcc/CMP" follows the second CMP that was just removed }
- end;
- GetNextInstruction(p_jump, p_jump);
- end;
- {
- Try to optimise the following:
- cmp $x,### ($x and $y can be registers or constants)
- je @lbl1 (only reference)
- cmp $y,### (### are identical)
- @lbl1:
- sete %reg1
- Change to:
- cmp $x,###
- sete %reg2 (allocate new %reg2)
- cmp $y,###
- sete %reg1
- orb %reg2,%reg1
- (dealloc %reg2)
- This adds an instruction (so don't perform under -Os), but it removes
- a conditional branch.
- }
- if not (cs_opt_size in current_settings.optimizerswitches) and
- (
- (hp1 = p_jump) or
- GetNextInstruction(p, hp1)
- ) and
- MatchInstruction(hp1, A_Jcc, []) and
- IsJumpToLabel(taicpu(hp1)) and
- (taicpu(hp1).condition in [C_E, C_Z]) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_CMP, A_TEST, [taicpu(p).opsize]) and
- MatchOperand(taicpu(p).oper[1]^, taicpu(hp2).oper[1]^) and
- { The first operand of CMP instructions can only be a register or
- immediate anyway, so no need to check }
- GetNextInstruction(hp2, p_label) and
- (p_label.typ = ait_label) and
- (tai_label(p_label).labsym.getrefs = 1) and
- (JumpTargetOp(taicpu(hp1))^.ref^.symbol = tai_label(p_label).labsym) and
- GetNextInstruction(p_label, p_dist) and
- MatchInstruction(p_dist, A_SETcc, []) and
- (taicpu(p_dist).condition in [C_E, C_Z]) and
- (taicpu(p_dist).oper[0]^.typ = top_reg) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(p_label.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(p_dist.Next));
- if not RegInUsedRegs(NR_DEFAULTFLAGS, TmpUsedRegs) and
- { Get the instruction after the SETcc instruction so we can
- allocate a new register over the entire range }
- GetNextInstruction(p_dist, hp1_dist) then
- begin
- { Register can appear in p if it's not used afterwards, so only
- allocate between hp1 and hp1_dist }
- NewReg := GetIntRegisterBetween(R_SUBL, TmpUsedRegs, hp1, hp1_dist);
- if NewReg <> NR_NO then
- begin
- DebugMsg(SPeepholeOptimization + 'CMP/JE/CMP/@Lbl/SETE -> CMP/SETE/CMP/SETE/OR, removing conditional branch', p);
- { Change the jump instruction into a SETcc instruction }
- taicpu(hp1).opcode := A_SETcc;
- taicpu(hp1).opsize := S_B;
- taicpu(hp1).loadreg(0, NewReg);
- { This is now a dead label }
- tai_label(p_label).labsym.decrefs;
- { Prefer adding before the next instruction so the FLAGS
- register is deallocated first }
- AsmL.InsertBefore(
- taicpu.op_reg_reg(A_OR, S_B, NewReg, taicpu(p_dist).oper[0]^.reg),
- hp1_dist
- );
- Result := True;
- { Don't exit yet, as p wasn't changed and hp1, while
- modified, is still intact and might be optimised by the
- SETcc optimisation below }
- end;
- end;
- end;
- if taicpu(p).oper[0]^.typ = top_const then
- begin
- if (taicpu(p).oper[0]^.val = 0) and
- (taicpu(p).oper[1]^.typ = top_reg) and
- MatchInstruction(hp1,A_Jcc,A_SETcc,[]) then
- begin
- hp2 := p;
- FirstMatch := True;
- { When dealing with "cmp $0,%reg", only ZF and SF contain
- anything meaningful once it's converted to "test %reg,%reg";
- additionally, some jumps will always (or never) branch, so
- evaluate every jump immediately following the
- comparison, optimising the conditions if possible.
- Similarly with SETcc... those that are always set to 0 or 1
- are changed to MOV instructions }
- while FirstMatch or { Saves calling GetNextInstruction unnecessarily }
- (
- GetNextInstruction(hp2, hp1) and
- MatchInstruction(hp1,A_Jcc,A_SETcc,[])
- ) do
- begin
- FirstMatch := False;
- case taicpu(hp1).condition of
- C_B, C_C, C_NAE, C_O:
- { For B/NAE:
- Will never branch since an unsigned integer can never be below zero
- For C/O:
- Result cannot overflow because 0 is being subtracted
- }
- begin
- if taicpu(hp1).opcode = A_Jcc then
- begin
- DebugMsg(SPeepholeOptimization + 'Cmpcc2Testcc - condition B/C/NAE/O --> Never (jump removed)', hp1);
- TAsmLabel(taicpu(hp1).oper[0]^.ref^.symbol).decrefs;
- RemoveInstruction(hp1);
- { Since hp1 was deleted, hp2 must not be updated }
- Continue;
- end
- else
- begin
- DebugMsg(SPeepholeOptimization + 'Cmpcc2Testcc - condition B/C/NAE/O --> Never (set -> mov 0)', hp1);
- { Convert "set(c) %reg" instruction to "movb 0,%reg" }
- taicpu(hp1).opcode := A_MOV;
- taicpu(hp1).ops := 2;
- taicpu(hp1).condition := C_None;
- taicpu(hp1).opsize := S_B;
- taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg);
- taicpu(hp1).loadconst(0, 0);
- end;
- end;
- C_BE, C_NA:
- begin
- { Will only branch if equal to zero }
- DebugMsg(SPeepholeOptimization + 'Cmpcc2Testcc - condition BE/NA --> E', hp1);
- taicpu(hp1).condition := C_E;
- end;
- C_A, C_NBE:
- begin
- { Will only branch if not equal to zero }
- DebugMsg(SPeepholeOptimization + 'Cmpcc2Testcc - condition A/NBE --> NE', hp1);
- taicpu(hp1).condition := C_NE;
- end;
- C_AE, C_NB, C_NC, C_NO:
- begin
- { Will always branch }
- DebugMsg(SPeepholeOptimization + 'Cmpcc2Testcc - condition AE/NB/NC/NO --> Always', hp1);
- if taicpu(hp1).opcode = A_Jcc then
- begin
- MakeUnconditional(taicpu(hp1));
- { Any jumps/set that follow will now be dead code }
- RemoveDeadCodeAfterJump(taicpu(hp1));
- Break;
- end
- else
- begin
- { Convert "set(c) %reg" instruction to "movb 1,%reg" }
- taicpu(hp1).opcode := A_MOV;
- taicpu(hp1).ops := 2;
- taicpu(hp1).condition := C_None;
- taicpu(hp1).opsize := S_B;
- taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg);
- taicpu(hp1).loadconst(0, 1);
- end;
- end;
- C_None:
- InternalError(2020012201);
- C_P, C_PE, C_NP, C_PO:
- { We can't handle parity checks and they should never be generated
- after a general-purpose CMP (it's used in some floating-point
- comparisons that don't use CMP) }
- InternalError(2020012202);
- else
- { Zero/Equality, Sign, their complements and all of the
- signed comparisons do not need to be converted };
- end;
- hp2 := hp1;
- end;
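- { "test %reg,%reg" sets ZF and SF exactly as "cmp $0,%reg" does, but has a
-   shorter encoding because it needs no immediate operand }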
- { Convert the instruction to a TEST }
- taicpu(p).opcode := A_TEST;
- taicpu(p).loadreg(0,taicpu(p).oper[1]^.reg);
- Result := True;
- Exit;
- end
- else if (taicpu(p).oper[0]^.val = 1) and
- MatchInstruction(hp1,A_Jcc,A_SETcc,[]) and
- (taicpu(hp1).condition in [C_L, C_NGE]) then
- begin
- { Convert; To:
- cmp $1,r/m cmp $0,r/m
- jl @lbl jle @lbl
- }
- DebugMsg(SPeepholeOptimization + 'Cmp1Jl2Cmp0Jle', p);
- taicpu(p).oper[0]^.val := 0;
- taicpu(hp1).condition := C_LE;
- { If the instruction is now "cmp $0,%reg", convert it to a
- TEST (and effectively do the work of the "cmp $0,%reg" in
- the block above)
- If it's a reference, we can get away with not setting
- Result to True because we haven't evaluated the jump
- in this pass yet.
- }
- if (taicpu(p).oper[1]^.typ = top_reg) then
- begin
- taicpu(p).opcode := A_TEST;
- taicpu(p).loadreg(0,taicpu(p).oper[1]^.reg);
- Result := True;
- end;
- Exit;
- end
- else if (taicpu(p).oper[1]^.typ = top_reg)
- {$ifdef x86_64}
- and (taicpu(p).opsize <> S_Q) { S_Q will never happen: cmp with 64 bit constants is not possible }
- {$endif x86_64}
- then
- begin
- { cmp register,$8000 neg register
- je target --> jo target
- .... only if register is deallocated before jump.}
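- { This works because NEG sets the overflow flag only when its operand is the
-   minimum signed value ($80 / $8000 / $80000000 for byte/word/dword), i.e.
-   exactly when the original comparison against that constant would have set ZF }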
- case Taicpu(p).opsize of
- S_B: v:=$80;
- S_W: v:=$8000;
- S_L: v:=qword($80000000);
- else
- internalerror(2013112905);
- end;
- if (taicpu(p).oper[0]^.val=v) and
- MatchInstruction(hp1,A_Jcc,A_SETcc,[]) and
- (Taicpu(hp1).condition in [C_E,C_NE]) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs,tai(p.next));
- if not(RegInUsedRegs(Taicpu(p).oper[1]^.reg, TmpUsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + 'CmpJe2NegJo done',p);
- Taicpu(p).opcode:=A_NEG;
- Taicpu(p).loadoper(0,Taicpu(p).oper[1]^);
- Taicpu(p).clearop(1);
- Taicpu(p).ops:=1;
- if Taicpu(hp1).condition=C_E then
- Taicpu(hp1).condition:=C_O
- else
- Taicpu(hp1).condition:=C_NO;
- Result:=true;
- exit;
- end;
- end;
- end;
- end;
- if TrySwapMovCmp(p, hp1) then
- begin
- Result := True;
- Exit;
- end;
- end;
- function TX86AsmOptimizer.OptPass1PXor(var p: tai): boolean;
- var
- hp1: tai;
- begin
- {
- remove the second (v)pxor from
- pxor reg,reg
- ...
- pxor reg,reg
- }
- Result:=false;
- if MatchOperand(taicpu(p).oper[0]^,taicpu(p).oper[1]^) and
- MatchOpType(taicpu(p),top_reg,top_reg) and
- GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
- MatchInstruction(hp1,taicpu(p).opcode,[taicpu(p).opsize]) and
- MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[0]^) and
- MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) then
- begin
- DebugMsg(SPeepholeOptimization + 'PXorPXor2PXor done',hp1);
- RemoveInstruction(hp1);
- Result:=true;
- Exit;
- end
- {
- replace
- pxor reg1,reg1
- movapd/s reg1,reg2
- dealloc reg1
- by
- pxor reg2,reg2
- }
- else if GetNextInstruction(p,hp1) and
- { we mix single and double operations here because we assume that the compiler
- generates vmovapd only after double operations and vmovaps only after single operations }
- MatchInstruction(hp1,A_MOVAPD,A_MOVAPS,[S_NO]) and
- MatchOperand(taicpu(p).oper[0]^,taicpu(p).oper[1]^) and
- MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[0]^) and
- (taicpu(p).oper[0]^.typ=top_reg) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,TmpUsedRegs)) then
- begin
- taicpu(p).loadoper(0,taicpu(hp1).oper[1]^);
- taicpu(p).loadoper(1,taicpu(hp1).oper[1]^);
- DebugMsg(SPeepholeOptimization + 'PXorMovapd2PXor done',p);
- RemoveInstruction(hp1);
- result:=true;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1VPXor(var p: tai): boolean;
- var
- hp1: tai;
- begin
- {
- remove the second (v)pxor from
- (v)pxor reg,reg
- ...
- (v)pxor reg,reg
- }
- Result:=false;
- if MatchOperand(taicpu(p).oper[0]^,taicpu(p).oper[1]^,taicpu(p).oper[2]^) and
- MatchOpType(taicpu(p),top_reg,top_reg,top_reg) and
- GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
- MatchInstruction(hp1,taicpu(p).opcode,[taicpu(p).opsize]) and
- MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[0]^) and
- MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^,taicpu(hp1).oper[2]^) then
- begin
- DebugMsg(SPeepholeOptimization + 'VPXorVPXor2PXor done',hp1);
- RemoveInstruction(hp1);
- Result:=true;
- Exit;
- end
- else
- Result:=OptPass1VOP(p);
- end;
- function TX86AsmOptimizer.OptPass1Imul(var p: tai): boolean;
- var
- hp1 : tai;
- begin
- result:=false;
- { replace
- IMul const,%mreg1,%mreg2
- Mov %mreg2,%mreg3
- dealloc %mreg2
- by
- IMul const,%mreg1,%mreg3
- }
- if (taicpu(p).ops=3) and
- GetNextInstruction(p,hp1) and
- MatchInstruction(hp1,A_MOV,[taicpu(p).opsize]) and
- MatchOperand(taicpu(p).oper[2]^,taicpu(hp1).oper[0]^) and
- (taicpu(hp1).oper[1]^.typ=top_reg) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if not(RegUsedAfterInstruction(taicpu(hp1).oper[0]^.reg,hp1,TmpUsedRegs)) then
- begin
- taicpu(p).loadoper(2,taicpu(hp1).oper[1]^);
- DebugMsg(SPeepholeOptimization + 'ImulMov2Imul done',p);
- RemoveInstruction(hp1);
- result:=true;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1SHXX(var p: tai): boolean;
- var
- hp1 : tai;
- begin
- result:=false;
- { replace
- ShXX %reg0,%reg1,%reg2
- Mov %reg2,%reg3
- dealloc %reg2
- by
- ShXX %reg0,%reg1,%reg3
- }
- if GetNextInstruction(p,hp1) and
- MatchInstruction(hp1,A_MOV,[taicpu(p).opsize]) and
- MatchOperand(taicpu(p).oper[2]^,taicpu(hp1).oper[0]^) and
- (taicpu(hp1).oper[1]^.typ=top_reg) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if not(RegUsedAfterInstruction(taicpu(hp1).oper[0]^.reg,hp1,TmpUsedRegs)) then
- begin
- taicpu(p).loadoper(2,taicpu(hp1).oper[1]^);
- DebugMsg(SPeepholeOptimization + 'SHXXMov2SHXX done',p);
- RemoveInstruction(hp1);
- result:=true;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1_V_Cvtss2sd(var p: tai): boolean;
- var
- hp1: tai;
- begin
- Result:=false;
- { get rid of
- (v)cvtss2sd reg0,<reg1,>reg2
- (v)cvtsd2ss reg2,<reg2,>reg0
- }
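- { Two outcomes are handled below: if the conversion round-trip ends up back in
-   the register it started from, both instructions can simply be removed (the
-   single-precision value is unchanged by widening and re-narrowing); otherwise
-   the pair degenerates to a plain register copy, i.e. (V)MOVAPS }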
- if GetNextInstruction(p,hp1) and
- (((taicpu(p).opcode=A_CVTSS2SD) and MatchInstruction(hp1,A_CVTSD2SS,[taicpu(p).opsize]) and
- MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[1]^) and MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[0]^)) or
- ((taicpu(p).opcode=A_VCVTSS2SD) and MatchInstruction(hp1,A_VCVTSD2SS,[taicpu(p).opsize]) and
- MatchOpType(taicpu(p),top_reg,top_reg,top_reg) and
- MatchOpType(taicpu(hp1),top_reg,top_reg,top_reg) and
- (getsupreg(taicpu(p).oper[0]^.reg)=getsupreg(taicpu(p).oper[1]^.reg)) and
- (getsupreg(taicpu(hp1).oper[0]^.reg)=getsupreg(taicpu(hp1).oper[1]^.reg)) and
- (getsupreg(taicpu(p).oper[2]^.reg)=getsupreg(taicpu(hp1).oper[0]^.reg))
- )
- ) then
- begin
- if ((taicpu(p).opcode=A_CVTSS2SD) and (getsupreg(taicpu(p).oper[0]^.reg)=getsupreg(taicpu(hp1).oper[1]^.reg))) or
- ((taicpu(p).opcode=A_VCVTSS2SD) and (getsupreg(taicpu(p).oper[0]^.reg)=getsupreg(taicpu(hp1).oper[2]^.reg))) then
- begin
- DebugMsg(SPeepholeOptimization + '(V)Cvtss2CvtSd(V)Cvtsd2ss2Nop done',p);
- RemoveCurrentP(p);
- RemoveInstruction(hp1);
- end
- else
- begin
- DebugMsg(SPeepholeOptimization + '(V)Cvtss2CvtSd(V)Cvtsd2ss2Vmovaps done',p);
- if taicpu(hp1).opcode=A_CVTSD2SS then
- begin
- taicpu(p).loadreg(1,taicpu(hp1).oper[1]^.reg);
- taicpu(p).opcode:=A_MOVAPS;
- end
- else
- begin
- taicpu(p).loadreg(1,taicpu(hp1).oper[2]^.reg);
- taicpu(p).opcode:=A_VMOVAPS;
- end;
- taicpu(p).ops:=2;
- RemoveInstruction(hp1);
- end;
- Result:=true;
- Exit;
- end;
- end;
- function TX86AsmOptimizer.OptPass1Jcc(var p : tai) : boolean;
- var
- hp1, hp2, hp3, hp4, hp5: tai;
- ThisReg: TRegister;
- begin
- Result := False;
- if not GetNextInstruction(p,hp1) or (hp1.typ <> ait_instruction) then
- Exit;
- {
- convert
- j<c> .L1
- mov 1,reg
- jmp .L2
- .L1
- mov 0,reg
- .L2
- into
- mov 0,reg
- set<not(c)> reg
- take care of alignment and that the mov 0,reg is not converted into a xor as this
- would destroy the flag contents
- Use MOVZX instead when optimising for size: although "mov 0,reg" is bigger, it
- can be executed at the same time as a previous comparison, so it is kept when
- optimising for speed. The size-optimised form is:
- set<not(c)> reg
- movzx reg, reg
- }
- if MatchInstruction(hp1,A_MOV,[]) and
- (taicpu(hp1).oper[0]^.typ = top_const) and
- (
- (
- (taicpu(hp1).oper[1]^.typ = top_reg)
- {$ifdef i386}
- { Under i386, ESI, EDI, EBP and ESP
- don't have an 8-bit representation }
- and not (getsupreg(taicpu(hp1).oper[1]^.reg) in [RS_ESI, RS_EDI, RS_EBP, RS_ESP])
- {$endif i386}
- ) or (
- {$ifdef i386}
- (taicpu(hp1).oper[1]^.typ <> top_reg) and
- {$endif i386}
- (taicpu(hp1).opsize = S_B)
- )
- ) and
- GetNextInstruction(hp1,hp2) and
- MatchInstruction(hp2,A_JMP,[]) and (taicpu(hp2).oper[0]^.ref^.refaddr=addr_full) and
- GetNextInstruction(hp2,hp3) and
- SkipAligns(hp3, hp3) and
- (hp3.typ=ait_label) and
- (tasmlabel(taicpu(p).oper[0]^.ref^.symbol)=tai_label(hp3).labsym) and
- GetNextInstruction(hp3,hp4) and
- MatchInstruction(hp4,A_MOV,[taicpu(hp1).opsize]) and
- (taicpu(hp4).oper[0]^.typ = top_const) and
- (
- ((taicpu(hp1).oper[0]^.val = 0) and (taicpu(hp4).oper[0]^.val = 1)) or
- ((taicpu(hp1).oper[0]^.val = 1) and (taicpu(hp4).oper[0]^.val = 0))
- ) and
- MatchOperand(taicpu(hp1).oper[1]^,taicpu(hp4).oper[1]^) and
- GetNextInstruction(hp4,hp5) and
- SkipAligns(hp5, hp5) and
- (hp5.typ=ait_label) and
- (tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol)=tai_label(hp5).labsym) then
- begin
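- { In the canonical pattern the fall-through MOV loads 1 and the MOV at the
-   label loads 0, i.e. the register should end up 0 precisely when the jump is
-   taken, so the SETcc must use the inverted condition; if the constants are
-   the other way round, the original condition is kept }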
- if (taicpu(hp1).oper[0]^.val = 1) and (taicpu(hp4).oper[0]^.val = 0) then
- taicpu(p).condition := inverse_cond(taicpu(p).condition);
- tai_label(hp3).labsym.DecRefs;
- { If this isn't the only reference to the middle label, we can
- still make a saving - only that the first jump and everything
- that follows will remain. }
- if (tai_label(hp3).labsym.getrefs = 0) then
- begin
- if (taicpu(hp1).oper[0]^.val = 1) and (taicpu(hp4).oper[0]^.val = 0) then
- DebugMsg(SPeepholeOptimization + 'J(c)Mov1JmpMov0 -> Set(~c)',p)
- else
- DebugMsg(SPeepholeOptimization + 'J(c)Mov0JmpMov1 -> Set(c)',p);
- { remove jump, first label and second MOV (also catching any aligns) }
- repeat
- if not GetNextInstruction(hp2, hp3) then
- InternalError(2021040810);
- RemoveInstruction(hp2);
- hp2 := hp3;
- until hp2 = hp5;
- { Don't decrement reference count before the removal loop
- above, otherwise GetNextInstruction won't stop on
- the label }
- tai_label(hp5).labsym.DecRefs;
- end
- else
- begin
- if (taicpu(hp1).oper[0]^.val = 1) and (taicpu(hp4).oper[0]^.val = 0) then
- DebugMsg(SPeepholeOptimization + 'J(c)Mov1JmpMov0 -> Set(~c) (partial)',p)
- else
- DebugMsg(SPeepholeOptimization + 'J(c)Mov0JmpMov1 -> Set(c) (partial)',p);
- end;
- taicpu(p).opcode:=A_SETcc;
- taicpu(p).opsize:=S_B;
- taicpu(p).is_jmp:=False;
- if taicpu(hp1).opsize=S_B then
- begin
- taicpu(p).loadoper(0, taicpu(hp1).oper[1]^);
- if taicpu(hp1).oper[1]^.typ = top_reg then
- AllocRegBetween(taicpu(hp1).oper[1]^.reg, p, hp2, UsedRegs);
- RemoveInstruction(hp1);
- end
- else
- begin
- { Will be a register because the size can't be S_B otherwise }
- ThisReg := newreg(R_INTREGISTER,getsupreg(taicpu(hp1).oper[1]^.reg), R_SUBL);
- taicpu(p).loadreg(0, ThisReg);
- AllocRegBetween(ThisReg, p, hp2, UsedRegs);
- if (cs_opt_size in current_settings.optimizerswitches) and IsMOVZXAcceptable then
- begin
- case taicpu(hp1).opsize of
- S_W:
- taicpu(hp1).opsize := S_BW;
- S_L:
- taicpu(hp1).opsize := S_BL;
- {$ifdef x86_64}
- S_Q:
- begin
- taicpu(hp1).opsize := S_BL;
- { Change the destination register to 32-bit }
- taicpu(hp1).loadreg(1, newreg(R_INTREGISTER,getsupreg(ThisReg), R_SUBD));
- end;
- {$endif x86_64}
- else
- InternalError(2021040820);
- end;
- taicpu(hp1).opcode := A_MOVZX;
- taicpu(hp1).loadreg(0, ThisReg);
- end
- else
- begin
- AllocRegBetween(NR_FLAGS,p,hp1,UsedRegs);
- { hp1 is already a MOV instruction with the correct register }
- taicpu(hp1).loadconst(0, 0);
- { Inserting it right before p will guarantee that the flags are also tracked }
- asml.Remove(hp1);
- asml.InsertBefore(hp1, p);
- end;
- end;
- Result:=true;
- exit;
- end
- end;
- function TX86AsmOptimizer.OptPass1VMOVDQ(var p: tai): Boolean;
- var
- hp1, hp2, hp3: tai;
- SourceRef, TargetRef: TReference;
- CurrentReg: TRegister;
- begin
- { VMOVDQU/VMOVDQA shouldn't have even been generated }
- if not UseAVX then
- InternalError(2021100501);
- Result := False;
- { Look for the following to simplify:
- vmovdqa/u x(mem1), %xmmreg
- vmovdqa/u %xmmreg, y(mem2)
- vmovdqa/u x+16(mem1), %xmmreg
- vmovdqa/u %xmmreg, y+16(mem2)
- Change to:
- vmovdqa/u x(mem1), %ymmreg
- vmovdqa/u %ymmreg, y(mem2)
- vpxor %ymmreg, %ymmreg, %ymmreg
- ( The VPXOR instruction is to zero the upper half, thus removing the
- need to call the potentially expensive VZEROUPPER instruction. Other
- peephole optimisations can remove VPXOR if it's unnecessary )
- }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- { NOTE: In the optimisations below, if the references dictate that an
- aligned move is possible (i.e. VMOVDQA), the existing instructions
- should already be VMOVDQA because if (x mod 32) = 0, then (x mod 16) = 0 }
- if (taicpu(p).opsize = S_XMM) and
- MatchOpType(taicpu(p), top_ref, top_reg) and
- GetNextInstruction(p, hp1) and
- MatchInstruction(hp1, A_VMOVDQA, A_VMOVDQU, [S_XMM]) and
- MatchOpType(taicpu(hp1), top_reg, top_ref) and
- not RegUsedAfterInstruction(taicpu(p).oper[1]^.reg, hp1, TmpUsedRegs) then
- begin
- SourceRef := taicpu(p).oper[0]^.ref^;
- TargetRef := taicpu(hp1).oper[1]^.ref^;
- if GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_VMOVDQA, A_VMOVDQU, [S_XMM]) and
- MatchOpType(taicpu(hp2), top_ref, top_reg) then
- begin
- { Delay calling GetNextInstruction(hp2, hp3) for as long as possible }
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- Inc(SourceRef.offset, 16);
- { Reuse the register in the first block move }
- CurrentReg := newreg(R_MMREGISTER, getsupreg(taicpu(p).oper[1]^.reg), R_SUBMMY);
- if RefsEqual(SourceRef, taicpu(hp2).oper[0]^.ref^) then
- begin
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.Next));
- Inc(TargetRef.offset, 16);
- if GetNextInstruction(hp2, hp3) and
- MatchInstruction(hp3, A_VMOVDQA, A_VMOVDQU, [S_XMM]) and
- MatchOpType(taicpu(hp3), top_reg, top_ref) and
- (taicpu(hp2).oper[1]^.reg = taicpu(hp3).oper[0]^.reg) and
- RefsEqual(TargetRef, taicpu(hp3).oper[1]^.ref^) and
- not RegUsedAfterInstruction(taicpu(hp2).oper[1]^.reg, hp3, TmpUsedRegs) then
- begin
- { Update the register tracking to the new size }
- AllocRegBetween(CurrentReg, p, hp2, UsedRegs);
- { Remember that the offsets are 16 ahead }
- { Switch to unaligned if the memory isn't on a 32-byte boundary }
- if not (
- ((SourceRef.offset mod 32) = 16) and
- (SourceRef.alignment >= 32) and ((SourceRef.alignment mod 32) = 0)
- ) then
- taicpu(p).opcode := A_VMOVDQU;
- taicpu(p).opsize := S_YMM;
- taicpu(p).oper[1]^.reg := CurrentReg;
- if not (
- ((TargetRef.offset mod 32) = 16) and
- (TargetRef.alignment >= 32) and ((TargetRef.alignment mod 32) = 0)
- ) then
- taicpu(hp1).opcode := A_VMOVDQU;
- taicpu(hp1).opsize := S_YMM;
- taicpu(hp1).oper[0]^.reg := CurrentReg;
- DebugMsg(SPeepholeOptimization + 'Used ' + debug_regname(CurrentReg) + ' to merge a pair of memory moves (VmovdqxVmovdqxVmovdqxVmovdqx2VmovdqyVmovdqy 1)', p);
- { If pi_uses_ymm is set, VZEROUPPER is present to do this for us }
- if (pi_uses_ymm in current_procinfo.flags) then
- RemoveInstruction(hp2)
- else
- begin
- taicpu(hp2).opcode := A_VPXOR;
- taicpu(hp2).opsize := S_YMM;
- taicpu(hp2).loadreg(0, CurrentReg);
- taicpu(hp2).loadreg(1, CurrentReg);
- taicpu(hp2).loadreg(2, CurrentReg);
- taicpu(hp2).ops := 3;
- end;
- RemoveInstruction(hp3);
- Result := True;
- Exit;
- end;
- end
- else
- begin
- { See if the next references are 16 less rather than 16 greater }
- Dec(SourceRef.offset, 32); { -16 the other way }
- if RefsEqual(SourceRef, taicpu(hp2).oper[0]^.ref^) then
- begin
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.Next));
- Dec(TargetRef.offset, 16); { Only 16, not 32, as it wasn't incremented unlike SourceRef }
- if GetNextInstruction(hp2, hp3) and
- MatchInstruction(hp3, A_VMOVDQA, A_VMOVDQU, [S_XMM]) and
- MatchOpType(taicpu(hp3), top_reg, top_ref) and
- (taicpu(hp2).oper[1]^.reg = taicpu(hp3).oper[0]^.reg) and
- RefsEqual(TargetRef, taicpu(hp3).oper[1]^.ref^) and
- not RegUsedAfterInstruction(taicpu(hp2).oper[1]^.reg, hp3, TmpUsedRegs) then
- begin
- { Update the register tracking to the new size }
- AllocRegBetween(CurrentReg, hp2, hp3, UsedRegs);
- { hp2 and hp3 now reference the lower (starting) offsets, so mod = 0 this time }
- { Switch to unaligned if the memory isn't on a 32-byte boundary }
- if not(
- ((SourceRef.offset mod 32) = 0) and
- (SourceRef.alignment >= 32) and ((SourceRef.alignment mod 32) = 0)
- ) then
- taicpu(hp2).opcode := A_VMOVDQU;
- taicpu(hp2).opsize := S_YMM;
- taicpu(hp2).oper[1]^.reg := CurrentReg;
- if not (
- ((TargetRef.offset mod 32) = 0) and
- (TargetRef.alignment >= 32) and ((TargetRef.alignment mod 32) = 0)
- ) then
- taicpu(hp3).opcode := A_VMOVDQU;
- taicpu(hp3).opsize := S_YMM;
- taicpu(hp3).oper[0]^.reg := CurrentReg;
- DebugMsg(SPeepholeOptimization + 'Used ' + debug_regname(CurrentReg) + ' to merge a pair of memory moves (VmovdqxVmovdqxVmovdqxVmovdqx2VmovdqyVmovdqy 2)', p);
- { If pi_uses_ymm is set, VZEROUPPER is present to do this for us }
- if (pi_uses_ymm in current_procinfo.flags) then
- RemoveInstruction(hp1)
- else
- begin
- taicpu(hp1).opcode := A_VPXOR;
- taicpu(hp1).opsize := S_YMM;
- taicpu(hp1).loadreg(0, CurrentReg);
- taicpu(hp1).loadreg(1, CurrentReg);
- taicpu(hp1).loadreg(2, CurrentReg);
- taicpu(hp1).ops := 3;
- Asml.Remove(hp1);
- Asml.InsertAfter(hp1, hp3); { Register deallocations will be after hp3 }
- end;
- RemoveCurrentP(p, hp2);
- Result := True;
- Exit;
- end;
- end;
- end;
- end;
- end;
- end;
- function TX86AsmOptimizer.CheckJumpMovTransferOpt(var p: tai; hp1: tai; LoopCount: Integer; out Count: Integer): Boolean;
- var
- hp2, hp3, first_assignment: tai;
- IncCount, OperIdx: Integer;
- OrigLabel: TAsmLabel;
- begin
- Count := 0;
- Result := False;
- first_assignment := nil;
- if (LoopCount >= 20) then
- begin
- { Guard against infinite loops }
- Exit;
- end;
- if (taicpu(p).oper[0]^.typ <> top_ref) or
- (taicpu(p).oper[0]^.ref^.refaddr <> addr_full) or
- (taicpu(p).oper[0]^.ref^.base <> NR_NO) or
- (taicpu(p).oper[0]^.ref^.index <> NR_NO) or
- not (taicpu(p).oper[0]^.ref^.symbol is TAsmLabel) then
- Exit;
- OrigLabel := TAsmLabel(taicpu(p).oper[0]^.ref^.symbol);
- {
- change
- jmp .L1
- ...
- .L1:
- mov ##, ## ( multiple movs possible )
- jmp/ret
- into
- mov ##, ##
- jmp/ret
- }
- if not Assigned(hp1) then
- begin
- hp1 := GetLabelWithSym(OrigLabel);
- if not Assigned(hp1) or not SkipLabels(hp1, hp1) then
- Exit;
- end;
- hp2 := hp1;
- while Assigned(hp2) do
- begin
- if Assigned(hp2) and (hp2.typ in [ait_label, ait_align]) then
- SkipLabels(hp2,hp2);
- if not Assigned(hp2) or (hp2.typ <> ait_instruction) then
- Break;
- case taicpu(hp2).opcode of
- A_MOVSS:
- begin
- if taicpu(hp2).ops = 0 then
- { Wrong MOVSS }
- Break;
- Inc(Count);
- if Count >= 5 then
- { Too many to be worthwhile }
- Break;
- GetNextInstruction(hp2, hp2);
- Continue;
- end;
- A_MOV,
- A_MOVD,
- A_MOVQ,
- A_MOVSX,
- {$ifdef x86_64}
- A_MOVSXD,
- {$endif x86_64}
- A_MOVZX,
- A_MOVAPS,
- A_MOVUPS,
- A_MOVSD,
- A_MOVAPD,
- A_MOVUPD,
- A_MOVDQA,
- A_MOVDQU,
- A_VMOVSS,
- A_VMOVAPS,
- A_VMOVUPS,
- A_VMOVSD,
- A_VMOVAPD,
- A_VMOVUPD,
- A_VMOVDQA,
- A_VMOVDQU:
- begin
- Inc(Count);
- if Count >= 5 then
- { Too many to be worthwhile }
- Break;
- GetNextInstruction(hp2, hp2);
- Continue;
- end;
- A_JMP:
- begin
- { Guard against infinite loops }
- if taicpu(hp2).oper[0]^.ref^.symbol = OrigLabel then
- Exit;
- { Analyse this jump first in case it also duplicates assignments }
- if CheckJumpMovTransferOpt(hp2, nil, LoopCount + 1, IncCount) then
- begin
- { Something did change! }
- Result := True;
- Inc(Count, IncCount);
- if Count >= 5 then
- begin
- { Too many to be worthwhile }
- Exit;
- end;
- if MatchInstruction(hp2, [A_JMP, A_RET], []) then
- Break;
- end;
- Result := True;
- Break;
- end;
- A_RET:
- begin
- Result := True;
- Break;
- end;
- else
- Break;
- end;
- end;
- if Result then
- begin
- { A count of zero can happen when CheckJumpMovTransferOpt is called recursively }
- if Count = 0 then
- begin
- Result := False;
- Exit;
- end;
- hp3 := p;
- DebugMsg(SPeepholeOptimization + 'Duplicated ' + debug_tostr(Count) + ' assignment(s) and redirected jump', p);
- while True do
- begin
- if Assigned(hp1) and (hp1.typ in [ait_label, ait_align]) then
- SkipLabels(hp1,hp1);
- if (hp1.typ <> ait_instruction) then
- InternalError(2021040720);
- case taicpu(hp1).opcode of
- A_JMP:
- begin
- { Change the original jump to the new destination }
- OrigLabel.decrefs;
- taicpu(hp1).oper[0]^.ref^.symbol.increfs;
- taicpu(p).loadref(0, taicpu(hp1).oper[0]^.ref^);
- { Set p to the first duplicated assignment so it can get optimised if needs be }
- if not Assigned(first_assignment) then
- InternalError(2021040810)
- else
- p := first_assignment;
- Exit;
- end;
- A_RET:
- begin
- { Now change the jump into a RET instruction }
- ConvertJumpToRET(p, hp1);
- { Set p to the first duplicated assignment so it can get optimised if needs be }
- if not Assigned(first_assignment) then
- InternalError(2021040811)
- else
- p := first_assignment;
- Exit;
- end;
- else
- begin
- { Duplicate the MOV instruction }
- hp3:=tai(hp1.getcopy);
- if first_assignment = nil then
- first_assignment := hp3;
- asml.InsertBefore(hp3, p);
- { Make sure the compiler knows about any final registers written here }
- for OperIdx := 0 to taicpu(hp3).ops - 1 do
- with taicpu(hp3).oper[OperIdx]^ do
- begin
- case typ of
- top_ref:
- begin
- if (ref^.base <> NR_NO) and
- (getsupreg(ref^.base) <> RS_ESP) and
- (getsupreg(ref^.base) <> RS_EBP)
- {$ifdef x86_64} and (ref^.base <> NR_RIP) {$endif x86_64}
- then
- AllocRegBetween(ref^.base, hp3, tai(p.Next), UsedRegs);
- if (ref^.index <> NR_NO) and
- (getsupreg(ref^.index) <> RS_ESP) and
- (getsupreg(ref^.index) <> RS_EBP)
- {$ifdef x86_64} and (ref^.index <> NR_RIP) {$endif x86_64} and
- (ref^.index <> ref^.base) then
- AllocRegBetween(ref^.index, hp3, tai(p.Next), UsedRegs);
- end;
- top_reg:
- AllocRegBetween(reg, hp3, tai(p.Next), UsedRegs);
- else
- ;
- end;
- end;
- end;
- end;
- if not GetNextInstruction(hp1, hp1) then
- { Should have dropped out earlier }
- InternalError(2021040710);
- end;
- end;
- end;
- function TX86AsmOptimizer.TrySwapMovCmp(var p, hp1: tai): Boolean;
- var
- hp2: tai;
- X: Integer;
- const
- WriteOp: array[0..3] of set of TInsChange = (
- [Ch_Wop1, Ch_RWop1, Ch_Mop1],
- [Ch_Wop2, Ch_RWop2, Ch_Mop2],
- [Ch_Wop3, Ch_RWop3, Ch_Mop3],
- [Ch_Wop4, Ch_RWop4, Ch_Mop4]);
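- { WriteOp[n] holds the instruction-change flags that mark operand n+1 as
-   written or modified; it is checked against InsProp[opcode].Ch further down }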
- RegWriteFlags: array[0..7] of set of TInsChange = (
- { The order is important: EAX, ECX, EDX, EBX, ESI, EDI, EBP, ESP }
- [Ch_WEAX, Ch_RWEAX, Ch_MEAX{$ifdef x86_64}, Ch_WRAX, Ch_RWRAX, Ch_MRAX{$endif x86_64}],
- [Ch_WECX, Ch_RWECX, Ch_MECX{$ifdef x86_64}, Ch_WRCX, Ch_RWRCX, Ch_MRCX{$endif x86_64}],
- [Ch_WEDX, Ch_RWEDX, Ch_MEDX{$ifdef x86_64}, Ch_WRDX, Ch_RWRDX, Ch_MRDX{$endif x86_64}],
- [Ch_WEBX, Ch_RWEBX, Ch_MEBX{$ifdef x86_64}, Ch_WRBX, Ch_RWRBX, Ch_MRBX{$endif x86_64}],
- [Ch_WESI, Ch_RWESI, Ch_MESI{$ifdef x86_64}, Ch_WRSI, Ch_RWRSI, Ch_MRSI{$endif x86_64}],
- [Ch_WEDI, Ch_RWEDI, Ch_MEDI{$ifdef x86_64}, Ch_WRDI, Ch_RWRDI, Ch_MRDI{$endif x86_64}],
- [Ch_WEBP, Ch_RWEBP, Ch_MEBP{$ifdef x86_64}, Ch_WRBP, Ch_RWRBP, Ch_MRBP{$endif x86_64}],
- [Ch_WESP, Ch_RWESP, Ch_MESP{$ifdef x86_64}, Ch_WRSP, Ch_RWRSP, Ch_MRSP{$endif x86_64}]);
- begin
- { If we have something like:
- cmp ###,%reg1
- mov 0,%reg2
- And no modified registers are shared, move the instruction to before
- the comparison as this means it can be optimised without worrying
- about the FLAGS register. (CMP/MOV is generated by
- "J(c)Mov1JmpMov0 -> Set(~c)", among other things).
- As long as the second instruction doesn't use the flags or one of the
- registers used by CMP or TEST (also check any references that use the
- registers), then it can be moved prior to the comparison.
- }
- Result := False;
- if (hp1.typ <> ait_instruction) or
- taicpu(hp1).is_jmp or
- RegInInstruction(NR_DEFAULTFLAGS, hp1) then
- Exit;
- { NOP is a pipeline fence, likely marking the beginning of the function
- epilogue, so drop out. Similarly, drop out if POP or RET are
- encountered }
- if MatchInstruction(hp1, A_NOP, A_POP, []) then
- Exit;
- if (taicpu(hp1).opcode = A_MOVSS) and
- (taicpu(hp1).ops = 0) then
- { Wrong MOVSS }
- Exit;
- { Check for writes to specific registers first }
- { EAX, ECX, EDX, EBX, ESI, EDI, EBP, ESP in that order }
- for X := 0 to 7 do
- if (RegWriteFlags[X] * InsProp[taicpu(hp1).opcode].Ch <> [])
- and RegInInstruction(newreg(R_INTREGISTER, TSuperRegister(X), R_SUBWHOLE), p) then
- Exit;
- for X := 0 to taicpu(hp1).ops - 1 do
- begin
- { Check to see if this operand writes to something }
- if ((WriteOp[X] * InsProp[taicpu(hp1).opcode].Ch) <> []) and
- { And matches something in the CMP/TEST instruction }
- (
- MatchOperand(taicpu(hp1).oper[X]^, taicpu(p).oper[0]^) or
- MatchOperand(taicpu(hp1).oper[X]^, taicpu(p).oper[1]^) or
- (
- { If it's a register, make sure the register written to doesn't
- appear in the cmp instruction as part of a reference }
- (taicpu(hp1).oper[X]^.typ = top_reg) and
- RegInInstruction(taicpu(hp1).oper[X]^.reg, p)
- )
- ) then
- Exit;
- end;
- { The instruction can be safely moved }
- asml.Remove(hp1);
- { Try to insert after the last instruction where the FLAGS register is not yet in use }
- if not GetLastInstruction(p, hp2) then
- asml.InsertBefore(hp1, p)
- else
- asml.InsertAfter(hp1, hp2);
- DebugMsg(SPeepholeOptimization + 'Swapped ' + debug_op2str(taicpu(p).opcode) + ' and ' + debug_op2str(taicpu(hp1).opcode) + ' instructions to improve optimisation potential', hp1);
- for X := 0 to taicpu(hp1).ops - 1 do
- case taicpu(hp1).oper[X]^.typ of
- top_reg:
- AllocRegBetween(taicpu(hp1).oper[X]^.reg, hp1, p, UsedRegs);
- top_ref:
- begin
- if taicpu(hp1).oper[X]^.ref^.base <> NR_NO then
- AllocRegBetween(taicpu(hp1).oper[X]^.ref^.base, hp1, p, UsedRegs);
- if taicpu(hp1).oper[X]^.ref^.index <> NR_NO then
- AllocRegBetween(taicpu(hp1).oper[X]^.ref^.index, hp1, p, UsedRegs);
- end;
- else
- ;
- end;
- if taicpu(hp1).opcode = A_LEA then
- { The flags will be overwritten by the CMP/TEST instruction }
- ConvertLEA(taicpu(hp1));
- Result := True;
- end;
- function TX86AsmOptimizer.OptPass2MOV(var p : tai) : boolean;
- function IsXCHGAcceptable: Boolean; inline;
- begin
- { Always accept if optimising for size }
- Result := (cs_opt_size in current_settings.optimizerswitches) or
- (
- {$ifdef x86_64}
- { XCHG takes 3 cycles on AMD Athlon64 }
- (current_settings.optimizecputype >= cpu_core_i)
- {$else x86_64}
- { From the Pentium M onwards, XCHG only has a latency of 2 rather
- than 3, so it becomes a saving compared to three MOVs with two of
- them able to execute simultaneously. [Kit] }
- (current_settings.optimizecputype >= cpu_PentiumM)
- {$endif x86_64}
- );
- end;
- var
- NewRef: TReference;
- hp1, hp2, hp3, hp4: Tai;
- {$ifndef x86_64}
- OperIdx: Integer;
- {$endif x86_64}
- NewInstr : Taicpu;
- NewAlign : Tai_align;
- DestLabel: TAsmLabel;
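- { Folds the MOV at p together with the ADD/SUB-by-constant at InputInstr into
-   a single LEA (e.g. "mov %reg1,%reg2; add $x,%reg2" -> "lea x(%reg1),%reg2"),
-   but only when the flags written by the ADD/SUB are not read afterwards,
-   since LEA does not set any flags }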
- function TryMovArith2Lea(InputInstr: tai): Boolean;
- var
- NextInstr: tai;
- begin
- Result := False;
- UpdateUsedRegs(TmpUsedRegs, tai(InputInstr.Next));
- if not GetNextInstruction(InputInstr, NextInstr) or
- (
- { The FLAGS register isn't always tracked properly, so do not
- perform this optimisation if a conditional statement follows }
- not RegReadByInstruction(NR_DEFAULTFLAGS, NextInstr) and
- not RegUsedAfterInstruction(NR_DEFAULTFLAGS, NextInstr, TmpUsedRegs)
- ) then
- begin
- reference_reset(NewRef, 1, []);
- NewRef.base := taicpu(p).oper[0]^.reg;
- NewRef.scalefactor := 1;
- if taicpu(InputInstr).opcode = A_ADD then
- begin
- DebugMsg(SPeepholeOptimization + 'MovAdd2Lea', p);
- NewRef.offset := taicpu(InputInstr).oper[0]^.val;
- end
- else
- begin
- DebugMsg(SPeepholeOptimization + 'MovSub2Lea', p);
- NewRef.offset := -taicpu(InputInstr).oper[0]^.val;
- end;
- taicpu(p).opcode := A_LEA;
- taicpu(p).loadref(0, NewRef);
- RemoveInstruction(InputInstr);
- Result := True;
- end;
- end;
- begin
- Result:=false;
- { This optimisation adds an instruction, so only do it for speed }
- if not (cs_opt_size in current_settings.optimizerswitches) and
- MatchOpType(taicpu(p), top_const, top_reg) and
- (taicpu(p).oper[0]^.val = 0) then
- begin
- { To avoid compiler warning }
- DestLabel := nil;
- if (p.typ <> ait_instruction) or (taicpu(p).oper[1]^.typ <> top_reg) then
- InternalError(2021040750);
- if not GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[1]^.reg) then
- Exit;
- case hp1.typ of
- ait_label:
- begin
- { Change:
- mov $0,%reg mov $0,%reg
- @Lbl1: @Lbl1:
- test %reg,%reg / cmp $0,%reg test %reg,%reg / cmp $0,%reg
- je @Lbl2 jne @Lbl2
- To: To:
- mov $0,%reg mov $0,%reg
- jmp @Lbl2 jmp @Lbl3
- (align) (align)
- @Lbl1: @Lbl1:
- test %reg,%reg / cmp $0,%reg test %reg,%reg / cmp $0,%reg
- je @Lbl2 je @Lbl2
- @Lbl3: <-- Only if label exists
- (Not if it's optimised for size)
- }
- if not GetNextInstruction(hp1, hp2) then
- Exit;
- if not (cs_opt_size in current_settings.optimizerswitches) and
- (hp2.typ = ait_instruction) and
- (
- { Register sizes must exactly match }
- (
- (taicpu(hp2).opcode = A_CMP) and
- MatchOperand(taicpu(hp2).oper[0]^, 0) and
- MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^.reg)
- ) or (
- (taicpu(hp2).opcode = A_TEST) and
- MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[1]^.reg) and
- MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^.reg)
- )
- ) and GetNextInstruction(hp2, hp3) and
- (hp3.typ = ait_instruction) and
- (taicpu(hp3).opcode = A_JCC) and
- (taicpu(hp3).oper[0]^.typ=top_ref) and (taicpu(hp3).oper[0]^.ref^.refaddr=addr_full) and (taicpu(hp3).oper[0]^.ref^.base=NR_NO) and
- (taicpu(hp3).oper[0]^.ref^.index=NR_NO) and (taicpu(hp3).oper[0]^.ref^.symbol is tasmlabel) then
- begin
- { Check condition of jump }
- { Always true? }
- if condition_in(C_E, taicpu(hp3).condition) then
- begin
- { Copy label symbol and obtain matching label entry for the
- conditional jump, as this will be our destination}
- DestLabel := tasmlabel(taicpu(hp3).oper[0]^.ref^.symbol);
- DebugMsg(SPeepholeOptimization + 'Mov0LblCmp0Je -> Mov0JmpLblCmp0Je', p);
- Result := True;
- end
- { Always false? }
- else if condition_in(C_NE, taicpu(hp3).condition) and GetNextInstruction(hp3, hp2) then
- begin
- { This is only worth it if there's a jump to take }
- case hp2.typ of
- ait_instruction:
- begin
- if taicpu(hp2).opcode = A_JMP then
- begin
- DestLabel := tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol);
- { An unconditional jump follows the conditional jump which will always be false,
- so use this jump's destination for the new jump }
- DebugMsg(SPeepholeOptimization + 'Mov0LblCmp0Jne -> Mov0JmpLblCmp0Jne (with JMP)', p);
- Result := True;
- end
- else if taicpu(hp2).opcode = A_JCC then
- begin
- DestLabel := tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol);
- if condition_in(C_E, taicpu(hp2).condition) then
- begin
- { A second conditional jump follows the conditional jump which will always be false,
- while the second jump is always True, so use this jump's destination for the new jump }
- DebugMsg(SPeepholeOptimization + 'Mov0LblCmp0Jne -> Mov0JmpLblCmp0Jne (with second Jcc)', p);
- Result := True;
- end;
- { Don't risk it if the jump isn't always true (Result remains False) }
- end;
- end;
- else
- { If anything else don't optimise };
- end;
- end;
- if Result then
- begin
- { Just so we have something to insert as a parameter }
- reference_reset(NewRef, 1, []);
- NewInstr := taicpu.op_ref(A_JMP, S_NO, NewRef);
- { Now actually load the correct parameter }
- NewInstr.loadsymbol(0, DestLabel, 0);
- { Get instruction before original label (may not be p under -O3) }
- if not GetLastInstruction(hp1, hp2) then
- { Shouldn't fail here }
- InternalError(2021040701);
- DestLabel.increfs;
- AsmL.InsertAfter(NewInstr, hp2);
- { Add new alignment field }
- (* AsmL.InsertAfter(
- cai_align.create_max(
- current_settings.alignment.jumpalign,
- current_settings.alignment.jumpalignskipmax
- ),
- NewInstr
- ); *)
- end;
- Exit;
- end;
- end;
- else
- ;
- end;
- end;
- if not GetNextInstruction(p, hp1) then
- Exit;
- if MatchInstruction(hp1, A_CMP, A_TEST, [taicpu(p).opsize])
- and DoMovCmpMemOpt(p, hp1, True) then
- begin
- Result := True;
- Exit;
- end
- else if MatchInstruction(hp1, A_JMP, [S_NO]) then
- begin
- { Sometimes the MOVs that OptPass2JMP produces can be improved
- further, but we can't just put this jump optimisation in pass 1
- because it tends to perform worse when conditional jumps are
- nearby (e.g. when converting CMOV instructions). [Kit] }
- if OptPass2JMP(hp1) then
- { call OptPass1MOV once to potentially merge any MOVs that were created }
- Result := OptPass1MOV(p)
- { OptPass2MOV will now exit but will be called again if OptPass1MOV
- returned True and the instruction is still a MOV, thus checking
- the optimisations below }
- { If OptPass2JMP returned False, no optimisations were done to
- the jump and there are no further optimisations that can be done
- to the MOV instruction on this pass }
- end
- else if MatchOpType(taicpu(p),top_reg,top_reg) and
- (taicpu(p).opsize in [S_L{$ifdef x86_64}, S_Q{$endif x86_64}]) and
- MatchInstruction(hp1,A_ADD,A_SUB,[taicpu(p).opsize]) and
- (taicpu(hp1).oper[1]^.typ = top_reg) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg) then
- begin
- { Change:
- movl/q %reg1,%reg2 movl/q %reg1,%reg2
- addl/q $x,%reg2 subl/q $x,%reg2
- To:
- leal/q x(%reg1),%reg2 leal/q -x(%reg1),%reg2
- }
- if (taicpu(hp1).oper[0]^.typ = top_const) and
- { be lazy, checking separately for sub would be slightly better }
- (abs(taicpu(hp1).oper[0]^.val)<=$7fffffff) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- if TryMovArith2Lea(hp1) then
- begin
- Result := True;
- Exit;
- end
- end
- else if not RegInOp(taicpu(p).oper[1]^.reg, taicpu(hp1).oper[0]^) and
- GetNextInstructionUsingReg(hp1, hp2, taicpu(p).oper[1]^.reg) and
- { Same as above, but also adds or subtracts to %reg2 in between.
- It's still valid as long as the flags aren't in use }
- MatchInstruction(hp2,A_ADD,A_SUB,[taicpu(p).opsize]) and
- MatchOpType(taicpu(hp2), top_const, top_reg) and
- (taicpu(hp2).oper[1]^.reg = taicpu(p).oper[1]^.reg) and
- { be lazy, checking separately for sub would be slightly better }
- (abs(taicpu(hp2).oper[0]^.val)<=$7fffffff) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- if TryMovArith2Lea(hp2) then
- begin
- Result := True;
- Exit;
- end;
- end;
- end
- else if MatchOpType(taicpu(p),top_reg,top_reg) and
- {$ifdef x86_64}
- MatchInstruction(hp1,A_MOVZX,A_MOVSX,A_MOVSXD,[]) and
- {$else x86_64}
- MatchInstruction(hp1,A_MOVZX,A_MOVSX,[]) and
- {$endif x86_64}
- MatchOpType(taicpu(hp1),top_reg,top_reg) and
- (taicpu(hp1).oper[0]^.reg = taicpu(p).oper[1]^.reg) then
- { mov reg1, reg2 mov reg1, reg2
- movzx/sx reg2, reg3 to movzx/sx reg1, reg3}
- begin
- taicpu(hp1).oper[0]^.reg := taicpu(p).oper[0]^.reg;
- DebugMsg(SPeepholeOptimization + 'mov %reg1,%reg2; movzx/sx %reg2,%reg3 -> mov %reg1,%reg2;movzx/sx %reg1,%reg3',p);
- { Don't remove the MOV command without first checking that reg2 isn't used afterwards,
- or that supreg(reg3) = supreg(reg2). [Kit] }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if (getsupreg(taicpu(p).oper[1]^.reg) = getsupreg(taicpu(hp1).oper[1]^.reg)) or
- not RegUsedAfterInstruction(taicpu(p).oper[1]^.reg, hp1, TmpUsedRegs)
- then
- begin
- RemoveCurrentP(p, hp1);
- Result:=true;
- end;
- exit;
- end
- else if MatchOpType(taicpu(p),top_reg,top_reg) and
- IsXCHGAcceptable and
- { XCHG doesn't support 8-byte registers }
- (taicpu(p).opsize <> S_B) and
- MatchInstruction(hp1, A_MOV, []) and
- MatchOpType(taicpu(hp1),top_reg,top_reg) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_MOV, []) and
- { Don't need to call MatchOpType for hp2 because the operand matches below cover for it }
- MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[1]^.reg) and
- MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) then
- begin
- { mov %reg1,%reg2
- mov %reg3,%reg1 -> xchg %reg3,%reg1
- mov %reg2,%reg3
- (%reg2 not used afterwards)
- Note that xchg takes 3 cycles to execute, and generally mov's take
- only one cycle apiece, but the first two mov's can be executed in
- parallel, only taking 2 cycles overall. Older processors should
- therefore only optimise for size. [Kit]
- }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- if not RegUsedAfterInstruction(taicpu(p).oper[1]^.reg, hp2, TmpUsedRegs) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovMovMov2XChg', p);
- AllocRegBetween(taicpu(hp2).oper[1]^.reg, p, hp1, UsedRegs);
- taicpu(hp1).opcode := A_XCHG;
- RemoveCurrentP(p, hp1);
- RemoveInstruction(hp2);
- Result := True;
- Exit;
- end;
- end
- else if MatchOpType(taicpu(p),top_reg,top_reg) and
- MatchInstruction(hp1, A_SAR, []) then
- begin
- if MatchOperand(taicpu(hp1).oper[0]^, 31) then
- begin
- { the use of %edx also covers the opsize being S_L }
- if MatchOperand(taicpu(hp1).oper[1]^, NR_EDX) then
- begin
- { Note it has to be specifically "movl %eax,%edx", and those specific sub-registers }
- if (taicpu(p).oper[0]^.reg = NR_EAX) and
- (taicpu(p).oper[1]^.reg = NR_EDX) then
- begin
- { Change:
- movl %eax,%edx
- sarl $31,%edx
- To:
- cltd
- }
- DebugMsg(SPeepholeOptimization + 'MovSar2Cltd', p);
- RemoveInstruction(hp1);
- taicpu(p).opcode := A_CDQ;
- taicpu(p).opsize := S_NO;
- taicpu(p).clearop(1);
- taicpu(p).clearop(0);
- taicpu(p).ops:=0;
- Result := True;
- end
- else if (cs_opt_size in current_settings.optimizerswitches) and
- (taicpu(p).oper[0]^.reg = NR_EDX) and
- (taicpu(p).oper[1]^.reg = NR_EAX) then
- begin
- { Change:
- movl %edx,%eax
- sarl $31,%edx
- To:
- movl %edx,%eax
- cltd
- Note that this creates a dependency between the two instructions,
- so only perform if optimising for size.
- }
- DebugMsg(SPeepholeOptimization + 'MovSar2MovCltd', p);
- taicpu(hp1).opcode := A_CDQ;
- taicpu(hp1).opsize := S_NO;
- taicpu(hp1).clearop(1);
- taicpu(hp1).clearop(0);
- taicpu(hp1).ops:=0;
- end;
- {$ifndef x86_64}
- end
- { Don't bother if CMOV is supported, because a more optimal
- sequence would have been generated for the Abs() intrinsic }
- else if not(CPUX86_HAS_CMOV in cpu_capabilities[current_settings.cputype]) and
- { the use of %eax also covers the opsize being S_L }
- MatchOperand(taicpu(hp1).oper[1]^, NR_EAX) and
- (taicpu(p).oper[0]^.reg = NR_EAX) and
- (taicpu(p).oper[1]^.reg = NR_EDX) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_XOR, [S_L]) and
- MatchOperand(taicpu(hp2).oper[0]^, NR_EAX) and
- MatchOperand(taicpu(hp2).oper[1]^, NR_EDX) and
- GetNextInstruction(hp2, hp3) and
- MatchInstruction(hp3, A_SUB, [S_L]) and
- MatchOperand(taicpu(hp3).oper[0]^, NR_EAX) and
- MatchOperand(taicpu(hp3).oper[1]^, NR_EDX) then
- begin
- { Change:
- movl %eax,%edx
- sarl $31,%eax
- xorl %eax,%edx
- subl %eax,%edx
- (Instruction that uses %edx)
- (%eax deallocated)
- (%edx deallocated)
- To:
- cltd
- xorl %edx,%eax <-- Note the registers have swapped
- subl %edx,%eax
- (Instruction that uses %eax) <-- %eax rather than %edx
- }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.Next));
- if not RegUsedAfterInstruction(NR_EAX, hp3, TmpUsedRegs) then
- begin
- if GetNextInstruction(hp3, hp4) and
- not RegModifiedByInstruction(NR_EDX, hp4) and
- not RegUsedAfterInstruction(NR_EDX, hp4, TmpUsedRegs) then
- begin
- DebugMsg(SPeepholeOptimization + 'abs() intrinsic optimisation', p);
- taicpu(p).opcode := A_CDQ;
- taicpu(p).clearop(1);
- taicpu(p).clearop(0);
- taicpu(p).ops:=0;
- RemoveInstruction(hp1);
- taicpu(hp2).loadreg(0, NR_EDX);
- taicpu(hp2).loadreg(1, NR_EAX);
- taicpu(hp3).loadreg(0, NR_EDX);
- taicpu(hp3).loadreg(1, NR_EAX);
- AllocRegBetween(NR_EAX, hp3, hp4, TmpUsedRegs);
- { Convert references in the following instruction (hp4) from %edx to %eax }
- for OperIdx := 0 to taicpu(hp4).ops - 1 do
- with taicpu(hp4).oper[OperIdx]^ do
- case typ of
- top_reg:
- if getsupreg(reg) = RS_EDX then
- reg := newreg(R_INTREGISTER,RS_EAX,getsubreg(reg));
- top_ref:
- begin
- if getsupreg(ref^.base) = RS_EDX then
- ref^.base := newreg(R_INTREGISTER,RS_EAX,getsubreg(ref^.base));
- if getsupreg(ref^.index) = RS_EDX then
- ref^.index := newreg(R_INTREGISTER,RS_EAX,getsubreg(ref^.index));
- end;
- else
- ;
- end;
- end;
- end;
- {$else x86_64}
- end;
- end
- else if MatchOperand(taicpu(hp1).oper[0]^, 63) and
- { the use of %rdx also covers the opsize being S_Q }
- MatchOperand(taicpu(hp1).oper[1]^, NR_RDX) then
- begin
- { Note it has to be specifically "movq %rax,%rdx", and those specific sub-registers }
- if (taicpu(p).oper[0]^.reg = NR_RAX) and
- (taicpu(p).oper[1]^.reg = NR_RDX) then
- begin
- { Change:
- movq %rax,%rdx
- sarq $63,%rdx
- To:
- cqto
- }
- DebugMsg(SPeepholeOptimization + 'MovSar2Cqto', p);
- RemoveInstruction(hp1);
- taicpu(p).opcode := A_CQO;
- taicpu(p).opsize := S_NO;
- taicpu(p).clearop(1);
- taicpu(p).clearop(0);
- taicpu(p).ops:=0;
- Result := True;
- end
- else if (cs_opt_size in current_settings.optimizerswitches) and
- (taicpu(p).oper[0]^.reg = NR_RDX) and
- (taicpu(p).oper[1]^.reg = NR_RAX) then
- begin
- { Change:
- movq %rdx,%rax
- sarq $63,%rdx
- To:
- movq %rdx,%rax
- cqto
- Note that this creates a dependency between the two instructions,
- so only perform if optimising for size.
- }
- DebugMsg(SPeepholeOptimization + 'MovSar2MovCqto', p);
- taicpu(hp1).opcode := A_CQO;
- taicpu(hp1).opsize := S_NO;
- taicpu(hp1).clearop(1);
- taicpu(hp1).clearop(0);
- taicpu(hp1).ops:=0;
- {$endif x86_64}
- end;
- end;
- end
- else if MatchInstruction(hp1, A_MOV, []) and
- (taicpu(hp1).oper[1]^.typ = top_reg) then
- { Though "GetNextInstruction" could be factored out, along with
- the instructions that depend on hp2, it is an expensive call that
- should be delayed for as long as possible, hence we do cheaper
- checks first that are likely to be False. [Kit] }
- begin
- if (
- (
- MatchOperand(taicpu(p).oper[1]^, NR_EDX) and
- (taicpu(hp1).oper[1]^.reg = NR_EAX) and
- (
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^) or
- MatchOperand(taicpu(hp1).oper[0]^, NR_EDX)
- )
- ) or
- (
- MatchOperand(taicpu(p).oper[1]^, NR_EAX) and
- (taicpu(hp1).oper[1]^.reg = NR_EDX) and
- (
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^) or
- MatchOperand(taicpu(hp1).oper[0]^, NR_EAX)
- )
- )
- ) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_SAR, []) and
- MatchOperand(taicpu(hp2).oper[0]^, 31) then
- begin
- if MatchOperand(taicpu(hp2).oper[1]^, NR_EDX) then
- begin
- { Change:
- movl r/m,%edx movl r/m,%eax movl r/m,%edx movl r/m,%eax
- movl %edx,%eax or movl %eax,%edx or movl r/m,%eax or movl r/m,%edx
- sarl $31,%edx sarl $31,%edx sarl $31,%edx sarl $31,%edx
- To:
- movl r/m,%eax <- Note the change in register
- cltd
- }
- DebugMsg(SPeepholeOptimization + 'MovMovSar2MovCltd', p);
- AllocRegBetween(NR_EAX, p, hp1, UsedRegs);
- taicpu(p).loadreg(1, NR_EAX);
- taicpu(hp1).opcode := A_CDQ;
- taicpu(hp1).clearop(1);
- taicpu(hp1).clearop(0);
- taicpu(hp1).ops:=0;
- RemoveInstruction(hp2);
- (*
- {$ifdef x86_64}
- end
- else if MatchOperand(taicpu(hp2).oper[1]^, NR_RDX) and
- { This code sequence does not get generated - however it might become useful
- if and when 128-bit signed integer types make an appearance, so the code
- is kept here for when it is eventually needed. [Kit] }
- (
- (
- (taicpu(hp1).oper[1]^.reg = NR_RAX) and
- (
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^) or
- MatchOperand(taicpu(hp1).oper[0]^, NR_RDX)
- )
- ) or
- (
- (taicpu(hp1).oper[1]^.reg = NR_RDX) and
- (
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^) or
- MatchOperand(taicpu(hp1).oper[0]^, NR_RAX)
- )
- )
- ) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_SAR, [S_Q]) and
- MatchOperand(taicpu(hp2).oper[0]^, 63) and
- MatchOperand(taicpu(hp2).oper[1]^, NR_RDX) then
- begin
- { Change:
- movq r/m,%rdx movq r/m,%rax movq r/m,%rdx movq r/m,%rax
- movq %rdx,%rax or movq %rax,%rdx or movq r/m,%rax or movq r/m,%rdx
- sarq $63,%rdx sarq $63,%rdx sarq $63,%rdx sarq $63,%rdx
- To:
- movq r/m,%rax <- Note the change in register
- cqto
- }
- DebugMsg(SPeepholeOptimization + 'MovMovSar2MovCqto', p);
- AllocRegBetween(NR_RAX, p, hp1, UsedRegs);
- taicpu(p).loadreg(1, NR_RAX);
- taicpu(hp1).opcode := A_CQO;
- taicpu(hp1).clearop(1);
- taicpu(hp1).clearop(0);
- taicpu(hp1).ops:=0;
- RemoveInstruction(hp2);
- {$endif x86_64}
- *)
- end;
- end;
- {$ifdef x86_64}
- end
- else if (taicpu(p).opsize = S_L) and
- (taicpu(p).oper[1]^.typ = top_reg) and
- (
- MatchInstruction(hp1, A_MOV,[]) and
- (taicpu(hp1).opsize = S_L) and
- (taicpu(hp1).oper[1]^.typ = top_reg)
- ) and (
- GetNextInstruction(hp1, hp2) and
- (tai(hp2).typ=ait_instruction) and
- (taicpu(hp2).opsize = S_Q) and
- (
- (
- MatchInstruction(hp2, A_ADD,[]) and
- (taicpu(hp2).opsize = S_Q) and
- (taicpu(hp2).oper[0]^.typ = top_reg) and (taicpu(hp2).oper[1]^.typ = top_reg) and
- (
- (
- (getsupreg(taicpu(hp2).oper[0]^.reg) = getsupreg(taicpu(p).oper[1]^.reg)) and
- (getsupreg(taicpu(hp2).oper[1]^.reg) = getsupreg(taicpu(hp1).oper[1]^.reg))
- ) or (
- (getsupreg(taicpu(hp2).oper[0]^.reg) = getsupreg(taicpu(hp1).oper[1]^.reg)) and
- (getsupreg(taicpu(hp2).oper[1]^.reg) = getsupreg(taicpu(p).oper[1]^.reg))
- )
- )
- ) or (
- MatchInstruction(hp2, A_LEA,[]) and
- (taicpu(hp2).oper[0]^.ref^.offset = 0) and
- (taicpu(hp2).oper[0]^.ref^.scalefactor <= 1) and
- (
- (
- (getsupreg(taicpu(hp2).oper[0]^.ref^.base) = getsupreg(taicpu(p).oper[1]^.reg)) and
- (getsupreg(taicpu(hp2).oper[0]^.ref^.index) = getsupreg(taicpu(hp1).oper[1]^.reg))
- ) or (
- (getsupreg(taicpu(hp2).oper[0]^.ref^.base) = getsupreg(taicpu(hp1).oper[1]^.reg)) and
- (getsupreg(taicpu(hp2).oper[0]^.ref^.index) = getsupreg(taicpu(p).oper[1]^.reg))
- )
- ) and (
- (
- (getsupreg(taicpu(hp2).oper[1]^.reg) = getsupreg(taicpu(hp1).oper[1]^.reg))
- ) or (
- (getsupreg(taicpu(hp2).oper[1]^.reg) = getsupreg(taicpu(p).oper[1]^.reg))
- )
- )
- )
- )
- ) and (
- GetNextInstruction(hp2, hp3) and
- MatchInstruction(hp3, A_SHR,[]) and
- (taicpu(hp3).opsize = S_Q) and
- (taicpu(hp3).oper[0]^.typ = top_const) and (taicpu(hp2).oper[1]^.typ = top_reg) and
- (taicpu(hp3).oper[0]^.val = 1) and
- (taicpu(hp3).oper[1]^.reg = taicpu(hp2).oper[1]^.reg)
- ) then
- begin
- { Change movl x, reg1d movl x, reg1d
- movl y, reg2d movl y, reg2d
- addq reg2q,reg1q or leaq (reg1q,reg2q),reg1q
- shrq $1, reg1q shrq $1, reg1q
- ( reg1d and reg2d can be switched around in the first two instructions )
- To movl x, reg1d
- addl y, reg1d
- rcrl $1, reg1d
- This corresponds to the common expression (x + y) shr 1, where
- x and y are Cardinals (replacing "shr 1" with "div 2" produces
- smaller code, but won't account for x + y causing an overflow). [Kit]
- }
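- { Illustration: the 32-bit ADD leaves the 33rd bit of the true sum in the
- carry flag, and "rcrl $1" rotates that carry back in as the new top bit
- while shifting everything else right, so the result is the full sum
- divided by 2 with no overflow. }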
- if (getsupreg(taicpu(hp2).oper[1]^.reg) = getsupreg(taicpu(hp1).oper[1]^.reg)) then
- { Change first MOV command to have the same register as the final output }
- taicpu(p).oper[1]^.reg := taicpu(hp1).oper[1]^.reg
- else
- taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
- { Change second MOV command to an ADD command. This is easier than
- converting the existing command because it means we don't have to
- touch 'y', which might be a complicated reference, nor care
- whether the third command is ADD or LEA. [Kit] }
- taicpu(hp1).opcode := A_ADD;
- { Delete old ADD/LEA instruction }
- RemoveInstruction(hp2);
- { Convert "shrq $1, reg1q" to "rcr $1, reg1d" }
- taicpu(hp3).opcode := A_RCR;
- taicpu(hp3).changeopsize(S_L);
- setsubreg(taicpu(hp3).oper[1]^.reg, R_SUBD);
- {$endif x86_64}
- end;
- end;
- {$push}
- {$q-}{$r-}
- function TX86AsmOptimizer.OptPass2Movx(var p : tai) : boolean;
- var
- ThisReg: TRegister;
- MinSize, MaxSize, TryShiftDown, TargetSize: TOpSize;
- TargetSubReg: TSubRegister;
- hp1, hp2: tai;
- RegInUse, RegChanged, p_removed: Boolean;
- { Store list of found instructions so we don't have to call
- GetNextInstructionUsingReg multiple times }
- InstrList: array of taicpu;
- InstrMax, Index: Integer;
- UpperLimit, SignedUpperLimit, SignedUpperLimitBottom,
- LowerLimit, SignedLowerLimit, SignedLowerLimitBottom,
- TryShiftDownLimit, TryShiftDownSignedLimit, TryShiftDownSignedLimitLower,
- WorkingValue: TCgInt;
- PreMessage: string;
- { Data flow analysis }
- TestValMin, TestValMax, TestValSignedMax: TCgInt;
- BitwiseOnly, OrXorUsed,
- ShiftDownOverflow, UpperSignedOverflow, UpperUnsignedOverflow, LowerSignedOverflow, LowerUnsignedOverflow: Boolean;
- function CheckOverflowConditions: Boolean;
- begin
- Result := True;
- if (TestValSignedMax > SignedUpperLimit) then
- UpperSignedOverflow := True;
- if (TestValSignedMax > SignedLowerLimit) or (TestValSignedMax < SignedLowerLimitBottom) then
- LowerSignedOverflow := True;
- if (TestValMin > LowerLimit) or (TestValMax > LowerLimit) then
- LowerUnsignedOverflow := True;
- if (TestValMin > UpperLimit) or (TestValMax > UpperLimit) or (TestValSignedMax > UpperLimit) or
- (TestValMin < SignedUpperLimitBottom) or (TestValMax < SignedUpperLimitBottom) or (TestValSignedMax < SignedUpperLimitBottom) then
- begin
- { Absolute overflow }
- Result := False;
- Exit;
- end;
- if not ShiftDownOverflow and (TryShiftDown <> S_NO) and
- ((TestValMin > TryShiftDownLimit) or (TestValMax > TryShiftDownLimit)) then
- ShiftDownOverflow := True;
- if (TestValMin < 0) or (TestValMax < 0) then
- begin
- LowerUnsignedOverflow := True;
- UpperUnsignedOverflow := True;
- end;
- end;
- procedure AdjustFinalLoad;
- begin
- if ((TargetSize = S_L) and (taicpu(hp1).opsize in [S_L, S_BL, S_WL])) or
- ((TargetSize = S_W) and (taicpu(hp1).opsize in [S_W, S_BW])) then
- begin
- { Convert the output MOVZX to a MOV }
- if SuperRegistersEqual(taicpu(hp1).oper[1]^.reg, ThisReg) then
- begin
- { Or remove it completely! }
- DebugMsg(SPeepholeOptimization + 'Movzx2Nop 2', hp1);
- { Be careful; if p = hp1 and p was also removed, p
- will become a dangling pointer }
- if p = hp1 then
- begin
- RemoveCurrentp(p); { p = hp1 and will then become the next instruction }
- p_removed := True;
- end
- else
- RemoveInstruction(hp1);
- end
- else
- begin
- DebugMsg(SPeepholeOptimization + 'Movzx2Mov 2', hp1);
- taicpu(hp1).opcode := A_MOV;
- taicpu(hp1).oper[0]^.reg := ThisReg;
- taicpu(hp1).opsize := TargetSize;
- end;
- end
- else if (TargetSize = S_B) and (MaxSize = S_W) and (taicpu(hp1).opsize = S_WL) then
- begin
- { Need to change the size of the output }
- DebugMsg(SPeepholeOptimization + 'movzwl2movzbl 2', hp1);
- taicpu(hp1).oper[0]^.reg := ThisReg;
- taicpu(hp1).opsize := S_BL;
- end;
- end;
- function CompressInstructions: Boolean;
- var
- LocalIndex: Integer;
- begin
- Result := False;
- { The objective here is to try to find a combination that
- removes one of the MOV/Z instructions. }
- if (
- (taicpu(p).oper[0]^.typ <> top_reg) or
- not SuperRegistersEqual(taicpu(p).oper[0]^.reg, ThisReg)
- ) and
- (taicpu(hp1).oper[1]^.typ = top_reg) and
- SuperRegistersEqual(taicpu(hp1).oper[1]^.reg, ThisReg) then
- begin
- { Make a preference to remove the second MOVZX instruction }
- case taicpu(hp1).opsize of
- S_BL, S_WL:
- begin
- TargetSize := S_L;
- TargetSubReg := R_SUBD;
- end;
- S_BW:
- begin
- TargetSize := S_W;
- TargetSubReg := R_SUBW;
- end;
- else
- InternalError(2020112302);
- end;
- end
- else
- begin
- if LowerUnsignedOverflow and not UpperUnsignedOverflow then
- begin
- { Exceeded lower bound but not upper bound }
- TargetSize := MaxSize;
- end
- else if not LowerUnsignedOverflow then
- begin
- { Size didn't exceed lower bound }
- TargetSize := MinSize;
- end
- else
- Exit;
- end;
- case TargetSize of
- S_B:
- TargetSubReg := R_SUBL;
- S_W:
- TargetSubReg := R_SUBW;
- S_L:
- TargetSubReg := R_SUBD;
- else
- InternalError(2020112350);
- end;
- { Update the register to its new size }
- setsubreg(ThisReg, TargetSubReg);
- if not SuperRegistersEqual(taicpu(hp1).oper[1]^.reg, ThisReg) then
- begin
- { Check to see if the active register is used afterwards;
- if not, we can change it and make a saving. }
- RegInUse := False;
- TransferUsedRegs(TmpUsedRegs);
- { The target register may be marked as in use to cross
- a jump to a distant label, so exclude it }
- ExcludeRegFromUsedRegs(taicpu(hp1).oper[1]^.reg, TmpUsedRegs);
- hp2 := p;
- repeat
- { Explicitly check for the excluded register (don't include the first
- instruction as it may be reading from here) }
- if ((p <> hp2) and (RegInInstruction(taicpu(hp1).oper[1]^.reg, hp2))) or
- RegInUsedRegs(taicpu(hp1).oper[1]^.reg, TmpUsedRegs) then
- begin
- RegInUse := True;
- Break;
- end;
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.next));
- if not GetNextInstruction(hp2, hp2) then
- InternalError(2020112340);
- until (hp2 = hp1);
- if not RegInUse and RegUsedAfterInstruction(ThisReg, hp1, TmpUsedRegs) then
- { We might still be able to get away with this }
- RegInUse := not
- (
- GetNextInstructionUsingReg(hp1, hp2, ThisReg) and
- (hp2.typ = ait_instruction) and
- (
- { Under -O1 and -O2, GetNextInstructionUsingReg may return an
- instruction that doesn't actually contain ThisReg }
- (cs_opt_level3 in current_settings.optimizerswitches) or
- RegInInstruction(ThisReg, hp2)
- ) and
- RegLoadedWithNewValue(ThisReg, hp2)
- );
- if not RegInUse then
- begin
- { Force the register size to be the same as this instruction's so it can be removed }
- if (taicpu(hp1).opsize in [S_L, S_BL, S_WL]) then
- begin
- TargetSize := S_L;
- TargetSubReg := R_SUBD;
- end
- else if (taicpu(hp1).opsize in [S_W, S_BW]) then
- begin
- TargetSize := S_W;
- TargetSubReg := R_SUBW;
- end;
- ThisReg := taicpu(hp1).oper[1]^.reg;
- setsubreg(ThisReg, TargetSubReg);
- RegChanged := True;
- DebugMsg(SPeepholeOptimization + 'Simplified register usage so ' + debug_regname(ThisReg) + ' = ' + debug_regname(taicpu(p).oper[1]^.reg), p);
- TransferUsedRegs(TmpUsedRegs);
- AllocRegBetween(ThisReg, p, hp1, TmpUsedRegs);
- DebugMsg(SPeepholeOptimization + 'Movzx2Nop 3', hp1);
- if p = hp1 then
- begin
- RemoveCurrentp(p); { p = hp1 and will then become the next instruction }
- p_removed := True;
- end
- else
- RemoveInstruction(hp1);
- { Instruction will become "mov %reg,%reg" }
- if not p_removed and (taicpu(p).opcode = A_MOV) and
- MatchOperand(taicpu(p).oper[0]^, ThisReg) then
- begin
- DebugMsg(SPeepholeOptimization + 'Movzx2Nop 6', p);
- RemoveCurrentP(p);
- p_removed := True;
- end
- else
- taicpu(p).oper[1]^.reg := ThisReg;
- Result := True;
- end
- else
- begin
- if TargetSize <> MaxSize then
- begin
- { Since the register is in use, we have to force it to
- MaxSize otherwise part of it may become undefined later on }
- TargetSize := MaxSize;
- case TargetSize of
- S_B:
- TargetSubReg := R_SUBL;
- S_W:
- TargetSubReg := R_SUBW;
- S_L:
- TargetSubReg := R_SUBD;
- else
- InternalError(2020112351);
- end;
- setsubreg(ThisReg, TargetSubReg);
- end;
- AdjustFinalLoad;
- end;
- end
- else
- AdjustFinalLoad;
- if not p_removed then
- begin
- if TargetSize = MinSize then
- begin
- { Convert the input MOVZX to a MOV }
- if (taicpu(p).oper[0]^.typ = top_reg) and
- SuperRegistersEqual(taicpu(p).oper[0]^.reg, ThisReg) then
- begin
- { Or remove it completely! }
- DebugMsg(SPeepholeOptimization + 'Movzx2Nop 1', p);
- DebugMsg(SPeepholeOptimization + tostr(InstrMax), p);
- RemoveCurrentP(p);
- p_removed := True;
- end
- else
- begin
- DebugMsg(SPeepholeOptimization + 'Movzx2Mov 1', p);
- taicpu(p).opcode := A_MOV;
- taicpu(p).oper[1]^.reg := ThisReg;
- taicpu(p).opsize := TargetSize;
- end;
- Result := True;
- end
- else if TargetSize <> MaxSize then
- begin
- case MaxSize of
- S_L:
- if TargetSize = S_W then
- begin
- DebugMsg(SPeepholeOptimization + 'movzbl2movzbw', p);
- taicpu(p).opsize := S_BW;
- taicpu(p).oper[1]^.reg := ThisReg;
- Result := True;
- end
- else
- InternalError(2020112341);
- S_W:
- if TargetSize = S_L then
- begin
- DebugMsg(SPeepholeOptimization + 'movzbw2movzbl', p);
- taicpu(p).opsize := S_BL;
- taicpu(p).oper[1]^.reg := ThisReg;
- Result := True;
- end
- else
- InternalError(2020112342);
- else
- ;
- end;
- end;
- end;
- { Now go through every instruction we found and change the
- size. If TargetSize = MaxSize, then almost no changes are
- needed and Result can remain False if it hasn't been set
- yet.
- If RegChanged is True, then the register requires changing
- and so the point about TargetSize = MaxSize doesn't apply. }
- if ((TargetSize <> MaxSize) or RegChanged) and (InstrMax >= 0) then
- begin
- for LocalIndex := 0 to InstrMax do
- begin
- { If p_removed is true, then the original MOV/Z was removed
- and removing the AND instruction may not be safe if it
- appears first }
- if (InstrList[LocalIndex].oper[InstrList[LocalIndex].ops - 1]^.typ <> top_reg) then
- InternalError(2020112310);
- if InstrList[LocalIndex].oper[0]^.typ = top_reg then
- InstrList[LocalIndex].oper[0]^.reg := ThisReg;
- InstrList[LocalIndex].oper[InstrList[LocalIndex].ops - 1]^.reg := ThisReg;
- InstrList[LocalIndex].opsize := TargetSize;
- end;
- Result := True;
- end;
- end;
- begin
- Result := False;
- p_removed := False;
- ThisReg := taicpu(p).oper[1]^.reg;
- { Check for:
- movs/z ###,%ecx (or %cx or %rcx)
- ...
- shl/shr/sar/rcl/rcr/ror/rol %cl,###
- (dealloc %ecx)
- Change to:
- mov ###,%cl (if ### = %cl, then remove completely)
- ...
- shl/shr/sar/rcl/rcr/ror/rol %cl,###
- }
- if (getsupreg(ThisReg) = RS_ECX) and
- GetNextInstructionUsingReg(p, hp1, NR_ECX) and
- (hp1.typ = ait_instruction) and
- (
- { Under -O1 and -O2, GetNextInstructionUsingReg may return an
- instruction that doesn't actually contain ECX }
- (cs_opt_level3 in current_settings.optimizerswitches) or
- RegInInstruction(NR_ECX, hp1) or
- (
- { It's common for the shift/rotate's read/write register to be
- initialised in between, so at -O2 and below, search ahead
- one more instruction
- }
- GetNextInstruction(hp1, hp1) and
- (hp1.typ = ait_instruction) and
- RegInInstruction(NR_ECX, hp1)
- )
- ) and
- MatchInstruction(hp1, [A_SHL, A_SHR, A_SAR, A_ROR, A_ROL, A_RCR, A_RCL], []) and
- (taicpu(hp1).oper[0]^.typ = top_reg) { This is enough to determine that it's %cl } then
- begin
- TransferUsedRegs(TmpUsedRegs);
- hp2 := p;
- repeat
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.Next));
- until not GetNextInstruction(hp2, hp2) or (hp2 = hp1);
- if not RegUsedAfterInstruction(NR_CL, hp1, TmpUsedRegs) then
- begin
- case taicpu(p).opsize of
- S_BW, S_BL{$ifdef x86_64}, S_BQ{$endif x86_64}:
- if MatchOperand(taicpu(p).oper[0]^, NR_CL) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovxOp2Op 3a', p);
- RemoveCurrentP(p);
- end
- else
- begin
- taicpu(p).opcode := A_MOV;
- taicpu(p).opsize := S_B;
- taicpu(p).oper[1]^.reg := NR_CL;
- DebugMsg(SPeepholeOptimization + 'MovxOp2MovOp 1', p);
- end;
- S_WL{$ifdef x86_64}, S_WQ{$endif x86_64}:
- if MatchOperand(taicpu(p).oper[0]^, NR_CX) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovxOp2Op 3b', p);
- RemoveCurrentP(p);
- end
- else
- begin
- taicpu(p).opcode := A_MOV;
- taicpu(p).opsize := S_W;
- taicpu(p).oper[1]^.reg := NR_CX;
- DebugMsg(SPeepholeOptimization + 'MovxOp2MovOp 2', p);
- end;
- {$ifdef x86_64}
- S_LQ:
- if MatchOperand(taicpu(p).oper[0]^, NR_ECX) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovxOp2Op 3c', p);
- RemoveCurrentP(p);
- end
- else
- begin
- taicpu(p).opcode := A_MOV;
- taicpu(p).opsize := S_L;
- taicpu(p).oper[1]^.reg := NR_ECX;
- DebugMsg(SPeepholeOptimization + 'MovxOp2MovOp 3', p);
- end;
- {$endif x86_64}
- else
- InternalError(2021120401);
- end;
- Result := True;
- Exit;
- end;
- end;
- { This is anything but quick! }
- if not(cs_opt_level2 in current_settings.optimizerswitches) then
- Exit;
- SetLength(InstrList, 0);
- InstrMax := -1;
- case taicpu(p).opsize of
- S_BW, S_BL{$ifdef x86_64}, S_BQ{$endif x86_64}:
- begin
- {$if defined(i386) or defined(i8086)}
- { If the target size is 8-bit, make sure we can actually encode it }
- if not (GetSupReg(ThisReg) in [RS_EAX,RS_EBX,RS_ECX,RS_EDX]) then
- Exit;
- {$endif i386 or i8086}
- LowerLimit := $FF;
- SignedLowerLimit := $7F;
- SignedLowerLimitBottom := -128;
- MinSize := S_B;
- if taicpu(p).opsize = S_BW then
- begin
- MaxSize := S_W;
- UpperLimit := $FFFF;
- SignedUpperLimit := $7FFF;
- SignedUpperLimitBottom := -32768;
- end
- else
- begin
- { Keep at a 32-bit limit for BQ as well since one can't really optimise otherwise }
- MaxSize := S_L;
- UpperLimit := $FFFFFFFF;
- SignedUpperLimit := $7FFFFFFF;
- SignedUpperLimitBottom := -2147483648;
- end;
- end;
- S_WL{$ifdef x86_64}, S_WQ{$endif x86_64}:
- begin
- { Keep at a 32-bit limit for WQ as well since one can't really optimise otherwise }
- LowerLimit := $FFFF;
- SignedLowerLimit := $7FFF;
- SignedLowerLimitBottom := -32768;
- UpperLimit := $FFFFFFFF;
- SignedUpperLimit := $7FFFFFFF;
- SignedUpperLimitBottom := -2147483648;
- MinSize := S_W;
- MaxSize := S_L;
- end;
- {$ifdef x86_64}
- S_LQ:
- begin
- { Both the lower and upper limits are set to 32-bit. If a limit
- is breached, then optimisation is impossible }
- LowerLimit := $FFFFFFFF;
- SignedLowerLimit := $7FFFFFFF;
- SignedLowerLimitBottom := -2147483648;
- UpperLimit := $FFFFFFFF;
- SignedUpperLimit := $7FFFFFFF;
- SignedUpperLimitBottom := -2147483648;
- MinSize := S_L;
- MaxSize := S_L;
- end;
- {$endif x86_64}
- else
- InternalError(2020112301);
- end;
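- { Simple data flow analysis: TestValMin/TestValMax/TestValSignedMax start as
- the range of values the extended register can initially hold and are then
- pushed through each instruction found below; CheckOverflowConditions notes
- whenever they leave the lower or upper limits, and that information later
- decides whether the extension (and the larger operand size) can be dropped. }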
- TestValMin := 0;
- TestValMax := LowerLimit;
- TestValSignedMax := SignedLowerLimit;
- TryShiftDownLimit := LowerLimit;
- TryShiftDown := S_NO;
- ShiftDownOverflow := False;
- RegChanged := False;
- BitwiseOnly := True;
- OrXorUsed := False;
- UpperSignedOverflow := False;
- LowerSignedOverflow := False;
- UpperUnsignedOverflow := False;
- LowerUnsignedOverflow := False;
- hp1 := p;
- while GetNextInstructionUsingReg(hp1, hp1, ThisReg) and
- (hp1.typ = ait_instruction) and
- (
- { Under -O1 and -O2, GetNextInstructionUsingReg may return an
- instruction that doesn't actually contain ThisReg }
- (cs_opt_level3 in current_settings.optimizerswitches) or
- { This allows this Movx optimisation to work through the SETcc instructions
- inserted by the 'CMP/JE/CMP/@Lbl/SETE -> CMP/SETE/CMP/SETE/OR'
- optimisation on -O1 and -O2 (on -O3, GetNextInstructionUsingReg will
- skip over these SETcc instructions). }
- (taicpu(hp1).opcode = A_SETcc) or
- RegInInstruction(ThisReg, hp1)
- ) do
- begin
- case taicpu(hp1).opcode of
- A_INC,A_DEC:
- begin
- { Has to be an exact match on the register }
- if not MatchOperand(taicpu(hp1).oper[0]^, ThisReg) then
- Break;
- if taicpu(hp1).opcode = A_INC then
- begin
- Inc(TestValMin);
- Inc(TestValMax);
- Inc(TestValSignedMax);
- end
- else
- begin
- Dec(TestValMin);
- Dec(TestValMax);
- Dec(TestValSignedMax);
- end;
- end;
- A_TEST, A_CMP:
- begin
- if (
- { Too high a risk of non-linear behaviour that breaks DFA
- here, unless it's cmp $0,%reg, which is equivalent to
- test %reg,%reg }
- OrXorUsed and
- (taicpu(hp1).opcode = A_CMP) and
- not Matchoperand(taicpu(hp1).oper[0]^, 0)
- ) or
- (taicpu(hp1).oper[1]^.typ <> top_reg) or
- { Has to be an exact match on the register }
- (taicpu(hp1).oper[1]^.reg <> ThisReg) or
- (
- { Permit "test %reg,%reg" }
- (taicpu(hp1).opcode = A_TEST) and
- (taicpu(hp1).oper[0]^.typ = top_reg) and
- (taicpu(hp1).oper[0]^.reg <> ThisReg)
- ) or
- (taicpu(hp1).oper[0]^.typ <> top_const) or
- { Make sure the comparison value is not smaller than the
- smallest allowed signed value for the minimum size (e.g.
- -128 for 8-bit) }
- not (
- ((taicpu(hp1).oper[0]^.val and LowerLimit) = taicpu(hp1).oper[0]^.val) or
- { Is it in the negative range? }
- (
- (taicpu(hp1).oper[0]^.val < 0) and
- (taicpu(hp1).oper[0]^.val >= SignedLowerLimitBottom)
- )
- ) then
- Break;
- { Check to see if the active register is used afterwards }
- TransferUsedRegs(TmpUsedRegs);
- IncludeRegInUsedRegs(ThisReg, TmpUsedRegs);
- if not RegUsedAfterInstruction(ThisReg, hp1, TmpUsedRegs) then
- begin
- { Make sure the comparison or any previous instructions
- haven't pushed the test values outside of the range of
- MinSize }
- if LowerUnsignedOverflow and not UpperUnsignedOverflow then
- begin
- { Exceeded lower bound but not upper bound }
- TargetSize := MaxSize;
- end
- else if not LowerSignedOverflow or not LowerUnsignedOverflow then
- begin
- { Size didn't exceed lower bound }
- TargetSize := MinSize;
- end
- else
- Break;
- case TargetSize of
- S_B:
- TargetSubReg := R_SUBL;
- S_W:
- TargetSubReg := R_SUBW;
- S_L:
- TargetSubReg := R_SUBD;
- else
- InternalError(2021051002);
- end;
- { Update the register to its new size }
- setsubreg(ThisReg, TargetSubReg);
- taicpu(hp1).oper[1]^.reg := ThisReg;
- taicpu(hp1).opsize := MinSize;
- { Convert the input MOVZX to a MOV }
- if (taicpu(p).oper[0]^.typ = top_reg) and
- SuperRegistersEqual(taicpu(p).oper[0]^.reg, ThisReg) then
- begin
- { Or remove it completely! }
- DebugMsg(SPeepholeOptimization + 'Movzx2Nop 1a', p);
- RemoveCurrentP(p);
- p_removed := True;
- end
- else
- begin
- DebugMsg(SPeepholeOptimization + 'Movzx2Mov 1a', p);
- taicpu(p).opcode := A_MOV;
- taicpu(p).oper[1]^.reg := ThisReg;
- taicpu(p).opsize := MinSize;
- end;
- if (InstrMax >= 0) then
- begin
- for Index := 0 to InstrMax do
- begin
- { If p_removed is true, then the original MOV/Z was removed
- and removing the AND instruction may not be safe if it
- appears first }
- if (InstrList[Index].oper[InstrList[Index].ops - 1]^.typ <> top_reg) then
- InternalError(2020112311);
- if InstrList[Index].oper[0]^.typ = top_reg then
- InstrList[Index].oper[0]^.reg := ThisReg;
- InstrList[Index].oper[InstrList[Index].ops - 1]^.reg := ThisReg;
- InstrList[Index].opsize := MinSize;
- end;
- end;
- Result := True;
- Exit;
- end;
- end;
- A_SETcc:
- begin
- { This allows this Movx optimisation to work through the SETcc instructions
- inserted by the 'CMP/JE/CMP/@Lbl/SETE -> CMP/SETE/CMP/SETE/OR'
- optimisation on -O1 and -O2 (on -O3, GetNextInstructionUsingReg will
- skip over these SETcc instructions). }
- if (cs_opt_level3 in current_settings.optimizerswitches) or
- { Of course, break out if the current register is used }
- RegInOp(ThisReg, taicpu(hp1).oper[0]^) then
- Break
- else
- { We must use Continue so the instruction doesn't get added
- to InstrList }
- Continue;
- end;
- A_ADD,A_SUB,A_AND,A_OR,A_XOR,A_SHL,A_SHR,A_SAR:
- begin
- if
- (taicpu(hp1).oper[1]^.typ <> top_reg) or
- { Has to be an exact match on the register }
- (taicpu(hp1).oper[1]^.reg <> ThisReg) or not
- (
- (
- (taicpu(hp1).oper[0]^.typ = top_const) and
- (
- (
- (taicpu(hp1).opcode = A_SHL) and
- (
- ((MinSize = S_B) and (taicpu(hp1).oper[0]^.val < 8)) or
- ((MinSize = S_W) and (taicpu(hp1).oper[0]^.val < 16)) or
- ((MinSize = S_L) and (taicpu(hp1).oper[0]^.val < 32))
- )
- ) or (
- (taicpu(hp1).opcode <> A_SHL) and
- (
- ((taicpu(hp1).oper[0]^.val and UpperLimit) = taicpu(hp1).oper[0]^.val) or
- { Is it in the negative range? }
- (((not taicpu(hp1).oper[0]^.val) and (UpperLimit shr 1)) = (not taicpu(hp1).oper[0]^.val))
- )
- )
- )
- ) or (
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(hp1).oper[1]^.reg) and
- ((taicpu(hp1).opcode = A_ADD) or (taicpu(hp1).opcode = A_AND) or (taicpu(hp1).opcode = A_SUB))
- )
- ) then
- Break;
- { Only process OR and XOR if there are only bitwise operations,
- since otherwise they can too easily fool the data flow
- analysis (they can cause non-linear behaviour) }
- case taicpu(hp1).opcode of
- A_ADD:
- begin
- if OrXorUsed then
- { Too high a risk of non-linear behaviour that breaks DFA here }
- Break
- else
- BitwiseOnly := False;
- if (taicpu(hp1).oper[0]^.typ = top_reg) then
- begin
- TestValMin := TestValMin * 2;
- TestValMax := TestValMax * 2;
- TestValSignedMax := TestValSignedMax * 2;
- end
- else
- begin
- WorkingValue := taicpu(hp1).oper[0]^.val;
- TestValMin := TestValMin + WorkingValue;
- TestValMax := TestValMax + WorkingValue;
- TestValSignedMax := TestValSignedMax + WorkingValue;
- end;
- end;
- A_SUB:
- begin
- if (taicpu(hp1).oper[0]^.typ = top_reg) then
- begin
- TestValMin := 0;
- TestValMax := 0;
- TestValSignedMax := 0;
- end
- else
- begin
- if OrXorUsed then
- { Too high a risk of non-linear behaviour that breaks DFA here }
- Break
- else
- BitwiseOnly := False;
- WorkingValue := taicpu(hp1).oper[0]^.val;
- TestValMin := TestValMin - WorkingValue;
- TestValMax := TestValMax - WorkingValue;
- TestValSignedMax := TestValSignedMax - WorkingValue;
- end;
- end;
- A_AND:
- if (taicpu(hp1).oper[0]^.typ = top_const) then
- begin
- { we might be able to go smaller if AND appears first }
- if InstrMax = -1 then
- case MinSize of
- S_B:
- ;
- S_W:
- if ((taicpu(hp1).oper[0]^.val and $FF) = taicpu(hp1).oper[0]^.val) or
- ((not(taicpu(hp1).oper[0]^.val) and $7F) = (not taicpu(hp1).oper[0]^.val)) then
- begin
- TryShiftDown := S_B;
- TryShiftDownLimit := $FF;
- end;
- S_L:
- if ((taicpu(hp1).oper[0]^.val and $FF) = taicpu(hp1).oper[0]^.val) or
- ((not(taicpu(hp1).oper[0]^.val) and $7F) = (not taicpu(hp1).oper[0]^.val)) then
- begin
- TryShiftDown := S_B;
- TryShiftDownLimit := $FF;
- end
- else if ((taicpu(hp1).oper[0]^.val and $FFFF) = taicpu(hp1).oper[0]^.val) or
- ((not(taicpu(hp1).oper[0]^.val) and $7FFF) = (not taicpu(hp1).oper[0]^.val)) then
- begin
- TryShiftDown := S_W;
- TryShiftDownLimit := $FFFF;
- end;
- else
- InternalError(2020112320);
- end;
- WorkingValue := taicpu(hp1).oper[0]^.val;
- TestValMin := TestValMin and WorkingValue;
- TestValMax := TestValMax and WorkingValue;
- TestValSignedMax := TestValSignedMax and WorkingValue;
- end;
- A_OR:
- begin
- if not BitwiseOnly then
- Break;
- OrXorUsed := True;
- WorkingValue := taicpu(hp1).oper[0]^.val;
- TestValMin := TestValMin or WorkingValue;
- TestValMax := TestValMax or WorkingValue;
- TestValSignedMax := TestValSignedMax or WorkingValue;
- end;
- A_XOR:
- begin
- if (taicpu(hp1).oper[0]^.typ = top_reg) then
- begin
- TestValMin := 0;
- TestValMax := 0;
- TestValSignedMax := 0;
- end
- else
- begin
- if not BitwiseOnly then
- Break;
- OrXorUsed := True;
- WorkingValue := taicpu(hp1).oper[0]^.val;
- TestValMin := TestValMin xor WorkingValue;
- TestValMax := TestValMax xor WorkingValue;
- TestValSignedMax := TestValSignedMax xor WorkingValue;
- end;
- end;
- A_SHL:
- begin
- BitwiseOnly := False;
- WorkingValue := taicpu(hp1).oper[0]^.val;
- TestValMin := TestValMin shl WorkingValue;
- TestValMax := TestValMax shl WorkingValue;
- TestValSignedMax := TestValSignedMax shl WorkingValue;
- end;
- A_SHR,
- { The first instruction was MOVZX, so the value won't be negative }
- A_SAR:
- begin
- if InstrMax <> -1 then
- BitwiseOnly := False
- else
- { we might be able to go smaller if SHR appears first }
- case MinSize of
- S_B:
- ;
- S_W:
- if (taicpu(hp1).oper[0]^.val >= 8) then
- begin
- TryShiftDown := S_B;
- TryShiftDownLimit := $FF;
- TryShiftDownSignedLimit := $7F;
- TryShiftDownSignedLimitLower := -128;
- end;
- S_L:
- if (taicpu(hp1).oper[0]^.val >= 24) then
- begin
- TryShiftDown := S_B;
- TryShiftDownLimit := $FF;
- TryShiftDownSignedLimit := $7F;
- TryShiftDownSignedLimitLower := -128;
- end
- else if (taicpu(hp1).oper[0]^.val >= 16) then
- begin
- TryShiftDown := S_W;
- TryShiftDownLimit := $FFFF;
- TryShiftDownSignedLimit := $7FFF;
- TryShiftDownSignedLimitLower := -32768;
- end;
- else
- InternalError(2020112321);
- end;
- WorkingValue := taicpu(hp1).oper[0]^.val;
- if taicpu(hp1).opcode = A_SAR then
- begin
- TestValMin := SarInt64(TestValMin, WorkingValue);
- TestValMax := SarInt64(TestValMax, WorkingValue);
- TestValSignedMax := SarInt64(TestValSignedMax, WorkingValue);
- end
- else
- begin
- TestValMin := TestValMin shr WorkingValue;
- TestValMax := TestValMax shr WorkingValue;
- TestValSignedMax := TestValSignedMax shr WorkingValue;
- end;
- end;
- else
- InternalError(2020112303);
- end;
- end;
- (*
- A_IMUL:
- case taicpu(hp1).ops of
- 2:
- begin
- if not MatchOpType(hp1, top_reg, top_reg) or
- { Has to be an exact match on the register }
- (taicpu(hp1).oper[0]^.reg <> ThisReg) or
- (taicpu(hp1).oper[1]^.reg <> ThisReg) then
- Break;
- TestValMin := TestValMin * TestValMin;
- TestValMax := TestValMax * TestValMax;
- TestValSignedMax := TestValSignedMax * TestValMax;
- end;
- 3:
- begin
- if not MatchOpType(hp1, top_const, top_reg, top_reg) or
- { Has to be an exact match on the register }
- (taicpu(hp1).oper[1]^.reg <> ThisReg) or
- (taicpu(hp1).oper[2]^.reg <> ThisReg) or
- ((taicpu(hp1).oper[0]^.val and UpperLimit) = taicpu(hp1).oper[0]^.val) or
- { Is it in the negative range? }
- (((not taicpu(hp1).oper[0]^.val) and (UpperLimit shr 1)) = (not taicpu(hp1).oper[0]^.val)) then
- Break;
- TestValMin := TestValMin * taicpu(hp1).oper[0]^.val;
- TestValMax := TestValMax * taicpu(hp1).oper[0]^.val;
- TestValSignedMax := TestValSignedMax * taicpu(hp1).oper[0]^.val;
- end;
- else
- Break;
- end;
- A_IDIV:
- case taicpu(hp1).ops of
- 3:
- begin
- if not MatchOpType(hp1, top_const, top_reg, top_reg) or
- { Has to be an exact match on the register }
- (taicpu(hp1).oper[1]^.reg <> ThisReg) or
- (taicpu(hp1).oper[2]^.reg <> ThisReg) or
- ((taicpu(hp1).oper[0]^.val and UpperLimit) = taicpu(hp1).oper[0]^.val) or
- { Is it in the negative range? }
- (((not taicpu(hp1).oper[0]^.val) and (UpperLimit shr 1)) = (not taicpu(hp1).oper[0]^.val)) then
- Break;
- TestValMin := TestValMin div taicpu(hp1).oper[0]^.val;
- TestValMax := TestValMax div taicpu(hp1).oper[0]^.val;
- TestValSignedMax := TestValSignedMax div taicpu(hp1).oper[0]^.val;
- end;
- else
- Break;
- end;
- *)
- A_MOVSX{$ifdef x86_64}, A_MOVSXD{$endif x86_64}:
- begin
- { If there are no instructions in between, then we might be able to make a saving }
- if UpperSignedOverflow or (taicpu(hp1).oper[0]^.typ <> top_reg) or (taicpu(hp1).oper[0]^.reg <> ThisReg) then
- Break;
- { We have something like:
- movzbw %dl,%dx
- ...
- movswl %dx,%edx
- Change the latter to a zero-extension then enter the
- A_MOVZX case branch.
- }
- {$ifdef x86_64}
- if (taicpu(hp1).opsize = S_LQ) and SuperRegistersEqual(taicpu(hp1).oper[1]^.reg, ThisReg) then
- begin
- { this becomes a zero extension from 32-bit to 64-bit, but
- the upper 32 bits are already zero, so just delete the
- instruction }
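- { e.g. "movzbl %cl,%edx ... movslq %edx,%rdx": the movslq can simply be dropped }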
- DebugMsg(SPeepholeOptimization + 'MovzMovsxd2MovzNop', hp1);
- RemoveInstruction(hp1);
- Result := True;
- Exit;
- end
- else
- {$endif x86_64}
- begin
- DebugMsg(SPeepholeOptimization + 'MovzMovs2MovzMovz', hp1);
- taicpu(hp1).opcode := A_MOVZX;
- {$ifdef x86_64}
- case taicpu(hp1).opsize of
- S_BQ:
- begin
- taicpu(hp1).opsize := S_BL;
- setsubreg(taicpu(hp1).oper[1]^.reg, R_SUBD);
- end;
- S_WQ:
- begin
- taicpu(hp1).opsize := S_WL;
- setsubreg(taicpu(hp1).oper[1]^.reg, R_SUBD);
- end;
- S_LQ:
- begin
- taicpu(hp1).opcode := A_MOV;
- taicpu(hp1).opsize := S_L;
- setsubreg(taicpu(hp1).oper[1]^.reg, R_SUBD);
- { In this instance, we need to break out because the
- instruction is no longer MOVZX or MOVSXD }
- Result := True;
- Exit;
- end;
- else
- ;
- end;
- {$endif x86_64}
- Result := CompressInstructions;
- Exit;
- end;
- end;
- A_MOVZX:
- begin
- if UpperUnsignedOverflow or (taicpu(hp1).oper[0]^.typ <> top_reg) then
- Break;
- if not SuperRegistersEqual(taicpu(hp1).oper[0]^.reg, ThisReg) then
- begin
- if (InstrMax = -1) and
- { Will return false if the second parameter isn't ThisReg
- (can happen on -O2 and under) }
- Reg1WriteOverwritesReg2Entirely(taicpu(hp1).oper[1]^.reg, ThisReg) then
- begin
- { The two MOVZX instructions are adjacent, so remove the first one }
- DebugMsg(SPeepholeOptimization + 'Movzx2Nop 5', p);
- RemoveCurrentP(p);
- Result := True;
- Exit;
- end;
- Break;
- end;
- Result := CompressInstructions;
- Exit;
- end;
- else
- { This includes ADC, SBB and IDIV }
- Break;
- end;
- if not CheckOverflowConditions then
- Break;
- { Contains highest index (so instruction count - 1) }
- Inc(InstrMax);
- if InstrMax > High(InstrList) then
- SetLength(InstrList, InstrMax + LIST_STEP_SIZE);
- InstrList[InstrMax] := taicpu(hp1);
- end;
- end;
- {$pop}
- function TX86AsmOptimizer.OptPass2Imul(var p : tai) : boolean;
- var
- hp1 : tai;
- begin
- Result:=false;
- if (taicpu(p).ops >= 2) and
- ((taicpu(p).oper[0]^.typ = top_const) or
- ((taicpu(p).oper[0]^.typ = top_ref) and (taicpu(p).oper[0]^.ref^.refaddr=addr_full))) and
- (taicpu(p).oper[1]^.typ = top_reg) and
- ((taicpu(p).ops = 2) or
- ((taicpu(p).oper[2]^.typ = top_reg) and
- (taicpu(p).oper[2]^.reg = taicpu(p).oper[1]^.reg))) and
- GetLastInstruction(p,hp1) and
- MatchInstruction(hp1,A_MOV,[]) and
- MatchOpType(taicpu(hp1),top_reg,top_reg) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- if not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,p,TmpUsedRegs)) or
- ((taicpu(p).ops = 3) and (taicpu(p).oper[1]^.reg=taicpu(p).oper[2]^.reg)) then
- { change
- mov reg1,reg2
- imul y,reg2 to imul y,reg1,reg2 }
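- { e.g. "movl %esi,%ecx ; imull $5,%ecx" becomes "imull $5,%esi,%ecx" }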
- begin
- taicpu(p).ops := 3;
- taicpu(p).loadreg(2,taicpu(p).oper[1]^.reg);
- taicpu(p).loadreg(1,taicpu(hp1).oper[0]^.reg);
- DebugMsg(SPeepholeOptimization + 'MovImul2Imul done',p);
- RemoveInstruction(hp1);
- result:=true;
- end;
- end;
- end;
- procedure TX86AsmOptimizer.ConvertJumpToRET(const p: tai; const ret_p: tai);
- var
- ThisLabel: TAsmLabel;
- begin
- ThisLabel := tasmlabel(taicpu(p).oper[0]^.ref^.symbol);
- ThisLabel.decrefs;
- taicpu(p).opcode := A_RET;
- taicpu(p).is_jmp := false;
- taicpu(p).ops := taicpu(ret_p).ops;
- case taicpu(ret_p).ops of
- 0:
- taicpu(p).clearop(0);
- 1:
- taicpu(p).loadconst(0,taicpu(ret_p).oper[0]^.val);
- else
- internalerror(2016041301);
- end;
- { If the original label is now dead, it might turn out that the label
- immediately follows p. As a result, everything beyond it, which will
- be just some final register configuration and a RET instruction, is
- now dead code. [Kit] }
- { NOTE: This is much faster than introducing a OptPass2RET routine and
- running RemoveDeadCodeAfterJump for each RET instruction, because
- this optimisation rarely happens and most RETs appear at the end of
- routines where there is nothing that can be stripped. [Kit] }
- if not ThisLabel.is_used then
- RemoveDeadCodeAfterJump(p);
- end;
- function TX86AsmOptimizer.OptPass2SETcc(var p: tai): boolean;
- var
- hp1,hp2,next: tai; SetC, JumpC: TAsmCond;
- Unconditional, PotentialModified: Boolean;
- OperPtr: POper;
- NewRef: TReference;
- InstrList: array of taicpu;
- InstrMax, Index: Integer;
- const
- {$ifdef DEBUG_AOPTCPU}
- SNoFlags: shortstring = ' so the flags aren''t modified';
- {$else DEBUG_AOPTCPU}
- SNoFlags = '';
- {$endif DEBUG_AOPTCPU}
- begin
- Result:=false;
- if MatchOpType(taicpu(p),top_reg) and GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) then
- begin
- if MatchInstruction(hp1, A_TEST, [S_B]) and
- MatchOpType(taicpu(hp1),top_reg,top_reg) and
- (taicpu(hp1).oper[0]^.reg = taicpu(hp1).oper[1]^.reg) and
- (taicpu(p).oper[0]^.reg = taicpu(hp1).oper[1]^.reg) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_Jcc, A_SETcc, []) then
- { Change from: To:
- set(C) %reg j(~C) label
- test %reg,%reg/cmp $0,%reg
- je label
- set(C) %reg j(C) label
- test %reg,%reg/cmp $0,%reg
- jne label
- (Also do something similar with sete/setne instead of je/jne)
- }
- begin
- { Before we do anything else, we need to check the instructions
- in between SETcc and TEST to make sure they don't modify the
- FLAGS register - if -O2 or under, there won't be any
- instructions between SET and TEST }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if (cs_opt_level3 in current_settings.optimizerswitches) then
- begin
- next := p;
- SetLength(InstrList, 0);
- InstrMax := -1;
- PotentialModified := False;
- { Make a note of every instruction that modifies the FLAGS
- register }
- while GetNextInstruction(next, next) and (next <> hp1) do
- begin
- if next.typ <> ait_instruction then
- { GetNextInstructionUsingReg should have returned False }
- InternalError(2021051701);
- if RegModifiedByInstruction(NR_DEFAULTFLAGS, next) then
- begin
- case taicpu(next).opcode of
- A_SETcc,
- A_CMOVcc,
- A_Jcc:
- begin
- if PotentialModified then
- { Not safe because the flags were modified earlier }
- Exit
- else
- { Condition is the same as the initial SETcc, so this is safe
- (don't add to instruction list though) }
- Continue;
- end;
- A_ADD:
- begin
- if (taicpu(next).opsize = S_B) or
- { LEA doesn't support 8-bit operands }
- (taicpu(next).oper[1]^.typ <> top_reg) or
- { Must write to a register }
- (taicpu(next).oper[0]^.typ = top_ref) then
- { Require a constant or a register }
- Exit;
- PotentialModified := True;
- end;
- A_SUB:
- begin
- if (taicpu(next).opsize = S_B) or
- { LEA doesn't support 8-bit operands }
- (taicpu(next).oper[1]^.typ <> top_reg) or
- { Must write to a register }
- (taicpu(next).oper[0]^.typ <> top_const) or
- (taicpu(next).oper[0]^.val = $80000000) then
- { Can't subtract a register with LEA - also
- check that the value isn't -2^31, as this
- can't be negated }
- Exit;
- PotentialModified := True;
- end;
- A_SAL,
- A_SHL:
- begin
- if (taicpu(next).opsize = S_B) or
- { LEA doesn't support 8-bit operands }
- (taicpu(next).oper[1]^.typ <> top_reg) or
- { Must write to a register }
- (taicpu(next).oper[0]^.typ <> top_const) or
- (taicpu(next).oper[0]^.val < 0) or
- (taicpu(next).oper[0]^.val > 3) then
- Exit;
- PotentialModified := True;
- end;
- A_IMUL:
- begin
- if (taicpu(next).ops <> 3) or
- (taicpu(next).oper[1]^.typ <> top_reg) or
- { Must write to a register }
- not (taicpu(next).oper[0]^.val in [2,3,4,5,8,9]) then
- { We can convert "imul x,%reg1,%reg2" (where x = 2, 4 or 8)
- to "lea (%reg1,x),%reg2". If x = 3, 5 or 9, we can
- change this to "lea (%reg1,%reg1,(x-1)),%reg2" }
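- { e.g. "imull $3,%eax,%edx" becomes "leal (%eax,%eax,2),%edx" }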
- Exit
- else
- PotentialModified := True;
- end;
- else
- { Don't know how to change this, so abort }
- Exit;
- end;
- { Contains highest index (so instruction count - 1) }
- Inc(InstrMax);
- if InstrMax > High(InstrList) then
- SetLength(InstrList, InstrMax + LIST_STEP_SIZE);
- InstrList[InstrMax] := taicpu(next);
- end;
- UpdateUsedRegs(TmpUsedRegs, tai(next.next));
- end;
- if not Assigned(next) or (next <> hp1) then
- { It should be equal to hp1 }
- InternalError(2021051702);
- { Cycle through each instruction and check to see if we can
- change them to versions that don't modify the flags }
- if (InstrMax >= 0) then
- begin
- for Index := 0 to InstrMax do
- case InstrList[Index].opcode of
- A_ADD:
- begin
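- { e.g. "addl $8,%ecx" becomes "leal 8(%ecx),%ecx", and
- "addl %edx,%ecx" becomes "leal (%ecx,%edx),%ecx" }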
- DebugMsg(SPeepholeOptimization + 'ADD -> LEA' + SNoFlags, InstrList[Index]);
- InstrList[Index].opcode := A_LEA;
- reference_reset(NewRef, 1, []);
- NewRef.base := InstrList[Index].oper[1]^.reg;
- if InstrList[Index].oper[0]^.typ = top_reg then
- begin
- NewRef.index := InstrList[Index].oper[0]^.reg;
- NewRef.scalefactor := 1;
- end
- else
- NewRef.offset := InstrList[Index].oper[0]^.val;
- InstrList[Index].loadref(0, NewRef);
- end;
- A_SUB:
- begin
- DebugMsg(SPeepholeOptimization + 'SUB -> LEA' + SNoFlags, InstrList[Index]);
- InstrList[Index].opcode := A_LEA;
- reference_reset(NewRef, 1, []);
- NewRef.base := InstrList[Index].oper[1]^.reg;
- NewRef.offset := -InstrList[Index].oper[0]^.val;
- InstrList[Index].loadref(0, NewRef);
- end;
- A_SHL,
- A_SAL:
- begin
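- { e.g. "shll $2,%eax" becomes "leal (,%eax,4),%eax" }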
- DebugMsg(SPeepholeOptimization + 'SHL -> LEA' + SNoFlags, InstrList[Index]);
- InstrList[Index].opcode := A_LEA;
- reference_reset(NewRef, 1, []);
- NewRef.index := InstrList[Index].oper[1]^.reg;
- NewRef.scalefactor := 1 shl (InstrList[Index].oper[0]^.val);
- InstrList[Index].loadref(0, NewRef);
- end;
- A_IMUL:
- begin
- DebugMsg(SPeepholeOptimization + 'IMUL -> LEA' + SNoFlags, InstrList[Index]);
- InstrList[Index].opcode := A_LEA;
- reference_reset(NewRef, 1, []);
- NewRef.index := InstrList[Index].oper[1]^.reg;
- case InstrList[Index].oper[0]^.val of
- 2, 4, 8:
- NewRef.scalefactor := InstrList[Index].oper[0]^.val;
- else {3, 5 and 9}
- begin
- NewRef.scalefactor := InstrList[Index].oper[0]^.val - 1;
- NewRef.base := InstrList[Index].oper[1]^.reg;
- end;
- end;
- InstrList[Index].loadref(0, NewRef);
- end;
- else
- InternalError(2021051710);
- end;
- end;
- { Mark the FLAGS register as used across this whole block }
- AllocRegBetween(NR_DEFAULTFLAGS, p, hp1, UsedRegs);
- end;
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
- JumpC := taicpu(hp2).condition;
- Unconditional := False;
- if conditions_equal(JumpC, C_E) then
- SetC := inverse_cond(taicpu(p).condition)
- else if conditions_equal(JumpC, C_NE) then
- SetC := taicpu(p).condition
- else
- { We've got something weird here (and inefficient) }
- begin
- DebugMsg('DEBUG: Inefficient jump - check code generation', p);
- SetC := C_NONE;
- { JAE/JNB will always branch (use 'condition_in', since C_AE <> C_NB normally) }
- if condition_in(C_AE, JumpC) then
- Unconditional := True
- else
- { Not sure what to do with this jump - drop out }
- Exit;
- end;
- RemoveInstruction(hp1);
- if Unconditional then
- MakeUnconditional(taicpu(hp2))
- else
- begin
- if SetC = C_NONE then
- InternalError(2018061402);
- taicpu(hp2).SetCondition(SetC);
- end;
- { as hp2 is a jump, we cannot use RegUsedAfterInstruction but we have to check if it is included in
- TmpUsedRegs }
- if not TmpUsedRegs[getregtype(taicpu(p).oper[0]^.reg)].IsUsed(taicpu(p).oper[0]^.reg) then
- begin
- RemoveCurrentp(p, hp2);
- if taicpu(hp2).opcode = A_SETcc then
- DebugMsg(SPeepholeOptimization + 'SETcc/TEST/SETcc -> SETcc',p)
- else
- DebugMsg(SPeepholeOptimization + 'SETcc/TEST/Jcc -> Jcc',p);
- end
- else
- if taicpu(hp2).opcode = A_SETcc then
- DebugMsg(SPeepholeOptimization + 'SETcc/TEST/SETcc -> SETcc/SETcc',p)
- else
- DebugMsg(SPeepholeOptimization + 'SETcc/TEST/Jcc -> SETcc/Jcc',p);
- Result := True;
- end
- else if
- { Make sure the instructions are adjacent }
- (
- not (cs_opt_level3 in current_settings.optimizerswitches) or
- GetNextInstruction(p, hp1)
- ) and
- MatchInstruction(hp1, A_MOV, [S_B]) and
- { Writing to memory is allowed }
- MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^.reg) then
- begin
- {
- Watch out for sequences such as:
- set(c)b %regb
- movb %regb,(ref)
- movb $0,1(ref)
- movb $0,2(ref)
- movb $0,3(ref)
- Much more efficient to turn it into:
- movl $0,%regl
- set(c)b %regb
- movl %regl,(ref)
- Or:
- set(c)b %regb
- movzbl %regb,%regl
- movl %regl,(ref)
- }
- if (taicpu(hp1).oper[1]^.typ = top_ref) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, A_MOV, [S_B]) and
- (taicpu(hp2).oper[1]^.typ = top_ref) and
- CheckMemoryWrite(taicpu(hp1), taicpu(hp2)) then
- begin
- { Don't do anything else except set Result to True }
- end
- else
- begin
- if taicpu(p).oper[0]^.typ = top_reg then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- end;
- { If it's not a register, it's a memory address }
- if (taicpu(p).oper[0]^.typ <> top_reg) or RegUsedAfterInstruction(taicpu(p).oper[0]^.reg, hp1, TmpUsedRegs) then
- begin
- { Even if the register is still in use, we can minimise the
- pipeline stall by changing the MOV into another SETcc. }
- taicpu(hp1).opcode := A_SETcc;
- taicpu(hp1).condition := taicpu(p).condition;
- if taicpu(hp1).oper[1]^.typ = top_ref then
- begin
- { Swapping the operand pointers like this is probably a
- bit naughty, but it is far faster than using loadoper
- to transfer the reference from oper[1] to oper[0] if
- you take into account the extra procedure calls and
- the memory allocation and deallocation required }
- OperPtr := taicpu(hp1).oper[1];
- taicpu(hp1).oper[1] := taicpu(hp1).oper[0];
- taicpu(hp1).oper[0] := OperPtr;
- end
- else
- taicpu(hp1).oper[0]^.reg := taicpu(hp1).oper[1]^.reg;
- taicpu(hp1).clearop(1);
- taicpu(hp1).ops := 1;
- DebugMsg(SPeepholeOptimization + 'SETcc/Mov -> SETcc/SETcc',p);
- end
- else
- begin
- if taicpu(hp1).oper[1]^.typ = top_reg then
- AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,UsedRegs);
- taicpu(p).loadoper(0, taicpu(hp1).oper[1]^);
- RemoveInstruction(hp1);
- DebugMsg(SPeepholeOptimization + 'SETcc/Mov -> SETcc',p);
- end
- end;
- Result := True;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass2Jmp(var p : tai) : boolean;
- var
- hp1: tai;
- Count: Integer;
- OrigLabel: TAsmLabel;
- begin
- result := False;
- { Sometimes, the optimisations below can permit this }
- RemoveDeadCodeAfterJump(p);
- if (taicpu(p).oper[0]^.typ=top_ref) and (taicpu(p).oper[0]^.ref^.refaddr=addr_full) and (taicpu(p).oper[0]^.ref^.base=NR_NO) and
- (taicpu(p).oper[0]^.ref^.index=NR_NO) and (taicpu(p).oper[0]^.ref^.symbol is tasmlabel) then
- begin
- OrigLabel := TAsmLabel(taicpu(p).oper[0]^.ref^.symbol);
- { Also a side-effect of optimisations }
- if CollapseZeroDistJump(p, OrigLabel) then
- begin
- Result := True;
- Exit;
- end;
- hp1 := GetLabelWithSym(OrigLabel);
- if (taicpu(p).condition=C_None) and assigned(hp1) and SkipLabels(hp1,hp1) and (hp1.typ = ait_instruction) then
- begin
- case taicpu(hp1).opcode of
- A_RET:
- {
- change
- jmp .L1
- ...
- .L1:
- ret
- into
- ret
- }
- begin
- ConvertJumpToRET(p, hp1);
- result:=true;
- end;
- { Check any kind of direct assignment instruction }
- A_MOV,
- A_MOVD,
- A_MOVQ,
- A_MOVSX,
- {$ifdef x86_64}
- A_MOVSXD,
- {$endif x86_64}
- A_MOVZX,
- A_MOVAPS,
- A_MOVUPS,
- A_MOVSD,
- A_MOVAPD,
- A_MOVUPD,
- A_MOVDQA,
- A_MOVDQU,
- A_VMOVSS,
- A_VMOVAPS,
- A_VMOVUPS,
- A_VMOVSD,
- A_VMOVAPD,
- A_VMOVUPD,
- A_VMOVDQA,
- A_VMOVDQU:
- if ((current_settings.optimizerswitches * [cs_opt_level3, cs_opt_size]) <> [cs_opt_size]) and
- CheckJumpMovTransferOpt(p, hp1, 0, Count) then
- begin
- Result := True;
- Exit;
- end;
- else
- ;
- end;
- end;
- end;
- end;
- class function TX86AsmOptimizer.CanBeCMOV(p : tai) : boolean;
- begin
- CanBeCMOV:=assigned(p) and
- MatchInstruction(p,A_MOV,[S_W,S_L,S_Q]) and
- { we can't use cmov ref,reg because
- ref could be nil and cmov still throws an exception
- if ref=nil but the mov isn't done (FK)
- or ((taicpu(p).oper[0]^.typ = top_ref) and
- (taicpu(p).oper[0]^.ref^.refaddr = addr_no))
- }
- (taicpu(p).oper[1]^.typ = top_reg) and
- (
- (taicpu(p).oper[0]^.typ = top_reg) or
- { allow references, but only pure symbols or GOT-relative addressing with
- RIP as base; it is not expected that this can cause a seg. violation }
- (
- (taicpu(p).oper[0]^.typ = top_ref) and
- IsRefSafe(taicpu(p).oper[0]^.ref)
- )
- );
- end;
- function TX86AsmOptimizer.OptPass2Jcc(var p : tai) : boolean;
- var
- hp1,hp2: tai;
- {$ifndef i8086}
- hp3,hp4,hpmov2, hp5: tai;
- l : Longint;
- condition : TAsmCond;
- {$endif i8086}
- carryadd_opcode : TAsmOp;
- symbol: TAsmSymbol;
- increg, tmpreg: TRegister;
- begin
- result:=false;
- if GetNextInstruction(p,hp1) and (hp1.typ=ait_instruction) then
- begin
- symbol := TAsmLabel(taicpu(p).oper[0]^.ref^.symbol);
- if (
- (
- ((Taicpu(hp1).opcode=A_ADD) or (Taicpu(hp1).opcode=A_SUB)) and
- MatchOptype(Taicpu(hp1),top_const,top_reg) and
- (Taicpu(hp1).oper[0]^.val=1)
- ) or
- ((Taicpu(hp1).opcode=A_INC) or (Taicpu(hp1).opcode=A_DEC))
- ) and
- GetNextInstruction(hp1,hp2) and
- SkipAligns(hp2, hp2) and
- (hp2.typ = ait_label) and
- (Tasmlabel(symbol) = Tai_label(hp2).labsym) then
- { jb @@1 cmc
- inc/dec operand --> adc/sbb operand,0
- @@1:
- ... and ...
- jnb @@1
- inc/dec operand --> adc/sbb operand,0
- @@1: }
- begin
- if Taicpu(p).condition in [C_NAE,C_B,C_C] then
- begin
- case taicpu(hp1).opcode of
- A_INC,
- A_ADD:
- carryadd_opcode:=A_ADC;
- A_DEC,
- A_SUB:
- carryadd_opcode:=A_SBB;
- else
- InternalError(2021011001);
- end;
- Taicpu(p).clearop(0);
- Taicpu(p).ops:=0;
- Taicpu(p).is_jmp:=false;
- Taicpu(p).opcode:=A_CMC;
- Taicpu(p).condition:=C_NONE;
- DebugMsg(SPeepholeOptimization+'JccAdd/Inc/Dec2CmcAdc/Sbb',p);
- Taicpu(hp1).ops:=2;
- if (Taicpu(hp1).opcode=A_ADD) or (Taicpu(hp1).opcode=A_SUB) then
- Taicpu(hp1).loadoper(1,Taicpu(hp1).oper[1]^)
- else
- Taicpu(hp1).loadoper(1,Taicpu(hp1).oper[0]^);
- Taicpu(hp1).loadconst(0,0);
- Taicpu(hp1).opcode:=carryadd_opcode;
- result:=true;
- exit;
- end
- else if Taicpu(p).condition in [C_AE,C_NB,C_NC] then
- begin
- case taicpu(hp1).opcode of
- A_INC,
- A_ADD:
- carryadd_opcode:=A_ADC;
- A_DEC,
- A_SUB:
- carryadd_opcode:=A_SBB;
- else
- InternalError(2021011002);
- end;
- Taicpu(hp1).ops:=2;
- DebugMsg(SPeepholeOptimization+'JccAdd/Inc/Dec2Adc/Sbb',p);
- if (Taicpu(hp1).opcode=A_ADD) or (Taicpu(hp1).opcode=A_SUB) then
- Taicpu(hp1).loadoper(1,Taicpu(hp1).oper[1]^)
- else
- Taicpu(hp1).loadoper(1,Taicpu(hp1).oper[0]^);
- Taicpu(hp1).loadconst(0,0);
- Taicpu(hp1).opcode:=carryadd_opcode;
- RemoveCurrentP(p, hp1);
- result:=true;
- exit;
- end
- {
- jcc @@1 setcc tmpreg
- inc/dec/add/sub operand -> (movzx tmpreg)
- @@1: add/sub tmpreg,operand
- While this increases code size slightly, it makes the code much faster if the
- jump is unpredictable
- }
- else if not(cs_opt_size in current_settings.optimizerswitches) then
- begin
- { search for an available register which is volatile }
- increg := GetIntRegisterBetween(R_SUBL, UsedRegs, p, hp1);
- if increg <> NR_NO then
- begin
- { We don't need to check if tmpreg is in hp1 or not, because
- it will be marked as in use at p (if not, this is
- indicative of a compiler bug). }
- TAsmLabel(symbol).decrefs;
- Taicpu(p).clearop(0);
- Taicpu(p).ops:=1;
- Taicpu(p).is_jmp:=false;
- Taicpu(p).opcode:=A_SETcc;
- DebugMsg(SPeepholeOptimization+'JccAdd2SetccAdd',p);
- Taicpu(p).condition:=inverse_cond(Taicpu(p).condition);
- Taicpu(p).loadreg(0,increg);
- if getsubreg(Taicpu(hp1).oper[1]^.reg)<>R_SUBL then
- begin
- case getsubreg(Taicpu(hp1).oper[1]^.reg) of
- R_SUBW:
- begin
- tmpreg := newreg(R_INTREGISTER,getsupreg(increg),R_SUBW);
- hp2:=Taicpu.op_reg_reg(A_MOVZX,S_BW,increg,tmpreg);
- end;
- R_SUBD:
- begin
- tmpreg := newreg(R_INTREGISTER,getsupreg(increg),R_SUBD);
- hp2:=Taicpu.op_reg_reg(A_MOVZX,S_BL,increg,tmpreg);
- end;
- {$ifdef x86_64}
- R_SUBQ:
- begin
- { MOVZX doesn't have a 64-bit variant, because
- the 32-bit version implicitly zeroes the
- upper 32-bits of the destination register }
- tmpreg := newreg(R_INTREGISTER,getsupreg(increg),R_SUBD);
- hp2:=Taicpu.op_reg_reg(A_MOVZX,S_BL,increg,tmpreg);
- setsubreg(tmpreg, R_SUBQ);
- end;
- {$endif x86_64}
- else
- Internalerror(2020030601);
- end;
- taicpu(hp2).fileinfo:=taicpu(hp1).fileinfo;
- asml.InsertAfter(hp2,p);
- end
- else
- tmpreg := increg;
- if (Taicpu(hp1).opcode=A_INC) or (Taicpu(hp1).opcode=A_DEC) then
- begin
- Taicpu(hp1).ops:=2;
- Taicpu(hp1).loadoper(1,Taicpu(hp1).oper[0]^)
- end;
- Taicpu(hp1).loadreg(0,tmpreg);
- AllocRegBetween(tmpreg,p,hp1,UsedRegs);
- Result := True;
- { p is no longer a Jcc instruction, so exit }
- Exit;
- end;
- end;
- end;
- { Detect the following:
- jmp<cond> @Lbl1
- jmp @Lbl2
- ...
- @Lbl1:
- ret
- Change to:
- jmp<inv_cond> @Lbl2
- ret
- }
- if MatchInstruction(hp1,A_JMP,[]) and (taicpu(hp1).oper[0]^.ref^.refaddr=addr_full) then
- begin
- hp2:=getlabelwithsym(TAsmLabel(symbol));
- if Assigned(hp2) and SkipLabels(hp2,hp2) and
- MatchInstruction(hp2,A_RET,[S_NO]) then
- begin
- taicpu(p).condition := inverse_cond(taicpu(p).condition);
- { Change label address to that of the unconditional jump }
- taicpu(p).loadoper(0, taicpu(hp1).oper[0]^);
- TAsmLabel(symbol).DecRefs;
- taicpu(hp1).opcode := A_RET;
- taicpu(hp1).is_jmp := false;
- taicpu(hp1).ops := taicpu(hp2).ops;
- DebugMsg(SPeepholeOptimization+'JccJmpRet2J!ccRet',p);
- case taicpu(hp2).ops of
- 0:
- taicpu(hp1).clearop(0);
- 1:
- taicpu(hp1).loadconst(0,taicpu(hp2).oper[0]^.val);
- else
- internalerror(2016041302);
- end;
- end;
- {$ifndef i8086}
- end
- {
- convert
- j<c> .L1
- mov 1,reg
- jmp .L2
- .L1
- mov 0,reg
- .L2
- into
- mov 0,reg
- set<not(c)> reg
- taking care of alignment, and making sure the "mov 0,reg" is not converted
- into a xor, as that would destroy the flag contents
- }
- else if MatchInstruction(hp1,A_MOV,[]) and
- MatchOpType(taicpu(hp1),top_const,top_reg) and
- {$ifdef i386}
- (
- { Under i386, ESI, EDI, EBP and ESP
- don't have an 8-bit representation }
- not (getsupreg(taicpu(hp1).oper[1]^.reg) in [RS_ESI, RS_EDI, RS_EBP, RS_ESP])
- ) and
- {$endif i386}
- (taicpu(hp1).oper[0]^.val=1) and
- GetNextInstruction(hp1,hp2) and
- MatchInstruction(hp2,A_JMP,[]) and (taicpu(hp2).oper[0]^.ref^.refaddr=addr_full) and
- GetNextInstruction(hp2,hp3) and
- { skip align }
- ((hp3.typ<>ait_align) or GetNextInstruction(hp3,hp3)) and
- (hp3.typ=ait_label) and
- (tasmlabel(taicpu(p).oper[0]^.ref^.symbol)=tai_label(hp3).labsym) and
- (tai_label(hp3).labsym.getrefs=1) and
- GetNextInstruction(hp3,hp4) and
- MatchInstruction(hp4,A_MOV,[]) and
- MatchOpType(taicpu(hp4),top_const,top_reg) and
- (taicpu(hp4).oper[0]^.val=0) and
- MatchOperand(taicpu(hp1).oper[1]^,taicpu(hp4).oper[1]^) and
- GetNextInstruction(hp4,hp5) and
- (hp5.typ=ait_label) and
- (tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol)=tai_label(hp5).labsym) and
- (tai_label(hp5).labsym.getrefs=1) then
- begin
- AllocRegBetween(NR_FLAGS,p,hp4,UsedRegs);
- DebugMsg(SPeepholeOptimization+'JccMovJmpMov2MovSetcc',p);
- { remove last label }
- RemoveInstruction(hp5);
- { remove second label }
- RemoveInstruction(hp3);
- { if align is present remove it }
- if GetNextInstruction(hp2,hp3) and (hp3.typ=ait_align) then
- RemoveInstruction(hp3);
- { remove jmp }
- RemoveInstruction(hp2);
- if taicpu(hp1).opsize=S_B then
- RemoveInstruction(hp1)
- else
- taicpu(hp1).loadconst(0,0);
- taicpu(hp4).opcode:=A_SETcc;
- taicpu(hp4).opsize:=S_B;
- taicpu(hp4).condition:=inverse_cond(taicpu(p).condition);
- taicpu(hp4).loadreg(0,newreg(R_INTREGISTER,getsupreg(taicpu(hp4).oper[1]^.reg),R_SUBL));
- taicpu(hp4).opercnt:=1;
- taicpu(hp4).ops:=1;
- taicpu(hp4).freeop(1);
- RemoveCurrentP(p);
- Result:=true;
- exit;
- end
- else if CPUX86_HAS_CMOV in cpu_capabilities[current_settings.cputype] then
- begin
- { check for
- jCC xxx
- <several movs>
- xxx:
- }
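- { e.g. "je .L1 ; movl %edx,%eax ; .L1:" becomes "cmovne %edx,%eax" - the movs
- only execute when the jump is not taken, hence the inverted condition }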
- l:=0;
- while assigned(hp1) and
- CanBeCMOV(hp1) and
- { stop on labels }
- not(hp1.typ=ait_label) do
- begin
- inc(l);
- GetNextInstruction(hp1,hp1);
- end;
- if assigned(hp1) then
- begin
- if FindLabel(tasmlabel(symbol),hp1) then
- begin
- if (l<=4) and (l>0) then
- begin
- condition:=inverse_cond(taicpu(p).condition);
- UpdateUsedRegs(tai(p.next));
- GetNextInstruction(p,hp1);
- repeat
- if not Assigned(hp1) then
- InternalError(2018062900);
- taicpu(hp1).opcode:=A_CMOVcc;
- taicpu(hp1).condition:=condition;
- UpdateUsedRegs(tai(hp1.next));
- GetNextInstruction(hp1,hp1);
- until not(CanBeCMOV(hp1));
- { Remember what hp1 is in case there's multiple aligns to get rid of }
- hp2 := hp1;
- repeat
- if not Assigned(hp2) then
- InternalError(2018062910);
- case hp2.typ of
- ait_label:
- { What we expected - break out of the loop (it won't be a dead label at the top of
- a cluster because that was optimised at an earlier stage) }
- Break;
- ait_align:
- { Go to the next entry until a label is found (may be multiple aligns before it) }
- begin
- hp2 := tai(hp2.Next);
- Continue;
- end;
- else
- begin
- { Might be a comment or temporary allocation entry }
- if not (hp2.typ in SkipInstr) then
- InternalError(2018062911);
- hp2 := tai(hp2.Next);
- Continue;
- end;
- end;
- until False;
- { Now we can safely decrement the reference count }
- tasmlabel(symbol).decrefs;
- DebugMsg(SPeepholeOptimization+'JccMov2CMov',p);
- { Remove the original jump }
- RemoveInstruction(p); { Note, the choice to not use RemoveCurrentp is deliberate }
- UpdateUsedRegs(tai(hp2.next));
- GetNextInstruction(hp2, p); { Instruction after the label }
- { Remove the label if this is its final reference }
- if (tasmlabel(symbol).getrefs=0) then
- StripLabelFast(hp1);
- if Assigned(p) then
- result:=true;
- exit;
- end;
- end
- else
- begin
- { check further for
- jCC xxx
- <several movs 1>
- jmp yyy
- xxx:
- <several movs 2>
- yyy:
- }
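- { Illustrative example (registers and labels are arbitrary):
- jne .L1; movl %edx,%eax; jmp .L2; .L1: movl %esi,%eax; .L2:
- is rewritten to
- cmove %edx,%eax; cmovne %esi,%eax
- i.e. the first block of movs gets the inverted condition, the second block the
- original condition, and the jump and labels are removed where possible }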
- { hp2 points to jmp yyy }
- hp2:=hp1;
- { skip hp1 to xxx (or an align right before it) }
- GetNextInstruction(hp1, hp1);
- if assigned(hp2) and
- assigned(hp1) and
- (l<=3) and
- (hp2.typ=ait_instruction) and
- (taicpu(hp2).is_jmp) and
- (taicpu(hp2).condition=C_None) and
- { real label and jump, no further references to the
- label are allowed }
- (tasmlabel(symbol).getrefs=1) and
- FindLabel(tasmlabel(symbol),hp1) then
- begin
- l:=0;
- { skip hp1 to <several moves 2> }
- if (hp1.typ = ait_align) then
- GetNextInstruction(hp1, hp1);
- GetNextInstruction(hp1, hpmov2);
- hp1 := hpmov2;
- while assigned(hp1) and
- CanBeCMOV(hp1) do
- begin
- inc(l);
- GetNextInstruction(hp1, hp1);
- end;
- { hp1 points to yyy (or an align right before it) }
- hp3 := hp1;
- if assigned(hp1) and
- FindLabel(tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol),hp1) then
- begin
- condition:=inverse_cond(taicpu(p).condition);
- UpdateUsedRegs(tai(p.next));
- GetNextInstruction(p,hp1);
- repeat
- taicpu(hp1).opcode:=A_CMOVcc;
- taicpu(hp1).condition:=condition;
- UpdateUsedRegs(tai(hp1.next));
- GetNextInstruction(hp1,hp1);
- until not(assigned(hp1)) or
- not(CanBeCMOV(hp1));
- condition:=inverse_cond(condition);
- if GetLastInstruction(hpmov2,hp1) then
- UpdateUsedRegs(tai(hp1.next));
- hp1 := hpmov2;
- { hp1 is now at <several movs 2> }
- while Assigned(hp1) and CanBeCMOV(hp1) do
- begin
- taicpu(hp1).opcode:=A_CMOVcc;
- taicpu(hp1).condition:=condition;
- UpdateUsedRegs(tai(hp1.next));
- GetNextInstruction(hp1,hp1);
- end;
- hp1 := p;
- { Get first instruction after label }
- UpdateUsedRegs(tai(hp3.next));
- GetNextInstruction(hp3, p);
- if assigned(p) and (hp3.typ = ait_align) then
- GetNextInstruction(p, p);
- { Don't dereference yet, as doing so will cause
- GetNextInstruction to skip the label and
- optional align marker. [Kit] }
- GetNextInstruction(hp2, hp4);
- DebugMsg(SPeepholeOptimization+'JccMovJmpMov2CMovCMov',hp1);
- { remove jCC }
- RemoveInstruction(hp1);
- { Now we can safely decrement it }
- tasmlabel(symbol).decrefs;
- { Remove label xxx (it will have a ref of zero due to the initial check) }
- StripLabelFast(hp4);
- { remove jmp }
- symbol := taicpu(hp2).oper[0]^.ref^.symbol;
- RemoveInstruction(hp2);
- { As before, now we can safely decrement it }
- tasmlabel(symbol).decrefs;
- { Remove label yyy (and the optional alignment) if its reference falls to zero }
- if tasmlabel(symbol).getrefs = 0 then
- StripLabelFast(hp3);
- if Assigned(p) then
- result:=true;
- exit;
- end;
- end;
- end;
- end;
- {$endif i8086}
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1Movx(var p : tai) : boolean;
- var
- hp1,hp2,hp3: tai;
- reg_and_hp1_is_instr, RegUsed, AndTest: Boolean;
- NewSize: TOpSize;
- NewRegSize: TSubRegister;
- Limit: TCgInt;
- SwapOper: POper;
- begin
- result:=false;
- reg_and_hp1_is_instr:=(taicpu(p).oper[1]^.typ = top_reg) and
- GetNextInstruction(p,hp1) and
- (hp1.typ = ait_instruction);
- if reg_and_hp1_is_instr and
- (
- (taicpu(hp1).opcode <> A_LEA) or
- { If the LEA instruction can be converted into an arithmetic instruction,
- it may be possible to then fold it. }
- (
- { If the flags register is in use, don't change the instruction
- to an ADD otherwise this will scramble the flags. [Kit] }
- not RegInUsedRegs(NR_DEFAULTFLAGS, UsedRegs) and
- ConvertLEA(taicpu(hp1))
- )
- ) and
- IsFoldableArithOp(taicpu(hp1),taicpu(p).oper[1]^.reg) and
- GetNextInstruction(hp1,hp2) and
- MatchInstruction(hp2,A_MOV,[]) and
- (taicpu(hp2).oper[0]^.typ = top_reg) and
- OpsEqual(taicpu(hp2).oper[1]^,taicpu(p).oper[0]^) and
- ((taicpu(p).opsize in [S_BW,S_BL]) and (taicpu(hp2).opsize=S_B) or
- (taicpu(p).opsize in [S_WL]) and (taicpu(hp2).opsize=S_W)) and
- {$ifdef i386}
- { not all registers have byte size sub registers on i386 }
- ((taicpu(hp2).opsize<>S_B) or (getsupreg(taicpu(hp1).oper[0]^.reg) in [RS_EAX, RS_EBX, RS_ECX, RS_EDX])) and
- {$endif i386}
- (((taicpu(hp1).ops=2) and
- (getsupreg(taicpu(hp2).oper[0]^.reg)=getsupreg(taicpu(hp1).oper[1]^.reg))) or
- ((taicpu(hp1).ops=1) and
- (getsupreg(taicpu(hp2).oper[0]^.reg)=getsupreg(taicpu(hp1).oper[0]^.reg)))) and
- not(RegUsedAfterInstruction(taicpu(hp2).oper[0]^.reg,hp2,UsedRegs)) then
- begin
- { change movsX/movzX reg/ref, reg2
- add/sub/or/... reg3/$const, reg2
- mov reg2 reg/ref
- to add/sub/or/... reg3/$const, reg/ref }
- { by example:
- movswl %si,%eax movswl %si,%eax p
- decl %eax addl %edx,%eax hp1
- movw %ax,%si movw %ax,%si hp2
- ->
- movswl %si,%eax movswl %si,%eax p
- decw %eax addw %edx,%eax hp1
- movw %ax,%si movw %ax,%si hp2
- }
- taicpu(hp1).changeopsize(taicpu(hp2).opsize);
- {
- ->
- movswl %si,%eax movswl %si,%eax p
- decw %si addw %dx,%si hp1
- movw %ax,%si movw %ax,%si hp2
- }
- case taicpu(hp1).ops of
- 1:
- taicpu(hp1).loadoper(0,taicpu(hp2).oper[1]^);
- 2:
- begin
- taicpu(hp1).loadoper(1,taicpu(hp2).oper[1]^);
- if (taicpu(hp1).oper[0]^.typ = top_reg) then
- setsubreg(taicpu(hp1).oper[0]^.reg,getsubreg(taicpu(hp2).oper[0]^.reg));
- end;
- else
- internalerror(2008042702);
- end;
- {
- ->
- decw %si addw %dx,%si p
- }
- DebugMsg(SPeepholeOptimization + 'var3',p);
- RemoveCurrentP(p, hp1);
- RemoveInstruction(hp2);
- Result := True;
- Exit;
- end;
- if reg_and_hp1_is_instr and
- (taicpu(hp1).opcode = A_MOV) and
- MatchOpType(taicpu(hp1),top_reg,top_reg) and
- (MatchOperand(taicpu(p).oper[1]^,taicpu(hp1).oper[0]^)
- {$ifdef x86_64}
- { check for implicit extension to 64 bit }
- or
- ((taicpu(p).opsize in [S_BL,S_WL]) and
- (taicpu(hp1).opsize=S_Q) and
- SuperRegistersEqual(taicpu(p).oper[1]^.reg,taicpu(hp1).oper[0]^.reg)
- )
- {$endif x86_64}
- )
- then
- begin
- { change
- movx %reg1,%reg2
- mov %reg2,%reg3
- dealloc %reg2
- into
- movx %reg,%reg3
- }
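- { Illustrative example (registers are arbitrary):
- movzbl %al,%edx; movl %edx,%ecx
- becomes, provided %edx is not used afterwards,
- movzbl %al,%ecx }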
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if not(RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,TmpUsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovxMov2Movx',p);
- {$ifdef x86_64}
- if (taicpu(p).opsize in [S_BL,S_WL]) and
- (taicpu(hp1).opsize=S_Q) then
- taicpu(p).loadreg(1,newreg(R_INTREGISTER,getsupreg(taicpu(hp1).oper[1]^.reg),R_SUBD))
- else
- {$endif x86_64}
- taicpu(p).loadreg(1,taicpu(hp1).oper[1]^.reg);
- RemoveInstruction(hp1);
- Result := True;
- Exit;
- end;
- end;
- if reg_and_hp1_is_instr and
- ((taicpu(hp1).opcode=A_MOV) or
- (taicpu(hp1).opcode=A_ADD) or
- (taicpu(hp1).opcode=A_SUB) or
- (taicpu(hp1).opcode=A_CMP) or
- (taicpu(hp1).opcode=A_OR) or
- (taicpu(hp1).opcode=A_XOR) or
- (taicpu(hp1).opcode=A_AND)
- ) and
- (taicpu(hp1).oper[1]^.typ = top_reg) then
- begin
- AndTest := (taicpu(hp1).opcode=A_AND) and
- GetNextInstruction(hp1, hp2) and
- (hp2.typ = ait_instruction) and
- (
- (
- (taicpu(hp2).opcode=A_TEST) and
- (
- MatchOperand(taicpu(hp2).oper[0]^, taicpu(hp1).oper[1]^.reg) or
- MatchOperand(taicpu(hp2).oper[0]^, -1) or
- (
- { If the AND and TEST instructions share a constant, this is also valid }
- (taicpu(hp1).oper[0]^.typ = top_const) and
- MatchOperand(taicpu(hp2).oper[0]^, taicpu(hp1).oper[0]^.val)
- )
- ) and
- MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[1]^.reg)
- ) or
- (
- (taicpu(hp2).opcode=A_CMP) and
- MatchOperand(taicpu(hp2).oper[0]^, 0) and
- MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[1]^.reg)
- )
- );
- { change
- movx (oper),%reg2
- and $x,%reg2
- test %reg2,%reg2
- dealloc %reg2
- into
- test $x,(oper)
- if the AND constant $x fits within the smaller (source) operand size
- }
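- { Illustrative example (registers and constant are arbitrary):
- movzbl (%edx),%eax; andl $0x0f,%eax; testl %eax,%eax
- becomes, provided %eax is not used afterwards and the following
- Jcc/SETcc/CMOVcc only checks for (in)equality,
- testb $0x0f,(%edx) }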
- if ((taicpu(p).oper[0]^.typ=top_reg) or
- ((taicpu(p).oper[0]^.typ=top_ref) and (taicpu(p).oper[0]^.ref^.refaddr<>addr_full))) and
- (taicpu(hp1).oper[0]^.typ = top_const) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg) and
- AndTest then
- begin
- { Check if the AND constant is in range }
- case taicpu(p).opsize of
- S_BW, S_BL{$ifdef x86_64}, S_BQ{$endif x86_64}:
- begin
- NewSize := S_B;
- Limit := $FF;
- end;
- S_WL{$ifdef x86_64}, S_WQ{$endif x86_64}:
- begin
- NewSize := S_W;
- Limit := $FFFF;
- end;
- {$ifdef x86_64}
- S_LQ:
- begin
- NewSize := S_L;
- Limit := $FFFFFFFF;
- end;
- {$endif x86_64}
- else
- InternalError(2021120303);
- end;
- if (
- ((taicpu(hp1).oper[0]^.val and Limit) = taicpu(hp1).oper[0]^.val) or
- { Check for negative operands }
- (((not taicpu(hp1).oper[0]^.val) and Limit) = (not taicpu(hp1).oper[0]^.val))
- ) and
- GetNextInstruction(hp2,hp3) and
- MatchInstruction(hp3,A_Jcc,A_Setcc,A_CMOVcc,[]) and
- (taicpu(hp3).condition in [C_E,C_NE]) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- if not(RegUsedAfterInstruction(taicpu(hp2).oper[1]^.reg, hp2, TmpUsedRegs)) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovxAndTest2Test done',p);
- taicpu(hp1).loadoper(1, taicpu(p).oper[0]^);
- taicpu(hp1).opcode := A_TEST;
- taicpu(hp1).opsize := NewSize;
- RemoveInstruction(hp2);
- RemoveCurrentP(p, hp1);
- Result:=true;
- exit;
- end;
- end;
- end;
- if (taicpu(hp1).oper[0]^.typ = top_reg) and
- (((taicpu(p).opsize in [S_BW,S_BL,S_WL{$ifdef x86_64},S_BQ,S_WQ,S_LQ{$endif x86_64}]) and
- (taicpu(hp1).opsize=S_B)) or
- ((taicpu(p).opsize in [S_WL{$ifdef x86_64},S_WQ,S_LQ{$endif x86_64}]) and
- (taicpu(hp1).opsize=S_W))
- {$ifdef x86_64}
- or ((taicpu(p).opsize=S_LQ) and
- (taicpu(hp1).opsize=S_L))
- {$endif x86_64}
- ) and
- SuperRegistersEqual(taicpu(p).oper[1]^.reg,taicpu(hp1).oper[0]^.reg) then
- begin
- { change
- movx %reg1,%reg2
- op %reg2,%reg3
- dealloc %reg2
- into
- op %reg1,%reg3
- if the second op accesses only the bits stored in reg1
- }
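- { Illustrative example (registers are arbitrary):
- movzbl %cl,%eax; addb %al,%dl
- becomes, provided %eax is not used afterwards,
- addb %cl,%dl }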
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if AndTest then
- begin
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
- RegUsed := RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp2,TmpUsedRegs);
- end
- else
- RegUsed := RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,TmpUsedRegs);
- if not RegUsed then
- begin
- DebugMsg(SPeepholeOptimization + 'MovxOp2Op 1',p);
- if taicpu(p).oper[0]^.typ=top_reg then
- begin
- case taicpu(hp1).opsize of
- S_B:
- taicpu(hp1).loadreg(0,newreg(R_INTREGISTER,getsupreg(taicpu(p).oper[0]^.reg),R_SUBL));
- S_W:
- taicpu(hp1).loadreg(0,newreg(R_INTREGISTER,getsupreg(taicpu(p).oper[0]^.reg),R_SUBW));
- S_L:
- taicpu(hp1).loadreg(0,newreg(R_INTREGISTER,getsupreg(taicpu(p).oper[0]^.reg),R_SUBD));
- else
- Internalerror(2020102301);
- end;
- AllocRegBetween(taicpu(hp1).oper[0]^.reg,p,hp1,UsedRegs);
- end
- else
- taicpu(hp1).loadref(0,taicpu(p).oper[0]^.ref^);
- RemoveCurrentP(p);
- if AndTest then
- RemoveInstruction(hp2);
- result:=true;
- exit;
- end;
- end
- else if (taicpu(p).oper[1]^.reg = taicpu(hp1).oper[1]^.reg) and
- (
- { Bitwise operations only }
- (taicpu(hp1).opcode=A_AND) or
- (taicpu(hp1).opcode=A_TEST) or
- (
- (taicpu(hp1).oper[0]^.typ = top_const) and
- (
- (taicpu(hp1).opcode=A_OR) or
- (taicpu(hp1).opcode=A_XOR)
- )
- )
- ) and
- (
- (taicpu(hp1).oper[0]^.typ = top_const) or
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^.reg) or
- not RegInOp(taicpu(p).oper[1]^.reg, taicpu(hp1).oper[0]^)
- ) then
- begin
- { change
- movx %reg2,%reg2
- op const,%reg2
- into
- op const,%reg2 (smaller version)
- movx %reg2,%reg2
- also change
- movx %reg1,%reg2
- and/test (oper),%reg2
- dealloc %reg2
- into
- and/test (oper),%reg1
- }
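- { Illustrative examples (registers and constants are arbitrary):
- movzbl %al,%eax; andl $0x7f,%eax -> andb $0x7f,%al; movzbl %al,%eax
- and, if %eax is not used afterwards,
- movzbl %cl,%eax; testl $0x7f,%eax -> testb $0x7f,%cl }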
- case taicpu(p).opsize of
- S_BW, S_BL{$ifdef x86_64}, S_BQ{$endif x86_64}:
- begin
- NewSize := S_B;
- NewRegSize := R_SUBL;
- Limit := $FF;
- end;
- S_WL{$ifdef x86_64}, S_WQ{$endif x86_64}:
- begin
- NewSize := S_W;
- NewRegSize := R_SUBW;
- Limit := $FFFF;
- end;
- {$ifdef x86_64}
- S_LQ:
- begin
- NewSize := S_L;
- NewRegSize := R_SUBD;
- Limit := $FFFFFFFF;
- end;
- {$endif x86_64}
- else
- Internalerror(2021120302);
- end;
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- if AndTest then
- begin
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
- RegUsed := RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp2,TmpUsedRegs);
- end
- else
- RegUsed := RegUsedAfterInstruction(taicpu(p).oper[1]^.reg,hp1,TmpUsedRegs);
- if
- (
- (taicpu(p).opcode = A_MOVZX) and
- (
- (taicpu(hp1).opcode=A_AND) or
- (taicpu(hp1).opcode=A_TEST)
- ) and
- not (
- { If both are references, then the final instruction will have
- both operands as references, which is not allowed }
- (taicpu(p).oper[0]^.typ = top_ref) and
- (taicpu(hp1).oper[0]^.typ = top_ref)
- ) and
- not RegUsed
- ) or
- (
- (
- SuperRegistersEqual(taicpu(p).oper[0]^.reg, taicpu(p).oper[1]^.reg) or
- not RegUsed
- ) and
- (taicpu(p).oper[0]^.typ = top_reg) and
- SuperRegistersEqual(taicpu(p).oper[0]^.reg, taicpu(p).oper[1]^.reg) and
- (taicpu(hp1).oper[0]^.typ = top_const) and
- ((taicpu(hp1).oper[0]^.val and Limit) = taicpu(hp1).oper[0]^.val)
- ) then
- begin
- {$if defined(i386) or defined(i8086)}
- { If the target size is 8-bit, make sure we can actually encode it }
- if (NewRegSize = R_SUBL) and (taicpu(hp1).oper[0]^.typ = top_reg) and not (GetSupReg(taicpu(hp1).oper[0]^.reg) in [RS_EAX,RS_EBX,RS_ECX,RS_EDX]) then
- Exit;
- {$endif i386 or i8086}
- DebugMsg(SPeepholeOptimization + 'MovxOp2Op 2',p);
- taicpu(hp1).opsize := NewSize;
- taicpu(hp1).loadoper(1, taicpu(p).oper[0]^);
- if AndTest then
- begin
- RemoveInstruction(hp2);
- if not RegUsed then
- begin
- taicpu(hp1).opcode := A_TEST;
- if (taicpu(hp1).oper[0]^.typ = top_ref) then
- begin
- { Make sure the reference is the second operand }
- SwapOper := taicpu(hp1).oper[0];
- taicpu(hp1).oper[0] := taicpu(hp1).oper[1];
- taicpu(hp1).oper[1] := SwapOper;
- end;
- end;
- end;
- case taicpu(hp1).oper[0]^.typ of
- top_reg:
- setsubreg(taicpu(hp1).oper[0]^.reg, NewRegSize);
- top_const:
- { For the AND/TEST case }
- taicpu(hp1).oper[0]^.val := taicpu(hp1).oper[0]^.val and Limit;
- else
- ;
- end;
- if RegUsed then
- begin
- AsmL.Remove(p);
- AsmL.InsertAfter(p, hp1);
- p := hp1;
- end
- else
- RemoveCurrentP(p, hp1);
- result:=true;
- exit;
- end;
- end;
- end;
- if reg_and_hp1_is_instr and
- (taicpu(p).oper[0]^.typ = top_reg) and
- (
- (taicpu(hp1).opcode = A_SHL) or (taicpu(hp1).opcode = A_SAL)
- ) and
- (taicpu(hp1).oper[0]^.typ = top_const) and
- SuperRegistersEqual(taicpu(p).oper[0]^.reg, taicpu(p).oper[1]^.reg) and
- MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[1]^.reg) and
- { Minimum shift value allowed is the bit difference between the sizes }
- (taicpu(hp1).oper[0]^.val >=
- { Multiply by 8 because tcgsize2size returns bytes, not bits }
- 8 * (
- tcgsize2size[reg_cgsize(taicpu(p).oper[1]^.reg)] -
- tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)]
- )
- ) then
- begin
- { For:
- movsx/movzx %reg1,%reg1 (same register, just different sizes)
- shl/sal ##, %reg1
- Remove the movsx/movzx instruction if the shift overwrites the
- extended bits of the register (e.g. movslq %eax,%rax; shlq $32,%rax)
- }
- DebugMsg(SPeepholeOptimization + 'MovxShl2Shl',p);
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end
- else if reg_and_hp1_is_instr and
- (taicpu(p).oper[0]^.typ = top_reg) and
- (
- ((taicpu(hp1).opcode = A_SHR) and (taicpu(p).opcode = A_MOVZX)) or
- ((taicpu(hp1).opcode = A_SAR) and (taicpu(p).opcode <> A_MOVZX))
- ) and
- (taicpu(hp1).oper[0]^.typ = top_const) and
- SuperRegistersEqual(taicpu(p).oper[0]^.reg, taicpu(p).oper[1]^.reg) and
- MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[1]^.reg) and
- { Maximum shift value allowed is the bit size of the smallest register minus 1 }
- (taicpu(hp1).oper[0]^.val <
- { Multiply by 8 because tcgsize2size returns bytes, not bits }
- 8 * (
- tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)]
- )
- ) then
- begin
- { For:
- movsx %reg1,%reg1 movzx %reg1,%reg1 (same register, just different sizes)
- sar ##, %reg1 shr ##, %reg1
- Move the shift to before the movx instruction if the shift value
- is not too large.
- }
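- { Illustrative example (registers and shift count are arbitrary):
- movzbl %al,%eax; shrl $3,%eax -> shrb $3,%al; movzbl %al,%eax }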
- asml.Remove(hp1);
- asml.InsertBefore(hp1, p);
- taicpu(hp1).oper[1]^.reg := taicpu(p).oper[0]^.reg;
- case taicpu(p).opsize of
- s_BW, S_BL{$ifdef x86_64}, S_BQ{$endif}:
- taicpu(hp1).opsize := S_B;
- S_WL{$ifdef x86_64}, S_WQ{$endif}:
- taicpu(hp1).opsize := S_W;
- {$ifdef x86_64}
- S_LQ:
- taicpu(hp1).opsize := S_L;
- {$endif}
- else
- InternalError(2020112401);
- end;
- if (taicpu(hp1).opcode = A_SHR) then
- DebugMsg(SPeepholeOptimization + 'MovzShr2ShrMovz', hp1)
- else
- DebugMsg(SPeepholeOptimization + 'MovsSar2SarMovs', hp1);
- Result := True;
- end;
- if reg_and_hp1_is_instr and
- (taicpu(p).oper[0]^.typ = top_reg) and
- SuperRegistersEqual(taicpu(p).oper[0]^.reg, taicpu(p).oper[1]^.reg) and
- (
- (taicpu(hp1).opcode = taicpu(p).opcode)
- or ((taicpu(p).opcode = A_MOVZX) and ((taicpu(hp1).opcode = A_MOVSX){$ifdef x86_64} or (taicpu(hp1).opcode = A_MOVSXD){$endif x86_64}))
- {$ifdef x86_64}
- or ((taicpu(p).opcode = A_MOVSX) and (taicpu(hp1).opcode = A_MOVSXD))
- {$endif x86_64}
- ) then
- begin
- if MatchOpType(taicpu(hp1), top_reg, top_reg) and
- (taicpu(p).oper[1]^.reg = taicpu(hp1).oper[0]^.reg) and
- SuperRegistersEqual(taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[1]^.reg) then
- begin
- {
- For example:
- movzbw %al,%ax
- movzwl %ax,%eax
- Compress into:
- movzbl %al,%eax
- }
- RegUsed := False;
- case taicpu(p).opsize of
- S_BW:
- case taicpu(hp1).opsize of
- S_WL:
- begin
- taicpu(p).opsize := S_BL;
- RegUsed := True;
- end;
- {$ifdef x86_64}
- S_WQ:
- begin
- if taicpu(p).opcode = A_MOVZX then
- begin
- taicpu(p).opsize := S_BL;
- { 64-bit zero extension is implicit, so change to the 32-bit register }
- setsubreg(taicpu(hp1).oper[1]^.reg, R_SUBD);
- end
- else
- taicpu(p).opsize := S_BQ;
- RegUsed := True;
- end;
- {$endif x86_64}
- else
- ;
- end;
- {$ifdef x86_64}
- S_BL:
- case taicpu(hp1).opsize of
- S_LQ:
- begin
- if taicpu(p).opcode = A_MOVZX then
- begin
- taicpu(p).opsize := S_BL;
- { 64-bit zero extension is implicit, so change to the 32-bit register }
- setsubreg(taicpu(hp1).oper[1]^.reg, R_SUBD);
- end
- else
- taicpu(p).opsize := S_BQ;
- RegUsed := True;
- end;
- else
- ;
- end;
- S_WL:
- case taicpu(hp1).opsize of
- S_LQ:
- begin
- if taicpu(p).opcode = A_MOVZX then
- begin
- taicpu(p).opsize := S_WL;
- { 64-bit zero extension is implicit, so change to the 32-bit register }
- setsubreg(taicpu(hp1).oper[1]^.reg, R_SUBD);
- end
- else
- taicpu(p).opsize := S_WQ;
- RegUsed := True;
- end;
- else
- ;
- end;
- {$endif x86_64}
- else
- ;
- end;
- if RegUsed then
- begin
- DebugMsg(SPeepholeOptimization + 'MovxMovx2Movx', p);
- taicpu(p).oper[1]^.reg := taicpu(hp1).oper[1]^.reg;
- RemoveInstruction(hp1);
- Result := True;
- Exit;
- end;
- end;
- if (taicpu(hp1).opsize = taicpu(p).opsize) and
- not RegInInstruction(taicpu(p).oper[1]^.reg, hp1) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2, [A_AND, A_OR, A_XOR, A_TEST], []) and
- (
- ((taicpu(hp2).opsize = S_W) and (taicpu(p).opsize = S_BW)) or
- ((taicpu(hp2).opsize = S_L) and (taicpu(p).opsize in [S_BL, S_WL]))
- {$ifdef x86_64}
- or ((taicpu(hp2).opsize = S_Q) and (taicpu(p).opsize in [S_BL, S_BQ, S_WL, S_WQ, S_LQ]))
- {$endif x86_64}
- ) and
- MatchOpType(taicpu(hp2), top_reg, top_reg) and
- (
- (
- (taicpu(hp2).oper[0]^.reg = taicpu(hp1).oper[1]^.reg) and
- (taicpu(hp2).oper[1]^.reg = taicpu(p).oper[1]^.reg)
- ) or
- (
- { Only allow the operands in reverse order for TEST instructions }
- (taicpu(hp2).opcode = A_TEST) and
- (taicpu(hp2).oper[0]^.reg = taicpu(p).oper[1]^.reg) and
- (taicpu(hp2).oper[1]^.reg = taicpu(hp1).oper[1]^.reg)
- )
- ) then
- begin
- {
- For example:
- movzbl %al,%eax
- movzbl (ref),%edx
- andl %edx,%eax
- (%edx deallocated)
- Change to:
- andb (ref),%al
- movzbl %al,%eax
- Rules are:
- - First two instructions have the same opcode and opsize
- - First instruction's operands are the same super-register
- - Second instruction operates on a different register
- - Third instruction is AND, OR, XOR or TEST
- - Third instruction's operands are the destination registers of the first two instructions
- - Third instruction writes to the destination register of the first instruction (except with TEST)
- - Second instruction's destination register is deallocated afterwards
- }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- UpdateUsedRegs(TmpUsedRegs, tai(hp1.Next));
- if not RegUsedAfterInstruction(taicpu(hp1).oper[1]^.reg, hp2, TmpUsedRegs) then
- begin
- case taicpu(p).opsize of
- S_BW, S_BL{$ifdef x86_64}, S_BQ{$endif x86_64}:
- NewSize := S_B;
- S_WL{$ifdef x86_64}, S_WQ{$endif x86_64}:
- NewSize := S_W;
- {$ifdef x86_64}
- S_LQ:
- NewSize := S_L;
- {$endif x86_64}
- else
- InternalError(2021120301);
- end;
- taicpu(hp2).loadoper(0, taicpu(hp1).oper[0]^);
- taicpu(hp2).loadreg(1, taicpu(p).oper[0]^.reg);
- taicpu(hp2).opsize := NewSize;
- RemoveInstruction(hp1);
- { With TEST, it's best to keep the MOVX instruction at the top }
- if (taicpu(hp2).opcode <> A_TEST) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovxMovxOp2OpMovx', p);
- asml.Remove(p);
- { If the third instruction uses the flags, the MOVX instruction won't modify them }
- asml.InsertAfter(p, hp2);
- p := hp2;
- end
- else
- DebugMsg(SPeepholeOptimization + 'MovxMovxTest2MovxTest', p);
- Result := True;
- Exit;
- end;
- end;
- end;
- if taicpu(p).opcode=A_MOVZX then
- begin
- { removes superfluous And's after movzx's }
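- { e.g. movzbl %cl,%eax; andl $0xff,%eax - the and can be dropped, since the
- movzbl already clears bits 8..31 (registers are illustrative) }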
- if reg_and_hp1_is_instr and
- (taicpu(hp1).opcode = A_AND) and
- MatchOpType(taicpu(hp1),top_const,top_reg) and
- ((taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg)
- {$ifdef x86_64}
- { check for implicit extension to 64 bit }
- or
- ((taicpu(p).opsize in [S_BL,S_WL]) and
- (taicpu(hp1).opsize=S_Q) and
- SuperRegistersEqual(taicpu(p).oper[1]^.reg,taicpu(hp1).oper[1]^.reg)
- )
- {$endif x86_64}
- )
- then
- begin
- case taicpu(p).opsize Of
- S_BL, S_BW{$ifdef x86_64}, S_BQ{$endif x86_64}:
- if (taicpu(hp1).oper[0]^.val = $ff) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovzAnd2Movz1',p);
- RemoveInstruction(hp1);
- Result:=true;
- exit;
- end;
- S_WL{$ifdef x86_64}, S_WQ{$endif x86_64}:
- if (taicpu(hp1).oper[0]^.val = $ffff) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovzAnd2Movz2',p);
- RemoveInstruction(hp1);
- Result:=true;
- exit;
- end;
- {$ifdef x86_64}
- S_LQ:
- if (taicpu(hp1).oper[0]^.val = $ffffffff) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovzAnd2Movz3',p);
- RemoveInstruction(hp1);
- Result:=true;
- exit;
- end;
- {$endif x86_64}
- else
- ;
- end;
- { we cannot get rid of the and, but can we get rid of the movz ?}
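- { e.g. movzbl %al,%eax; andl $0x0f,%eax - the movz can be dropped because the
- and clears the upper bits anyway; this requires that source and destination
- share the same super-register and that the constant fits in the smaller size
- (registers and constant are illustrative) }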
- if SuperRegistersEqual(taicpu(p).oper[0]^.reg,taicpu(p).oper[1]^.reg) then
- begin
- case taicpu(p).opsize Of
- S_BL, S_BW{$ifdef x86_64}, S_BQ{$endif x86_64}:
- if (taicpu(hp1).oper[0]^.val and $ff)=taicpu(hp1).oper[0]^.val then
- begin
- DebugMsg(SPeepholeOptimization + 'MovzAnd2And1',p);
- RemoveCurrentP(p,hp1);
- Result:=true;
- exit;
- end;
- S_WL{$ifdef x86_64}, S_WQ{$endif x86_64}:
- if (taicpu(hp1).oper[0]^.val and $ffff)=taicpu(hp1).oper[0]^.val then
- begin
- DebugMsg(SPeepholeOptimization + 'MovzAnd2And2',p);
- RemoveCurrentP(p,hp1);
- Result:=true;
- exit;
- end;
- {$ifdef x86_64}
- S_LQ:
- if (taicpu(hp1).oper[0]^.val and $ffffffff)=taicpu(hp1).oper[0]^.val then
- begin
- DebugMsg(SPeepholeOptimization + 'MovzAnd2And3',p);
- RemoveCurrentP(p,hp1);
- Result:=true;
- exit;
- end;
- {$endif x86_64}
- else
- ;
- end;
- end;
- end;
- { changes some movzx constructs to faster synonyms (all examples
- are given with eax/ax, but are also valid for other registers)}
- if MatchOpType(taicpu(p),top_reg,top_reg) then
- begin
- case taicpu(p).opsize of
- { Technically, movzbw %al,%ax cannot be encoded in 32/64-bit mode
- (the machine code is equivalent to movzbl %al,%eax), but the
- code generator still generates that assembler instruction and
- it is silently converted. This should probably be checked.
- [Kit] }
- S_BW:
- begin
- if (getsupreg(taicpu(p).oper[0]^.reg)=getsupreg(taicpu(p).oper[1]^.reg)) and
- (
- not IsMOVZXAcceptable
- { and $0xff,%ax has a smaller encoding but risks a partial write penalty }
- or (
- (cs_opt_size in current_settings.optimizerswitches) and
- (taicpu(p).oper[1]^.reg = NR_AX)
- )
- ) then
- {Change "movzbw %al, %ax" to "andw $0x0ffh, %ax"}
- begin
- DebugMsg(SPeepholeOptimization + 'var7',p);
- taicpu(p).opcode := A_AND;
- taicpu(p).changeopsize(S_W);
- taicpu(p).loadConst(0,$ff);
- Result := True;
- end
- else if not IsMOVZXAcceptable and
- GetNextInstruction(p, hp1) and
- (tai(hp1).typ = ait_instruction) and
- (taicpu(hp1).opcode = A_AND) and
- MatchOpType(taicpu(hp1),top_const,top_reg) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg) then
- { Change "movzbw %reg1, %reg2; andw $const, %reg2"
- to "movw %reg1, reg2; andw $(const1 and $ff), %reg2"}
- begin
- DebugMsg(SPeepholeOptimization + 'var8',p);
- taicpu(p).opcode := A_MOV;
- taicpu(p).changeopsize(S_W);
- setsubreg(taicpu(p).oper[0]^.reg,R_SUBW);
- taicpu(hp1).loadConst(0,taicpu(hp1).oper[0]^.val and $ff);
- Result := True;
- end;
- end;
- {$ifndef i8086} { movzbl %al,%eax cannot be encoded in 16-bit mode (the machine code is equivalent to movzbw %al,%ax) }
- S_BL:
- begin
- if (getsupreg(taicpu(p).oper[0]^.reg)=getsupreg(taicpu(p).oper[1]^.reg)) and
- (
- not IsMOVZXAcceptable
- { and $0xff,%eax has a smaller encoding but risks a partial write penalty }
- or (
- (cs_opt_size in current_settings.optimizerswitches) and
- (taicpu(p).oper[1]^.reg = NR_EAX)
- )
- ) then
- { Change "movzbl %al, %eax" to "andl $0x0ffh, %eax" }
- begin
- DebugMsg(SPeepholeOptimization + 'var9',p);
- taicpu(p).opcode := A_AND;
- taicpu(p).changeopsize(S_L);
- taicpu(p).loadConst(0,$ff);
- Result := True;
- end
- else if not IsMOVZXAcceptable and
- GetNextInstruction(p, hp1) and
- (tai(hp1).typ = ait_instruction) and
- (taicpu(hp1).opcode = A_AND) and
- MatchOpType(taicpu(hp1),top_const,top_reg) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg) then
- { Change "movzbl %reg1, %reg2; andl $const, %reg2"
- to "movl %reg1, reg2; andl $(const1 and $ff), %reg2"}
- begin
- DebugMsg(SPeepholeOptimization + 'var10',p);
- taicpu(p).opcode := A_MOV;
- taicpu(p).changeopsize(S_L);
- { do not use R_SUBWHOLE
- as movl %rdx,%eax
- is invalid in assembler PM }
- setsubreg(taicpu(p).oper[0]^.reg, R_SUBD);
- taicpu(hp1).loadConst(0,taicpu(hp1).oper[0]^.val and $ff);
- Result := True;
- end;
- end;
- {$endif i8086}
- S_WL:
- if not IsMOVZXAcceptable then
- begin
- if (getsupreg(taicpu(p).oper[0]^.reg)=getsupreg(taicpu(p).oper[1]^.reg)) then
- { Change "movzwl %ax, %eax" to "andl $0x0ffffh, %eax" }
- begin
- DebugMsg(SPeepholeOptimization + 'var11',p);
- taicpu(p).opcode := A_AND;
- taicpu(p).changeopsize(S_L);
- taicpu(p).loadConst(0,$ffff);
- Result := True;
- end
- else if GetNextInstruction(p, hp1) and
- (tai(hp1).typ = ait_instruction) and
- (taicpu(hp1).opcode = A_AND) and
- (taicpu(hp1).oper[0]^.typ = top_const) and
- (taicpu(hp1).oper[1]^.typ = top_reg) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg) then
- { Change "movzwl %reg1, %reg2; andl $const, %reg2"
- to "movl %reg1, reg2; andl $(const1 and $ffff), %reg2"}
- begin
- DebugMsg(SPeepholeOptimization + 'var12',p);
- taicpu(p).opcode := A_MOV;
- taicpu(p).changeopsize(S_L);
- { do not use R_SUBWHOLE
- as movl %rdx,%eax
- is invalid in assembler PM }
- setsubreg(taicpu(p).oper[0]^.reg, R_SUBD);
- taicpu(hp1).loadConst(0,taicpu(hp1).oper[0]^.val and $ffff);
- Result := True;
- end;
- end;
- else
- InternalError(2017050705);
- end;
- end
- else if not IsMOVZXAcceptable and (taicpu(p).oper[0]^.typ = top_ref) then
- begin
- if GetNextInstruction(p, hp1) and
- (tai(hp1).typ = ait_instruction) and
- (taicpu(hp1).opcode = A_AND) and
- MatchOpType(taicpu(hp1),top_const,top_reg) and
- (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[1]^.reg) then
- begin
- //taicpu(p).opcode := A_MOV;
- case taicpu(p).opsize Of
- S_BL:
- begin
- DebugMsg(SPeepholeOptimization + 'var13',p);
- taicpu(hp1).changeopsize(S_L);
- taicpu(hp1).loadConst(0,taicpu(hp1).oper[0]^.val and $ff);
- end;
- S_WL:
- begin
- DebugMsg(SPeepholeOptimization + 'var14',p);
- taicpu(hp1).changeopsize(S_L);
- taicpu(hp1).loadConst(0,taicpu(hp1).oper[0]^.val and $ffff);
- end;
- S_BW:
- begin
- DebugMsg(SPeepholeOptimization + 'var15',p);
- taicpu(hp1).changeopsize(S_W);
- taicpu(hp1).loadConst(0,taicpu(hp1).oper[0]^.val and $ff);
- end;
- else
- Internalerror(2017050704)
- end;
- Result := True;
- end;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass1AND(var p : tai) : boolean;
- var
- hp1, hp2 : tai;
- MaskLength : Cardinal;
- MaskedBits : TCgInt;
- ActiveReg : TRegister;
- begin
- Result:=false;
- { There are no optimisations for reference targets }
- if (taicpu(p).oper[1]^.typ <> top_reg) then
- Exit;
- while GetNextInstruction(p, hp1) and
- (hp1.typ = ait_instruction) do
- begin
- if (taicpu(p).oper[0]^.typ = top_const) then
- begin
- case taicpu(hp1).opcode of
- A_AND:
- if MatchOpType(taicpu(hp1),top_const,top_reg) and
- (getsupreg(taicpu(p).oper[1]^.reg) = getsupreg(taicpu(hp1).oper[1]^.reg)) and
- { the second register must contain the first one, so compare their subreg types }
- (getsubreg(taicpu(p).oper[1]^.reg)<=getsubreg(taicpu(hp1).oper[1]^.reg)) and
- (abs(taicpu(p).oper[0]^.val and taicpu(hp1).oper[0]^.val)<$80000000) then
- { change
- and const1, reg
- and const2, reg
- to
- and (const1 and const2), reg
- }
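- { e.g. (illustrative constants): andl $0xff00,%eax; andl $0x0ff0,%eax -> andl $0x0f00,%eax }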
- begin
- taicpu(hp1).loadConst(0, taicpu(p).oper[0]^.val and taicpu(hp1).oper[0]^.val);
- DebugMsg(SPeepholeOptimization + 'AndAnd2And done',hp1);
- RemoveCurrentP(p, hp1);
- Result:=true;
- exit;
- end;
- A_CMP:
- if (PopCnt(DWord(taicpu(p).oper[0]^.val)) = 1) and { Only 1 bit set }
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.val) and
- MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[1]^.reg) and
- { Just check that the condition on the next instruction is compatible }
- GetNextInstruction(hp1, hp2) and
- (hp2.typ = ait_instruction) and
- (taicpu(hp2).condition in [C_Z, C_E, C_NZ, C_NE])
- then
- { change
- and 2^n, reg
- cmp 2^n, reg
- j(c) / set(c) / cmov(c) (c is equal or not equal)
- to
- and 2^n, reg
- test reg, reg
- j(~c) / set(~c) / cmov(~c)
- }
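- { Illustrative example (register, constant and label are arbitrary):
- andl $4,%eax; cmpl $4,%eax; je .L1
- becomes
- andl $4,%eax; testl %eax,%eax; jne .L1 }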
- begin
- { Keep TEST instruction in, rather than remove it, because
- it may trigger other optimisations such as MovAndTest2Test }
- taicpu(hp1).loadreg(0, taicpu(hp1).oper[1]^.reg);
- taicpu(hp1).opcode := A_TEST;
- DebugMsg(SPeepholeOptimization + 'AND/CMP/J(c) -> AND/J(~c) with power of 2 constant', p);
- taicpu(hp2).condition := inverse_cond(taicpu(hp2).condition);
- Result := True;
- Exit;
- end;
- A_MOVZX:
- if MatchOpType(taicpu(hp1),top_reg,top_reg) and
- SuperRegistersEqual(taicpu(p).oper[1]^.reg,taicpu(hp1).oper[1]^.reg) and
- (getsupreg(taicpu(hp1).oper[0]^.reg)=getsupreg(taicpu(hp1).oper[1]^.reg)) and
- (
- (
- (taicpu(p).opsize=S_W) and
- (taicpu(hp1).opsize=S_BW)
- ) or
- (
- (taicpu(p).opsize=S_L) and
- (taicpu(hp1).opsize in [S_WL,S_BL{$ifdef x86_64},S_BQ,S_WQ{$endif x86_64}])
- )
- {$ifdef x86_64}
- or
- (
- (taicpu(p).opsize=S_Q) and
- (taicpu(hp1).opsize in [S_BQ,S_WQ,S_BL,S_WL])
- )
- {$endif x86_64}
- ) then
- begin
- if (((taicpu(hp1).opsize) in [S_BW,S_BL{$ifdef x86_64},S_BQ{$endif x86_64}]) and
- ((taicpu(p).oper[0]^.val and $ff)=taicpu(p).oper[0]^.val)
- ) or
- (((taicpu(hp1).opsize) in [S_WL{$ifdef x86_64},S_WQ{$endif x86_64}]) and
- ((taicpu(p).oper[0]^.val and $ffff)=taicpu(p).oper[0]^.val))
- then
- begin
- { Unlike MOVSX, MOVZX doesn't actually have a version that zero-extends a
- 32-bit register to a 64-bit register, or even a version called MOVZXD, so
- code that tests for the presence of AND 0xffffffff followed by MOVZX is
- wasted, and is indicative of a compiler bug if it were triggered. [Kit]
- NOTE: To zero-extend from 32 bits to 64 bits, simply use the standard MOV.
- }
- DebugMsg(SPeepholeOptimization + 'AndMovzToAnd done',p);
- RemoveInstruction(hp1);
- { See if there are other optimisations possible }
- Continue;
- end;
- end;
- A_SHL:
- if MatchOpType(taicpu(hp1),top_const,top_reg) and
- (getsupreg(taicpu(p).oper[1]^.reg)=getsupreg(taicpu(hp1).oper[1]^.reg)) then
- begin
- {$ifopt R+}
- {$define RANGE_WAS_ON}
- {$R-}
- {$endif}
- { get length of potential and mask }
- MaskLength:=SizeOf(taicpu(p).oper[0]^.val)*8-BsrQWord(taicpu(p).oper[0]^.val)-1;
- { really a mask? }
- {$ifdef RANGE_WAS_ON}
- {$R+}
- {$endif}
- if (((QWord(1) shl MaskLength)-1)=taicpu(p).oper[0]^.val) and
- { unmasked part shifted out? }
- ((MaskLength+taicpu(hp1).oper[0]^.val)>=topsize2memsize[taicpu(hp1).opsize]) then
- begin
- DebugMsg(SPeepholeOptimization + 'AndShlToShl done',p);
- RemoveCurrentP(p, hp1);
- Result:=true;
- exit;
- end;
- end;
- A_SHR:
- if MatchOpType(taicpu(hp1),top_const,top_reg) and
- (taicpu(p).oper[1]^.reg = taicpu(hp1).oper[1]^.reg) and
- (taicpu(hp1).oper[0]^.val <= 63) then
- begin
- { Does SHR combined with the AND cover all the bits?
- e.g. for "andb $252,%reg; shrb $2,%reg" - the "and" can be removed }
- MaskedBits := taicpu(p).oper[0]^.val or ((TCgInt(1) shl taicpu(hp1).oper[0]^.val) - 1);
- if ((taicpu(p).opsize = S_B) and ((MaskedBits and $FF) = $FF)) or
- ((taicpu(p).opsize = S_W) and ((MaskedBits and $FFFF) = $FFFF)) or
- ((taicpu(p).opsize = S_L) and ((MaskedBits and $FFFFFFFF) = $FFFFFFFF)) then
- begin
- DebugMsg(SPeepholeOptimization + 'AndShrToShr done', p);
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end;
- end;
- A_MOVSX{$ifdef x86_64}, A_MOVSXD{$endif x86_64}:
- if (taicpu(hp1).oper[0]^.typ = top_reg) and
- SuperRegistersEqual(taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[1]^.reg) then
- begin
- if SuperRegistersEqual(taicpu(p).oper[1]^.reg, taicpu(hp1).oper[1]^.reg) and
- (
- (
- (taicpu(hp1).opsize in [S_BW,S_BL{$ifdef x86_64},S_BQ{$endif x86_64}]) and
- ((taicpu(p).oper[0]^.val and $7F) = taicpu(p).oper[0]^.val)
- ) or (
- (taicpu(hp1).opsize in [S_WL{$ifdef x86_64},S_WQ{$endif x86_64}]) and
- ((taicpu(p).oper[0]^.val and $7FFF) = taicpu(p).oper[0]^.val)
- {$ifdef x86_64}
- ) or (
- (taicpu(hp1).opsize = S_LQ) and
- ((taicpu(p).oper[0]^.val and $7fffffff) = taicpu(p).oper[0]^.val)
- {$endif x86_64}
- )
- ) then
- begin
- if (taicpu(p).oper[1]^.reg = taicpu(hp1).oper[1]^.reg){$ifdef x86_64} or (taicpu(hp1).opsize = S_LQ){$endif x86_64} then
- begin
- DebugMsg(SPeepholeOptimization + 'AndMovsxToAnd',p);
- RemoveInstruction(hp1);
- { See if there are other optimisations possible }
- Continue;
- end;
- { The super-registers are the same though.
- Note that this change by itself doesn't improve
- code speed, but it opens up other optimisations. }
- {$ifdef x86_64}
- { Convert 64-bit register to 32-bit }
- case taicpu(hp1).opsize of
- S_BQ:
- begin
- taicpu(hp1).opsize := S_BL;
- taicpu(hp1).oper[1]^.reg := newreg(R_INTREGISTER, getsupreg(taicpu(hp1).oper[1]^.reg), R_SUBD);
- end;
- S_WQ:
- begin
- taicpu(hp1).opsize := S_WL;
- taicpu(hp1).oper[1]^.reg := newreg(R_INTREGISTER, getsupreg(taicpu(hp1).oper[1]^.reg), R_SUBD);
- end
- else
- ;
- end;
- {$endif x86_64}
- DebugMsg(SPeepholeOptimization + 'AndMovsxToAndMovzx', hp1);
- taicpu(hp1).opcode := A_MOVZX;
- { See if there are other optimisations possible }
- Continue;
- end;
- end;
- else
- ;
- end;
- end
- else if MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^.reg) and
- not RegInUsedRegs(NR_DEFAULTFLAGS, UsedRegs) then
- begin
- {$ifdef x86_64}
- if (taicpu(p).opsize = S_Q) then
- begin
- { Never necessary }
- DebugMsg(SPeepholeOptimization + 'Andq2Nop', p);
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end;
- {$endif x86_64}
- { Forward check to determine necessity of and %reg,%reg }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- { Saves on a bunch of dereferences }
- ActiveReg := taicpu(p).oper[1]^.reg;
- case taicpu(hp1).opcode of
- A_MOV, A_MOVZX, A_MOVSX{$ifdef x86_64}, A_MOVSXD{$endif x86_64}:
- if (
- (taicpu(hp1).oper[0]^.typ <> top_ref) or
- not RegInRef(ActiveReg, taicpu(hp1).oper[0]^.ref^)
- ) and
- (
- (taicpu(hp1).opcode <> A_MOV) or
- (taicpu(hp1).oper[1]^.typ <> top_ref) or
- not RegInRef(ActiveReg, taicpu(hp1).oper[1]^.ref^)
- ) and
- not (
- { If mov %reg,%reg is present, remove that instruction instead in OptPass1MOV }
- (taicpu(hp1).opcode = A_MOV) and
- MatchOperand(taicpu(hp1).oper[0]^, ActiveReg) and
- MatchOperand(taicpu(hp1).oper[1]^, ActiveReg)
- ) and
- (
- (
- (taicpu(hp1).oper[0]^.typ = top_reg) and
- (taicpu(hp1).oper[0]^.reg = ActiveReg) and
- SuperRegistersEqual(taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[1]^.reg)
- ) or
- (
- {$ifdef x86_64}
- (
- { If we read from the register, make sure it's not dependent on the upper 32 bits }
- (taicpu(hp1).oper[0]^.typ <> top_reg) or
- not SuperRegistersEqual(taicpu(hp1).oper[0]^.reg, ActiveReg) or
- (GetSubReg(taicpu(hp1).oper[0]^.reg) <> R_SUBQ)
- ) and
- {$endif x86_64}
- not RegUsedAfterInstruction(ActiveReg, hp1, TmpUsedRegs)
- )
- ) then
- begin
- DebugMsg(SPeepholeOptimization + 'AndMovx2Movx', p);
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end;
- A_ADD,
- A_AND,
- A_BSF,
- A_BSR,
- A_BTC,
- A_BTR,
- A_BTS,
- A_OR,
- A_SUB,
- A_XOR:
- { Register is written to, so this will clear the upper 32 bits (2-operand instructions) }
- if (
- (taicpu(hp1).oper[0]^.typ <> top_ref) or
- not RegInRef(ActiveReg, taicpu(hp1).oper[0]^.ref^)
- ) and
- MatchOperand(taicpu(hp1).oper[1]^, ActiveReg) then
- begin
- DebugMsg(SPeepholeOptimization + 'AndOp2Op 2', p);
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end;
- A_CMP,
- A_TEST:
- if (
- (taicpu(hp1).oper[0]^.typ <> top_ref) or
- not RegInRef(ActiveReg, taicpu(hp1).oper[0]^.ref^)
- ) and
- MatchOperand(taicpu(hp1).oper[1]^, ActiveReg) and
- not RegUsedAfterInstruction(ActiveReg, hp1, TmpUsedRegs) then
- begin
- DebugMsg(SPeepholeOptimization + 'AND; CMP/TEST -> CMP/TEST', p);
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end;
- A_BSWAP,
- A_NEG,
- A_NOT:
- { Register is written to, so this will clear the upper 32 bits (1-operand instructions) }
- if MatchOperand(taicpu(hp1).oper[0]^, ActiveReg) then
- begin
- DebugMsg(SPeepholeOptimization + 'AndOp2Op 1', p);
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end;
- else
- ;
- end;
- end;
- if (taicpu(hp1).is_jmp) and
- (taicpu(hp1).opcode<>A_JMP) and
- not(RegInUsedRegs(taicpu(p).oper[1]^.reg,UsedRegs)) then
- begin
- { change
- and x, reg
- jxx
- to
- test x, reg
- jxx
- if reg is deallocated before the
- jump, but only if it's a conditional jump (PFV)
- }
- taicpu(p).opcode := A_TEST;
- Exit;
- end;
- Break;
- end;
- { Lone AND tests }
- if (taicpu(p).oper[0]^.typ = top_const) then
- begin
- {
- - Convert and $0xFF,reg to and reg,reg if reg is 8-bit
- - Convert and $0xFFFF,reg to and reg,reg if reg is 16-bit
- - Convert and $0xFFFFFFFF,reg to and reg,reg if reg is 32-bit
- }
- if ((taicpu(p).oper[0]^.val = $FF) and (taicpu(p).opsize = S_B)) or
- ((taicpu(p).oper[0]^.val = $FFFF) and (taicpu(p).opsize = S_W)) or
- ((taicpu(p).oper[0]^.val = $FFFFFFFF) and (taicpu(p).opsize = S_L)) then
- begin
- taicpu(p).loadreg(0, taicpu(p).oper[1]^.reg);
- if taicpu(p).opsize = S_L then
- begin
- Include(OptsToCheck,aoc_MovAnd2Mov_3);
- Result := True;
- end;
- end;
- end;
- { Backward check to determine necessity of and %reg,%reg }
- if (taicpu(p).oper[0]^.typ = top_reg) and
- (taicpu(p).oper[0]^.reg = taicpu(p).oper[1]^.reg) and
- not RegInUsedRegs(NR_DEFAULTFLAGS, UsedRegs) and
- GetLastInstruction(p, hp2) and
- RegModifiedByInstruction(taicpu(p).oper[1]^.reg, hp2) and
- { Check size of adjacent instruction to determine if the AND is
- effectively a null operation }
- (
- (taicpu(p).opsize = taicpu(hp2).opsize) or
- { Note: Don't include S_Q }
- ((taicpu(p).opsize = S_L) and (taicpu(hp2).opsize in [S_BL, S_WL])) or
- ((taicpu(p).opsize = S_W) and (taicpu(hp2).opsize in [S_BW, S_BL, S_WL, S_L])) or
- ((taicpu(p).opsize = S_B) and (taicpu(hp2).opsize in [S_BW, S_BL, S_WL, S_W, S_L]))
- ) then
- begin
- DebugMsg(SPeepholeOptimization + 'And2Nop', p);
- { If GetNextInstruction returned False, hp1 will be nil }
- RemoveCurrentP(p, hp1);
- Result := True;
- Exit;
- end;
- end;
- function TX86AsmOptimizer.OptPass2ADD(var p : tai) : boolean;
- var
- hp1: tai; NewRef: TReference;
- { This entire nested function is used in an if-statement below, but we
- want to avoid all the used reg transfers and GetNextInstruction calls
- until we really have to check }
- function MemRegisterNotUsedLater: Boolean; inline;
- var
- hp2: tai;
- begin
- TransferUsedRegs(TmpUsedRegs);
- hp2 := p;
- repeat
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.Next));
- until not GetNextInstruction(hp2, hp2) or (hp2 = hp1);
- Result := not RegUsedAfterInstruction(taicpu(p).oper[1]^.reg, hp1, TmpUsedRegs);
- end;
- begin
- Result := False;
- if not GetNextInstruction(p, hp1) or (hp1.typ <> ait_instruction) then
- Exit;
- if (taicpu(p).opsize in [S_L{$ifdef x86_64}, S_Q{$endif}]) then
- begin
- { Change:
- add %reg2,%reg1
- mov/s/z #(%reg1),%reg1 (%reg1 superregisters must be the same)
- To:
- mov/s/z #(%reg1,%reg2),%reg1
- }
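- { Illustrative example (registers are arbitrary):
- addq %rdx,%rax; movzbl (%rax),%eax -> movzbl (%rax,%rdx),%eax }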
- if MatchOpType(taicpu(p), top_reg, top_reg) and
- MatchInstruction(hp1, [A_MOV, A_MOVZX, A_MOVSX{$ifdef x86_64}, A_MOVSXD{$endif}], []) and
- MatchOpType(taicpu(hp1), top_ref, top_reg) and
- (taicpu(hp1).oper[0]^.ref^.scalefactor <= 1) and
- (
- (
- (taicpu(hp1).oper[0]^.ref^.base = taicpu(p).oper[1]^.reg) and
- (taicpu(hp1).oper[0]^.ref^.index = NR_NO) and
- { r/esp cannot be an index }
- (taicpu(p).oper[0]^.reg<>NR_STACK_POINTER_REG)
- ) or (
- (taicpu(hp1).oper[0]^.ref^.index = taicpu(p).oper[1]^.reg) and
- (taicpu(hp1).oper[0]^.ref^.base = NR_NO)
- )
- ) and (
- Reg1WriteOverwritesReg2Entirely(taicpu(p).oper[1]^.reg, taicpu(hp1).oper[1]^.reg) or
- (
- { If the super registers ARE equal, then this MOV/S/Z does a partial write }
- not SuperRegistersEqual(taicpu(p).oper[1]^.reg, taicpu(hp1).oper[1]^.reg) and
- MemRegisterNotUsedLater
- )
- ) then
- begin
- taicpu(hp1).oper[0]^.ref^.base := taicpu(p).oper[1]^.reg;
- taicpu(hp1).oper[0]^.ref^.index := taicpu(p).oper[0]^.reg;
- DebugMsg(SPeepholeOptimization + 'AddMov2Mov done', p);
- RemoveCurrentp(p, hp1);
- Result := True;
- Exit;
- end;
- { Change:
- addl/q $x,%reg1
- movl/q %reg1,%reg2
- To:
- leal/q $x(%reg1),%reg2
- addl/q $x,%reg1 (can be removed if %reg1 or the flags are not used afterwards)
- Breaks the dependency chain.
- }
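- { Illustrative example (registers and constant are arbitrary):
- addl $8,%eax; movl %eax,%edx -> leal 8(%eax),%edx; addl $8,%eax
- where the trailing add can then be removed if %eax and the flags are not used afterwards }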
- if MatchOpType(taicpu(p),top_const,top_reg) and
- MatchInstruction(hp1, A_MOV, [taicpu(p).opsize]) and
- (taicpu(hp1).oper[1]^.typ = top_reg) and
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^.reg) and
- (
- { Don't do AddMov2LeaAdd under -Os, but do allow AddMov2Lea }
- not (cs_opt_size in current_settings.optimizerswitches) or
- (
- not RegUsedAfterInstruction(taicpu(p).oper[1]^.reg, hp1, TmpUsedRegs) and
- RegUsedAfterInstruction(NR_DEFAULTFLAGS, hp1, TmpUsedRegs)
- )
- ) then
- begin
- { Change the MOV instruction to a LEA instruction, and update the
- first operand }
- reference_reset(NewRef, 1, []);
- NewRef.base := taicpu(p).oper[1]^.reg;
- NewRef.scalefactor := 1;
- NewRef.offset := asizeint(taicpu(p).oper[0]^.val);
- taicpu(hp1).opcode := A_LEA;
- taicpu(hp1).loadref(0, NewRef);
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- if RegUsedAfterInstruction(NewRef.base, hp1, TmpUsedRegs) or
- RegUsedAfterInstruction(NR_DEFAULTFLAGS, hp1, TmpUsedRegs) then
- begin
- { Move what is now the LEA instruction to before the SUB instruction }
- Asml.Remove(hp1);
- Asml.InsertBefore(hp1, p);
- AllocRegBetween(taicpu(hp1).oper[1]^.reg, hp1, p, UsedRegs);
- DebugMsg(SPeepholeOptimization + 'AddMov2LeaAdd', p);
- p := hp1;
- end
- else
- begin
- { Since %reg1 or the flags aren't used afterwards, we can delete p completely }
- RemoveCurrentP(p, hp1);
- DebugMsg(SPeepholeOptimization + 'AddMov2Lea', p);
- end;
- Result := True;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass2Lea(var p : tai) : Boolean;
- var
- SubReg: TSubRegister;
- begin
- Result:=false;
- SubReg := getsubreg(taicpu(p).oper[1]^.reg);
- if not (RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
- with taicpu(p).oper[0]^.ref^ do
- if (offset = 0) and not Assigned(symbol) and not Assigned(relsymbol) and (index <> NR_NO) then
- begin
- if (scalefactor <= 1) and SuperRegistersEqual(base, taicpu(p).oper[1]^.reg) then
- begin
- taicpu(p).loadreg(0, newreg(R_INTREGISTER, getsupreg(index), SubReg));
- taicpu(p).opcode := A_ADD;
- DebugMsg(SPeepholeOptimization + 'Lea2AddBase done',p);
- Result := True;
- end
- else if SuperRegistersEqual(index, taicpu(p).oper[1]^.reg) then
- begin
- if (base <> NR_NO) then
- begin
- if (scalefactor <= 1) then
- begin
- taicpu(p).loadreg(0, newreg(R_INTREGISTER, getsupreg(base), SubReg));
- taicpu(p).opcode := A_ADD;
- DebugMsg(SPeepholeOptimization + 'Lea2AddIndex done',p);
- Result := True;
- end;
- end
- else
- { Convert lea (%reg,2^x),%reg to shl x,%reg }
- if (scalefactor in [2, 4, 8]) then
- begin
- { BsrByte is, in essence, the base-2 logarithm of the scale factor }
- taicpu(p).loadconst(0, BsrByte(scalefactor));
- taicpu(p).opcode := A_SHL;
- DebugMsg(SPeepholeOptimization + 'Lea2Shl done',p);
- Result := True;
- end;
- end;
- end;
- end;
- function TX86AsmOptimizer.OptPass2SUB(var p: tai): Boolean;
- var
- hp1: tai; NewRef: TReference;
- begin
- { Change:
- subl/q $x,%reg1
- movl/q %reg1,%reg2
- To:
- leal/q $-x(%reg1),%reg2
- subl/q $x,%reg1 (can be removed if %reg1 or the flags are not used afterwards)
- Breaks the dependency chain and potentially permits the removal of
- a CMP instruction if one follows.
- }
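- { Illustrative example (registers and constant are arbitrary):
- subl $4,%eax; movl %eax,%edx -> leal -4(%eax),%edx; subl $4,%eax }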
- Result := False;
- if (taicpu(p).opsize in [S_L{$ifdef x86_64}, S_Q{$endif x86_64}]) and
- MatchOpType(taicpu(p),top_const,top_reg) and
- GetNextInstruction(p, hp1) and
- MatchInstruction(hp1, A_MOV, [taicpu(p).opsize]) and
- (taicpu(hp1).oper[1]^.typ = top_reg) and
- MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^.reg) and
- (
- { Don't do SubMov2LeaSub under -Os, but do allow SubMov2Lea }
- not (cs_opt_size in current_settings.optimizerswitches) or
- (
- not RegUsedAfterInstruction(taicpu(p).oper[1]^.reg, hp1, TmpUsedRegs) and
- RegUsedAfterInstruction(NR_DEFAULTFLAGS, hp1, TmpUsedRegs)
- )
- ) then
- begin
- { Change the MOV instruction to a LEA instruction, and update the
- first operand }
- reference_reset(NewRef, 1, []);
- NewRef.base := taicpu(p).oper[1]^.reg;
- NewRef.scalefactor := 1;
- NewRef.offset := -taicpu(p).oper[0]^.val;
- taicpu(hp1).opcode := A_LEA;
- taicpu(hp1).loadref(0, NewRef);
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- if RegUsedAfterInstruction(NewRef.base, hp1, TmpUsedRegs) or
- RegUsedAfterInstruction(NR_DEFAULTFLAGS, hp1, TmpUsedRegs) then
- begin
- { Move what is now the LEA instruction to before the SUB instruction }
- Asml.Remove(hp1);
- Asml.InsertBefore(hp1, p);
- AllocRegBetween(taicpu(hp1).oper[1]^.reg, hp1, p, UsedRegs);
- DebugMsg(SPeepholeOptimization + 'SubMov2LeaSub', p);
- p := hp1;
- end
- else
- begin
- { Since %reg1 or the flags aren't used afterwards, we can delete p completely }
- RemoveCurrentP(p, hp1);
- DebugMsg(SPeepholeOptimization + 'SubMov2Lea', p);
- end;
- Result := True;
- end;
- end;
- function TX86AsmOptimizer.SkipSimpleInstructions(var hp1 : tai) : Boolean;
- begin
- { we can skip all instructions not messing with the stack pointer }
- while assigned(hp1) and {MatchInstruction(hp1,[A_LEA,A_MOV,A_MOVQ,A_MOVSQ,A_MOVSX,A_MOVSXD,A_MOVZX,
- A_AND,A_OR,A_XOR,A_ADD,A_SHR,A_SHL,A_IMUL,A_SETcc,A_SAR,A_SUB,A_TEST,A_CMOVcc,
- A_MOVSS,A_MOVSD,A_MOVAPS,A_MOVUPD,A_MOVAPD,A_MOVUPS,
- A_VMOVSS,A_VMOVSD,A_VMOVAPS,A_VMOVUPD,A_VMOVAPD,A_VMOVUPS],[]) and}
- ({(taicpu(hp1).ops=0) or }
- ({(MatchOpType(taicpu(hp1),top_reg,top_reg) or MatchOpType(taicpu(hp1),top_const,top_reg) or
- (MatchOpType(taicpu(hp1),top_ref,top_reg))
- ) and }
- not(RegInInstruction(NR_STACK_POINTER_REG,hp1)) { and not(RegInInstruction(NR_FRAME_POINTER_REG,hp1))}
- )
- ) do
- GetNextInstruction(hp1,hp1);
- Result:=assigned(hp1);
- end;
- function TX86AsmOptimizer.PostPeepholeOptLea(var p : tai) : Boolean;
- var
- hp1, hp2, hp3, hp4, hp5: tai;
- begin
- Result:=false;
- hp5:=nil;
- { replace
- leal(q) x(<stackpointer>),<stackpointer>
- call procname
- leal(q) -x(<stackpointer>),<stackpointer>
- ret
- by
- jmp procname
- but do it only on level 4 because it destroys stack back traces
- }
- if (cs_opt_level4 in current_settings.optimizerswitches) and
- MatchOpType(taicpu(p),top_ref,top_reg) and
- (taicpu(p).oper[0]^.ref^.base=NR_STACK_POINTER_REG) and
- (taicpu(p).oper[0]^.ref^.index=NR_NO) and
- { the -8 or -24 checks are not required, but they allow an early bail-out;
- higher values are unlikely }
- ((taicpu(p).oper[0]^.ref^.offset=-8) or
- (taicpu(p).oper[0]^.ref^.offset=-24)) and
- (taicpu(p).oper[0]^.ref^.symbol=nil) and
- (taicpu(p).oper[0]^.ref^.relsymbol=nil) and
- (taicpu(p).oper[0]^.ref^.segment=NR_NO) and
- (taicpu(p).oper[1]^.reg=NR_STACK_POINTER_REG) and
- GetNextInstruction(p, hp1) and
- { Take a copy of hp1 }
- SetAndTest(hp1, hp4) and
- { trick to skip label }
- ((hp1.typ=ait_instruction) or GetNextInstruction(hp1, hp1)) and
- SkipSimpleInstructions(hp1) and
- MatchInstruction(hp1,A_CALL,[S_NO]) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2,A_LEA,[taicpu(p).opsize]) and
- MatchOpType(taicpu(hp2),top_ref,top_reg) and
- (taicpu(hp2).oper[0]^.ref^.offset=-taicpu(p).oper[0]^.ref^.offset) and
- (taicpu(hp2).oper[0]^.ref^.base=NR_STACK_POINTER_REG) and
- (taicpu(hp2).oper[0]^.ref^.index=NR_NO) and
- (taicpu(hp2).oper[0]^.ref^.symbol=nil) and
- (taicpu(hp2).oper[0]^.ref^.relsymbol=nil) and
- (taicpu(hp2).oper[0]^.ref^.segment=NR_NO) and
- (taicpu(hp2).oper[1]^.reg=NR_STACK_POINTER_REG) and
- GetNextInstruction(hp2, hp3) and
- { trick to skip label }
- ((hp3.typ=ait_instruction) or GetNextInstruction(hp3, hp3)) and
- (MatchInstruction(hp3,A_RET,[S_NO]) or
- (MatchInstruction(hp3,A_VZEROUPPER,[S_NO]) and
- SetAndTest(hp3,hp5) and
- GetNextInstruction(hp3,hp3) and
- MatchInstruction(hp3,A_RET,[S_NO])
- )
- ) and
- (taicpu(hp3).ops=0) then
- begin
- taicpu(hp1).opcode := A_JMP;
- taicpu(hp1).is_jmp := true;
- DebugMsg(SPeepholeOptimization + 'LeaCallLeaRet2Jmp done',p);
- RemoveCurrentP(p, hp4);
- RemoveInstruction(hp2);
- RemoveInstruction(hp3);
- if Assigned(hp5) then
- begin
- AsmL.Remove(hp5);
- ASmL.InsertBefore(hp5,hp1)
- end;
- Result:=true;
- end;
- end;
- function TX86AsmOptimizer.PostPeepholeOptPush(var p : tai) : Boolean;
- {$ifdef x86_64}
- var
- hp1, hp2, hp3, hp4, hp5: tai;
- {$endif x86_64}
- begin
- Result:=false;
- {$ifdef x86_64}
- hp5:=nil;
- { replace
- push %rax
- call procname
- pop %rcx
- ret
- by
- jmp procname
- but do it only on level 4 because it destroys stack back traces
- It depends on the fact that the sequence push rax/pop rcx is used for stack alignment, as rcx is volatile
- for all supported calling conventions
- }
- if (cs_opt_level4 in current_settings.optimizerswitches) and
- MatchOpType(taicpu(p),top_reg) and
- (taicpu(p).oper[0]^.reg=NR_RAX) and
- GetNextInstruction(p, hp1) and
- { Take a copy of hp1 }
- SetAndTest(hp1, hp4) and
- { trick to skip label }
- ((hp1.typ=ait_instruction) or GetNextInstruction(hp1, hp1)) and
- SkipSimpleInstructions(hp1) and
- MatchInstruction(hp1,A_CALL,[S_NO]) and
- GetNextInstruction(hp1, hp2) and
- MatchInstruction(hp2,A_POP,[taicpu(p).opsize]) and
- MatchOpType(taicpu(hp2),top_reg) and
- (taicpu(hp2).oper[0]^.reg=NR_RCX) and
- GetNextInstruction(hp2, hp3) and
- { trick to skip label }
- ((hp3.typ=ait_instruction) or GetNextInstruction(hp3, hp3)) and
- (MatchInstruction(hp3,A_RET,[S_NO]) or
- (MatchInstruction(hp3,A_VZEROUPPER,[S_NO]) and
- SetAndTest(hp3,hp5) and
- GetNextInstruction(hp3,hp3) and
- MatchInstruction(hp3,A_RET,[S_NO])
- )
- ) and
- (taicpu(hp3).ops=0) then
- begin
- taicpu(hp1).opcode := A_JMP;
- taicpu(hp1).is_jmp := true;
- DebugMsg(SPeepholeOptimization + 'PushCallPushRet2Jmp done',p);
- RemoveCurrentP(p, hp4);
- RemoveInstruction(hp2);
- RemoveInstruction(hp3);
- if Assigned(hp5) then
- begin
- AsmL.Remove(hp5);
- ASmL.InsertBefore(hp5,hp1)
- end;
- Result:=true;
- end;
- {$endif x86_64}
- end;
- function TX86AsmOptimizer.PostPeepholeOptMov(var p : tai) : Boolean;
- var
- Value, RegName: string;
- begin
- Result:=false;
- if (taicpu(p).oper[1]^.typ = top_reg) and (taicpu(p).oper[0]^.typ = top_const) then
- begin
- case taicpu(p).oper[0]^.val of
- 0:
- { Don't make this optimisation if the CPU flags are required, since XOR scrambles them }
- if not (RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
- begin
- { change "mov $0,%reg" into "xor %reg,%reg" }
- taicpu(p).opcode := A_XOR;
- taicpu(p).loadReg(0,taicpu(p).oper[1]^.reg);
- Result := True;
- {$ifdef x86_64}
- end
- else if (taicpu(p).opsize = S_Q) then
- begin
- RegName := debug_regname(taicpu(p).oper[1]^.reg); { 64-bit register name }
- { The actual optimization }
- setsubreg(taicpu(p).oper[1]^.reg, R_SUBD);
- taicpu(p).changeopsize(S_L);
- DebugMsg(SPeepholeOptimization + 'movq $0,' + RegName + ' -> movl $0,' + debug_regname(taicpu(p).oper[1]^.reg) + ' (immediate can be represented with just 32 bits)', p);
- Result := True;
- end;
- $1..$FFFFFFFF:
- begin
- { Code size reduction by J. Gareth "Kit" Moreton }
- { change 64-bit register to 32-bit register to reduce code size (upper 32 bits will be set to zero) }
- case taicpu(p).opsize of
- S_Q:
- begin
- RegName := debug_regname(taicpu(p).oper[1]^.reg); { 64-bit register name }
- Value := debug_tostr(taicpu(p).oper[0]^.val);
- { The actual optimization }
- setsubreg(taicpu(p).oper[1]^.reg, R_SUBD);
- taicpu(p).changeopsize(S_L);
- DebugMsg(SPeepholeOptimization + 'movq $' + Value + ',' + RegName + ' -> movl $' + Value + ',' + debug_regname(taicpu(p).oper[1]^.reg) + ' (immediate can be represented with just 32 bits)', p);
- Result := True;
- end;
- else
- { Do nothing };
- end;
- {$endif x86_64}
- end;
- -1:
- { Don't make this optimisation if the CPU flags are required, since OR scrambles them }
- if (cs_opt_size in current_settings.optimizerswitches) and
- (taicpu(p).opsize <> S_B) and
- not (RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
- begin
- { change "mov $-1,%reg" into "or $-1,%reg" }
- { NOTES:
- - No size saving is made when changing a Word-sized assignment unless the register is AX (smaller encoding)
- - This operation creates a false dependency on the register, so only do it when optimising for size
- - It is possible to set memory operands using this method, but this creates an even greater false dependency, so don't do this at all
- }
- taicpu(p).opcode := A_OR;
- Result := True;
- end;
- else
- { Do nothing };
- end;
- end;
- end;
- function TX86AsmOptimizer.PostPeepholeOptAnd(var p : tai) : boolean;
- var
- hp1: tai;
- begin
- { Detect:
- andw x, %ax (0 <= x < $8000)
- ...
- movzwl %ax,%eax
- Change movzwl %ax,%eax to cwtl (shorter encoding for movswl %ax,%eax)
- }
- Result := False;
- if MatchOpType(taicpu(p), top_const, top_reg) and
- (taicpu(p).oper[1]^.reg = NR_AX) and { This is also enough to determine that opsize = S_W }
- ((taicpu(p).oper[0]^.val and $7FFF) = taicpu(p).oper[0]^.val) and
- GetNextInstructionUsingReg(p, hp1, NR_EAX) and
- MatchInstruction(hp1, A_MOVZX, [S_WL]) and
- MatchOperand(taicpu(hp1).oper[0]^, NR_AX) and
- MatchOperand(taicpu(hp1).oper[1]^, NR_EAX) then
- begin
- DebugMsg(SPeepholeOptimization + 'Converted movzwl %ax,%eax to cwtl (via AndMovz2AndCwtl)', hp1);
- taicpu(hp1).opcode := A_CWDE;
- taicpu(hp1).clearop(0);
- taicpu(hp1).clearop(1);
- taicpu(hp1).ops := 0;
- { A change was made, but not with p, so move forward 1 }
- p := tai(p.Next);
- Result := True;
- end;
- end;
- function TX86AsmOptimizer.PostPeepholeOptMOVSX(var p : tai) : boolean;
- begin
- Result := False;
- if not MatchOpType(taicpu(p), top_reg, top_reg) then
- Exit;
- { Convert:
- movswl %ax,%eax -> cwtl
- movslq %eax,%rax -> cdqe
- NOTE: Don't convert movsbw %al,%ax to cbw, because cbw and cwde
- refer to the same opcode and depends only on the assembler's
- current operand-size attribute. [Kit]
- }
- with taicpu(p) do
- case opsize of
- S_WL:
- if (oper[0]^.reg = NR_AX) and (oper[1]^.reg = NR_EAX) then
- begin
- DebugMsg(SPeepholeOptimization + 'Converted movswl %ax,%eax to cwtl', p);
- opcode := A_CWDE;
- clearop(0);
- clearop(1);
- ops := 0;
- Result := True;
- end;
- {$ifdef x86_64}
- S_LQ:
- if (oper[0]^.reg = NR_EAX) and (oper[1]^.reg = NR_RAX) then
- begin
- DebugMsg(SPeepholeOptimization + 'Converted movslq %eax,%rax to cltq', p);
- opcode := A_CDQE;
- clearop(0);
- clearop(1);
- ops := 0;
- Result := True;
- end;
- {$endif x86_64}
- else
- ;
- end;
- end;
- function TX86AsmOptimizer.PostPeepholeOptShr(var p : tai) : boolean;
- var
- hp1: tai;
- begin
- { Detect:
- shr x, %ax (x > 0)
- ...
- movzwl %ax,%eax
- Change movzwl %ax,%eax to cwtl (shorter encoding for movswl %ax,%eax)
- }
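- { Illustrative example: after "shrw $1,%ax", bit 15 of %ax is known to be
-   zero, so the 1-byte cwtl leaves the same value in %eax as the 3-byte movzwl. }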
- Result := False;
- if MatchOpType(taicpu(p), top_const, top_reg) and
- (taicpu(p).oper[1]^.reg = NR_AX) and { This is also enough to determine that opsize = S_W }
- (taicpu(p).oper[0]^.val > 0) and
- GetNextInstructionUsingReg(p, hp1, NR_EAX) and
- MatchInstruction(hp1, A_MOVZX, [S_WL]) and
- MatchOperand(taicpu(hp1).oper[0]^, NR_AX) and
- MatchOperand(taicpu(hp1).oper[1]^, NR_EAX) then
- begin
- DebugMsg(SPeepholeOptimization + 'Converted movzwl %ax,%eax to cwtl (via ShrMovz2ShrCwtl)', hp1);
- taicpu(hp1).opcode := A_CWDE;
- taicpu(hp1).clearop(0);
- taicpu(hp1).clearop(1);
- taicpu(hp1).ops := 0;
- { A change was made, but not with p, so move forward 1 }
- p := tai(p.Next);
- Result := True;
- end;
- end;
- function TX86AsmOptimizer.PostPeepholeOptADDSUB(var p : tai) : boolean;
- var
- hp1, hp2: tai;
- Opposite, SecondOpposite: TAsmOp;
- NewCond: TAsmCond;
- begin
- Result := False;
- { Change:
- add/sub 128,(dest)
- To:
- sub/add -128,(dest)
- This generally takes fewer bytes to encode because -128 can be stored
- in a signed byte, whereas +128 cannot.
- }
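- { Illustrative example (32-bit operand assumed):
-     addl $128,%ecx  = 81 C1 80 00 00 00 (6 bytes, 32-bit immediate)
-     subl $-128,%ecx = 83 E9 80          (3 bytes, sign-extended 8-bit immediate) }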
- if (taicpu(p).opsize <> S_B) and MatchOperand(taicpu(p).oper[0]^, 128) then
- begin
- if taicpu(p).opcode = A_ADD then
- Opposite := A_SUB
- else
- Opposite := A_ADD;
- { Be careful if the flags are in use, because the CF flag inverts
- when changing from ADD to SUB and vice versa }
- if RegInUsedRegs(NR_DEFAULTFLAGS, UsedRegs) and
- GetNextInstruction(p, hp1) then
- begin
- TransferUsedRegs(TmpUsedRegs);
- TmpUsedRegs[R_SPECIALREGISTER].Update(tai(p.Next), True);
- hp2 := hp1;
- { Scan ahead to check if everything's safe }
- while Assigned(hp1) and RegInUsedRegs(NR_DEFAULTFLAGS, TmpUsedRegs) do
- begin
- if (hp1.typ <> ait_instruction) then
- { Probably unsafe since the flags are still in use }
- Exit;
- if MatchInstruction(hp1, A_CALL, A_JMP, A_RET, []) then
- { Stop searching at a call, unconditional jump or return }
- Break;
- if not
- (
- MatchInstruction(hp1, A_ADC, A_SBB, []) and
- (taicpu(hp1).oper[0]^.typ = top_const) { We need to be able to invert a constant }
- ) and
- (taicpu(hp1).condition = C_None) and RegInInstruction(NR_DEFAULTFLAGS, hp1) then
- { Instruction depends on FLAGS (and is not ADC or SBB); break out }
- Exit;
- UpdateUsedRegs(TmpUsedRegs, tai(p.Next));
- TmpUsedRegs[R_SPECIALREGISTER].Update(tai(hp1.Next), True);
- { Move to the next instruction }
- GetNextInstruction(hp1, hp1);
- end;
- while Assigned(hp2) and (hp2 <> hp1) do
- begin
- NewCond := C_None;
- case taicpu(hp2).condition of
- C_A, C_NBE:
- NewCond := C_BE;
- C_B, C_C, C_NAE:
- NewCond := C_AE;
- C_AE, C_NB, C_NC:
- NewCond := C_B;
- C_BE, C_NA:
- NewCond := C_A;
- else
- { No change needed };
- end;
- if NewCond <> C_None then
- begin
- DebugMsg(SPeepholeOptimization + 'Condition changed from ' + cond2str[taicpu(hp2).condition] + ' to ' + cond2str[NewCond] +
- ' to accommodate ' + debug_op2str(taicpu(p).opcode) + ' -> ' + debug_op2str(Opposite) + ' above', hp2);
- taicpu(hp2).condition := NewCond;
- end
- else
- if MatchInstruction(hp2, A_ADC, A_SBB, []) then
- begin
- { Because of the flipping of the carry bit, to ensure
- the operation remains equivalent, ADC becomes SBB
- and vice versa, and the constant is replaced by its bitwise NOT.
- If multiple ADCs or SBBs appear in a row, each one
- changed causes the carry bit to invert, so they all
- need to be flipped }
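- { Why the bitwise NOT works: with the carry flag inverted (CF' = 1 - CF),
-     sbb $(not c),dst = dst - (not c) - CF' = dst - (-c - 1) - (1 - CF) = dst + c + CF
-   which is exactly what "adc $c,dst" computed with the original carry. }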
- if taicpu(hp2).opcode = A_ADC then
- SecondOpposite := A_SBB
- else
- SecondOpposite := A_ADC;
- if taicpu(hp2).oper[0]^.typ <> top_const then
- { Should have broken out of this optimisation already }
- InternalError(2021112901);
- DebugMsg(SPeepholeOptimization + debug_op2str(taicpu(hp2).opcode) + debug_opsize2str(taicpu(hp2).opsize) + ' $' + debug_tostr(taicpu(hp2).oper[0]^.val) + ',' + debug_operstr(taicpu(hp2).oper[1]^) + ' -> ' +
- debug_op2str(SecondOpposite) + debug_opsize2str(taicpu(hp2).opsize) + ' $' + debug_tostr(not taicpu(hp2).oper[0]^.val) + ',' + debug_operstr(taicpu(hp2).oper[1]^) + ' to accommodate inverted carry bit', hp2);
- { Bit-invert the constant (effectively equivalent to "-1 - val") }
- taicpu(hp2).opcode := SecondOpposite;
- taicpu(hp2).oper[0]^.val := not taicpu(hp2).oper[0]^.val;
- end;
- { Move to the next instruction }
- GetNextInstruction(hp2, hp2);
- end;
- if (hp2 <> hp1) then
- InternalError(2021111501);
- end;
- DebugMsg(SPeepholeOptimization + debug_op2str(taicpu(p).opcode) + debug_opsize2str(taicpu(p).opsize) + ' $128,' + debug_operstr(taicpu(p).oper[1]^) + ' changed to ' +
- debug_op2str(Opposite) + debug_opsize2str(taicpu(p).opsize) + ' $-128,' + debug_operstr(taicpu(p).oper[1]^) + ' to reduce instruction size', p);
- taicpu(p).opcode := Opposite;
- taicpu(p).oper[0]^.val := -128;
- { No further optimisations can be made on this instruction, so move
- onto the next one to save time }
- p := tai(p.Next);
- UpdateUsedRegs(p);
- Result := True;
- Exit;
- end;
- { Detect:
- add/sub %reg2,(dest)
- add/sub x, (dest)
- (dest can be a register or a reference)
- Swap the instructions to minimise a pipeline stall. This reverses the
- "Add swap" and "Sub swap" optimisations done in pass 1 if no new
- optimisations could be made.
- }
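- { Illustrative example (registers chosen arbitrarily):
-     addl %edx,%eax        addl $4,%eax
-     addl $4,%eax    -->   addl %edx,%eax
-   The final sum is unchanged; only the intermediate value (and hence the
-   intermediate flags) differs, which is why the optimisation is only done
-   when the FLAGS register is not in use. }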
- if (taicpu(p).oper[0]^.typ = top_reg) and
- not RegInOp(taicpu(p).oper[0]^.reg, taicpu(p).oper[1]^) and
- (
- (
- (taicpu(p).oper[1]^.typ = top_reg) and
- { We can try searching further ahead if we're writing to a register }
- GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[1]^.reg)
- ) or
- (
- (taicpu(p).oper[1]^.typ = top_ref) and
- GetNextInstruction(p, hp1)
- )
- ) and
- MatchInstruction(hp1, A_ADD, A_SUB, [taicpu(p).opsize]) and
- (taicpu(hp1).oper[0]^.typ = top_const) and
- MatchOperand(taicpu(p).oper[1]^, taicpu(hp1).oper[1]^) then
- begin
- { Make doubly sure the flags aren't in use because the order of additions may affect them }
- TransferUsedRegs(TmpUsedRegs);
- UpdateUsedRegs(TmpUsedRegs, tai(p.next));
- hp2 := p;
- while not (cs_opt_level3 in current_settings.optimizerswitches) and
- GetNextInstruction(hp2, hp2) and (hp2 <> hp1) do
- UpdateUsedRegs(TmpUsedRegs, tai(hp2.next));
- if not RegInUsedRegs(NR_DEFAULTFLAGS, TmpUsedRegs) then
- begin
- asml.remove(hp1);
- asml.InsertBefore(hp1, p);
- DebugMsg(SPeepholeOptimization + 'Add/Sub swap 2 done', hp1);
- Result := True;
- end;
- end;
- end;
- function TX86AsmOptimizer.PostPeepholeOptCmp(var p : tai) : Boolean;
- begin
- Result:=false;
- { change "cmp $0, %reg" to "test %reg, %reg" }
- if MatchOpType(taicpu(p),top_const,top_reg) and
- (taicpu(p).oper[0]^.val = 0) then
- begin
- taicpu(p).opcode := A_TEST;
- taicpu(p).loadreg(0,taicpu(p).oper[1]^.reg);
- Result:=true;
- end;
- end;
- function TX86AsmOptimizer.PostPeepholeOptTestOr(var p : tai) : Boolean;
- var
- IsTestConstX : Boolean;
- hp1,hp2 : tai;
- begin
- Result:=false;
- { removes the line marked with (x) from the sequence
- and/or/xor/add/sub/... $x, %y
- test/or %y, %y | test $-1, %y (x)
- j(n)z _Label
- as the first instruction already adjusts the ZF
- %y operand may also be a reference }
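- { Illustrative example:
-     andl $3,%eax          andl $3,%eax
-     testl %eax,%eax  -->  jz .Lskip
-     jz .Lskip
-   The and instruction already sets ZF according to its result, so the test is redundant. }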
- IsTestConstX:=(taicpu(p).opcode=A_TEST) and
- MatchOperand(taicpu(p).oper[0]^,-1);
- if (OpsEqual(taicpu(p).oper[0]^,taicpu(p).oper[1]^) or IsTestConstX) and
- GetLastInstruction(p, hp1) and
- (tai(hp1).typ = ait_instruction) and
- GetNextInstruction(p,hp2) and
- MatchInstruction(hp2,A_SETcc,A_Jcc,A_CMOVcc,[]) then
- case taicpu(hp1).opcode Of
- A_ADD, A_SUB, A_OR, A_XOR, A_AND,
- { These two instructions set the zero flag if the result is zero }
- A_POPCNT, A_LZCNT:
- begin
- if (
- { With POPCNT, an input of zero will set the zero flag
- because the population count of zero is zero }
- (taicpu(hp1).opcode = A_POPCNT) and
- (taicpu(hp2).condition in [C_Z,C_NZ,C_E,C_NE]) and
- (
- OpsEqual(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^) or
- { Faster than going through the second half of the 'or'
- condition below }
- OpsEqual(taicpu(hp1).oper[1]^, taicpu(p).oper[1]^)
- )
- ) or (
- OpsEqual(taicpu(hp1).oper[1]^, taicpu(p).oper[1]^) and
- { does not work in case of overflow for G(E)/L(E)/C_O/C_NO }
- { and in case of carry for A(E)/B(E)/C/NC }
- (
- (taicpu(hp2).condition in [C_Z,C_NZ,C_E,C_NE]) or
- (
- (taicpu(hp1).opcode <> A_ADD) and
- (taicpu(hp1).opcode <> A_SUB) and
- (taicpu(hp1).opcode <> A_LZCNT)
- )
- )
- ) then
- begin
- RemoveCurrentP(p, hp2);
- Result:=true;
- Exit;
- end;
- end;
- A_SHL, A_SAL, A_SHR, A_SAR:
- begin
- if OpsEqual(taicpu(hp1).oper[1]^,taicpu(p).oper[1]^) and
- { SHL/SAL/SHR/SAR with a value of 0 do not change the flags }
- { therefore, it's only safe to do this optimization for }
- { shifts by a (nonzero) constant }
- (taicpu(hp1).oper[0]^.typ = top_const) and
- (taicpu(hp1).oper[0]^.val <> 0) and
- { does not work in case of overflow for G(E)/L(E)/C_O/C_NO }
- { and in case of carry for A(E)/B(E)/C/NC }
- (taicpu(hp2).condition in [C_Z,C_NZ,C_E,C_NE]) then
- begin
- RemoveCurrentP(p, hp2);
- Result:=true;
- Exit;
- end;
- end;
- A_DEC, A_INC, A_NEG:
- begin
- if OpsEqual(taicpu(hp1).oper[0]^,taicpu(p).oper[1]^) and
- { does not work in case of overflow for G(E)/L(E)/C_O/C_NO }
- { and in case of carry for A(E)/B(E)/C/NC }
- (taicpu(hp2).condition in [C_Z,C_NZ,C_E,C_NE]) then
- begin
- RemoveCurrentP(p, hp2);
- Result:=true;
- Exit;
- end;
- end
- else
- ;
- end; { case }
- { change "test $-1,%reg" into "test %reg,%reg" }
- if IsTestConstX and (taicpu(p).oper[1]^.typ=top_reg) then
- taicpu(p).loadoper(0,taicpu(p).oper[1]^);
- { Change "or %reg,%reg" to "test %reg,%reg" as OR generates a false dependency }
- if MatchInstruction(p, A_OR, []) and
- { Can only match if they're both registers }
- MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) then
- begin
- DebugMsg(SPeepholeOptimization + 'or %reg,%reg -> test %reg,%reg to remove false dependency (Or2Test)', p);
- taicpu(p).opcode := A_TEST;
- { No need to set Result to True, as we've done all the optimisations we can }
- end;
- end;
- function TX86AsmOptimizer.PostPeepholeOptCall(var p : tai) : Boolean;
- var
- hp1,hp3 : tai;
- {$ifndef x86_64}
- hp2 : taicpu;
- {$endif x86_64}
- begin
- Result:=false;
- hp3:=nil;
- {$ifndef x86_64}
- { don't do this on modern CPUs, this really hurts them due to
- broken call/ret pairing }
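- { Illustrative transformation (only applied when targeting pre-Pentium II CPUs):
-     call foo        pushl $bar
-     jmp  bar  -->   jmp   foo
-   foo's ret now transfers directly to bar, but this defeats the
-   return-address prediction of newer CPUs. }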
- if (current_settings.optimizecputype < cpu_Pentium2) and
- not(cs_create_pic in current_settings.moduleswitches) and
- GetNextInstruction(p, hp1) and
- MatchInstruction(hp1,A_JMP,[S_NO]) and
- MatchOpType(taicpu(hp1),top_ref) and
- (taicpu(hp1).oper[0]^.ref^.refaddr=addr_full) then
- begin
- hp2 := taicpu.Op_sym(A_PUSH,S_L,taicpu(hp1).oper[0]^.ref^.symbol);
- InsertLLItem(p.previous, p, hp2);
- taicpu(p).opcode := A_JMP;
- taicpu(p).is_jmp := true;
- RemoveInstruction(hp1);
- Result:=true;
- end
- else
- {$endif x86_64}
- { replace
- call procname
- ret
- by
- jmp procname
- but only do it at optimisation level 4 because it destroys stack back traces;
- otherwise, if the subroutine is marked as no-return, just remove the ret
- }
- if ((cs_opt_level4 in current_settings.optimizerswitches) or
- (po_noreturn in current_procinfo.procdef.procoptions)) and
- GetNextInstruction(p, hp1) and
- (MatchInstruction(hp1,A_RET,[S_NO]) or
- (MatchInstruction(hp1,A_VZEROUPPER,[S_NO]) and
- SetAndTest(hp1,hp3) and
- GetNextInstruction(hp1,hp1) and
- MatchInstruction(hp1,A_RET,[S_NO])
- )
- ) and
- (taicpu(hp1).ops=0) then
- begin
- if (cs_opt_level4 in current_settings.optimizerswitches) and
- { we might destroy stack alignment here if we do not do a call }
- (target_info.stackalign<=sizeof(SizeUInt)) then
- begin
- taicpu(p).opcode := A_JMP;
- taicpu(p).is_jmp := true;
- DebugMsg(SPeepholeOptimization + 'CallRet2Jmp done',p);
- end
- else
- DebugMsg(SPeepholeOptimization + 'CallRet2Call done',p);
- RemoveInstruction(hp1);
- if Assigned(hp3) then
- begin
- AsmL.Remove(hp3);
- AsmL.InsertBefore(hp3,p)
- end;
- Result:=true;
- end;
- end;
- function TX86AsmOptimizer.PostPeepholeOptMovzx(var p : tai) : Boolean;
- function ConstInRange(const Val: TCGInt; const OpSize: TOpSize): Boolean;
- begin
- case OpSize of
- S_B, S_BW, S_BL{$ifdef x86_64}, S_BQ{$endif x86_64}:
- Result := (Val <= $FF) and (Val >= -128);
- S_W, S_WL{$ifdef x86_64}, S_WQ{$endif x86_64}:
- Result := (Val <= $FFFF) and (Val >= -32768);
- S_L{$ifdef x86_64}, S_LQ{$endif x86_64}:
- Result := (Val <= $FFFFFFFF) and (Val >= -2147483648);
- else
- Result := True;
- end;
- end;
- var
- hp1, hp2 : tai;
- SizeChange: Boolean;
- PreMessage: string;
- begin
- Result := False;
- if (taicpu(p).oper[0]^.typ = top_reg) and
- SuperRegistersEqual(taicpu(p).oper[0]^.reg, taicpu(p).oper[1]^.reg) and
- GetNextInstruction(p, hp1) and (hp1.typ = ait_instruction) then
- begin
- { Change (using movzbl %al,%eax as an example):
- movzbl %al, %eax movzbl %al, %eax
- cmpl x, %eax testl %eax,%eax
- To:
- cmpb x, %al testb %al, %al (Move one back to avoid a false dependency)
- movzbl %al, %eax movzbl %al, %eax
- Smaller instruction and minimises pipeline stall as the CPU
- doesn't have to wait for the register to get zero-extended. [Kit]
- Also allow if the smaller of the two registers is being checked,
- as this still removes the false dependency.
- }
- if
- (
- (
- (taicpu(hp1).opcode = A_CMP) and MatchOpType(taicpu(hp1), top_const, top_reg) and
- ConstInRange(taicpu(hp1).oper[0]^.val, taicpu(p).opsize)
- ) or (
- { If MatchOperand returns True, they must both be registers }
- (taicpu(hp1).opcode = A_TEST) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(hp1).oper[1]^)
- )
- ) and
- (reg2opsize(taicpu(hp1).oper[1]^.reg) <= reg2opsize(taicpu(p).oper[1]^.reg)) and
- SuperRegistersEqual(taicpu(p).oper[1]^.reg, taicpu(hp1).oper[1]^.reg) then
- begin
- PreMessage := debug_op2str(taicpu(hp1).opcode) + debug_opsize2str(taicpu(hp1).opsize) + ' ' + debug_operstr(taicpu(hp1).oper[0]^) + ',' + debug_regname(taicpu(hp1).oper[1]^.reg) + ' -> ' + debug_op2str(taicpu(hp1).opcode);
- asml.Remove(hp1);
- asml.InsertBefore(hp1, p);
- { Convert the comparison to a test in the case of cmp $0,%reg or test %reg,%reg }
- if (taicpu(hp1).opcode = A_TEST) or (taicpu(hp1).oper[0]^.val = 0) then
- begin
- taicpu(hp1).opcode := A_TEST;
- taicpu(hp1).loadreg(0, taicpu(p).oper[0]^.reg);
- end;
- taicpu(hp1).oper[1]^.reg := taicpu(p).oper[0]^.reg;
- case taicpu(p).opsize of
- S_BW, S_BL:
- begin
- SizeChange := taicpu(hp1).opsize <> S_B;
- taicpu(hp1).changeopsize(S_B);
- end;
- S_WL:
- begin
- SizeChange := taicpu(hp1).opsize <> S_W;
- taicpu(hp1).changeopsize(S_W);
- end
- else
- InternalError(2020112701);
- end;
- UpdateUsedRegs(tai(p.Next));
- { Check if the register is used afterwards - if not, we can
- remove the movzx instruction completely }
- if not RegUsedAfterInstruction(taicpu(hp1).oper[1]^.reg, p, UsedRegs) then
- begin
- { Hp1 is a better position than p for debugging purposes }
- DebugMsg(SPeepholeOptimization + 'Movzx2Nop 4a', hp1);
- RemoveCurrentp(p, hp1);
- Result := True;
- end;
- if SizeChange then
- DebugMsg(SPeepholeOptimization + PreMessage +
- debug_opsize2str(taicpu(hp1).opsize) + ' ' + debug_operstr(taicpu(hp1).oper[0]^) + ',' + debug_regname(taicpu(hp1).oper[1]^.reg) + ' (smaller and minimises pipeline stall - MovzxCmp2CmpMovzx)', hp1)
- else
- DebugMsg(SPeepholeOptimization + 'MovzxCmp2CmpMovzx', hp1);
- Exit;
- end;
- { Change (using movzwl %ax,%eax as an example):
- movzwl %ax, %eax
- movb %al, (dest) (Register is smaller than read register in movz)
- To:
- movb %al, (dest) (Move one back to avoid a false dependency)
- movzwl %ax, %eax
- }
- if (taicpu(hp1).opcode = A_MOV) and
- (taicpu(hp1).oper[0]^.typ = top_reg) and
- not RegInOp(taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[1]^) and
- SuperRegistersEqual(taicpu(hp1).oper[0]^.reg, taicpu(p).oper[0]^.reg) and
- (reg2opsize(taicpu(hp1).oper[0]^.reg) <= reg2opsize(taicpu(p).oper[0]^.reg)) then
- begin
- DebugMsg(SPeepholeOptimization + 'MovzxMov2MovMovzx', hp1);
- hp2 := tai(hp1.Previous); { Effectively the old position of hp1 }
- asml.Remove(hp1);
- asml.InsertBefore(hp1, p);
- if taicpu(hp1).oper[1]^.typ = top_reg then
- AllocRegBetween(taicpu(hp1).oper[1]^.reg, hp1, hp2, UsedRegs);
- { Check if the register is used afterwards - if not, we can
- remove the movzx instruction completely }
- if not RegUsedAfterInstruction(taicpu(hp1).oper[0]^.reg, p, UsedRegs) then
- begin
- { Hp1 is a better position than p for debugging purposes }
- DebugMsg(SPeepholeOptimization + 'Movzx2Nop 4b', hp1);
- RemoveCurrentp(p, hp1);
- Result := True;
- end;
- Exit;
- end;
- end;
- end;
- {$ifdef x86_64}
- function TX86AsmOptimizer.PostPeepholeOptXor(var p : tai) : Boolean;
- var
- PreMessage, RegName: string;
- begin
- { Code size reduction by J. Gareth "Kit" Moreton }
- { change "xorq %reg,%reg" to "xorl %reg,%reg" for %rax, %rcx, %rdx, %rbx, %rsi, %rdi, %rbp and %rsp,
- as this removes the REX prefix }
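- { Illustrative encodings:
-     xorq %rax,%rax = 48 31 C0 (3 bytes)  ->  xorl %eax,%eax = 31 C0 (2 bytes)
-   Writing %eax zeroes the upper 32 bits of %rax anyway, so the result is
-   identical.  (%r8-%r15 still need a REX prefix, hence the register list above.) }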
- Result := False;
- if not OpsEqual(taicpu(p).oper[0]^,taicpu(p).oper[1]^) then
- Exit;
- if taicpu(p).oper[0]^.typ <> top_reg then
- { Should be impossible if both operands were equal, since one of XOR's operands must be a register }
- InternalError(2018011500);
- case taicpu(p).opsize of
- S_Q:
- begin
- RegName := debug_regname(taicpu(p).oper[0]^.reg); { 64-bit register name }
- PreMessage := 'xorq ' + RegName + ',' + RegName + ' -> xorl ';
- { The actual optimization }
- setsubreg(taicpu(p).oper[0]^.reg, R_SUBD);
- setsubreg(taicpu(p).oper[1]^.reg, R_SUBD);
- taicpu(p).changeopsize(S_L);
- RegName := debug_regname(taicpu(p).oper[0]^.reg); { 32-bit register name }
- DebugMsg(SPeepholeOptimization + PreMessage + RegName + ',' + RegName + ' (32-bit register recommended when zeroing 64-bit counterpart)', p);
- end;
- else
- ;
- end;
- end;
- {$endif}
- function TX86AsmOptimizer.PostPeepholeOptVPXOR(var p : tai) : Boolean;
- var
- XReg: TRegister;
- begin
- Result := False;
- { Turn "vpxor %ymmreg2,%ymmreg2,%ymmreg1" to "vpxor %xmmreg2,%xmmreg2,%xmmreg1"
- Smaller encoding and slightly faster on some platforms (also works for
- ZMM-sized registers) }
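- { Illustrative example (assuming registers %xmm0-%xmm15):
-     vpxor %ymm1,%ymm1,%ymm0  ->  vpxor %xmm1,%xmm1,%xmm0
-   Any VEX/EVEX-encoded write to an XMM register zeroes the upper bits of the
-   full YMM/ZMM register, so the zeroing result is identical; for ZMM-sized
-   operands the 4-byte EVEX prefix can also be replaced by a shorter VEX prefix. }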
- if (taicpu(p).opsize in [S_YMM, S_ZMM]) and
- MatchOpType(taicpu(p), top_reg, top_reg, top_reg) then
- begin
- XReg := taicpu(p).oper[0]^.reg;
- if (taicpu(p).oper[1]^.reg = XReg) then
- begin
- taicpu(p).changeopsize(S_XMM);
- setsubreg(taicpu(p).oper[2]^.reg, R_SUBMMX);
- if (cs_opt_size in current_settings.optimizerswitches) then
- begin
- { Change input registers to %xmm0 to reduce size. Note that
- there's a risk of a false dependency doing this, so only
- optimise for size here }
- XReg := NR_XMM0;
- DebugMsg(SPeepholeOptimization + 'Changed zero-setting vpxor from Y/ZMM to XMM and changed input registers to %xmm0 to reduce size', p);
- end
- else
- begin
- setsubreg(XReg, R_SUBMMX);
- DebugMsg(SPeepholeOptimization + 'Changed zero-setting vpxor from Y/ZMM to XMM to reduce size and increase efficiency', p);
- end;
- taicpu(p).oper[0]^.reg := XReg;
- taicpu(p).oper[1]^.reg := XReg;
- Result := True;
- end;
- end;
- end;
- class procedure TX86AsmOptimizer.OptimizeRefs(var p: taicpu);
- var
- OperIdx: Integer;
- begin
- for OperIdx := 0 to p.ops - 1 do
- if p.oper[OperIdx]^.typ = top_ref then
- optimize_ref(p.oper[OperIdx]^.ref^, False);
- end;
- end.