aoptcpu.pas

  1. {
  2. Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
  3. Development Team
  4. This unit implements the ARM optimizer object
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 2 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program; if not, write to the Free Software
  15. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  16. ****************************************************************************
  17. }
  18. Unit aoptcpu;
  19. {$i fpcdefs.inc}
  20. {$ifdef EXTDEBUG}
  21. {$define DEBUG_AOPTCPU}
  22. {$endif EXTDEBUG}
  23. { $define DEBUG_PREREGSCHEDULER}
  24. Interface
  25. uses
  26. cgbase, cgutils, cpubase, aasmtai,
  27. aasmcpu,
  28. aopt, aoptobj, aoptarm;
  29. Type
  30. { TCpuAsmOptimizer }
  31. TCpuAsmOptimizer = class(TARMAsmOptimizer)
  32. { Can't be done in some cases due to the limited range of jumps }
  33. function CanDoJumpOpts: Boolean; override;
  34. { uses the same constructor as TAoptObj }
  35. function PrePeepHoleOptsCpu(var p: tai): Boolean; override;
  36. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  37. function PeepHoleOptPass2Cpu(var p: tai): boolean; override;
  38. Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
  39. function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;
  40. { gets the next tai object after Current that contains info relevant
  41. to the optimizer and that uses the given reference or changes
  42. program flow.
  43. If there is none, it returns false and
  44. sets Next to nil }
  45. Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  46. { outputs a debug message into the assembler file }
  47. procedure DebugMsg(const s: string; p: tai);
  48. function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
  49. function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
  50. { These routines contain optimisation code that is common to all ARM platforms }
  51. function OptPass1And(var p: tai): Boolean; override;
  52. function OptPass1LDR(var p: tai): Boolean; override;
  53. function OptPass1STR(var p: tai): Boolean; override;
  54. protected
  55. function LookForPreindexedPattern(p: taicpu): boolean;
  56. function LookForPostindexedPattern(p: taicpu): boolean;
  57. { Individual optimisation routines }
  58. function OptPass1DataCheckMov(var p: tai): Boolean;
  59. function OptPass1ADDSUB(var p: tai): Boolean;
  60. function OptPass1CMP(var p: tai): Boolean;
  61. function OptPass1STM(var p: tai): Boolean;
  62. function OptPass1MOV(var p: tai): Boolean;
  63. function OptPass1MOVW(var p: tai): Boolean;
  64. function OptPass1MUL(var p: tai): Boolean;
  65. function OptPass1MVN(var p: tai): Boolean;
  66. function OptPass1VMov(var p: tai): Boolean;
  67. function OptPass1VOp(var p: tai): Boolean;
  68. function OptPass1Push(var p: tai): Boolean;
  69. function OptPass2Bcc(var p: tai): Boolean;
  70. function OptPass2CMP(var p: tai): Boolean;
  71. function OptPass2STM(var p: tai): Boolean;
  72. function OptPass2STR(var p: tai): Boolean;
  73. End;
  74. TCpuPreRegallocScheduler = class(TAsmScheduler)
  75. function SchedulerPass1Cpu(var p: tai): boolean;override;
  76. procedure SwapRegLive(p, hp1: taicpu);
  77. end;
  78. TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
  79. { uses the same constructor as TAoptObj }
  80. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  81. procedure PeepHoleOptPass2;override;
  82. function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  83. protected
  84. function OptPass1AndThumb2(var p : tai) : boolean;
  85. function OptPass1LDM(var p : tai) : boolean;
  86. function OptPass1STM(var p : tai) : boolean;
  87. End;
  88. function MustBeLast(p : tai) : boolean;
  89. Implementation
  90. uses
  91. cutils,verbose,globtype,globals,
  92. systems,
  93. cpuinfo,
  94. cgobj,procinfo,
  95. aasmbase,aasmdata,
  96. aoptutils;
  97. { Range check must be disabled explicitly as conversions between signed and unsigned
  98. 32-bit values are done without explicit typecasts }
  99. {$R-}
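{ Descriptive note on the function below: it returns true if it looks safe to
  give the instruction a condition code, i.e. it must be an unconditional ARM
  (non-Thumb) instruction and must not be IT..ITTTT, CBZ/CBNZ, PLD, or a
  BL/BLX to a symbol (a BLX to a register is allowed). }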
  100. function CanBeCond(p : tai) : boolean;
  101. begin
  102. result:=
  103. not(GenerateThumbCode) and
  104. (p.typ=ait_instruction) and
  105. (taicpu(p).condition=C_None) and
  106. ((taicpu(p).opcode<A_IT) or (taicpu(p).opcode>A_ITTTT)) and
  107. (taicpu(p).opcode<>A_CBZ) and
  108. (taicpu(p).opcode<>A_CBNZ) and
  109. (taicpu(p).opcode<>A_PLD) and
  110. (((taicpu(p).opcode<>A_BLX) and
  111. { BL may need to be converted into BLX by the linker -- could possibly
  112. be allowed in case it's to a local symbol of which we know that it
  113. uses the same instruction set as the current one }
  114. (taicpu(p).opcode<>A_BL)) or
  115. (taicpu(p).oper[0]^.typ=top_reg));
  116. end;
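{ The function below removes a MOVEQ that merely reloads the constant the
  preceding CMP compared against, e.g. (illustrative):
    cmp   r0,#1
    moveq r0,#1    ; redundant, removed
    movne r0,#0 }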
  117. function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
  118. begin
  119. Result:=false;
  120. if (taicpu(movp).condition = C_EQ) and
  121. (taicpu(cmpp).oper[0]^.reg = taicpu(movp).oper[0]^.reg) and
  122. (taicpu(cmpp).oper[1]^.val = taicpu(movp).oper[1]^.val) then
  123. begin
  124. asml.insertafter(tai_comment.Create(strpnew('Peephole Optimization: CmpMovMov - Removed redundant moveq')), movp);
  125. asml.remove(movp);
  126. movp.free;
  127. Result:=true;
  128. end;
  129. end;
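{ The check below is a conservative 8-byte-alignment test, e.g. (illustrative):
  [r13,#16] qualifies (SP-relative, offset mod 8 = 0), and with an R11 frame
  pointer [r11,#-12] qualifies because abs(-12+4) mod 8 = 0. }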
  130. function AlignedToQWord(const ref : treference) : boolean;
  131. begin
  132. { (safe) heuristics to ensure alignment }
  133. result:=(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) and
  134. (((ref.offset>=0) and
  135. ((ref.offset mod 8)=0) and
  136. ((ref.base=NR_R13) or
  137. (ref.index=NR_R13))
  138. ) or
  139. ((ref.offset<=0) and
  140. { when using NR_R11, it always has a value of <qword align>+4 }
  141. ((abs(ref.offset+4) mod 8)=0) and
  142. (current_procinfo.framepointer=NR_R11) and
  143. ((ref.base=NR_R11) or
  144. (ref.index=NR_R11))
  145. )
  146. );
  147. end;
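{ The function below checks whether a constant offset fits the addressing mode:
  for ARM code, plain and byte loads/stores take a 12-bit offset while the
  halfword/signed forms only take an 8-bit one; Thumb-2 accepts -255..+4095. }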
  148. function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
  149. begin
  150. if GenerateThumb2Code then
  151. result := (aoffset<4096) and (aoffset>-256)
  152. else
  153. result := ((pf in [PF_None,PF_B]) and
  154. (abs(aoffset)<4096)) or
  155. (abs(aoffset)<256);
  156. end;
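{ The function below returns true if the instruction reads the given register
  as a source; for stores, compares, branches and similar opcodes operand 0 is
  a source as well, which is why the scan starts at oper[0] for those. }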
  157. function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
  158. var
  159. p: taicpu;
  160. i: longint;
  161. begin
  162. instructionLoadsFromReg := false;
  163. if not (assigned(hp) and (hp.typ = ait_instruction)) then
  164. exit;
  165. p:=taicpu(hp);
  166. i:=1;
  167. {For these instructions we have to start on oper[0]}
  168. if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
  169. A_CMP, A_CMN, A_TST, A_TEQ,
  170. A_B, A_BL, A_BX, A_BLX,
  171. A_SMLAL, A_UMLAL, A_VSTM, A_VLDM]) then i:=0;
  172. while(i<p.ops) do
  173. begin
  174. case p.oper[I]^.typ of
  175. top_reg:
  176. instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
  177. { STRD }
  178. ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
  179. top_regset:
  180. instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
  181. top_shifterop:
  182. instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
  183. top_ref:
  184. instructionLoadsFromReg :=
  185. (p.oper[I]^.ref^.base = reg) or
  186. (p.oper[I]^.ref^.index = reg);
  187. else
  188. ;
  189. end;
  190. if (i=0) and (p.opcode in [A_LDM,A_VLDM]) then
  191. exit;
  192. if instructionLoadsFromReg then
  193. exit; {Bailout if we found something}
  194. Inc(I);
  195. end;
  196. end;
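{ The function below returns true if the instruction overwrites reg with a new
  value (as opposed to merely updating it, e.g. a post-/preindexed base
  register), so callers can treat the previous value of reg as dead. }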
  197. function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
  198. var
  199. p: taicpu;
  200. begin
  201. Result := false;
  202. if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
  203. exit;
  204. p := taicpu(hp);
  205. case p.opcode of
  206. { These instructions do not write to a register at all }
  207. A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
  208. A_VCMP:
  209. exit;
  210. {Take care of post-/preindexed stores and loads: they change their base register}
  211. A_STR, A_LDR:
  212. begin
  213. Result := false;
  214. { actually, this does not apply here because post-/preindexed does not mean that a register
  215. is loaded with a new value, it is only modified
  216. (taicpu(p).oper[1]^.typ=top_ref) and
  217. (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  218. (taicpu(p).oper[1]^.ref^.base = reg);
  219. }
  220. { STR does not load into its first register }
  221. if p.opcode = A_STR then
  222. exit;
  223. end;
  224. A_VSTR:
  225. begin
  226. Result := false;
  227. exit;
  228. end;
  229. { These four write to their first two registers; UMLAL and SMLAL also read from them }
  230. A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
  231. Result :=
  232. (p.oper[1]^.typ = top_reg) and
  233. (p.oper[1]^.reg = reg);
  234. {Loads to oper2 from coprocessor}
  235. {
  236. MCR/MRC is currently not supported in FPC
  237. A_MRC:
  238. Result :=
  239. (p.oper[2]^.typ = top_reg) and
  240. (p.oper[2]^.reg = reg);
  241. }
  242. {Loads to all registers in the register set}
  243. A_LDM, A_VLDM:
  244. Result := (getsupreg(reg) in p.oper[1]^.regset^);
  245. A_POP:
  246. Result := (getsupreg(reg) in p.oper[0]^.regset^) or
  247. (reg=NR_STACK_POINTER_REG);
  248. else
  249. ;
  250. end;
  251. if Result then
  252. exit;
  253. case p.oper[0]^.typ of
  254. {The general case: the instruction writes to its first operand}
  255. top_reg:
  256. Result := (p.oper[0]^.reg = reg) or
  257. { LDRD }
  258. (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
  259. {LDM/STM might write a new value to their index register}
  260. top_ref:
  261. Result :=
  262. (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  263. (taicpu(p).oper[0]^.ref^.base = reg);
  264. else
  265. ;
  266. end;
  267. end;
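{ Implementation note: the loop below scans forward for the next LDR/STR whose
  reference equals ref; the search stops at stores (when StopOnStore is set),
  at calls/jumps, at writes to PC, or when not optimising at level 3. }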
  268. function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai; out
  269. Next: tai; const ref: TReference; StopOnStore: Boolean): Boolean;
  270. begin
  271. Next:=Current;
  272. repeat
  273. Result:=GetNextInstruction(Next,Next);
  274. if Result and
  275. (Next.typ=ait_instruction) and
  276. (taicpu(Next).opcode in [A_LDR, A_STR]) and
  277. (
  278. ((taicpu(Next).ops = 2) and
  279. (taicpu(Next).oper[1]^.typ = top_ref) and
  280. RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
  281. ((taicpu(Next).ops = 3) and { LDRD/STRD }
  282. (taicpu(Next).oper[2]^.typ = top_ref) and
  283. RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
  284. ) then
  285. {We've found an LDR or STR instruction with the same reference}
  286. exit;
  287. until not(Result) or
  288. (Next.typ<>ait_instruction) or
  289. not(cs_opt_level3 in current_settings.optimizerswitches) or
  290. is_calljmp(taicpu(Next).opcode) or
  291. (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
  292. RegModifiedByInstruction(NR_PC,Next);
  293. Result:=false;
  294. end;
  295. {$ifdef DEBUG_AOPTCPU}
  296. const
  297. SPeepholeOptimization: shortstring = 'Peephole Optimization: ';
  298. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
  299. begin
  300. asml.insertbefore(tai_comment.Create(strpnew(s)), p);
  301. end;
  302. {$else DEBUG_AOPTCPU}
  303. const
  304. SPeepholeOptimization = '';
  305. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
  306. begin
  307. end;
  308. {$endif DEBUG_AOPTCPU}
  309. function TCpuAsmOptimizer.CanDoJumpOpts: Boolean;
  310. begin
  311. { These jump optimisations cannot be performed when generating 16-bit Thumb (non-Thumb-2) code }
  312. Result := not (
  313. (current_settings.instructionset = is_thumb) and not (CPUARM_HAS_THUMB2 in cpu_capabilities[current_settings.cputype])
  314. );
  315. end;
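{ The function below folds a VMOV that only copies the result of p into another
  register, e.g. (illustrative):
    vldr  d0,[r0]
    vmov  d1,d0        ; d0 not used afterwards
  becomes
    vldr  d1,[r0]
  If the destination of the vmov is an integer register, the vldr is
  additionally changed into an ldr. }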
  316. function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
  317. var
  318. alloc,
  319. dealloc : tai_regalloc;
  320. hp1 : tai;
  321. begin
  322. Result:=false;
  323. if ((MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  324. ((getregtype(taicpu(movp).oper[0]^.reg)=R_MMREGISTER) or (taicpu(p).opcode=A_VLDR))
  325. ) or
  326. (((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFD)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
  327. (((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFS)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
  328. ) and
  329. (taicpu(movp).ops=2) and
  330. MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
  331. { the destination register of the mov must not be used between p and movp }
  332. not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
  333. { Take care to only do this for instructions which REALLY load to the first register.
  334. Otherwise
  335. vstr reg0, [reg1]
  336. vmov reg2, reg0
  337. will be optimized to
  338. vstr reg2, [reg1]
  339. }
  340. regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
  341. begin
  342. dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
  343. if assigned(dealloc) then
  344. begin
  345. DebugMsg(SPeepholeOptimization + optimizer + ' removed superfluous vmov', movp);
  346. result:=true;
  347. { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
  348. and remove it if possible }
  349. asml.Remove(dealloc);
  350. alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
  351. if assigned(alloc) then
  352. begin
  353. asml.Remove(alloc);
  354. alloc.free;
  355. dealloc.free;
  356. end
  357. else
  358. asml.InsertAfter(dealloc,p);
  359. { try to move the allocation of the target register }
  360. GetLastInstruction(movp,hp1);
  361. alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
  362. if assigned(alloc) then
  363. begin
  364. asml.Remove(alloc);
  365. asml.InsertBefore(alloc,p);
  366. { adjust used regs }
  367. IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
  368. end;
  369. { change
  370. vldr reg0,[reg1]
  371. vmov reg2,reg0
  372. into
  373. ldr reg2,[reg1]
  374. if reg2 is an int register
  375. }
  376. if (taicpu(p).opcode=A_VLDR) and (getregtype(taicpu(movp).oper[0]^.reg)=R_INTREGISTER) then
  377. taicpu(p).opcode:=A_LDR;
  378. { finally get rid of the mov }
  379. taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
  380. asml.remove(movp);
  381. movp.free;
  382. end;
  383. end;
  384. end;
  385. {
  386. optimize
  387. add/sub reg1,reg1,regY/const
  388. ...
  389. ldr/str regX,[reg1]
  390. into
  391. ldr/str regX,[reg1, regY/const]!
  392. }
  393. function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
  394. var
  395. hp1: tai;
  396. begin
  397. if GenerateARMCode and
  398. (p.ops=3) and
  399. MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
  400. GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
  401. (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
  402. MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
  403. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  404. (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
  405. (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
  406. (taicpu(hp1).oper[1]^.ref^.offset=0) and
  407. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  408. (((p.oper[2]^.typ=top_reg) and
  409. (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
  410. ((p.oper[2]^.typ=top_const) and
  411. ((abs(p.oper[2]^.val) < 256) or
  412. ((abs(p.oper[2]^.val) < 4096) and
  413. (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
  414. begin
  415. taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;
  416. if p.oper[2]^.typ=top_reg then
  417. begin
  418. taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
  419. if p.opcode=A_ADD then
  420. taicpu(hp1).oper[1]^.ref^.signindex:=1
  421. else
  422. taicpu(hp1).oper[1]^.ref^.signindex:=-1;
  423. end
  424. else
  425. begin
  426. if p.opcode=A_ADD then
  427. taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
  428. else
  429. taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
  430. end;
  431. result:=true;
  432. end
  433. else
  434. result:=false;
  435. end;
  436. {
  437. optimize
  438. ldr/str regX,[reg1]
  439. ...
  440. add/sub reg1,reg1,regY/const
  441. into
  442. ldr/str regX,[reg1], regY/const
  443. }
  444. function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
  445. var
  446. hp1 : tai;
  447. begin
  448. Result:=false;
  449. if (p.oper[1]^.typ = top_ref) and
  450. (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
  451. (p.oper[1]^.ref^.index=NR_NO) and
  452. (p.oper[1]^.ref^.offset=0) and
  453. GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
  454. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  455. MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
  456. (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
  457. (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
  458. (
  459. (taicpu(hp1).oper[2]^.typ=top_reg) or
  460. { valid offset? }
  461. ((taicpu(hp1).oper[2]^.typ=top_const) and
  462. ((abs(taicpu(hp1).oper[2]^.val)<256) or
  463. ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
  464. )
  465. )
  466. ) and
  467. { don't apply the optimization if the base register is loaded }
  468. (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
  469. not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
  470. { don't apply the optimization if the (new) index register is loaded }
  471. (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
  472. not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
  473. GenerateARMCode then
  474. begin
  475. DebugMsg(SPeepholeOptimization + 'Str/LdrAdd/Sub2Str/Ldr Postindex done', p);
  476. p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
  477. if taicpu(hp1).oper[2]^.typ=top_const then
  478. begin
  479. if taicpu(hp1).opcode=A_ADD then
  480. p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
  481. else
  482. p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
  483. end
  484. else
  485. begin
  486. p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
  487. if taicpu(hp1).opcode=A_ADD then
  488. p.oper[1]^.ref^.signindex:=1
  489. else
  490. p.oper[1]^.ref^.signindex:=-1;
  491. end;
  492. asml.Remove(hp1);
  493. hp1.Free;
  494. Result:=true;
  495. end;
  496. end;
  497. function TCpuAsmOptimizer.OptPass1ADDSUB(var p: tai): Boolean;
  498. var
  499. hp1,hp2: tai;
  500. sign: Integer;
  501. newvalue: TCGInt;
  502. b: byte;
  503. begin
  504. Result := OptPass1DataCheckMov(p);
  505. {
  506. change
  507. add/sub reg2,reg1,const1
  508. str/ldr reg3,[reg2,const2]
  509. dealloc reg2
  510. to
  511. str/ldr reg3,[reg1,const2+/-const1]
  512. }
  513. if (not GenerateThumbCode) and
  514. (taicpu(p).ops>2) and
  515. (taicpu(p).oper[1]^.typ = top_reg) and
  516. (taicpu(p).oper[2]^.typ = top_const) then
  517. begin
  518. hp1:=p;
  519. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
  520. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  521. MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
  522. (taicpu(hp1).oper[1]^.typ = top_ref) and
  523. (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
  524. { don't optimize if the register is stored/overwritten }
  525. (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
  526. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  527. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  528. { new offset must be valid: either in the 8-bit or the 12-bit range, depending on the
  529. ldr postfix }
  530. (((taicpu(p).opcode=A_ADD) and
  531. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  532. ) or
  533. ((taicpu(p).opcode=A_SUB) and
  534. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  535. )
  536. ) do
  537. begin
  538. { neither reg1 nor reg2 may be changed in between }
  539. if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
  540. RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
  541. break;
  542. { reg2 must either be overwritten by the ldr or be deallocated afterwards }
  543. if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
  544. assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
  545. begin
  546. { remember last instruction }
  547. hp2:=hp1;
  548. DebugMsg(SPeepholeOptimization + 'Add/SubLdr2Ldr done', p);
  549. hp1:=p;
  550. { fix all ldr/str }
  551. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
  552. begin
  553. taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
  554. if taicpu(p).opcode=A_ADD then
  555. inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
  556. else
  557. dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
  558. if hp1=hp2 then
  559. break;
  560. end;
  561. RemoveCurrentP(p);
  562. result:=true;
  563. Exit;
  564. end;
  565. end;
  566. end;
  567. {
  568. optimize
  569. add/sub rx,ry,const1
  570. add/sub rx,rx,const2
  571. into
  572. add/sub rx,ry,const1+/-const2
  573. or
  574. mov rx,ry if const1+/-const2=0
  575. or
  576. remove it, if rx=ry and const1+/-const2=0
  577. check that the first operation has no postfix and no condition
  578. }
  579. if MatchInstruction(p,[A_ADD,A_SUB],[C_None],[PF_None]) and
  580. MatchOptype(taicpu(p),top_reg,top_reg,top_const) and
  581. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  582. MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
  583. MatchOptype(taicpu(hp1),top_reg,top_reg,top_const) and
  584. MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[0]^) and
  585. MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[1]^) then
  586. begin
  587. sign:=1;
  588. if (taicpu(p).opcode=A_SUB) xor (taicpu(hp1).opcode=A_SUB) then
  589. sign:=-1;
  590. newvalue:=taicpu(p).oper[2]^.val+sign*taicpu(hp1).oper[2]^.val;
  591. if (not(GenerateThumbCode) and is_shifter_const(newvalue,b)) or
  592. (GenerateThumbCode and is_thumb_imm(newvalue)) then
  593. begin
  594. DebugMsg(SPeepholeOptimization + 'Merge Add/Sub done', p);
  595. taicpu(p).oper[2]^.val:=newvalue;
  596. RemoveInstruction(hp1);
  597. Result:=true;
  598. if newvalue=0 then
  599. begin
  600. if MatchOperand(taicpu(p).oper[0]^,taicpu(p).oper[1]^) then
  601. RemoveCurrentP(p)
  602. else
  603. begin
  604. taicpu(p).opcode:=A_MOV;
  605. taicpu(p).ops:=2;
  606. end;
  607. Exit;
  608. end;
  609. end;
  610. end;
  611. if (taicpu(p).condition = C_None) and
  612. (taicpu(p).oppostfix = PF_None) and
  613. LookForPreindexedPattern(taicpu(p)) then
  614. begin
  615. DebugMsg(SPeepholeOptimization + 'Add/Sub to Preindexed done', p);
  616. RemoveCurrentP(p);
  617. Result:=true;
  618. Exit;
  619. end;
  620. end;
  621. function TCpuAsmOptimizer.OptPass1MUL(var p: tai): Boolean;
  622. var
  623. hp1: tai;
  624. oldreg: tregister;
  625. begin
  626. Result := OptPass1DataCheckMov(p);
  627. {
  628. Turn
  629. mul reg0, z,w
  630. sub/add x, y, reg0
  631. dealloc reg0
  632. into
  633. mls/mla x,z,w,y
  634. }
  635. if (taicpu(p).condition = C_None) and
  636. (taicpu(p).oppostfix = PF_None) and
  637. (taicpu(p).ops=3) and
  638. (taicpu(p).oper[0]^.typ = top_reg) and
  639. (taicpu(p).oper[1]^.typ = top_reg) and
  640. (taicpu(p).oper[2]^.typ = top_reg) and
  641. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  642. MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
  643. (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
  644. (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
  645. (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
  646. ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
  647. // On CPUs before ARMv6 it is not recommended to use the same Rd and Rm for MLA.
  648. // TODO: A workaround would be to swap Rm and Rs
  649. (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
  650. (((taicpu(hp1).ops=3) and
  651. (taicpu(hp1).oper[2]^.typ=top_reg) and
  652. ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
  653. (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
  654. ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  655. (taicpu(hp1).opcode=A_ADD) and
  656. (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
  657. ((taicpu(hp1).ops=2) and
  658. (taicpu(hp1).oper[1]^.typ=top_reg) and
  659. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  660. (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
  661. begin
  662. if taicpu(hp1).opcode=A_ADD then
  663. begin
  664. taicpu(hp1).opcode:=A_MLA;
  665. if taicpu(hp1).ops=3 then
  666. begin
  667. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
  668. oldreg:=taicpu(hp1).oper[2]^.reg
  669. else
  670. oldreg:=taicpu(hp1).oper[1]^.reg;
  671. end
  672. else
  673. oldreg:=taicpu(hp1).oper[0]^.reg;
  674. taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
  675. taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
  676. taicpu(hp1).loadreg(3,oldreg);
  677. DebugMsg(SPeepholeOptimization + 'MulAdd2MLA done', p);
  678. end
  679. else
  680. begin
  681. taicpu(hp1).opcode:=A_MLS;
  682. taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
  683. if taicpu(hp1).ops=2 then
  684. taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
  685. else
  686. taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
  687. taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
  688. DebugMsg(SPeepholeOptimization + 'MulSub2MLS done', p);
  689. AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,UsedRegs);
  690. AllocRegBetween(taicpu(hp1).oper[2]^.reg,p,hp1,UsedRegs);
  691. AllocRegBetween(taicpu(hp1).oper[3]^.reg,p,hp1,UsedRegs);
  692. end;
  693. taicpu(hp1).ops:=4;
  694. RemoveCurrentP(p);
  695. Result := True;
  696. Exit;
  697. end
  698. end;
  699. function TCpuAsmOptimizer.OptPass1And(var p: tai): Boolean;
  700. begin
  701. Result := OptPass1DataCheckMov(p);
  702. Result := inherited OptPass1And(p) or Result;
  703. end;
  704. function TCpuAsmOptimizer.OptPass1DataCheckMov(var p: tai): Boolean;
  705. var
  706. hp1: tai;
  707. begin
  708. {
  709. change
  710. op reg1, ...
  711. mov reg2, reg1
  712. to
  713. op reg2, ...
  714. }
  715. Result := (taicpu(p).ops >= 3) and
  716. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  717. RemoveSuperfluousMove(p, hp1, 'DataMov2Data');
  718. end;
  719. function TCpuAsmOptimizer.OptPass1CMP(var p: tai): Boolean;
  720. var
  721. hp1, hp2, hp_last: tai;
  722. MovRem1, MovRem2: Boolean;
  723. begin
  724. Result := False;
  725. { These optimizations can be applied only to the currently enabled operations because
  726. the other operations do not update all flags and FPC does not track flag usage }
  727. if (taicpu(p).condition = C_None) and
  728. (taicpu(p).oper[1]^.typ = top_const) and
  729. GetNextInstruction(p, hp1) then
  730. begin
  731. {
  732. change
  733. cmp reg,const1
  734. moveq reg,const1
  735. movne reg,const2
  736. to
  737. cmp reg,const1
  738. movne reg,const2
  739. }
  740. if MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  741. (taicpu(hp1).oper[1]^.typ = top_const) and
  742. GetNextInstruction(hp1, hp2) and
  743. MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  744. (taicpu(hp2).oper[1]^.typ = top_const) then
  745. begin
  746. MovRem1 := RemoveRedundantMove(p, hp1, asml);
  747. MovRem2 := RemoveRedundantMove(p, hp2, asml);
  748. Result:= MovRem1 or MovRem2;
  749. { Make sure that hp1 is still the next instruction after p }
  750. if MovRem1 then
  751. if MovRem2 then
  752. begin
  753. if not GetNextInstruction(p, hp1) then
  754. Exit;
  755. end
  756. else
  757. hp1 := hp2;
  758. end;
  759. end;
  760. end;
  761. function TCpuAsmOptimizer.OptPass1LDR(var p: tai): Boolean;
  762. var
  763. hp1: tai;
  764. begin
  765. Result := inherited OptPass1LDR(p);
  766. if Result then
  767. Exit;
  768. { change
  769. ldr reg1,ref
  770. ldr reg2,ref
  771. into ...
  772. }
  773. if (taicpu(p).oper[1]^.typ = top_ref) and
  774. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  775. GetNextInstruction(p,hp1) and
  776. { ldrd is not allowed here }
  777. MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
  778. begin
  779. {
  780. ...
  781. ldr reg1,ref
  782. mov reg2,reg1
  783. }
  784. if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
  785. RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
  786. (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
  787. (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
  788. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
  789. begin
  790. if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
  791. begin
  792. DebugMsg(SPeepholeOptimization + 'LdrLdr2Ldr done', hp1);
  793. asml.remove(hp1);
  794. hp1.free;
  795. end
  796. else
  797. begin
  798. DebugMsg(SPeepholeOptimization + 'LdrLdr2LdrMov done', hp1);
  799. taicpu(hp1).opcode:=A_MOV;
  800. taicpu(hp1).oppostfix:=PF_None;
  801. taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
  802. end;
  803. result := true;
  804. end
  805. {
  806. ...
  807. ldrd reg1,reg1+1,ref
  808. }
  809. else if (GenerateARMCode or GenerateThumb2Code) and
  810. (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
  811. { ldrd does not allow any postfixes ... }
  812. (taicpu(p).oppostfix=PF_None) and
  813. not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
  814. (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
  815. { ldr ensures that either base or index contains no register, else ldr wouldn't
  816. use an offset either
  817. }
  818. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  819. (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
  820. (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
  821. (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
  822. AlignedToQWord(taicpu(p).oper[1]^.ref^) then
  823. begin
  824. DebugMsg(SPeepholeOptimization + 'LdrLdr2Ldrd done', p);
  825. taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
  826. taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
  827. taicpu(p).ops:=3;
  828. taicpu(p).oppostfix:=PF_D;
  829. asml.remove(hp1);
  830. hp1.free;
  831. result:=true;
  832. end;
  833. end;
  834. {
  835. Change
  836. ldrb dst1, [REF]
  837. and dst2, dst1, #255
  838. into
  839. ldrb dst2, [ref]
  840. }
  841. if not(GenerateThumbCode) and
  842. (taicpu(p).oppostfix=PF_B) and
  843. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  844. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
  845. (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
  846. (taicpu(hp1).oper[2]^.typ = top_const) and
  847. (taicpu(hp1).oper[2]^.val = $FF) and
  848. not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
  849. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  850. begin
  851. DebugMsg(SPeepholeOptimization + 'LdrbAnd2Ldrb done', p);
  852. taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
  853. asml.remove(hp1);
  854. hp1.free;
  855. result:=true;
  856. end;
  857. Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  858. { Remove superfluous mov after ldr
  859. changes
  860. ldr reg1, ref
  861. mov reg2, reg1
  862. to
  863. ldr reg2, ref
  864. conditions are:
  865. * no ldrd usage
  866. * reg1 must be released after mov
  867. * mov cannot contain shifterops
  868. * ldr+mov have the same conditions
  869. * mov does not set flags
  870. }
  871. if (taicpu(p).oppostfix<>PF_D) and
  872. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  873. RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
  874. Result:=true;
  875. end;
  876. function TCpuAsmOptimizer.OptPass1STM(var p: tai): Boolean;
  877. var
  878. hp1, hp2, hp3, hp4: tai;
  879. begin
  880. Result := False;
  881. {
  882. change
  883. stmfd r13!,[r14]
  884. sub r13,r13,#4
  885. bl abc
  886. add r13,r13,#4
  887. ldmfd r13!,[r15]
  888. into
  889. b abc
  890. }
  891. if not(ts_thumb_interworking in current_settings.targetswitches) and
  892. (taicpu(p).condition = C_None) and
  893. (taicpu(p).oppostfix = PF_FD) and
  894. (taicpu(p).oper[0]^.typ = top_ref) and
  895. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  896. (taicpu(p).oper[0]^.ref^.base=NR_NO) and
  897. (taicpu(p).oper[0]^.ref^.offset=0) and
  898. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  899. (taicpu(p).oper[1]^.typ = top_regset) and
  900. (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
  901. GetNextInstruction(p, hp1) and
  902. MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
  903. (taicpu(hp1).oper[0]^.typ = top_reg) and
  904. (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
  905. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
  906. (taicpu(hp1).oper[2]^.typ = top_const) and
  907. GetNextInstruction(hp1, hp2) and
  908. SkipEntryExitMarker(hp2, hp2) and
  909. MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
  910. (taicpu(hp2).oper[0]^.typ = top_ref) and
  911. GetNextInstruction(hp2, hp3) and
  912. SkipEntryExitMarker(hp3, hp3) and
  913. MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
  914. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
  915. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
  916. MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
  917. GetNextInstruction(hp3, hp4) and
  918. MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
  919. MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
  920. (taicpu(hp4).oper[1]^.typ = top_regset) and
  921. (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
  922. begin
  923. asml.Remove(hp1);
  924. asml.Remove(hp3);
  925. asml.Remove(hp4);
  926. taicpu(hp2).opcode:=A_B;
  927. hp1.free;
  928. hp3.free;
  929. hp4.free;
  930. RemoveCurrentp(p, hp2);
  931. DebugMsg(SPeepholeOptimization + 'Bl2B done', p);
  932. Result := True;
  933. end;
  934. end;
  935. function TCpuAsmOptimizer.OptPass1STR(var p: tai): Boolean;
  936. var
  937. hp1: tai;
  938. begin
  939. Result := inherited OptPass1STR(p);
  940. if Result then
  941. Exit;
  942. { Common conditions }
  943. if (taicpu(p).oper[1]^.typ = top_ref) and
  944. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  945. (taicpu(p).oppostfix=PF_None) then
  946. begin
  947. { change
  948. str reg1,ref
  949. ldr reg2,ref
  950. into
  951. str reg1,ref
  952. mov reg2,reg1
  953. }
  954. if (taicpu(p).condition=C_None) and
  955. GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
  956. MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
  957. (taicpu(hp1).oper[1]^.typ=top_ref) and
  958. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  959. not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
  960. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
  961. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
  962. begin
  963. if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
  964. begin
  965. DebugMsg(SPeepholeOptimization + 'StrLdr2StrMov 1 done', hp1);
  966. asml.remove(hp1);
  967. hp1.free;
  968. end
  969. else
  970. begin
  971. taicpu(hp1).opcode:=A_MOV;
  972. taicpu(hp1).oppostfix:=PF_None;
  973. taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
  974. DebugMsg(SPeepholeOptimization + 'StrLdr2StrMov 2 done', hp1);
  975. end;
  976. result := True;
  977. end
  978. { change
  979. str reg1,ref
  980. str reg2,ref
  981. into
  982. strd reg1,reg2,ref
  983. }
  984. else if (GenerateARMCode or GenerateThumb2Code) and
  985. (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
  986. not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
  987. (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
  988. AlignedToQWord(taicpu(p).oper[1]^.ref^) and
  989. GetNextInstruction(p,hp1) and
  990. MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
  991. (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
  992. { str ensures that either base or index contains no register, else str wouldn't
  993. use an offset either
  994. }
  995. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  996. (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
  997. (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) then
  998. begin
  999. DebugMsg(SPeepholeOptimization + 'StrStr2Strd done', p);
  1000. taicpu(p).oppostfix:=PF_D;
  1001. taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
  1002. taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
  1003. taicpu(p).ops:=3;
  1004. asml.remove(hp1);
  1005. hp1.free;
  1006. result:=true;
  1007. end;
  1008. end;
  1009. Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  1010. end;
  1011. function TCpuAsmOptimizer.OptPass1MOV(var p: tai): Boolean;
  1012. var
  1013. hp1, hpfar1, hp2: tai;
  1014. i, i2: longint;
  1015. tempop: tasmop;
  1016. dealloc: tai_regalloc;
  1017. begin
  1018. Result := False;
  1019. hp1 := nil;
  1020. { fold
  1021. mov reg1,reg0, shift imm1
  1022. mov reg1,reg1, shift imm2
  1023. }
  1024. if (taicpu(p).ops=3) and
  1025. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1026. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1027. getnextinstruction(p,hp1) and
  1028. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1029. (taicpu(hp1).ops=3) and
  1030. MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
  1031. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1032. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1033. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
  1034. begin
  1035. { fold
  1036. mov reg1,reg0, lsl 16
  1037. mov reg1,reg1, lsr 16
  1038. strh reg1, ...
  1039. dealloc reg1
  1040. to
  1041. strh reg1, ...
  1042. dealloc reg1
  1043. }
  1044. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1045. (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
  1046. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
  1047. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
  1048. getnextinstruction(hp1,hp2) and
  1049. MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
  1050. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
  1051. begin
  1052. TransferUsedRegs(TmpUsedRegs);
  1053. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1054. UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
  1055. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
  1056. begin
  1057. DebugMsg(SPeepholeOptimization + 'Removed superfluous 16 Bit zero extension', hp1);
  1058. taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
  1059. asml.remove(hp1);
  1060. hp1.free;
  1061. RemoveCurrentP(p, hp2);
  1062. Result:=true;
  1063. Exit;
  1064. end;
  1065. end
  1066. { fold
  1067. mov reg1,reg0, shift imm1
  1068. mov reg1,reg1, shift imm2
  1069. to
  1070. mov reg1,reg0, shift imm1+imm2
  1071. }
  1072. else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
  1073. { an asr has no extra effect after an lsr, so the asr can be folded into the lsr }
  1074. ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR) ) then
  1075. begin
  1076. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1077. { avoid overflows }
  1078. if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
  1079. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  1080. SM_ROR:
  1081. taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
  1082. SM_ASR:
  1083. taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
  1084. SM_LSR,
  1085. SM_LSL:
  1086. begin
  1087. hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
  1088. InsertLLItem(p.previous, p.next, hp2);
  1089. p.free;
  1090. p:=hp2;
  1091. end;
  1092. else
  1093. internalerror(2008072803);
  1094. end;
  1095. DebugMsg(SPeepholeOptimization + 'ShiftShift2Shift 1 done', p);
  1096. asml.remove(hp1);
  1097. hp1.free;
  1098. hp1 := nil;
  1099. result := true;
  1100. end
  1101. { fold
  1102. mov reg1,reg0, shift imm1
  1103. mov reg1,reg1, shift imm2
  1104. mov reg1,reg1, shift imm3 ...
  1105. mov reg2,reg1, shift imm3 ...
  1106. }
  1107. else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
  1108. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1109. (taicpu(hp2).ops=3) and
  1110. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1111. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
  1112. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1113. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
  1114. begin
  1115. { mov reg1,reg0, lsl imm1
  1116. mov reg1,reg1, lsr/asr imm2
  1117. mov reg2,reg1, lsl imm3 ...
  1118. to
  1119. mov reg1,reg0, lsl imm1
  1120. mov reg2,reg1, lsr/asr imm2-imm3
  1121. if
  1122. imm1>=imm2
  1123. }
  1124. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1125. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1126. (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1127. begin
  1128. if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1129. begin
  1130. if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
  1131. not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1132. begin
  1133. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 1a done', p);
  1134. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1135. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1136. asml.remove(hp1);
  1137. asml.remove(hp2);
  1138. hp1.free;
  1139. hp2.free;
  1140. if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
  1141. begin
  1142. taicpu(p).freeop(1);
  1143. taicpu(p).freeop(2);
  1144. taicpu(p).loadconst(1,0);
  1145. end;
  1146. result := true;
  1147. Exit;
  1148. end;
  1149. end
  1150. else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1151. begin
  1152. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 1b done', p);
  1153. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
  1154. taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1155. asml.remove(hp2);
  1156. hp2.free;
  1157. result := true;
  1158. Exit;
  1159. end;
  1160. end
  1161. { mov reg1,reg0, lsr/asr imm1
  1162. mov reg1,reg1, lsl imm2
  1163. mov reg1,reg1, lsr/asr imm3 ...
  1164. if imm3>=imm1 and imm2>=imm1
  1165. to
  1166. mov reg1,reg0, lsl imm2-imm1
  1167. mov reg1,reg1, lsr/asr imm3 ...
  1168. }
  1169. else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1170. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1171. (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
  1172. (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1173. begin
  1174. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
  1175. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1176. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 2 done', p);
  1177. if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
  1178. begin
  1179. taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
  1180. asml.remove(hp1);
  1181. hp1.free;
  1182. end;
  1183. RemoveCurrentp(p);
  1184. result := true;
  1185. Exit;
  1186. end;
  1187. end;
  1188. end;
  1189. { All the optimisations from this point on require GetNextInstructionUsingReg
  1190. to return True }
  1191. while (
  1192. GetNextInstructionUsingReg(p, hpfar1, taicpu(p).oper[0]^.reg) and
  1193. (hpfar1.typ = ait_instruction)
  1194. ) do
  1195. begin
  1196. { Change the common
  1197. mov r0, r0, lsr #xxx
  1198. and r0, r0, #yyy/bic r0, r0, #xxx
  1199. and remove the superfluous and/bic if possible
  1200. This could be extended to handle more cases.
  1201. }
  1202. { Change
  1203. mov rx, ry, lsr/ror #xxx
  1204. uxtb/uxth rz,rx/and rz,rx,0xFF
  1205. dealloc rx
  1206. to
  1207. uxtb/uxth rz,ry,ror #xxx
  1208. }
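{ Illustrative example with hypothetical registers:
    mov r3, r2, lsr #8
    uxtb r0, r3
  becomes
    uxtb r0, r2, ror #8
  as both forms place bits 8..15 of r2 in the low byte of r0.
}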
  1209. if (GenerateThumb2Code) and
  1210. (taicpu(p).ops=3) and
  1211. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1212. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1213. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
  1214. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1215. begin
  1216. if MatchInstruction(hpfar1, A_UXTB, [C_None], [PF_None]) and
  1217. (taicpu(hpfar1).ops = 2) and
  1218. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1219. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1220. begin
  1221. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1222. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1223. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1224. taicpu(hpfar1).ops := 3;
  1225. if not Assigned(hp1) then
  1226. GetNextInstruction(p,hp1);
  1227. RemoveCurrentP(p, hp1);
  1228. result:=true;
  1229. exit;
  1230. end
  1231. else if MatchInstruction(hpfar1, A_UXTH, [C_None], [PF_None]) and
  1232. (taicpu(hpfar1).ops=2) and
  1233. (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
  1234. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1235. begin
  1236. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1237. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1238. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1239. taicpu(hpfar1).ops := 3;
  1240. if not Assigned(hp1) then
  1241. GetNextInstruction(p,hp1);
  1242. RemoveCurrentP(p, hp1);
  1243. result:=true;
  1244. exit;
  1245. end
  1246. else if MatchInstruction(hpfar1, A_AND, [C_None], [PF_None]) and
  1247. (taicpu(hpfar1).ops = 3) and
  1248. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1249. (taicpu(hpfar1).oper[2]^.val = $FF) and
  1250. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1251. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1252. begin
  1253. taicpu(hpfar1).ops := 3;
  1254. taicpu(hpfar1).opcode := A_UXTB;
  1255. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1256. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1257. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1258. if not Assigned(hp1) then
  1259. GetNextInstruction(p,hp1);
  1260. RemoveCurrentP(p, hp1);
  1261. result:=true;
  1262. exit;
  1263. end;
  1264. end;
1265. { 2-operand mov optimisations }
  1266. if (taicpu(p).ops = 2) then
  1267. begin
  1268. {
  1269. This removes the mul from
  1270. mov rX,0
  1271. ...
  1272. mul ...,rX,...
  1273. }
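{ The enabled case below turns an MLA whose accumulator is known to be
  zero into a plain MUL, e.g. (hypothetical registers):
    mov r0, #0
    mla r1, r2, r3, r0
  becomes
    mul r1, r2, r3
  and the mov itself is removed when r0 is not used afterwards.
}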
  1274. if (taicpu(p).oper[1]^.typ = top_const) then
  1275. begin
  1276. (* if false and
  1277. (taicpu(p).oper[1]^.val=0) and
  1278. MatchInstruction(hpfar1, [A_MUL,A_MLA], [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1279. (((taicpu(hpfar1).oper[1]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^)) or
  1280. ((taicpu(hpfar1).oper[2]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[2]^))) then
  1281. begin
  1282. TransferUsedRegs(TmpUsedRegs);
  1283. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1284. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1285. DebugMsg(SPeepholeOptimization + 'MovMUL/MLA2Mov0 done', p);
  1286. if taicpu(hpfar1).opcode=A_MUL then
  1287. taicpu(hpfar1).loadconst(1,0)
  1288. else
  1289. taicpu(hpfar1).loadreg(1,taicpu(hpfar1).oper[3]^.reg);
  1290. taicpu(hpfar1).ops:=2;
  1291. taicpu(hpfar1).opcode:=A_MOV;
  1292. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1293. RemoveCurrentP(p);
  1294. Result:=true;
  1295. exit;
  1296. end
  1297. else*) if (taicpu(p).oper[1]^.val=0) and
  1298. MatchInstruction(hpfar1, A_MLA, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1299. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[3]^) then
  1300. begin
  1301. TransferUsedRegs(TmpUsedRegs);
  1302. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1303. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1304. DebugMsg(SPeepholeOptimization + 'MovMLA2MUL 1 done', p);
  1305. taicpu(hpfar1).ops:=3;
  1306. taicpu(hpfar1).opcode:=A_MUL;
  1307. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1308. begin
  1309. RemoveCurrentP(p);
  1310. Result:=true;
  1311. end;
  1312. exit;
  1313. end
  1314. {
  1315. This changes the very common
  1316. mov r0, #0
  1317. str r0, [...]
  1318. mov r0, #0
  1319. str r0, [...]
  1320. and removes all superfluous mov instructions
  1321. }
  1322. else if (taicpu(hpfar1).opcode=A_STR) then
  1323. begin
  1324. hp1 := hpfar1;
  1325. while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
  1326. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1327. GetNextInstruction(hp1, hp2) and
  1328. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1329. (taicpu(hp2).ops = 2) and
  1330. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
  1331. MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
  1332. begin
  1333. DebugMsg(SPeepholeOptimization + 'MovStrMov done', hp2);
  1334. GetNextInstruction(hp2,hp1);
  1335. asml.remove(hp2);
  1336. hp2.free;
  1337. result:=true;
  1338. if not assigned(hp1) then break;
  1339. end;
  1340. if Result then
  1341. Exit;
  1342. { If no changes were made, now try constant merging }
  1343. if TryConstMerge(p, hpfar1) then
  1344. begin
  1345. Result := True;
  1346. Exit;
  1347. end;
  1348. end;
  1349. end;
  1350. {
  1351. This removes the first mov from
  1352. mov rX,...
  1353. mov rX,...
  1354. }
  1355. if taicpu(hpfar1).opcode=A_MOV then
  1356. begin
  1357. hp1 := p;
  1358. while MatchInstruction(hpfar1, A_MOV, [taicpu(hp1).condition], [taicpu(hp1).oppostfix]) and
  1359. (taicpu(hpfar1).ops = 2) and
  1360. MatchOperand(taicpu(hp1).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1361. { don't remove the first mov if the second is a mov rX,rX }
  1362. not(MatchOperand(taicpu(hpfar1).oper[0]^, taicpu(hpfar1).oper[1]^)) do
  1363. begin
  1364. { Defer removing the first p until after the while loop }
  1365. if p <> hp1 then
  1366. begin
  1367. DebugMsg(SPeepholeOptimization + 'MovMov done', hp1);
  1368. asml.remove(hp1);
  1369. hp1.free;
  1370. end;
  1371. hp1:=hpfar1;
  1372. GetNextInstruction(hpfar1,hpfar1);
  1373. result:=true;
  1374. if not assigned(hpfar1) then
  1375. Break;
  1376. end;
  1377. if Result then
  1378. begin
  1379. DebugMsg(SPeepholeOptimization + 'MovMov done', p);
  1380. RemoveCurrentp(p);
  1381. Exit;
  1382. end;
  1383. end;
  1384. if RedundantMovProcess(p,hpfar1) then
  1385. begin
  1386. Result:=true;
  1387. { p might not point at a mov anymore }
  1388. exit;
  1389. end;
  1390. { If hpfar1 is nil after the call to RedundantMovProcess, it is
  1391. because it would have become a dangling pointer, so reinitialise it. }
  1392. if not Assigned(hpfar1) then
  1393. Continue;
  1394. { Fold the very common sequence
  1395. mov regA, regB
  1396. ldr* regA, [regA]
  1397. to
  1398. ldr* regA, [regB]
  1399. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1400. }
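{ The same applies when the copied register is used as the index, e.g.
  (hypothetical registers, assuming r0 is not used afterwards):
    mov r0, r1
    ldr r2, [r3, r0]
  becomes
    ldr r2, [r3, r1]
}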
  1401. if
  1402. // Make sure that Thumb code doesn't propagate a high register into a reference
  1403. (
  1404. (
  1405. GenerateThumbCode and
  1406. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)
  1407. ) or (not GenerateThumbCode)
  1408. ) and
  1409. (taicpu(p).oper[1]^.typ = top_reg) and
  1410. (taicpu(p).oppostfix = PF_NONE) and
  1411. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1412. (taicpu(hpfar1).oper[1]^.typ = top_ref) and
  1413. { We can change the base register only when the instruction uses AM_OFFSET }
  1414. ((taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1415. ((taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1416. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1417. ) and
  1418. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1419. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1420. begin
  1421. DebugMsg(SPeepholeOptimization + 'MovLdr2Ldr done', hpfar1);
  1422. if (taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1423. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1424. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1425. if taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1426. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1427. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1428. if Assigned(dealloc) then
  1429. begin
  1430. asml.remove(dealloc);
  1431. asml.InsertAfter(dealloc,hpfar1);
  1432. end;
  1433. if (not Assigned(hp1)) or (p=hp1) then
  1434. GetNextInstruction(p, hp1);
  1435. RemoveCurrentP(p, hp1);
  1436. result:=true;
  1437. Exit;
  1438. end
  1439. end
1440. { 3-operand mov optimisations }
  1441. else if (taicpu(p).ops = 3) then
  1442. begin
  1443. if (taicpu(p).oper[2]^.typ = top_shifterop) and
  1444. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1445. (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
  1446. (taicpu(hpfar1).ops>=1) and
  1447. (taicpu(hpfar1).oper[0]^.typ=top_reg) and
  1448. (not RegModifiedBetween(taicpu(hpfar1).oper[0]^.reg, p, hpfar1)) and
  1449. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1450. begin
  1451. if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24 ) and
  1452. MatchInstruction(hpfar1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1453. (taicpu(hpfar1).ops=3) and
  1454. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1455. (taicpu(hpfar1).oper[2]^.typ = top_const) and
1456. { Check if the AND would actually only mask out bits that are already zero because of the shift
  1457. }
  1458. ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hpfar1).oper[2]^.val) =
  1459. ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
  1460. begin
  1461. DebugMsg(SPeepholeOptimization + 'LsrAnd2Lsr done', hpfar1);
  1462. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1463. asml.remove(hpfar1);
  1464. hpfar1.free;
  1465. result:=true;
  1466. Exit;
  1467. end
  1468. else if MatchInstruction(hpfar1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1469. (taicpu(hpfar1).ops=3) and
  1470. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1471. (taicpu(hpfar1).oper[2]^.typ = top_const) and
1472. { Check if the BIC would actually only mask out bits that are already zero because of the shift }
  1473. (taicpu(hpfar1).oper[2]^.val<>0) and
  1474. (BsfDWord(taicpu(hpfar1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1475. begin
  1476. DebugMsg(SPeepholeOptimization + 'LsrBic2Lsr done', hpfar1);
  1477. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1478. asml.remove(hpfar1);
  1479. hpfar1.free;
  1480. result:=true;
  1481. Exit;
  1482. end;
  1483. end;
  1484. { This folds shifterops into following instructions
  1485. mov r0, r1, lsl #8
  1486. add r2, r3, r0
  1487. to
  1488. add r2, r3, r1, lsl #8
  1489. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1490. }
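{ When the folded register ends up as the first source operand, the
  operands are swapped and SUB/RSB resp. SBC/RSC are exchanged, e.g.
  (hypothetical registers):
    mov r0, r1, lsl #2
    sub r2, r0, r3
  becomes
    rsb r2, r3, r1, lsl #2
}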
  1491. if (taicpu(p).oper[1]^.typ = top_reg) and
  1492. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1493. (taicpu(p).oppostfix = PF_NONE) and
  1494. MatchInstruction(hpfar1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1495. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1496. A_CMP, A_CMN],
  1497. [taicpu(p).condition], [PF_None]) and
  1498. (not ((GenerateThumb2Code) and
  1499. (taicpu(hpfar1).opcode in [A_SBC]) and
  1500. (((taicpu(hpfar1).ops=3) and
  1501. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^.reg)) or
  1502. ((taicpu(hpfar1).ops=2) and
  1503. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^.reg))))) and
  1504. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) and
  1505. (taicpu(hpfar1).ops >= 2) and
  1506. {Currently we can't fold into another shifterop}
  1507. (taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^.typ = top_reg) and
  1508. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1509. NR_DEFAULTFLAGS for modification}
  1510. (
  1511. {Everything is fine if we don't use RRX}
  1512. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1513. (
  1514. {If it is RRX, then check if we're just accessing the next instruction}
  1515. Assigned(hp1) and
  1516. (hpfar1 = hp1)
  1517. )
  1518. ) and
1519. { reg1 must not be modified in between }
  1520. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
1521. { The shifterop can contain a register, which must not be modified in between }
  1522. (
  1523. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1524. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hpfar1))
  1525. ) and
  1526. (
  1527. {Only ONE of the two src operands is allowed to match}
  1528. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-2]^) xor
  1529. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^)
  1530. ) then
  1531. begin
  1532. if taicpu(hpfar1).opcode in [A_TST, A_TEQ, A_CMN] then
  1533. I2:=0
  1534. else
  1535. I2:=1;
  1536. for I:=I2 to taicpu(hpfar1).ops-1 do
  1537. if MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[I]^.reg) then
  1538. begin
1539. { If the parameter matched on the second op from the RIGHT,
1540. we have to switch the parameters; this will not happen for CMP,
1541. where we're only evaluating the rightmost parameter
1542. }
  1543. if I <> taicpu(hpfar1).ops-1 then
  1544. begin
  1545. {The SUB operators need to be changed when we swap parameters}
  1546. case taicpu(hpfar1).opcode of
  1547. A_SUB: tempop:=A_RSB;
  1548. A_SBC: tempop:=A_RSC;
  1549. A_RSB: tempop:=A_SUB;
  1550. A_RSC: tempop:=A_SBC;
  1551. else tempop:=taicpu(hpfar1).opcode;
  1552. end;
  1553. if taicpu(hpfar1).ops = 3 then
  1554. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1555. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[2]^.reg,
  1556. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1557. else
  1558. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1559. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1560. taicpu(p).oper[2]^.shifterop^);
  1561. end
  1562. else
  1563. if taicpu(hpfar1).ops = 3 then
  1564. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1565. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[1]^.reg,
  1566. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1567. else
  1568. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1569. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1570. taicpu(p).oper[2]^.shifterop^);
  1571. if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
  1572. AllocRegBetween(taicpu(p).oper[2]^.shifterop^.rs,p,hpfar1,UsedRegs);
  1573. AllocRegBetween(taicpu(p).oper[1]^.reg,p,hpfar1,UsedRegs);
  1574. asml.insertbefore(hp2, hpfar1);
  1575. asml.remove(hpfar1);
  1576. hpfar1.free;
  1577. DebugMsg(SPeepholeOptimization + 'FoldShiftProcess done', hp2);
  1578. if not Assigned(hp1) then
  1579. GetNextInstruction(p, hp1)
  1580. else if hp1 = hpfar1 then
  1581. { If hp1 = hpfar1, then it's a dangling pointer }
  1582. hp1 := hp2;
  1583. RemoveCurrentP(p, hp1);
  1584. Result:=true;
  1585. Exit;
  1586. end;
  1587. end;
  1588. {
  1589. Fold
  1590. mov r1, r1, lsl #2
  1591. ldr/ldrb r0, [r0, r1]
  1592. to
  1593. ldr/ldrb r0, [r0, r1, lsl #2]
  1594. XXX: This still needs some work, as we quite often encounter something like
  1595. mov r1, r2, lsl #2
  1596. add r2, r3, #imm
  1597. ldr r0, [r2, r1]
  1598. which can't be folded because r2 is overwritten between the shift and the ldr.
1599. We could try to shuffle the registers around and fold it into:
  1600. add r1, r3, #imm
  1601. ldr r0, [r1, r2, lsl #2]
  1602. }
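{ If the shifted register appears as the base rather than the index,
  base and index are swapped first, e.g. (hypothetical registers):
    mov r1, r2, lsl #2
    ldr r0, [r1, r3]
  becomes
    ldr r0, [r3, r2, lsl #2]
}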
  1603. if (not(GenerateThumbCode)) and
  1604. { thumb2 allows only lsl #0..#3 }
  1605. (not(GenerateThumb2Code) or
  1606. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1607. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1608. )
  1609. ) and
  1610. (taicpu(p).oper[1]^.typ = top_reg) and
  1611. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1612. { RRX is tough to handle, because it requires tracking the C-Flag,
1613. and it is also extremely unlikely to be emitted this way}
  1614. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1615. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1616. (taicpu(p).oppostfix = PF_NONE) and
  1617. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1618. (MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1619. (GenerateThumb2Code and
  1620. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1621. ) and
  1622. (
  1623. {If this is address by offset, one of the two registers can be used}
  1624. ((taicpu(hpfar1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1625. (
  1626. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1627. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1628. )
  1629. ) or
  1630. {For post and preindexed only the index register can be used}
  1631. ((taicpu(hpfar1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1632. (
  1633. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1634. (taicpu(hpfar1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1635. ) and
  1636. (not GenerateThumb2Code)
  1637. )
  1638. ) and
  1639. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1640. (taicpu(hpfar1).oper[1]^.ref^.index<>NR_NO) and
  1641. (taicpu(hpfar1).oper[1]^.ref^.base<>NR_NO) and
  1642. { Only fold if there isn't another shifterop already, and offset is zero. }
  1643. (taicpu(hpfar1).oper[1]^.ref^.offset = 0) and
  1644. (taicpu(hpfar1).oper[1]^.ref^.shiftmode = SM_None) and
  1645. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1646. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1647. begin
  1648. { If the register we want to do the shift for resides in base, we need to swap that}
  1649. if (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1650. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(hpfar1).oper[1]^.ref^.index;
  1651. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1652. taicpu(hpfar1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1653. taicpu(hpfar1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1654. DebugMsg(SPeepholeOptimization + 'FoldShiftLdrStr done', hpfar1);
  1655. RemoveCurrentP(p);
  1656. Result:=true;
  1657. Exit;
  1658. end;
  1659. end;
  1660. {
1661. Often we see shifts and then a superfluous mov to another register.
  1662. In the future this might be handled in RedundantMovProcess when it uses RegisterTracking
  1663. }
  1664. if RemoveSuperfluousMove(p, hpfar1, 'MovMov2Mov') then
  1665. Result:=true;
  1666. Exit;
  1667. end;
  1668. end;
  1669. function TCpuAsmOptimizer.OptPass1MOVW(var p: tai): Boolean;
  1670. var
  1671. ThisReg: TRegister;
  1672. a: aint;
  1673. imm_shift: byte;
  1674. hp1, hp2: tai;
  1675. begin
  1676. Result := False;
  1677. ThisReg := taicpu(p).oper[0]^.reg;
  1678. if GetNextInstruction(p, hp1) then
  1679. begin
  1680. { Can the MOVW/MOVT pair be represented by a single MOV instruction? }
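{ Illustrative example with hypothetical values:
    movw r0, #0
    movt r0, #16
  loads $00100000, which is a valid shifter constant, so it becomes
    mov r0, #$00100000
  and values whose bitwise negation is a shifter constant become an MVN instead.
}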
  1681. if MatchInstruction(hp1, A_MOVT, [taicpu(p).condition], []) and
  1682. (taicpu(hp1).oper[0]^.reg = ThisReg) then
  1683. begin
  1684. a := (aint(taicpu(p).oper[1]^.val) and $FFFF) or aint(taicpu(hp1).oper[1]^.val shl 16);
  1685. if is_shifter_const(a,imm_shift) then
  1686. begin
  1687. DebugMsg(SPeepholeOptimization + 'MOVW/MOVT pair can encode value as a single MOV instruction (MovwMovT2Mov)', p);
  1688. taicpu(p).opcode := A_MOV;
  1689. taicpu(p).oper[1]^.val := a;
  1690. RemoveInstruction(hp1);
  1691. Result := True;
  1692. Exit;
  1693. end
  1694. else if is_shifter_const(not(a),imm_shift) then
  1695. begin
  1696. DebugMsg(SPeepholeOptimization + 'MOVW/MOVT pair can encode value as a single MVN instruction (MovwMovT2Mvn)', p);
  1697. taicpu(p).opcode := A_MVN;
  1698. taicpu(p).oper[1]^.val := not(a);
  1699. RemoveInstruction(hp1);
  1700. Result := True;
  1701. Exit;
  1702. end;
  1703. end;
  1704. if (
  1705. (
  1706. MatchInstruction(hp1, A_STR, [taicpu(p).condition], [PF_H]) and
  1707. (taicpu(hp1).oper[0]^.reg = ThisReg)
  1708. )
  1709. ) and
  1710. TryConstMerge(p, hp1) then
  1711. begin
  1712. Result := True;
  1713. Exit;
  1714. end;
  1715. end;
  1716. end;
  1717. function TCpuAsmOptimizer.OptPass1MVN(var p: tai): Boolean;
  1718. var
  1719. hp1: tai;
  1720. begin
  1721. {
  1722. change
  1723. mvn reg2,reg1
  1724. and reg3,reg4,reg2
  1725. dealloc reg2
  1726. to
  1727. bic reg3,reg4,reg1
  1728. }
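{ Illustrative example with hypothetical registers:
    mvn r2, r1
    and r3, r4, r2
  becomes
    bic r3, r4, r1
  and if the mvn result is the middle operand, e.g. and r3, r2, r4,
  the operands are swapped first, again giving bic r3, r4, r1.
}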
  1729. Result := False;
  1730. if (taicpu(p).oper[1]^.typ = top_reg) and
  1731. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1732. MatchInstruction(hp1,A_AND,[],[]) and
  1733. (((taicpu(hp1).ops=3) and
  1734. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1735. (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
  1736. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
  1737. ((taicpu(hp1).ops=2) and
  1738. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1739. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1740. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
1741. { reg1 must not be modified in between }
  1742. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1743. begin
  1744. DebugMsg(SPeepholeOptimization + 'MvnAnd2Bic done', p);
  1745. taicpu(hp1).opcode:=A_BIC;
  1746. if taicpu(hp1).ops=3 then
  1747. begin
  1748. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1749. taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
  1750. taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
  1751. end
  1752. else
  1753. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1754. RemoveCurrentp(p);
  1755. Result := True;
  1756. Exit;
  1757. end;
  1758. end;
  1759. function TCpuAsmOptimizer.OptPass1VMov(var p: tai): Boolean;
  1760. var
  1761. hp1: tai;
  1762. begin
  1763. {
  1764. change
  1765. vmov reg0,reg1,reg2
  1766. vmov reg1,reg2,reg0
  1767. into
  1768. vmov reg0,reg1,reg2
1769. can be applied regardless of whether reg0 or reg2 is the vfp register
  1770. }
  1771. Result := False;
  1772. if (taicpu(p).ops = 3) then
  1773. while GetNextInstruction(p, hp1) and
  1774. MatchInstruction(hp1, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1775. (taicpu(hp1).ops = 3) and
  1776. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[2]^) and
  1777. MatchOperand(taicpu(p).oper[1]^, taicpu(hp1).oper[0]^) and
  1778. MatchOperand(taicpu(p).oper[2]^, taicpu(hp1).oper[1]^) do
  1779. begin
  1780. asml.Remove(hp1);
  1781. hp1.free;
  1782. DebugMsg(SPeepholeOptimization + 'VMovVMov2VMov done', p);
  1783. { Can we do it again? }
  1784. end;
  1785. end;
  1786. function TCpuAsmOptimizer.OptPass1VOp(var p: tai): Boolean;
  1787. var
  1788. hp1: tai;
  1789. begin
  1790. Result := GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1791. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp');
  1792. end;
  1793. function TCpuAsmOptimizer.OptPass1Push(var p: tai): Boolean;
  1794. var
  1795. hp1: tai;
  1796. begin
  1797. Result:=false;
  1798. if (taicpu(p).oper[0]^.regset^=[RS_R14]) and
  1799. GetNextInstruction(p,hp1) and
  1800. MatchInstruction(hp1,A_POP,[C_None],[PF_None]) and
  1801. (taicpu(hp1).oper[0]^.regset^=[RS_R15]) then
  1802. begin
  1803. if not(CPUARM_HAS_BX in cpu_capabilities[current_settings.cputype]) then
  1804. begin
  1805. DebugMsg('Peephole Optimization: PushPop2Mov done', p);
  1806. taicpu(p).ops:=2;
  1807. taicpu(p).loadreg(1, NR_R14);
  1808. taicpu(p).loadreg(0, NR_R15);
  1809. taicpu(p).opcode:=A_MOV;
  1810. end
  1811. else
  1812. begin
  1813. DebugMsg('Peephole Optimization: PushPop2Bx done', p);
  1814. taicpu(p).loadreg(0, NR_R14);
  1815. taicpu(p).opcode:=A_BX;
  1816. end;
  1817. RemoveInstruction(hp1);
  1818. Result:=true;
  1819. Exit;
  1820. end;
  1821. end;
  1822. function TCpuAsmOptimizer.OptPass2Bcc(var p: tai): Boolean;
  1823. var
  1824. hp1,hp2,hp3,after_p: tai;
  1825. l : longint;
  1826. WasLast: boolean;
  1827. Label_X, Label_Y: TASmLabel;
  1828. procedure ConvertInstructins(this_hp: tai; newcond: tasmcond);
  1829. var
  1830. next_hp: tai;
  1831. begin
  1832. repeat
  1833. if this_hp.typ=ait_instruction then
  1834. taicpu(this_hp).condition := newcond;
  1835. GetNextInstruction(this_hp, next_hp);
  1836. if MustBeLast(this_hp) then
  1837. Break;
  1838. this_hp := next_hp
  1839. until not(assigned(this_hp)) or
  1840. not(CanBeCond(this_hp)) or
  1841. ((hp1.typ = ait_instruction) and (taicpu(hp1).opcode = A_B)) or
  1842. (this_hp.typ = ait_label);
  1843. end;
  1844. begin
  1845. Result := False;
  1846. if (taicpu(p).condition<>C_None) and
  1847. not(GenerateThumbCode) then
  1848. begin
  1849. { check for
  1850. Bxx xxx
  1851. <several instructions>
  1852. xxx:
  1853. }
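{ Bcc2Cond, illustrated with hypothetical code:
    beq .L1
    add r0, r0, #1
    sub r1, r1, #1
  .L1:
  becomes
    addne r0, r0, #1
    subne r1, r1, #1
  .L1:
  i.e. the branch is replaced by executing the skipped block under the
  inverse condition.
}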
  1854. Label_X := TAsmLabel(taicpu(p).oper[0]^.ref^.symbol);
  1855. l:=0;
  1856. WasLast:=False;
  1857. GetNextInstruction(p, hp1);
  1858. after_p := hp1;
  1859. while assigned(hp1) and
  1860. (l<=4) and
  1861. CanBeCond(hp1) and
  1862. { stop on labels }
  1863. not(hp1.typ=ait_label) and
  1864. { avoid that we cannot recognize the case BccB2Cond }
  1865. not((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B)) do
  1866. begin
  1867. inc(l);
  1868. if MustBeLast(hp1) then
  1869. begin
  1870. WasLast:=True;
  1871. GetNextInstruction(hp1,hp1);
  1872. break;
  1873. end
  1874. else
  1875. GetNextInstruction(hp1,hp1);
  1876. end;
  1877. if assigned(hp1) then
  1878. begin
  1879. if FindLabel(Label_X, hp1) then
  1880. begin
  1881. if (l<=4) and (l>0) then
  1882. begin
  1883. ConvertInstructins(after_p, inverse_cond(taicpu(p).condition));
  1884. DebugMsg(SPeepholeOptimization + 'Bcc2Cond done', p);
1885. { wait with removing, otherwise GetNextInstruction could
1886. skip the label if the jump that was moved away
1887. was its only usage }
  1888. Label_X.decrefs;
  1889. RemoveCurrentP(p, after_p);
  1890. Result := True;
  1891. Exit;
  1892. end;
  1893. end
  1894. else
  1895. { do not perform further optimizations if there is an instruction
  1896. in block #1 which cannot be optimized.
  1897. }
  1898. if not WasLast then
  1899. begin
  1900. { check further for
  1901. Bcc xxx
  1902. <several instructions 1>
  1903. B yyy
  1904. xxx:
  1905. <several instructions 2>
  1906. yyy:
  1907. }
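{ BccB2Cond, illustrated with hypothetical code:
    beq .L1
    add r0, r0, #1
    b .L2
  .L1:
    sub r0, r0, #1
  .L2:
  becomes
    addne r0, r0, #1
  .L1:
    subeq r0, r0, #1
  .L2:
  i.e. block 1 runs under the inverse condition, block 2 under the
  original condition, and both branches are removed.
}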
  1908. { hp2 points to jmp yyy }
  1909. hp2:=hp1;
  1910. { skip hp2 to xxx }
  1911. if assigned(hp2) and
  1912. (l<=3) and
  1913. (hp2.typ=ait_instruction) and
  1914. (taicpu(hp2).is_jmp) and
  1915. (taicpu(hp2).condition=C_None) and
  1916. GetNextInstruction(hp2, hp1) and
  1917. { real label and jump, no further references to the
  1918. label are allowed }
  1919. (Label_X.getrefs = 1) and
  1920. FindLabel(Label_X, hp1) then
  1921. begin
  1922. Label_Y := TAsmLabel(taicpu(hp2).oper[0]^.ref^.symbol);
  1923. l:=0;
  1924. { skip hp1 and hp3 to <several moves 2> }
  1925. GetNextInstruction(hp1, hp1);
  1926. hp3 := hp1;
  1927. while assigned(hp1) and
  1928. CanBeCond(hp1) and
  1929. (l<=3) do
  1930. begin
  1931. inc(l);
  1932. if MustBeLast(hp1) then
  1933. begin
  1934. GetNextInstruction(hp1, hp1);
  1935. break;
  1936. end
  1937. else
  1938. GetNextInstruction(hp1, hp1);
  1939. end;
  1940. { hp1 points to yyy: }
  1941. if assigned(hp1) and
  1942. FindLabel(Label_Y, hp1) then
  1943. begin
  1944. ConvertInstructins(after_p, inverse_cond(taicpu(p).condition));
  1945. ConvertInstructins(hp3, taicpu(p).condition);
  1946. DebugMsg(SPeepholeOptimization + 'BccB2Cond done', after_p);
  1947. { remove B }
  1948. Label_Y.decrefs;
  1949. RemoveInstruction(hp2);
  1950. { remove Bcc }
  1951. Label_X.decrefs;
  1952. RemoveCurrentP(p, after_p);
  1953. Result := True;
  1954. Exit;
  1955. end;
  1956. end;
  1957. end;
  1958. end;
  1959. end;
  1960. end;
  1961. function TCpuAsmOptimizer.OptPass2CMP(var p: tai): Boolean;
  1962. var
  1963. hp1, hp_last: tai;
  1964. begin
  1965. Result := False;
  1966. if not GetNextInstructionUsingReg(p, hp1, NR_DEFAULTFLAGS) then
  1967. Exit;
  1968. if (hp1.typ = ait_label) or
  1969. (
  1970. (hp1.typ = ait_instruction) and
  1971. (taicpu(hp1).condition = C_None) and
  1972. (
  1973. RegModifiedByInstruction(NR_DEFAULTFLAGS, hp1) or
  1974. is_calljmp(taicpu(hp1).opcode)
  1975. )
  1976. ) then
  1977. begin
  1978. { The comparison is a null operation }
  1979. DebugMsg(SPeepholeOptimization + 'CMP -> nop', p);
  1980. RemoveCurrentP(p);
  1981. Result := True;
  1982. Exit;
  1983. end;
  1984. {
  1985. change
  1986. <op> reg,x,y
  1987. cmp reg,#0
  1988. into
  1989. <op>s reg,x,y
  1990. }
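{ e.g. (hypothetical registers):
    add r0, r1, r2
    cmp r0, #0
    beq .L1
  becomes
    adds r0, r1, r2
    beq .L1
  provided only the Z/N-based conditions are used afterwards.
}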
  1991. if (taicpu(p).oppostfix = PF_None) and
  1992. (taicpu(p).oper[1]^.val = 0) and
1993. { be careful here: following instructions could use other flags,
1994. however after a jump fpc never depends on the value of flags }
  1995. { All above instructions set Z and N according to the following
  1996. Z := result = 0;
  1997. N := result[31];
  1998. EQ = Z=1; NE = Z=0;
  1999. MI = N=1; PL = N=0; }
  2000. (MatchInstruction(hp1, [A_B, A_CMP, A_CMN, A_TST, A_TEQ], [C_EQ,C_NE,C_MI,C_PL], []) or
2001. { mov is also possible, but only if there is no shifter operand: it could be an RRX,
2002. and we are too lazy to check whether it is RRX or something else }
  2003. (MatchInstruction(hp1, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp1).ops=2))) and
  2004. GetLastInstruction(p, hp_last) and
  2005. MatchInstruction(hp_last, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,
  2006. A_EOR,A_AND,A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
  2007. (
  2008. { mlas is only allowed in arm mode }
  2009. (taicpu(hp_last).opcode<>A_MLA) or
  2010. (current_settings.instructionset<>is_thumb)
  2011. ) and
  2012. (taicpu(hp_last).oper[0]^.reg = taicpu(p).oper[0]^.reg) and
  2013. assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp1.Next))) then
  2014. begin
  2015. DebugMsg(SPeepholeOptimization + 'OpCmp2OpS done', hp_last);
  2016. taicpu(hp_last).oppostfix:=PF_S;
  2017. { move flag allocation if possible }
  2018. hp1:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp_last.Next));
  2019. if assigned(hp1) then
  2020. begin
  2021. asml.Remove(hp1);
  2022. asml.insertbefore(hp1, hp_last);
  2023. end;
  2024. RemoveCurrentP(p);
  2025. Result:=true;
  2026. end;
  2027. end;
  2028. function TCpuAsmOptimizer.OptPass2STR(var p: tai): Boolean;
  2029. var
  2030. hp1: tai;
  2031. Postfix: TOpPostfix;
  2032. OpcodeStr: shortstring;
  2033. begin
  2034. Result := False;
  2035. { Try to merge two STRs into an STM instruction }
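(* Illustrative example with hypothetical registers:
     str r2, [r0]
     str r3, [r0, #4]
   becomes
     stm r0, {r2, r3}
*)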
  2036. if not(GenerateThumbCode) and (taicpu(p).oper[1]^.typ = top_ref) and
  2037. (taicpu(p).oper[1]^.ref^.addressmode = AM_OFFSET) and
  2038. (
  2039. (taicpu(p).oper[1]^.ref^.base = NR_NO) or
  2040. (taicpu(p).oper[1]^.ref^.index = NR_NO)
  2041. ) and
  2042. (taicpu(p).oppostfix = PF_None) and
  2043. (getregtype(taicpu(p).oper[0]^.reg) = R_INTREGISTER) then
  2044. begin
  2045. hp1 := p;
  2046. while GetNextInstruction(hp1, hp1) and (hp1.typ = ait_instruction) and
  2047. (taicpu(hp1).opcode = A_STR) do
  2048. if (taicpu(hp1).condition = taicpu(p).condition) and
  2049. (taicpu(hp1).oppostfix = PF_None) and
  2050. (getregtype(taicpu(hp1).oper[0]^.reg) = R_INTREGISTER) and
  2051. (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  2052. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[1]^.ref^.base) and
  2053. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[1]^.ref^.index) and
  2054. (
  2055. (
  2056. (taicpu(p).oper[1]^.ref^.offset = 0) and
  2057. (getsupreg(taicpu(hp1).oper[0]^.reg) > getsupreg(taicpu(p).oper[0]^.reg)) and
  2058. (abs(taicpu(hp1).oper[1]^.ref^.offset) = 4)
  2059. ) or (
  2060. (taicpu(hp1).oper[1]^.ref^.offset = 0) and
  2061. (getsupreg(taicpu(hp1).oper[0]^.reg) < getsupreg(taicpu(p).oper[0]^.reg)) and
  2062. (abs(taicpu(p).oper[1]^.ref^.offset) = 4)
  2063. )
  2064. ) then
  2065. begin
  2066. if (getsupreg(taicpu(hp1).oper[0]^.reg) < getsupreg(taicpu(p).oper[0]^.reg)) xor
  2067. (taicpu(hp1).oper[1]^.ref^.offset < taicpu(p).oper[1]^.ref^.offset) then
  2068. begin
  2069. Postfix := PF_DA;
  2070. OpcodeStr := 'DA';
  2071. end
  2072. else
  2073. begin
  2074. Postfix := PF_None;
  2075. OpcodeStr := '';
  2076. end;
  2077. taicpu(hp1).oper[1]^.ref^.offset := 0;
  2078. if taicpu(hp1).oper[1]^.ref^.index = NR_NO then
  2079. begin
  2080. taicpu(hp1).oper[1]^.ref^.index := taicpu(hp1).oper[1]^.ref^.base;
  2081. taicpu(hp1).oper[1]^.ref^.base := NR_NO;
  2082. end;
  2083. taicpu(p).opcode := A_STM;
  2084. taicpu(p).loadregset(1, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg), getsupreg(taicpu(hp1).oper[0]^.reg)]);
  2085. taicpu(p).loadref(0, taicpu(hp1).oper[1]^.ref^);
  2086. taicpu(p).oppostfix := Postfix;
  2087. RemoveInstruction(hp1);
  2088. DebugMsg(SPeepholeOptimization + 'Merging stores: STR/STR -> STM' + OpcodeStr, p);
  2089. Result := True;
  2090. Exit;
  2091. end;
  2092. end;
  2093. end;
  2094. function TCpuAsmOptimizer.OptPass2STM(var p: tai): Boolean;
  2095. var
  2096. hp1: tai;
  2097. CorrectOffset:ASizeInt;
  2098. i, LastReg: TSuperRegister;
  2099. Postfix: TOpPostfix;
  2100. OpcodeStr: shortstring;
  2101. basereg : tregister;
  2102. begin
  2103. Result := False;
  2104. { See if STM/STR can be merged into a single STM }
  2105. { taicpu(p).opcode is A_STM, so first operand is a memory reference }
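(* Illustrative example with hypothetical registers:
     stm r0, {r2, r3}
     str r4, [r0, #8]
   becomes
     stm r0, {r2, r3, r4}
*)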
  2106. if (taicpu(p).oper[0]^.ref^.addressmode = AM_OFFSET) then
  2107. begin
  2108. { Only try to handle simple base reg, without index }
  2109. if (taicpu(p).oper[0]^.ref^.index = NR_NO) then
  2110. basereg:=taicpu(p).oper[0]^.ref^.base
  2111. else if (taicpu(p).oper[0]^.ref^.base = NR_NO) and
  2112. (taicpu(p).oper[0]^.ref^.shiftmode = SM_NONE) then
  2113. basereg:=taicpu(p).oper[0]^.ref^.index
  2114. else
  2115. exit;
  2116. CorrectOffset := 0;
  2117. LastReg := RS_NO;
  2118. for i in taicpu(p).oper[1]^.regset^ do
  2119. begin
  2120. LastReg := i;
  2121. Inc(CorrectOffset, 4);
  2122. end;
2123. { This while loop effectively does a Selection Sort on any STR
  2124. instructions that follow }
  2125. hp1 := p;
  2126. while (LastReg < maxcpuregister) and
  2127. GetNextInstruction(hp1, hp1) and (hp1.typ = ait_instruction) and
  2128. (taicpu(hp1).opcode = A_STR) and
  2129. (taicpu(hp1).oper[1]^.typ = top_ref) do
  2130. if (taicpu(hp1).condition = taicpu(p).condition) and
  2131. (taicpu(hp1).oppostfix = PF_None) and
  2132. (getregtype(taicpu(hp1).oper[0]^.reg) = R_INTREGISTER) and
  2133. (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  2134. (taicpu(hp1).oper[1]^.ref^.shiftmode = SM_NONE) and
  2135. (
  2136. (
  2137. (taicpu(hp1).oper[1]^.ref^.base = NR_NO) and
  2138. (taicpu(hp1).oper[1]^.ref^.index = basereg)
  2139. ) or (
  2140. (taicpu(hp1).oper[1]^.ref^.index = NR_NO) and
  2141. (taicpu(hp1).oper[1]^.ref^.base = basereg)
  2142. )
  2143. ) and
  2144. { Next register must be later in the set }
  2145. (getsupreg(taicpu(hp1).oper[0]^.reg) > LastReg) and
  2146. (
  2147. (
  2148. (taicpu(p).oppostfix = PF_None) and
  2149. (taicpu(hp1).oper[1]^.ref^.offset = CorrectOffset)
  2150. ) or (
  2151. (taicpu(p).oppostfix = PF_DA) and
  2152. (taicpu(hp1).oper[1]^.ref^.offset = -CorrectOffset)
  2153. )
  2154. ) then
  2155. begin
  2156. { Increment the reference values ready for the next STR instruction to find }
  2157. LastReg := getsupreg(taicpu(hp1).oper[0]^.reg);
  2158. Inc(CorrectOffset, 4);
  2159. if (taicpu(p).oppostfix = PF_DA) then
  2160. OpcodeStr := 'DA'
  2161. else
  2162. OpcodeStr := '';
  2163. Include(taicpu(p).oper[1]^.regset^, LastReg);
  2164. DebugMsg(SPeepholeOptimization + 'Merging stores: STM' + OpcodeStr + '/STR -> STM' + OpcodeStr, hp1);
  2165. RemoveInstruction(hp1);
  2166. Result := True;
  2167. { See if we can find another one to merge }
  2168. hp1 := p;
  2169. Continue;
  2170. end;
  2171. end;
  2172. end;
  2173. function TCpuAsmOptimizer.PrePeepHoleOptsCpu(var p: tai): Boolean;
  2174. begin
  2175. result := false;
  2176. if p.typ=ait_instruction then
  2177. begin
  2178. case taicpu(p).opcode of
  2179. A_SBFX,
  2180. A_UBFX:
  2181. Result:=OptPreSBFXUBFX(p);
  2182. else
  2183. ;
  2184. end;
  2185. end;
  2186. end;
  2187. function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2188. begin
  2189. result := false;
  2190. if p.typ = ait_instruction then
  2191. begin
  2192. case taicpu(p).opcode of
  2193. A_CMP:
  2194. Result := OptPass1CMP(p);
  2195. A_STR:
  2196. Result := OptPass1STR(p);
  2197. A_LDR:
  2198. Result := OptPass1LDR(p);
  2199. A_MOV:
  2200. Result := OptPass1MOV(p);
  2201. A_MOVW:
  2202. Result := OptPass1MOVW(p);
  2203. A_AND:
  2204. Result := OptPass1And(p);
  2205. A_ADD,
  2206. A_SUB:
  2207. Result := OptPass1ADDSUB(p);
  2208. A_MUL:
2209. Result := OptPass1MUL(p);
  2210. A_ADC,
  2211. A_RSB,
  2212. A_RSC,
  2213. A_SBC,
  2214. A_BIC,
  2215. A_EOR,
  2216. A_ORR,
  2217. A_MLA,
  2218. A_MLS,
  2219. A_QADD,A_QADD16,A_QADD8,
  2220. A_QSUB,A_QSUB16,A_QSUB8,
  2221. A_QDADD,A_QDSUB,A_QASX,A_QSAX,
  2222. A_SHADD16,A_SHADD8,A_UHADD16,A_UHADD8,
  2223. A_SHSUB16,A_SHSUB8,A_UHSUB16,A_UHSUB8,
  2224. A_PKHTB,A_PKHBT,
  2225. A_SMUAD,A_SMUSD:
  2226. Result := OptPass1DataCheckMov(p);
  2227. {$ifdef dummy}
  2228. A_MVN:
  2229. Result := OPtPass1MVN(p);
  2230. {$endif dummy}
  2231. A_UXTB:
  2232. Result := OptPass1UXTB(p);
  2233. A_UXTH:
  2234. Result := OptPass1UXTH(p);
  2235. A_SXTB:
  2236. Result := OptPass1SXTB(p);
  2237. A_SXTH:
  2238. Result := OptPass1SXTH(p);
  2239. A_STM:
  2240. Result := OptPass1STM(p);
  2241. A_VMOV:
  2242. Result := OptPass1VMov(p);
  2243. A_VLDR,
  2244. A_VADD,
  2245. A_VMUL,
  2246. A_VDIV,
  2247. A_VSUB,
  2248. A_VSQRT,
  2249. A_VNEG,
  2250. A_VCVT,
  2251. A_VABS:
  2252. Result := OptPass1VOp(p);
  2253. A_PUSH:
  2254. Result := OptPass1Push(p);
  2255. else
  2256. ;
  2257. end;
  2258. end;
  2259. end;
  2260. function TCpuAsmOptimizer.PeepHoleOptPass2Cpu(var p: tai): boolean;
  2261. begin
  2262. result := False;
  2263. if p.typ = ait_instruction then
  2264. begin
  2265. case taicpu(p).opcode of
  2266. A_AND,
  2267. A_ORR,
  2268. A_EOR,
  2269. A_BIC,
  2270. A_ORN:
  2271. Result := OptPass2Bitwise(p);
  2272. A_CMP:
  2273. Result := OptPass2CMP(p);
  2274. A_B:
  2275. Result := OptPass2Bcc(p);
  2276. A_STM:
  2277. Result := OptPass2STM(p);
  2278. A_STR:
  2279. Result := OptPass2STR(p);
  2280. A_TST:
  2281. Result := OptPass2TST(p);
  2282. else
  2283. ;
  2284. end;
  2285. end;
  2286. end;
2287. { instructions modifying the CPSR can only be the last instruction }
  2288. function MustBeLast(p : tai) : boolean;
  2289. begin
  2290. Result:=(p.typ=ait_instruction) and
  2291. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  2292. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  2293. (taicpu(p).oppostfix=PF_S));
  2294. end;
  2295. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  2296. begin
  2297. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  2298. Result:=true
  2299. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  2300. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  2301. Result:=true
  2302. else
  2303. begin
  2304. if SuperRegistersEqual(Reg, NR_DEFAULTFLAGS) and (p1.typ = ait_instruction) then
  2305. begin
  2306. { Conditional instruction reads CPSR register }
  2307. if (taicpu(p1).condition <> C_None) then
  2308. Exit(True);
  2309. { Comparison instructions (and procedural jump) }
  2310. if (taicpu(p1).opcode in [A_BL, A_CMP, A_CMN, A_TST, A_TEQ]) then
  2311. Exit(True);
  2312. { Instruction sets CPSR register due to S suffix (floating-point
2313. instructions won't raise false positives) }
  2314. if (taicpu(p1).oppostfix = PF_S) then
  2315. Exit(True)
  2316. end;
  2317. Result:=inherited RegInInstruction(Reg, p1);
  2318. end;
  2319. end;
  2320. const
2321. { set of opcodes which might or do write to memory }
  2322. { TODO : extend armins.dat to contain r/w info }
  2323. opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
  2324. A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
2325. { adjust the register live information when swapping the two instructions p and hp1;
2326. they must immediately follow one after the other }
  2327. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
  2328. procedure CheckLiveEnd(reg : tregister);
  2329. var
  2330. supreg : TSuperRegister;
  2331. regtype : TRegisterType;
  2332. begin
  2333. if reg=NR_NO then
  2334. exit;
  2335. regtype:=getregtype(reg);
  2336. supreg:=getsupreg(reg);
  2337. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_end[supreg]=hp1) and
  2338. RegInInstruction(reg,p) then
  2339. cg.rg[regtype].live_end[supreg]:=p;
  2340. end;
  2341. procedure CheckLiveStart(reg : TRegister);
  2342. var
  2343. supreg : TSuperRegister;
  2344. regtype : TRegisterType;
  2345. begin
  2346. if reg=NR_NO then
  2347. exit;
  2348. regtype:=getregtype(reg);
  2349. supreg:=getsupreg(reg);
  2350. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_start[supreg]=p) and
  2351. RegInInstruction(reg,hp1) then
  2352. cg.rg[regtype].live_start[supreg]:=hp1;
  2353. end;
  2354. var
  2355. i : longint;
  2356. r : TSuperRegister;
  2357. begin
  2358. { assumption: p is directly followed by hp1 }
  2359. { if live of any reg used by p starts at p and hp1 uses this register then
  2360. set live start to hp1 }
  2361. for i:=0 to p.ops-1 do
  2362. case p.oper[i]^.typ of
  2363. Top_Reg:
  2364. CheckLiveStart(p.oper[i]^.reg);
  2365. Top_Ref:
  2366. begin
  2367. CheckLiveStart(p.oper[i]^.ref^.base);
  2368. CheckLiveStart(p.oper[i]^.ref^.index);
  2369. end;
  2370. Top_Shifterop:
  2371. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2372. Top_RegSet:
  2373. for r:=RS_R0 to RS_R15 do
  2374. if r in p.oper[i]^.regset^ then
  2375. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2376. else
  2377. ;
  2378. end;
  2379. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2380. set live end to p }
  2381. for i:=0 to hp1.ops-1 do
  2382. case hp1.oper[i]^.typ of
  2383. Top_Reg:
  2384. CheckLiveEnd(hp1.oper[i]^.reg);
  2385. Top_Ref:
  2386. begin
  2387. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2388. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2389. end;
  2390. Top_Shifterop:
2391. CheckLiveEnd(hp1.oper[i]^.shifterop^.rs);
  2392. Top_RegSet:
  2393. for r:=RS_R0 to RS_R15 do
  2394. if r in hp1.oper[i]^.regset^ then
  2395. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2396. else
  2397. ;
  2398. end;
  2399. end;
  2400. function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
  2401. { TODO : schedule also forward }
  2402. { TODO : schedule distance > 1 }
  2403. { returns true if p might be a load of a pc relative tls offset }
  2404. function PossibleTLSLoad(const p: tai) : boolean;
  2405. begin
  2406. Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
  2407. (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
  2408. (taicpu(p).oper[1]^.ref^.index=NR_PC)));
  2409. end;
  2410. var
  2411. hp1,hp2,hp3,hp4,hp5,insertpos : tai;
  2412. list : TAsmList;
  2413. begin
  2414. result:=true;
  2415. list:=TAsmList.create;
  2416. p:=BlockStart;
  2417. while p<>BlockEnd Do
  2418. begin
  2419. if (p.typ=ait_instruction) and
  2420. GetNextInstruction(p,hp1) and
  2421. (hp1.typ=ait_instruction) and
  2422. (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
  2423. (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
2424. { for now we don't reschedule if the previous instruction potentially changes a memory location }
  2425. ( (not(taicpu(p).opcode in opcode_could_mem_write) and
  2426. not(RegModifiedByInstruction(NR_PC,p))
  2427. ) or
  2428. ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
  2429. ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
  2430. (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
  2431. (taicpu(hp1).oper[1]^.ref^.offset=0)
  2432. )
  2433. ) or
2434. { try to prove that the memory accesses don't overlap }
  2435. ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
  2436. (taicpu(p).oper[1]^.typ = top_ref) and
  2437. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  2438. (taicpu(p).oppostfix=PF_None) and
  2439. (taicpu(hp1).oppostfix=PF_None) and
  2440. (taicpu(p).oper[1]^.ref^.index=NR_NO) and
  2441. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
2442. { get operand sizes and check if the offset distance is large enough to ensure no overlap }
  2443. (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
  2444. )
  2445. )
  2446. ) and
  2447. GetNextInstruction(hp1,hp2) and
  2448. (hp2.typ=ait_instruction) and
  2449. { loaded register used by next instruction?
  2450. if we ever support labels (they could be skipped in theory) here, the gnu2 tls general-dynamic code could get broken (the ldr before
2451. the bl may not be scheduled away from the bl) and this case needs to be taken care of
  2452. }
  2453. (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
  2454. { loaded register not used by previous instruction? }
  2455. not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
  2456. { same condition? }
  2457. (taicpu(p).condition=taicpu(hp1).condition) and
  2458. { first instruction might not change the register used as base }
  2459. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
  2460. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
  2461. ) and
  2462. { first instruction might not change the register used as index }
  2463. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
  2464. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
  2465. ) and
  2466. { if we modify the basereg AND the first instruction used that reg, we can not schedule }
  2467. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
  2468. not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
  2469. not(PossibleTLSLoad(p)) and
  2470. not(PossibleTLSLoad(hp1)) then
  2471. begin
  2472. hp3:=tai(p.Previous);
  2473. hp5:=tai(p.next);
  2474. asml.Remove(p);
2475. { if there are reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
2476. associated with p, move them together with p }
  2477. { before the instruction? }
2478. { find reg allocs, deallocs and PIC labels }
  2479. while assigned(hp3) and (hp3.typ<>ait_instruction) do
  2480. begin
  2481. if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
  2482. RegInInstruction(tai_regalloc(hp3).reg,p) )
  2483. or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
  2484. then
  2485. begin
  2486. hp4:=hp3;
  2487. hp3:=tai(hp3.Previous);
  2488. asml.Remove(hp4);
  2489. list.Insert(hp4);
  2490. end
  2491. else
  2492. hp3:=tai(hp3.Previous);
  2493. end;
  2494. list.Concat(p);
  2495. SwapRegLive(taicpu(p),taicpu(hp1));
  2496. { after the instruction? }
  2497. { find reg deallocs and reg syncs }
  2498. while assigned(hp5) and (hp5.typ<>ait_instruction) do
  2499. begin
  2500. if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
  2501. RegInInstruction(tai_regalloc(hp5).reg,p) then
  2502. begin
  2503. hp4:=hp5;
  2504. hp5:=tai(hp5.next);
  2505. asml.Remove(hp4);
  2506. list.Concat(hp4);
  2507. end
  2508. else
  2509. hp5:=tai(hp5.Next);
  2510. end;
  2511. asml.Remove(hp1);
  2512. { if there are address labels associated with hp2, those must
  2513. stay with hp2 (e.g. for GOT-less PIC) }
  2514. insertpos:=hp2;
  2515. while assigned(hp2.previous) and
  2516. (tai(hp2.previous).typ<>ait_instruction) do
  2517. begin
  2518. hp2:=tai(hp2.previous);
  2519. if (hp2.typ=ait_label) and
  2520. (tai_label(hp2).labsym.typ=AT_ADDR) then
  2521. insertpos:=hp2;
  2522. end;
  2523. {$ifdef DEBUG_PREREGSCHEDULER}
  2524. asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
  2525. {$endif DEBUG_PREREGSCHEDULER}
  2526. asml.InsertBefore(hp1,insertpos);
  2527. asml.InsertListBefore(insertpos,list);
  2528. p:=tai(p.next);
  2529. end
  2530. else if p.typ=ait_instruction then
  2531. p:=hp1
  2532. else
  2533. p:=tai(p.next);
  2534. end;
  2535. list.Free;
  2536. end;
  2537. procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  2538. var
  2539. hp : tai;
  2540. l : longint;
  2541. begin
  2542. hp := tai(p.Previous);
  2543. l := 1;
  2544. while assigned(hp) and
  2545. (l <= 4) do
  2546. begin
  2547. if hp.typ=ait_instruction then
  2548. begin
  2549. if (taicpu(hp).opcode>=A_IT) and
  2550. (taicpu(hp).opcode <= A_ITTTT) then
  2551. begin
  2552. if (taicpu(hp).opcode = A_IT) and
  2553. (l=1) then
  2554. list.Remove(hp)
  2555. else
  2556. case taicpu(hp).opcode of
  2557. A_ITE:
  2558. if l=2 then taicpu(hp).opcode := A_IT;
  2559. A_ITT:
  2560. if l=2 then taicpu(hp).opcode := A_IT;
  2561. A_ITEE:
  2562. if l=3 then taicpu(hp).opcode := A_ITE;
  2563. A_ITTE:
  2564. if l=3 then taicpu(hp).opcode := A_ITT;
  2565. A_ITET:
  2566. if l=3 then taicpu(hp).opcode := A_ITE;
  2567. A_ITTT:
  2568. if l=3 then taicpu(hp).opcode := A_ITT;
  2569. A_ITEEE:
  2570. if l=4 then taicpu(hp).opcode := A_ITEE;
  2571. A_ITTEE:
  2572. if l=4 then taicpu(hp).opcode := A_ITTE;
  2573. A_ITETE:
  2574. if l=4 then taicpu(hp).opcode := A_ITET;
  2575. A_ITTTE:
  2576. if l=4 then taicpu(hp).opcode := A_ITTT;
  2577. A_ITEET:
  2578. if l=4 then taicpu(hp).opcode := A_ITEE;
  2579. A_ITTET:
  2580. if l=4 then taicpu(hp).opcode := A_ITTE;
  2581. A_ITETT:
  2582. if l=4 then taicpu(hp).opcode := A_ITET;
  2583. A_ITTTT:
  2584. begin
  2585. if l=4 then taicpu(hp).opcode := A_ITTT;
  2586. end
  2587. else
  2588. ;
  2589. end;
  2590. break;
  2591. end;
  2592. {else if (taicpu(hp).condition<>taicpu(p).condition) or
  2593. (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
  2594. break;}
  2595. inc(l);
  2596. end;
  2597. hp := tai(hp.Previous);
  2598. end;
  2599. end;
  2600. function TCpuThumb2AsmOptimizer.OptPass1STM(var p: tai): boolean;
  2601. var
  2602. hp : taicpu;
  2603. begin
  2604. result:=false;
  2605. if MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2606. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2607. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2608. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2609. begin
  2610. DebugMsg('Peephole Stm2Push done', p);
  2611. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2612. AsmL.InsertAfter(hp, p);
  2613. asml.Remove(p);
  2614. p:=hp;
  2615. result:=true;
  2616. end;
  2617. end;
  2618. function TCpuThumb2AsmOptimizer.OptPass1LDM(var p: tai): boolean;
  2619. var
  2620. hp : taicpu;
  2621. begin
  2622. result:=false;
  2623. if MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2624. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2625. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2626. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2627. begin
  2628. DebugMsg('Peephole Ldm2Pop done', p);
  2629. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2630. asml.InsertBefore(hp, p);
  2631. asml.Remove(p);
  2632. p.Free;
  2633. p:=hp;
  2634. result:=true;
  2635. end;
  2636. end;
  2637. function TCpuThumb2AsmOptimizer.OptPass1AndThumb2(var p : tai) : boolean;
  2638. begin
  2639. result:=false;
  2640. if MatchInstruction(p, [A_AND], [], [PF_None]) and
  2641. (taicpu(p).ops = 2) and
  2642. (taicpu(p).oper[1]^.typ=top_const) and
  2643. ((taicpu(p).oper[1]^.val=255) or
  2644. (taicpu(p).oper[1]^.val=65535)) then
  2645. begin
  2646. DebugMsg('Peephole AndR2Uxt done', p);
  2647. if taicpu(p).oper[1]^.val=255 then
  2648. taicpu(p).opcode:=A_UXTB
  2649. else
  2650. taicpu(p).opcode:=A_UXTH;
  2651. taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
  2652. result := true;
  2653. end
  2654. else if MatchInstruction(p, [A_AND], [], [PF_None]) and
  2655. (taicpu(p).ops = 3) and
  2656. (taicpu(p).oper[2]^.typ=top_const) and
  2657. ((taicpu(p).oper[2]^.val=255) or
  2658. (taicpu(p).oper[2]^.val=65535)) then
  2659. begin
  2660. DebugMsg('Peephole AndRR2Uxt done', p);
  2661. if taicpu(p).oper[2]^.val=255 then
  2662. taicpu(p).opcode:=A_UXTB
  2663. else
  2664. taicpu(p).opcode:=A_UXTH;
  2665. taicpu(p).ops:=2;
  2666. result := true;
  2667. end;
  2668. end;
  2669. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2670. begin
  2671. result:=false;
  2672. if inherited PeepHoleOptPass1Cpu(p) then
  2673. result:=true
  2674. else if p.typ=ait_instruction then
  2675. case taicpu(p).opcode of
  2676. A_STM:
  2677. result:=OptPass1STM(p);
  2678. A_LDM:
  2679. result:=OptPass1LDM(p);
  2680. A_AND:
  2681. result:=OptPass1AndThumb2(p);
  2682. else
  2683. ;
  2684. end;
  2685. end;
  2686. procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
  2687. var
  2688. p,hp1,hp2: tai;
  2689. l : longint;
  2690. condition : tasmcond;
  2691. { UsedRegs, TmpUsedRegs: TRegSet; }
  2692. begin
  2693. p := BlockStart;
  2694. { UsedRegs := []; }
  2695. while (p <> BlockEnd) Do
  2696. begin
  2697. { UpdateUsedRegs(UsedRegs, tai(p.next)); }
  2698. case p.Typ Of
  2699. Ait_Instruction:
  2700. begin
  2701. case taicpu(p).opcode Of
  2702. A_B:
  2703. if taicpu(p).condition<>C_None then
  2704. begin
  2705. { check for
  2706. Bxx xxx
  2707. <several instructions>
  2708. xxx:
  2709. }
  2710. l:=0;
  2711. GetNextInstruction(p, hp1);
  2712. while assigned(hp1) and
  2713. (l<=4) and
  2714. CanBeCond(hp1) and
  2715. { stop on labels }
  2716. not(hp1.typ=ait_label) do
  2717. begin
  2718. inc(l);
  2719. if MustBeLast(hp1) then
  2720. begin
  2721. //hp1:=nil;
  2722. GetNextInstruction(hp1,hp1);
  2723. break;
  2724. end
  2725. else
  2726. GetNextInstruction(hp1,hp1);
  2727. end;
  2728. if assigned(hp1) then
  2729. begin
  2730. if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2731. begin
  2732. if (l<=4) and (l>0) then
  2733. begin
  2734. condition:=inverse_cond(taicpu(p).condition);
  2735. hp2:=p;
  2736. GetNextInstruction(p,hp1);
  2737. p:=hp1;
  2738. repeat
  2739. if hp1.typ=ait_instruction then
  2740. taicpu(hp1).condition:=condition;
  2741. if MustBeLast(hp1) then
  2742. begin
  2743. GetNextInstruction(hp1,hp1);
  2744. break;
  2745. end
  2746. else
  2747. GetNextInstruction(hp1,hp1);
  2748. until not(assigned(hp1)) or
  2749. not(CanBeCond(hp1)) or
  2750. (hp1.typ=ait_label);
2751. { wait with removing, otherwise GetNextInstruction could
2752. skip the label if the jump that was moved away
2753. was its only usage }
                                    asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
                                    DecrementPreceedingIT(asml, hp2);
                                    case l of
                                      1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
                                      2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
                                      3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
                                      4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
                                    end;
                                    tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                    asml.remove(hp2);
                                    hp2.free;
                                    continue;
                                  end;
                              end;
                          end;
                      end;
                  else
                    ;
                end;
              end;
            else
              ;
          end;
          p := tai(p.next)
        end;
    end;
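
  { Post peephole pass: where the flags are not live, rewrite instructions into
    the two-operand and/or flag-setting forms, which generally map onto the
    narrow 16-bit Thumb-2 encodings. Small illustrative examples are given at
    the individual rules below. }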
  function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
    begin
      result:=false;
      if p.typ = ait_instruction then
        begin
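          { e.g. mov r0,#1 -> movs r0,#1 for 8-bit immediates when the flags are free }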
          if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
            (taicpu(p).oper[1]^.typ=top_const) and
            (taicpu(p).oper[1]^.val >= 0) and
            (taicpu(p).oper[1]^.val < 256) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mov2Movs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
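          { e.g. mvn r0,r1 -> mvns r0,r1 when the flags are free }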
          else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
            (taicpu(p).oper[1]^.typ=top_reg) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mvn2Mvns done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
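          { e.g. rsb r0,r1,#0 -> rsbs r0,r1,#0 (negation) when the flags are free }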
          else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            (taicpu(p).oper[2]^.typ=top_const) and
            (taicpu(p).oper[2]^.val=0) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Rsb2Rsbs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
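          { e.g. add r0,r0,#4 -> adds r0,#4 for 8-bit immediates (not on SP) when the flags are free }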
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
            (taicpu(p).oper[2]^.typ=top_const) and
            (taicpu(p).oper[2]^.val >= 0) and
            (taicpu(p).oper[2]^.val < 256) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
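          { e.g. add r0,r1 -> adds r0,r1 (neither operand SP) when the flags are free }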
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
            (taicpu(p).ops = 2) and
            (taicpu(p).oper[1]^.typ=top_reg) and
            (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
            (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
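          { e.g. add r0,r0,r1 -> add r0,r1: drop the redundant destination operand }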
          else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ=top_reg) then
            begin
              DebugMsg('Peephole AddRRR2AddRR done', p);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              result:=true;
            end
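          { e.g. and r0,r0,r1 -> ands r0,r1 when the flags are free }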
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ=top_reg) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole opXXY2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
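          { e.g. lsls r0,r0,#2 -> lsls r0,#2: already flag-setting, just drop the redundant operand }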
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
            begin
              DebugMsg('Peephole opXXY2opXY done', p);
              taicpu(p).ops := 2;
              if taicpu(p).oper[2]^.typ=top_reg then
                taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
              else
                taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              result:=true;
            end
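          { commutative case, e.g. orr r0,r1,r0 -> orrs r0,r1 when the flags are free }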
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole opXYX2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
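          { e.g. mov r0,r1,lsl #2 -> lsls r0,r1,#2 when the flags are free }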
          else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
            (taicpu(p).ops=3) and
            (taicpu(p).oper[2]^.typ=top_shifterop) and
            (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
            //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mov2Shift done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              case taicpu(p).oper[2]^.shifterop^.shiftmode of
                SM_LSL: taicpu(p).opcode:=A_LSL;
                SM_LSR: taicpu(p).opcode:=A_LSR;
                SM_ASR: taicpu(p).opcode:=A_ASR;
                SM_ROR: taicpu(p).opcode:=A_ROR;
                else
                  internalerror(2019050912);
              end;
              if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
                taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
              else
                taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
              result:=true;
            end
        end;
    end;

begin
  casmoptimizer:=TCpuAsmOptimizer;
  cpreregallocscheduler:=TCpuPreRegallocScheduler;
End.