aoptcpu.pas

  1. {
  2. Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
  3. Development Team
  4. This unit implements the ARM optimizer object
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 2 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program; if not, write to the Free Software
  15. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  16. ****************************************************************************
  17. }
  18. Unit aoptcpu;
  19. {$i fpcdefs.inc}
  20. { $define DEBUG_PREREGSCHEDULER}
  21. { $define DEBUG_AOPTCPU}
  22. Interface
  23. uses
  24. cgbase, cgutils, cpubase, aasmtai,
  25. aasmcpu,
  26. aopt, aoptobj, aoptarm;
  27. Type
  28. { TCpuAsmOptimizer }
  29. TCpuAsmOptimizer = class(TARMAsmOptimizer)
  30. { Can't be done in some cases due to the limited range of jumps }
  31. function CanDoJumpOpts: Boolean; override;
  32. { uses the same constructor as TAopObj }
  33. function PrePeepHoleOptsCpu(var p: tai): Boolean; override;
  34. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  35. function PeepHoleOptPass2Cpu(var p: tai): boolean; override;
  36. Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
  37. function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;
  38. { gets the next tai object after Current that contains info relevant
  39. to the optimizer and that uses the given reference or causes a
  40. change in program flow.
  41. If there is none, it returns False and
  42. sets Next to nil }
  43. Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  44. { outputs a debug message into the assembler file }
  45. procedure DebugMsg(const s: string; p: tai);
  46. function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
  47. function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
  48. { These routines contain optimisation code that is common to all ARM platforms }
  49. function OptPass1And(var p: tai): Boolean; override;
  50. function OptPass1LDR(var p: tai): Boolean; override;
  51. function OptPass1STR(var p: tai): Boolean; override;
  52. protected
  53. function LookForPreindexedPattern(p: taicpu): boolean;
  54. function LookForPostindexedPattern(p: taicpu): boolean;
  55. { Individual optimisation routines }
  56. function OptPass1DataCheckMov(var p: tai): Boolean;
  57. function OptPass1ADDSUB(var p: tai): Boolean;
  58. function OptPass1CMP(var p: tai): Boolean;
  59. function OptPass1STM(var p: tai): Boolean;
  60. function OptPass1MOV(var p: tai): Boolean;
  61. function OptPass1MOVW(var p: tai): Boolean;
  62. function OptPass1MUL(var p: tai): Boolean;
  63. function OptPass1MVN(var p: tai): Boolean;
  64. function OptPass1VMov(var p: tai): Boolean;
  65. function OptPass1VOp(var p: tai): Boolean;
  66. function OptPass1Push(var p: tai): Boolean;
  67. function OptPass2Bcc(var p: tai): Boolean;
  68. function OptPass2CMP(var p: tai): Boolean;
  69. function OptPass2STM(var p: tai): Boolean;
  70. function OptPass2STR(var p: tai): Boolean;
  71. End;
  72. TCpuPreRegallocScheduler = class(TAsmScheduler)
  73. function SchedulerPass1Cpu(var p: tai): boolean;override;
  74. procedure SwapRegLive(p, hp1: taicpu);
  75. end;
  76. TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
  77. { uses the same constructor as TAopObj }
  78. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  79. procedure PeepHoleOptPass2;override;
  80. function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  81. protected
  82. function OptPass1AndThumb2(var p : tai) : boolean;
  83. function OptPass1LDM(var p : tai) : boolean;
  84. function OptPass1STM(var p : tai) : boolean;
  85. End;
  86. function MustBeLast(p : tai) : boolean;
  87. Implementation
  88. uses
  89. cutils,verbose,globtype,globals,
  90. systems,
  91. cpuinfo,
  92. cgobj,procinfo,
  93. aasmbase,aasmdata,
  94. aoptutils;
  95. { Range check must be disabled explicitly as conversions between signed and unsigned
  96. 32-bit values are done without explicit typecasts }
  97. {$R-}
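{ Returns true if the instruction p can still be made conditional: it must be
  ARM (not Thumb) code without a condition, and not one of the opcodes that
  cannot take a condition here (IT blocks, CBZ/CBNZ, PLD, and BL/BLX unless
  the branch target is a register). }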
  98. function CanBeCond(p : tai) : boolean;
  99. begin
  100. result:=
  101. not(GenerateThumbCode) and
  102. (p.typ=ait_instruction) and
  103. (taicpu(p).condition=C_None) and
  104. ((taicpu(p).opcode<A_IT) or (taicpu(p).opcode>A_ITTTT)) and
  105. (taicpu(p).opcode<>A_CBZ) and
  106. (taicpu(p).opcode<>A_CBNZ) and
  107. (taicpu(p).opcode<>A_PLD) and
  108. (((taicpu(p).opcode<>A_BLX) and
  109. { BL may need to be converted into BLX by the linker -- it could possibly
  110. be allowed if it targets a local symbol that is known to use the same
  111. instruction set as the current one }
  112. (taicpu(p).opcode<>A_BL)) or
  113. (taicpu(p).oper[0]^.typ=top_reg));
  114. end;
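{ Removes a conditional mov that merely reloads the constant the preceding
  cmp compared against, e.g.
    cmp   reg,#const1
    moveq reg,#const1
  the moveq is redundant and is deleted. }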
  115. function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
  116. begin
  117. Result:=false;
  118. if (taicpu(movp).condition = C_EQ) and
  119. (taicpu(cmpp).oper[0]^.reg = taicpu(movp).oper[0]^.reg) and
  120. (taicpu(cmpp).oper[1]^.val = taicpu(movp).oper[1]^.val) then
  121. begin
  122. asml.insertafter(tai_comment.Create(strpnew('Peephole Optimization: CmpMovMov - Removed redundant moveq')), movp);
  123. asml.remove(movp);
  124. movp.free;
  125. Result:=true;
  126. end;
  127. end;
  128. function AlignedToQWord(const ref : treference) : boolean;
  129. begin
  130. { (safe) heuristics to ensure alignment }
  131. result:=(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) and
  132. (((ref.offset>=0) and
  133. ((ref.offset mod 8)=0) and
  134. ((ref.base=NR_R13) or
  135. (ref.index=NR_R13))
  136. ) or
  137. ((ref.offset<=0) and
  138. { when using NR_R11, it always has a value of <qword align>+4 }
  139. ((abs(ref.offset+4) mod 8)=0) and
  140. (current_procinfo.framepointer=NR_R11) and
  141. ((ref.base=NR_R11) or
  142. (ref.index=NR_R11))
  143. )
  144. );
  145. end;
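{ Checks whether aoffset fits into the immediate offset field of a load/store:
  roughly -255..4095 for Thumb-2; for ARM +/-4095 with the PF_None/PF_B
  postfixes and +/-255 for the other postfixes. }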
  146. function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
  147. begin
  148. if GenerateThumb2Code then
  149. result := (aoffset<4096) and (aoffset>-256)
  150. else
  151. result := ((pf in [PF_None,PF_B]) and
  152. (abs(aoffset)<4096)) or
  153. (abs(aoffset)<256);
  154. end;
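{ Returns true if the instruction hp reads reg through any of its operands:
  directly as a register operand, as part of a register set, as the shift
  register of a shifter operand, or as the base/index of a reference. }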
  155. function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
  156. var
  157. p: taicpu;
  158. i: longint;
  159. begin
  160. instructionLoadsFromReg := false;
  161. if not (assigned(hp) and (hp.typ = ait_instruction)) then
  162. exit;
  163. p:=taicpu(hp);
  164. i:=1;
  165. {For these instructions we have to start on oper[0]}
  166. if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
  167. A_CMP, A_CMN, A_TST, A_TEQ,
  168. A_B, A_BL, A_BX, A_BLX,
  169. A_SMLAL, A_UMLAL, A_VSTM, A_VLDM]) then i:=0;
  170. while(i<p.ops) do
  171. begin
  172. case p.oper[I]^.typ of
  173. top_reg:
  174. instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
  175. { STRD }
  176. ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
  177. top_regset:
  178. instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
  179. top_shifterop:
  180. instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
  181. top_ref:
  182. instructionLoadsFromReg :=
  183. (p.oper[I]^.ref^.base = reg) or
  184. (p.oper[I]^.ref^.index = reg);
  185. else
  186. ;
  187. end;
  188. if (i=0) and (p.opcode in [A_LDM,A_VLDM]) then
  189. exit;
  190. if instructionLoadsFromReg then
  191. exit; {Bailout if we found something}
  192. Inc(I);
  193. end;
  194. end;
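{ Returns true if the instruction hp overwrites reg with a new value (rather
  than merely reading or updating it), so the previous value of reg can be
  considered dead after hp. }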
  195. function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
  196. var
  197. p: taicpu;
  198. begin
  199. Result := false;
  200. if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
  201. exit;
  202. p := taicpu(hp);
  203. case p.opcode of
  204. { These operands do not write into a register at all }
  205. A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
  206. A_VCMP:
  207. exit;
  208. {Take care of post-/pre-incremented stores and loads, as they change their base register}
  209. A_STR, A_LDR:
  210. begin
  211. Result := false;
  212. { actually, this does not apply here because post-/preindexed does not mean that a register
  213. is loaded with a new value, it is only modified
  214. (taicpu(p).oper[1]^.typ=top_ref) and
  215. (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  216. (taicpu(p).oper[1]^.ref^.base = reg);
  217. }
  218. { STR does not load into its first register }
  219. if p.opcode = A_STR then
  220. exit;
  221. end;
  222. A_VSTR:
  223. begin
  224. Result := false;
  225. exit;
  226. end;
  227. { These four write into the first two registers; UMLAL and SMLAL also read from them }
  228. A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
  229. Result :=
  230. (p.oper[1]^.typ = top_reg) and
  231. (p.oper[1]^.reg = reg);
  232. {Loads to oper2 from coprocessor}
  233. {
  234. MCR/MRC is currently not supported in FPC
  235. A_MRC:
  236. Result :=
  237. (p.oper[2]^.typ = top_reg) and
  238. (p.oper[2]^.reg = reg);
  239. }
  240. {Loads to all registers in the register set}
  241. A_LDM, A_VLDM:
  242. Result := (getsupreg(reg) in p.oper[1]^.regset^);
  243. A_POP:
  244. Result := (getsupreg(reg) in p.oper[0]^.regset^) or
  245. (reg=NR_STACK_POINTER_REG);
  246. else
  247. ;
  248. end;
  249. if Result then
  250. exit;
  251. case p.oper[0]^.typ of
  252. {The common case: the instruction writes its result to the register in oper[0]}
  253. top_reg:
  254. Result := (p.oper[0]^.reg = reg) or
  255. { LDRD }
  256. (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
  257. {LDM/STM might write a new value to their index register}
  258. top_ref:
  259. Result :=
  260. (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  261. (taicpu(p).oper[0]^.ref^.base = reg);
  262. else
  263. ;
  264. end;
  265. end;
  266. function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai; out
  267. Next: tai; const ref: TReference; StopOnStore: Boolean): Boolean;
  268. begin
  269. Next:=Current;
  270. repeat
  271. Result:=GetNextInstruction(Next,Next);
  272. if Result and
  273. (Next.typ=ait_instruction) and
  274. (taicpu(Next).opcode in [A_LDR, A_STR]) and
  275. (
  276. ((taicpu(Next).ops = 2) and
  277. (taicpu(Next).oper[1]^.typ = top_ref) and
  278. RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
  279. ((taicpu(Next).ops = 3) and { LDRD/STRD }
  280. (taicpu(Next).oper[2]^.typ = top_ref) and
  281. RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
  282. ) then
  283. {We've found an instruction LDR or STR with the same reference}
  284. exit;
  285. until not(Result) or
  286. (Next.typ<>ait_instruction) or
  287. not(cs_opt_level3 in current_settings.optimizerswitches) or
  288. is_calljmp(taicpu(Next).opcode) or
  289. (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
  290. RegModifiedByInstruction(NR_PC,Next);
  291. Result:=false;
  292. end;
  293. {$ifdef DEBUG_AOPTCPU}
  294. const
  295. SPeepholeOptimization: shortstring = 'Peephole Optimization: ';
  296. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
  297. begin
  298. asml.insertbefore(tai_comment.Create(strpnew(s)), p);
  299. end;
  300. {$else DEBUG_AOPTCPU}
  301. const
  302. SPeepholeOptimization = '';
  303. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
  304. begin
  305. end;
  306. {$endif DEBUG_AOPTCPU}
  307. function TCpuAsmOptimizer.CanDoJumpOpts: Boolean;
  308. begin
  309. { Cannot perform these jump optimisations if the ARM architecture has 16-bit thumb codes }
  310. Result := not (
  311. (current_settings.instructionset = is_thumb) and not (CPUARM_HAS_THUMB2 in cpu_capabilities[current_settings.cputype])
  312. );
  313. end;
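{ Folds a vmov that only copies the result of p into another register, e.g.
    vldr reg0,[reg1]
    vmov reg2,reg0
  becomes
    vldr reg2,[reg1]
  provided reg0 is deallocated right after the vmov. }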
  314. function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
  315. var
  316. alloc,
  317. dealloc : tai_regalloc;
  318. hp1 : tai;
  319. begin
  320. Result:=false;
  321. if ((MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  322. ((getregtype(taicpu(movp).oper[0]^.reg)=R_MMREGISTER) or (taicpu(p).opcode=A_VLDR))
  323. ) or
  324. (((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFD)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
  325. (((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFS)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
  326. ) and
  327. (taicpu(movp).ops=2) and
  328. MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
  329. { the destination register of the mov must not be used between p and movp }
  330. not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
  331. { Take care to only do this for instructions which REALLY load to the first register.
  332. Otherwise
  333. vstr reg0, [reg1]
  334. vmov reg2, reg0
  335. will be optimized to
  336. vstr reg2, [reg1]
  337. }
  338. regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
  339. begin
  340. dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
  341. if assigned(dealloc) then
  342. begin
  343. DebugMsg(SPeepholeOptimization + optimizer + ' removed superfluous vmov', movp);
  344. result:=true;
  345. { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
  346. and remove it if possible }
  347. asml.Remove(dealloc);
  348. alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
  349. if assigned(alloc) then
  350. begin
  351. asml.Remove(alloc);
  352. alloc.free;
  353. dealloc.free;
  354. end
  355. else
  356. asml.InsertAfter(dealloc,p);
  357. { try to move the allocation of the target register }
  358. GetLastInstruction(movp,hp1);
  359. alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
  360. if assigned(alloc) then
  361. begin
  362. asml.Remove(alloc);
  363. asml.InsertBefore(alloc,p);
  364. { adjust used regs }
  365. IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
  366. end;
  367. { change
  368. vldr reg0,[reg1]
  369. vmov reg2,reg0
  370. into
  371. ldr reg2,[reg1]
  372. if reg2 is an int register
  373. }
  374. if (taicpu(p).opcode=A_VLDR) and (getregtype(taicpu(movp).oper[0]^.reg)=R_INTREGISTER) then
  375. taicpu(p).opcode:=A_LDR;
  376. { finally get rid of the mov }
  377. taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
  378. asml.remove(movp);
  379. movp.free;
  380. end;
  381. end;
  382. end;
  383. {
  384. optimize
  385. add/sub reg1,reg1,regY/const
  386. ...
  387. ldr/str regX,[reg1]
  388. into
  389. ldr/str regX,[reg1, regY/const]!
  390. }
  391. function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
  392. var
  393. hp1: tai;
  394. begin
  395. if GenerateARMCode and
  396. (p.ops=3) and
  397. MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
  398. GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
  399. (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
  400. MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
  401. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  402. (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
  403. (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
  404. (taicpu(hp1).oper[1]^.ref^.offset=0) and
  405. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  406. (((p.oper[2]^.typ=top_reg) and
  407. (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
  408. ((p.oper[2]^.typ=top_const) and
  409. ((abs(p.oper[2]^.val) < 256) or
  410. ((abs(p.oper[2]^.val) < 4096) and
  411. (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
  412. begin
  413. taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;
  414. if p.oper[2]^.typ=top_reg then
  415. begin
  416. taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
  417. if p.opcode=A_ADD then
  418. taicpu(hp1).oper[1]^.ref^.signindex:=1
  419. else
  420. taicpu(hp1).oper[1]^.ref^.signindex:=-1;
  421. end
  422. else
  423. begin
  424. if p.opcode=A_ADD then
  425. taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
  426. else
  427. taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
  428. end;
  429. result:=true;
  430. end
  431. else
  432. result:=false;
  433. end;
  434. {
  435. optimize
  436. ldr/str regX,[reg1]
  437. ...
  438. add/sub reg1,reg1,regY/const
  439. into
  440. ldr/str regX,[reg1], regY/const
  441. }
  442. function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
  443. var
  444. hp1 : tai;
  445. begin
  446. Result:=false;
  447. if (p.oper[1]^.typ = top_ref) and
  448. (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
  449. (p.oper[1]^.ref^.index=NR_NO) and
  450. (p.oper[1]^.ref^.offset=0) and
  451. GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
  452. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  453. MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
  454. (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
  455. (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
  456. (
  457. (taicpu(hp1).oper[2]^.typ=top_reg) or
  458. { valid offset? }
  459. ((taicpu(hp1).oper[2]^.typ=top_const) and
  460. ((abs(taicpu(hp1).oper[2]^.val)<256) or
  461. ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
  462. )
  463. )
  464. ) and
  465. { don't apply the optimization if the base register is loaded }
  466. (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
  467. not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
  468. { don't apply the optimization if the (new) index register is loaded }
  469. (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
  470. not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
  471. GenerateARMCode then
  472. begin
  473. DebugMsg(SPeepholeOptimization + 'Str/LdrAdd/Sub2Str/Ldr Postindex done', p);
  474. p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
  475. if taicpu(hp1).oper[2]^.typ=top_const then
  476. begin
  477. if taicpu(hp1).opcode=A_ADD then
  478. p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
  479. else
  480. p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
  481. end
  482. else
  483. begin
  484. p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
  485. if taicpu(hp1).opcode=A_ADD then
  486. p.oper[1]^.ref^.signindex:=1
  487. else
  488. p.oper[1]^.ref^.signindex:=-1;
  489. end;
  490. asml.Remove(hp1);
  491. hp1.Free;
  492. Result:=true;
  493. end;
  494. end;
  495. function TCpuAsmOptimizer.OptPass1ADDSUB(var p: tai): Boolean;
  496. var
  497. hp1,hp2: tai;
  498. sign: Integer;
  499. newvalue: TCGInt;
  500. b: byte;
  501. begin
  502. Result := OptPass1DataCheckMov(p);
  503. {
  504. change
  505. add/sub reg2,reg1,const1
  506. str/ldr reg3,[reg2,const2]
  507. dealloc reg2
  508. to
  509. str/ldr reg3,[reg1,const2+/-const1]
  510. }
  511. if (not GenerateThumbCode) and
  512. (taicpu(p).ops>2) and
  513. (taicpu(p).oper[1]^.typ = top_reg) and
  514. (taicpu(p).oper[2]^.typ = top_const) then
  515. begin
  516. hp1:=p;
  517. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
  518. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  519. MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
  520. (taicpu(hp1).oper[1]^.typ = top_ref) and
  521. (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
  522. { don't optimize if the register is stored/overwritten }
  523. (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
  524. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  525. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  526. { the new offset must be valid: either in the 8 or 12 bit range, depending on the
  527. ldr postfix }
  528. (((taicpu(p).opcode=A_ADD) and
  529. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  530. ) or
  531. ((taicpu(p).opcode=A_SUB) and
  532. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  533. )
  534. ) do
  535. begin
  536. { neither reg1 nor reg2 may be changed in between }
  537. if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
  538. RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
  539. break;
  540. { reg2 must either be overwritten by the ldr or be deallocated afterwards }
  541. if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
  542. assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
  543. begin
  544. { remember last instruction }
  545. hp2:=hp1;
  546. DebugMsg(SPeepholeOptimization + 'Add/SubLdr2Ldr done', p);
  547. hp1:=p;
  548. { fix all ldr/str }
  549. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
  550. begin
  551. taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
  552. if taicpu(p).opcode=A_ADD then
  553. inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
  554. else
  555. dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
  556. if hp1=hp2 then
  557. break;
  558. end;
  559. RemoveCurrentP(p);
  560. result:=true;
  561. Exit;
  562. end;
  563. end;
  564. end;
  565. {
  566. optimize
  567. add/sub rx,ry,const1
  568. add/sub rx,rx,const2
  569. into
  570. add/sub rx,ry,const1+/-const2
  571. or
  572. mov rx,ry if const1+/-const2=0
  573. or
  574. remove it, if rx=ry and const1+/-const2=0
  575. this requires that the first operation has no postfix and no condition
  576. }
  577. if MatchInstruction(p,[A_ADD,A_SUB],[C_None],[PF_None]) and
  578. MatchOptype(taicpu(p),top_reg,top_reg,top_const) and
  579. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  580. MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
  581. MatchOptype(taicpu(hp1),top_reg,top_reg,top_const) and
  582. MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[0]^) and
  583. MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[1]^) then
  584. begin
  585. sign:=1;
  586. if (taicpu(p).opcode=A_SUB) xor (taicpu(hp1).opcode=A_SUB) then
  587. sign:=-1;
  588. newvalue:=taicpu(p).oper[2]^.val+sign*taicpu(hp1).oper[2]^.val;
  589. if (not(GenerateThumbCode) and is_shifter_const(newvalue,b)) or
  590. (GenerateThumbCode and is_thumb_imm(newvalue)) then
  591. begin
  592. DebugMsg(SPeepholeOptimization + 'Merge Add/Sub done', p);
  593. taicpu(p).oper[2]^.val:=newvalue;
  594. RemoveInstruction(hp1);
  595. Result:=true;
  596. if newvalue=0 then
  597. begin
  598. if MatchOperand(taicpu(p).oper[0]^,taicpu(p).oper[1]^) then
  599. RemoveCurrentP(p)
  600. else
  601. begin
  602. taicpu(p).opcode:=A_MOV;
  603. taicpu(p).ops:=2;
  604. end;
  605. Exit;
  606. end;
  607. end;
  608. end;
  609. if (taicpu(p).condition = C_None) and
  610. (taicpu(p).oppostfix = PF_None) and
  611. LookForPreindexedPattern(taicpu(p)) then
  612. begin
  613. DebugMsg(SPeepholeOptimization + 'Add/Sub to Preindexed done', p);
  614. RemoveCurrentP(p);
  615. Result:=true;
  616. Exit;
  617. end;
  618. end;
  619. function TCpuAsmOptimizer.OptPass1MUL(var p: tai): Boolean;
  620. var
  621. hp1: tai;
  622. oldreg: tregister;
  623. begin
  624. Result := OptPass1DataCheckMov(p);
  625. {
  626. Turn
  627. mul reg0, z,w
  628. sub/add x, y, reg0
  629. dealloc reg0
  630. into
  631. mls/mla x,z,w,y
  632. }
  633. if (taicpu(p).condition = C_None) and
  634. (taicpu(p).oppostfix = PF_None) and
  635. (taicpu(p).ops=3) and
  636. (taicpu(p).oper[0]^.typ = top_reg) and
  637. (taicpu(p).oper[1]^.typ = top_reg) and
  638. (taicpu(p).oper[2]^.typ = top_reg) and
  639. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  640. MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
  641. (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
  642. (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
  643. (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
  644. ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
  645. // On CPUs before ARMv6, having the same Rd and Rm for MLA is not recommended.
  646. // TODO: A workaround would be to swap Rm and Rs
  647. (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
  648. (((taicpu(hp1).ops=3) and
  649. (taicpu(hp1).oper[2]^.typ=top_reg) and
  650. ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
  651. (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
  652. ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  653. (taicpu(hp1).opcode=A_ADD) and
  654. (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
  655. ((taicpu(hp1).ops=2) and
  656. (taicpu(hp1).oper[1]^.typ=top_reg) and
  657. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  658. (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
  659. begin
  660. if taicpu(hp1).opcode=A_ADD then
  661. begin
  662. taicpu(hp1).opcode:=A_MLA;
  663. if taicpu(hp1).ops=3 then
  664. begin
  665. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
  666. oldreg:=taicpu(hp1).oper[2]^.reg
  667. else
  668. oldreg:=taicpu(hp1).oper[1]^.reg;
  669. end
  670. else
  671. oldreg:=taicpu(hp1).oper[0]^.reg;
  672. taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
  673. taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
  674. taicpu(hp1).loadreg(3,oldreg);
  675. DebugMsg(SPeepholeOptimization + 'MulAdd2MLA done', p);
  676. end
  677. else
  678. begin
  679. taicpu(hp1).opcode:=A_MLS;
  680. taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
  681. if taicpu(hp1).ops=2 then
  682. taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
  683. else
  684. taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
  685. taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
  686. DebugMsg(SPeepholeOptimization + 'MulSub2MLS done', p);
  687. AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,UsedRegs);
  688. AllocRegBetween(taicpu(hp1).oper[2]^.reg,p,hp1,UsedRegs);
  689. AllocRegBetween(taicpu(hp1).oper[3]^.reg,p,hp1,UsedRegs);
  690. end;
  691. taicpu(hp1).ops:=4;
  692. RemoveCurrentP(p);
  693. Result := True;
  694. Exit;
  695. end
  696. end;
  697. function TCpuAsmOptimizer.OptPass1And(var p: tai): Boolean;
  698. begin
  699. Result := OptPass1DataCheckMov(p);
  700. Result := inherited OptPass1And(p) or Result;
  701. end;
  702. function TCpuAsmOptimizer.OptPass1DataCheckMov(var p: tai): Boolean;
  703. var
  704. hp1: tai;
  705. begin
  706. {
  707. change
  708. op reg1, ...
  709. mov reg2, reg1
  710. to
  711. op reg2, ...
  712. }
  713. Result := (taicpu(p).ops >= 3) and
  714. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  715. RemoveSuperfluousMove(p, hp1, 'DataMov2Data');
  716. end;
  717. function TCpuAsmOptimizer.OptPass1CMP(var p: tai): Boolean;
  718. var
  719. hp1, hp2, hp_last: tai;
  720. MovRem1, MovRem2: Boolean;
  721. begin
  722. Result := False;
  723. { These optimizations can be applied only to the currently enabled operations because
  724. the other operations do not update all flags and FPC does not track flag usage }
  725. if (taicpu(p).condition = C_None) and
  726. (taicpu(p).oper[1]^.typ = top_const) and
  727. GetNextInstruction(p, hp1) then
  728. begin
  729. {
  730. change
  731. cmp reg,const1
  732. moveq reg,const1
  733. movne reg,const2
  734. to
  735. cmp reg,const1
  736. movne reg,const2
  737. }
  738. if MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  739. (taicpu(hp1).oper[1]^.typ = top_const) and
  740. GetNextInstruction(hp1, hp2) and
  741. MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  742. (taicpu(hp2).oper[1]^.typ = top_const) then
  743. begin
  744. MovRem1 := RemoveRedundantMove(p, hp1, asml);
  745. MovRem2 := RemoveRedundantMove(p, hp2, asml);
  746. Result:= MovRem1 or MovRem2;
  747. { Make sure that hp1 is still the next instruction after p }
  748. if MovRem1 then
  749. if MovRem2 then
  750. begin
  751. if not GetNextInstruction(p, hp1) then
  752. Exit;
  753. end
  754. else
  755. hp1 := hp2;
  756. end;
  757. end;
  758. end;
  759. function TCpuAsmOptimizer.OptPass1LDR(var p: tai): Boolean;
  760. var
  761. hp1: tai;
  762. begin
  763. Result := inherited OptPass1LDR(p);
  764. if Result then
  765. Exit;
  766. { change
  767. ldr reg1,ref
  768. ldr reg2,ref
  769. into ...
  770. }
  771. if (taicpu(p).oper[1]^.typ = top_ref) and
  772. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  773. GetNextInstruction(p,hp1) and
  774. { ldrd is not allowed here }
  775. MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
  776. begin
  777. {
  778. ...
  779. ldr reg1,ref
  780. mov reg2,reg1
  781. }
  782. if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
  783. RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
  784. (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
  785. (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
  786. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
  787. begin
  788. if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
  789. begin
  790. DebugMsg(SPeepholeOptimization + 'LdrLdr2Ldr done', hp1);
  791. asml.remove(hp1);
  792. hp1.free;
  793. end
  794. else
  795. begin
  796. DebugMsg(SPeepholeOptimization + 'LdrLdr2LdrMov done', hp1);
  797. taicpu(hp1).opcode:=A_MOV;
  798. taicpu(hp1).oppostfix:=PF_None;
  799. taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
  800. end;
  801. result := true;
  802. end
  803. {
  804. ...
  805. ldrd reg1,reg1+1,ref
  806. }
  807. else if (GenerateARMCode or GenerateThumb2Code) and
  808. (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
  809. { ldrd does not allow any postfixes ... }
  810. (taicpu(p).oppostfix=PF_None) and
  811. not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
  812. (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
  813. { ldr ensures that either base or index contains no register, else ldr wouldn't
  814. use an offset either
  815. }
  816. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  817. (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
  818. (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
  819. (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
  820. AlignedToQWord(taicpu(p).oper[1]^.ref^) then
  821. begin
  822. DebugMsg(SPeepholeOptimization + 'LdrLdr2Ldrd done', p);
  823. taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
  824. taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
  825. taicpu(p).ops:=3;
  826. taicpu(p).oppostfix:=PF_D;
  827. asml.remove(hp1);
  828. hp1.free;
  829. result:=true;
  830. end;
  831. end;
  832. {
  833. Change
  834. ldrb dst1, [REF]
  835. and dst2, dst1, #255
  836. into
  837. ldrb dst2, [ref]
  838. }
  839. if not(GenerateThumbCode) and
  840. (taicpu(p).oppostfix=PF_B) and
  841. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  842. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
  843. (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
  844. (taicpu(hp1).oper[2]^.typ = top_const) and
  845. (taicpu(hp1).oper[2]^.val = $FF) and
  846. not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
  847. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  848. begin
  849. DebugMsg(SPeepholeOptimization + 'LdrbAnd2Ldrb done', p);
  850. taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
  851. asml.remove(hp1);
  852. hp1.free;
  853. result:=true;
  854. end;
  855. Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  856. { Remove superfluous mov after ldr
  857. changes
  858. ldr reg1, ref
  859. mov reg2, reg1
  860. to
  861. ldr reg2, ref
  862. conditions are:
  863. * no ldrd usage
  864. * reg1 must be released after mov
  865. * mov can not contain shifterops
  866. * ldr+mov have the same conditions
  867. * mov does not set flags
  868. }
  869. if (taicpu(p).oppostfix<>PF_D) and
  870. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  871. RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
  872. Result:=true;
  873. end;
  874. function TCpuAsmOptimizer.OptPass1STM(var p: tai): Boolean;
  875. var
  876. hp1, hp2, hp3, hp4: tai;
  877. begin
  878. Result := False;
  879. {
  880. change
  881. stmfd r13!,[r14]
  882. sub r13,r13,#4
  883. bl abc
  884. add r13,r13,#4
  885. ldmfd r13!,[r15]
  886. into
  887. b abc
  888. }
  889. if not(ts_thumb_interworking in current_settings.targetswitches) and
  890. (taicpu(p).condition = C_None) and
  891. (taicpu(p).oppostfix = PF_FD) and
  892. (taicpu(p).oper[0]^.typ = top_ref) and
  893. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  894. (taicpu(p).oper[0]^.ref^.base=NR_NO) and
  895. (taicpu(p).oper[0]^.ref^.offset=0) and
  896. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  897. (taicpu(p).oper[1]^.typ = top_regset) and
  898. (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
  899. GetNextInstruction(p, hp1) and
  900. MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
  901. (taicpu(hp1).oper[0]^.typ = top_reg) and
  902. (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
  903. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
  904. (taicpu(hp1).oper[2]^.typ = top_const) and
  905. GetNextInstruction(hp1, hp2) and
  906. SkipEntryExitMarker(hp2, hp2) and
  907. MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
  908. (taicpu(hp2).oper[0]^.typ = top_ref) and
  909. GetNextInstruction(hp2, hp3) and
  910. SkipEntryExitMarker(hp3, hp3) and
  911. MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
  912. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
  913. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
  914. MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
  915. GetNextInstruction(hp3, hp4) and
  916. MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
  917. MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
  918. (taicpu(hp4).oper[1]^.typ = top_regset) and
  919. (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
  920. begin
  921. asml.Remove(hp1);
  922. asml.Remove(hp3);
  923. asml.Remove(hp4);
  924. taicpu(hp2).opcode:=A_B;
  925. hp1.free;
  926. hp3.free;
  927. hp4.free;
  928. RemoveCurrentp(p, hp2);
  929. DebugMsg(SPeepholeOptimization + 'Bl2B done', p);
  930. Result := True;
  931. end;
  932. end;
  933. function TCpuAsmOptimizer.OptPass1STR(var p: tai): Boolean;
  934. var
  935. hp1: tai;
  936. begin
  937. Result := inherited OptPass1STR(p);
  938. if Result then
  939. Exit;
  940. { Common conditions }
  941. if (taicpu(p).oper[1]^.typ = top_ref) and
  942. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  943. (taicpu(p).oppostfix=PF_None) then
  944. begin
  945. { change
  946. str reg1,ref
  947. ldr reg2,ref
  948. into
  949. str reg1,ref
  950. mov reg2,reg1
  951. }
  952. if (taicpu(p).condition=C_None) and
  953. GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
  954. MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
  955. (taicpu(hp1).oper[1]^.typ=top_ref) and
  956. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  957. not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
  958. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
  959. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
  960. begin
  961. if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
  962. begin
  963. DebugMsg(SPeepholeOptimization + 'StrLdr2StrMov 1 done', hp1);
  964. asml.remove(hp1);
  965. hp1.free;
  966. end
  967. else
  968. begin
  969. taicpu(hp1).opcode:=A_MOV;
  970. taicpu(hp1).oppostfix:=PF_None;
  971. taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
  972. DebugMsg(SPeepholeOptimization + 'StrLdr2StrMov 2 done', hp1);
  973. end;
  974. result := True;
  975. end
  976. { change
  977. str reg1,ref
  978. str reg2,ref
  979. into
  980. strd reg1,reg2,ref
  981. }
  982. else if (GenerateARMCode or GenerateThumb2Code) and
  983. (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
  984. not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
  985. (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
  986. AlignedToQWord(taicpu(p).oper[1]^.ref^) and
  987. GetNextInstruction(p,hp1) and
  988. MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
  989. (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
  990. { str ensures that either base or index contains no register, else str wouldn't
  991. use an offset either
  992. }
  993. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  994. (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
  995. (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) then
  996. begin
  997. DebugMsg(SPeepholeOptimization + 'StrStr2Strd done', p);
  998. taicpu(p).oppostfix:=PF_D;
  999. taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
  1000. taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
  1001. taicpu(p).ops:=3;
  1002. asml.remove(hp1);
  1003. hp1.free;
  1004. result:=true;
  1005. end;
  1006. end;
  1007. Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  1008. end;
  1009. function TCpuAsmOptimizer.OptPass1MOV(var p: tai): Boolean;
  1010. var
  1011. hp1, hpfar1, hp2: tai;
  1012. i, i2: longint;
  1013. tempop: tasmop;
  1014. dealloc: tai_regalloc;
  1015. begin
  1016. Result := False;
  1017. hp1 := nil;
  1018. { fold
  1019. mov reg1,reg0, shift imm1
  1020. mov reg1,reg1, shift imm2
  1021. }
  1022. if (taicpu(p).ops=3) and
  1023. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1024. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1025. getnextinstruction(p,hp1) and
  1026. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1027. (taicpu(hp1).ops=3) and
  1028. MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
  1029. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1030. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1031. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
  1032. begin
  1033. { fold
  1034. mov reg1,reg0, lsl 16
  1035. mov reg1,reg1, lsr 16
  1036. strh reg1, ...
  1037. dealloc reg1
  1038. to
  1039. strh reg1, ...
  1040. dealloc reg1
  1041. }
  1042. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1043. (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
  1044. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
  1045. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
  1046. getnextinstruction(hp1,hp2) and
  1047. MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
  1048. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
  1049. begin
  1050. TransferUsedRegs(TmpUsedRegs);
  1051. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1052. UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
  1053. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
  1054. begin
  1055. DebugMsg(SPeepholeOptimization + 'Removed superfluous 16 Bit zero extension', hp1);
  1056. taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
  1057. asml.remove(hp1);
  1058. hp1.free;
  1059. RemoveCurrentP(p, hp2);
  1060. Result:=true;
  1061. Exit;
  1062. end;
  1063. end
  1064. { fold
  1065. mov reg1,reg0, shift imm1
  1066. mov reg1,reg1, shift imm2
  1067. to
  1068. mov reg1,reg0, shift imm1+imm2
  1069. }
  1070. else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
  1071. { after an lsr the sign bit is clear, so a following asr acts like an lsr and can be folded into it }
  1072. ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR) ) then
  1073. begin
  1074. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1075. { avoid overflows }
  1076. if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
  1077. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  1078. SM_ROR:
  1079. taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
  1080. SM_ASR:
  1081. taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
  1082. SM_LSR,
  1083. SM_LSL:
  1084. begin
  1085. hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
  1086. InsertLLItem(p.previous, p.next, hp2);
  1087. p.free;
  1088. p:=hp2;
  1089. end;
  1090. else
  1091. internalerror(2008072803);
  1092. end;
  1093. DebugMsg(SPeepholeOptimization + 'ShiftShift2Shift 1 done', p);
  1094. asml.remove(hp1);
  1095. hp1.free;
  1096. hp1 := nil;
  1097. result := true;
  1098. end
  1099. { fold
  1100. mov reg1,reg0, shift imm1
  1101. mov reg1,reg1, shift imm2
  1102. mov reg1,reg1, shift imm3 ...
  1103. mov reg2,reg1, shift imm3 ...
  1104. }
  1105. else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
  1106. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1107. (taicpu(hp2).ops=3) and
  1108. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1109. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
  1110. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1111. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
  1112. begin
  1113. { mov reg1,reg0, lsl imm1
  1114. mov reg1,reg1, lsr/asr imm2
  1115. mov reg2,reg1, lsl imm3 ...
  1116. to
  1117. mov reg1,reg0, lsl imm1
  1118. mov reg2,reg1, lsr/asr imm2-imm3
  1119. if
  1120. imm1>=imm2
  1121. }
  1122. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1123. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1124. (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1125. begin
  1126. if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1127. begin
  1128. if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
  1129. not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1130. begin
  1131. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 1a done', p);
  1132. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1133. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1134. asml.remove(hp1);
  1135. asml.remove(hp2);
  1136. hp1.free;
  1137. hp2.free;
  1138. if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
  1139. begin
  1140. taicpu(p).freeop(1);
  1141. taicpu(p).freeop(2);
  1142. taicpu(p).loadconst(1,0);
  1143. end;
  1144. result := true;
  1145. Exit;
  1146. end;
  1147. end
  1148. else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1149. begin
  1150. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 1b done', p);
  1151. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
  1152. taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1153. asml.remove(hp2);
  1154. hp2.free;
  1155. result := true;
  1156. Exit;
  1157. end;
  1158. end
  1159. { mov reg1,reg0, lsr/asr imm1
  1160. mov reg1,reg1, lsl imm2
  1161. mov reg1,reg1, lsr/asr imm3 ...
  1162. if imm3>=imm1 and imm2>=imm1
  1163. to
  1164. mov reg1,reg0, lsl imm2-imm1
  1165. mov reg1,reg1, lsr/asr imm3 ...
  1166. }
  1167. else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1168. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1169. (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
  1170. (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1171. begin
  1172. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
  1173. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1174. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 2 done', p);
  1175. if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
  1176. begin
  1177. taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
  1178. asml.remove(hp1);
  1179. hp1.free;
  1180. end;
  1181. RemoveCurrentp(p);
  1182. result := true;
  1183. Exit;
  1184. end;
  1185. end;
  1186. end;
  1187. { All the optimisations from this point on require GetNextInstructionUsingReg
  1188. to return True }
  1189. while (
  1190. GetNextInstructionUsingReg(p, hpfar1, taicpu(p).oper[0]^.reg) and
  1191. (hpfar1.typ = ait_instruction)
  1192. ) do
  1193. begin
  1194. { Change the common
  1195. mov r0, r0, lsr #xxx
  1196. and r0, r0, #yyy/bic r0, r0, #xxx
  1197. and remove the superfluous and/bic if possible
  1198. This could be extended to handle more cases.
  1199. }
  1200. { Change
  1201. mov rx, ry, lsr/ror #xxx
  1202. uxtb/uxth rz,rx/and rz,rx,0xFF
  1203. dealloc rx
  1204. to
  1205. uxtb/uxth rz,ry,ror #xxx
  1206. }
  1207. if (GenerateThumb2Code) and
  1208. (taicpu(p).ops=3) and
  1209. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1210. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1211. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
  1212. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1213. begin
  1214. if MatchInstruction(hpfar1, A_UXTB, [C_None], [PF_None]) and
  1215. (taicpu(hpfar1).ops = 2) and
  1216. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1217. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1218. begin
  1219. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1220. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1221. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1222. taicpu(hpfar1).ops := 3;
  1223. if not Assigned(hp1) then
  1224. GetNextInstruction(p,hp1);
  1225. RemoveCurrentP(p, hp1);
  1226. result:=true;
  1227. exit;
  1228. end
  1229. else if MatchInstruction(hpfar1, A_UXTH, [C_None], [PF_None]) and
  1230. (taicpu(hpfar1).ops=2) and
  1231. (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
  1232. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1233. begin
  1234. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1235. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1236. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1237. taicpu(hpfar1).ops := 3;
  1238. if not Assigned(hp1) then
  1239. GetNextInstruction(p,hp1);
  1240. RemoveCurrentP(p, hp1);
  1241. result:=true;
  1242. exit;
  1243. end
  1244. else if MatchInstruction(hpfar1, A_AND, [C_None], [PF_None]) and
  1245. (taicpu(hpfar1).ops = 3) and
  1246. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1247. (taicpu(hpfar1).oper[2]^.val = $FF) and
  1248. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1249. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1250. begin
  1251. taicpu(hpfar1).ops := 3;
  1252. taicpu(hpfar1).opcode := A_UXTB;
  1253. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1254. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1255. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1256. if not Assigned(hp1) then
  1257. GetNextInstruction(p,hp1);
  1258. RemoveCurrentP(p, hp1);
  1259. result:=true;
  1260. exit;
  1261. end;
  1262. end;
{ 2-operand mov optimisations }
  1264. if (taicpu(p).ops = 2) then
  1265. begin
  1266. {
  1267. This removes the mul from
  1268. mov rX,0
  1269. ...
  1270. mul ...,rX,...
  1271. }
  1272. if (taicpu(p).oper[1]^.typ = top_const) then
  1273. begin
  1274. (* if false and
  1275. (taicpu(p).oper[1]^.val=0) and
  1276. MatchInstruction(hpfar1, [A_MUL,A_MLA], [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1277. (((taicpu(hpfar1).oper[1]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^)) or
  1278. ((taicpu(hpfar1).oper[2]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[2]^))) then
  1279. begin
  1280. TransferUsedRegs(TmpUsedRegs);
  1281. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1282. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1283. DebugMsg(SPeepholeOptimization + 'MovMUL/MLA2Mov0 done', p);
  1284. if taicpu(hpfar1).opcode=A_MUL then
  1285. taicpu(hpfar1).loadconst(1,0)
  1286. else
  1287. taicpu(hpfar1).loadreg(1,taicpu(hpfar1).oper[3]^.reg);
  1288. taicpu(hpfar1).ops:=2;
  1289. taicpu(hpfar1).opcode:=A_MOV;
  1290. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1291. RemoveCurrentP(p);
  1292. Result:=true;
  1293. exit;
  1294. end
  1295. else*) if (taicpu(p).oper[1]^.val=0) and
  1296. MatchInstruction(hpfar1, A_MLA, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1297. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[3]^) then
  1298. begin
  1299. TransferUsedRegs(TmpUsedRegs);
  1300. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1301. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1302. DebugMsg(SPeepholeOptimization + 'MovMLA2MUL 1 done', p);
  1303. taicpu(hpfar1).ops:=3;
  1304. taicpu(hpfar1).opcode:=A_MUL;
  1305. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1306. begin
  1307. RemoveCurrentP(p);
  1308. Result:=true;
  1309. end;
  1310. exit;
  1311. end
  1312. {
  1313. This changes the very common
  1314. mov r0, #0
  1315. str r0, [...]
  1316. mov r0, #0
  1317. str r0, [...]
  1318. and removes all superfluous mov instructions
  1319. }
  1320. else if (taicpu(hpfar1).opcode=A_STR) then
  1321. begin
  1322. hp1 := hpfar1;
  1323. while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
  1324. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1325. GetNextInstruction(hp1, hp2) and
  1326. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1327. (taicpu(hp2).ops = 2) and
  1328. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
  1329. MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
  1330. begin
  1331. DebugMsg(SPeepholeOptimization + 'MovStrMov done', hp2);
  1332. GetNextInstruction(hp2,hp1);
  1333. asml.remove(hp2);
  1334. hp2.free;
  1335. result:=true;
  1336. if not assigned(hp1) then break;
  1337. end;
  1338. if Result then
  1339. Exit;
  1340. { If no changes were made, now try constant merging }
  1341. if TryConstMerge(p, hpfar1) then
  1342. begin
  1343. Result := True;
  1344. Exit;
  1345. end;
  1346. end;
  1347. end;
  1348. {
  1349. This removes the first mov from
  1350. mov rX,...
  1351. mov rX,...
  1352. }
  1353. if taicpu(hpfar1).opcode=A_MOV then
  1354. begin
  1355. hp1 := p;
  1356. while MatchInstruction(hpfar1, A_MOV, [taicpu(hp1).condition], [taicpu(hp1).oppostfix]) and
  1357. (taicpu(hpfar1).ops = 2) and
  1358. MatchOperand(taicpu(hp1).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1359. { don't remove the first mov if the second is a mov rX,rX }
  1360. not(MatchOperand(taicpu(hpfar1).oper[0]^, taicpu(hpfar1).oper[1]^)) do
  1361. begin
  1362. { Defer removing the first p until after the while loop }
  1363. if p <> hp1 then
  1364. begin
  1365. DebugMsg(SPeepholeOptimization + 'MovMov done', hp1);
  1366. asml.remove(hp1);
  1367. hp1.free;
  1368. end;
  1369. hp1:=hpfar1;
  1370. GetNextInstruction(hpfar1,hpfar1);
  1371. result:=true;
  1372. if not assigned(hpfar1) then
  1373. Break;
  1374. end;
  1375. if Result then
  1376. begin
  1377. DebugMsg(SPeepholeOptimization + 'MovMov done', p);
  1378. RemoveCurrentp(p);
  1379. Exit;
  1380. end;
  1381. end;
  1382. if RedundantMovProcess(p,hpfar1) then
  1383. begin
  1384. Result:=true;
  1385. { p might not point at a mov anymore }
  1386. exit;
  1387. end;
  1388. { If hpfar1 is nil after the call to RedundantMovProcess, it is
  1389. because it would have become a dangling pointer, so reinitialise it. }
  1390. if not Assigned(hpfar1) then
  1391. Continue;
  1392. { Fold the very common sequence
  1393. mov regA, regB
  1394. ldr* regA, [regA]
  1395. to
  1396. ldr* regA, [regB]
  1397. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1398. }
  1399. if
  1400. // Make sure that Thumb code doesn't propagate a high register into a reference
  1401. (
  1402. (
  1403. GenerateThumbCode and
  1404. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)
  1405. ) or (not GenerateThumbCode)
  1406. ) and
  1407. (taicpu(p).oper[1]^.typ = top_reg) and
  1408. (taicpu(p).oppostfix = PF_NONE) and
  1409. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1410. (taicpu(hpfar1).oper[1]^.typ = top_ref) and
  1411. { We can change the base register only when the instruction uses AM_OFFSET }
  1412. ((taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1413. ((taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1414. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1415. ) and
  1416. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1417. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1418. begin
  1419. DebugMsg(SPeepholeOptimization + 'MovLdr2Ldr done', hpfar1);
  1420. if (taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1421. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1422. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1423. if taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1424. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1425. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1426. if Assigned(dealloc) then
  1427. begin
  1428. asml.remove(dealloc);
  1429. asml.InsertAfter(dealloc,hpfar1);
  1430. end;
  1431. if (not Assigned(hp1)) or (p=hp1) then
  1432. GetNextInstruction(p, hp1);
  1433. RemoveCurrentP(p, hp1);
  1434. result:=true;
  1435. Exit;
  1436. end
  1437. end
{ 3-operand mov optimisations }
  1439. else if (taicpu(p).ops = 3) then
  1440. begin
  1441. if (taicpu(p).oper[2]^.typ = top_shifterop) and
  1442. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1443. (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
  1444. (taicpu(hpfar1).ops>=1) and
  1445. (taicpu(hpfar1).oper[0]^.typ=top_reg) and
  1446. (not RegModifiedBetween(taicpu(hpfar1).oper[0]^.reg, p, hpfar1)) and
  1447. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1448. begin
  1449. if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24 ) and
  1450. MatchInstruction(hpfar1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1451. (taicpu(hpfar1).ops=3) and
  1452. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1453. (taicpu(hpfar1).oper[2]^.typ = top_const) and
{ Check if the AND would only mask out bits that are already zero because of the shift }
  1456. ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hpfar1).oper[2]^.val) =
  1457. ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
  1458. begin
  1459. DebugMsg(SPeepholeOptimization + 'LsrAnd2Lsr done', hpfar1);
  1460. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1461. asml.remove(hpfar1);
  1462. hpfar1.free;
  1463. result:=true;
  1464. Exit;
  1465. end
  1466. else if MatchInstruction(hpfar1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1467. (taicpu(hpfar1).ops=3) and
  1468. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1469. (taicpu(hpfar1).oper[2]^.typ = top_const) and
{ Check if the BIC would only mask out bits that are already zero because of the shift }
  1471. (taicpu(hpfar1).oper[2]^.val<>0) and
  1472. (BsfDWord(taicpu(hpfar1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1473. begin
  1474. DebugMsg(SPeepholeOptimization + 'LsrBic2Lsr done', hpfar1);
  1475. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1476. asml.remove(hpfar1);
  1477. hpfar1.free;
  1478. result:=true;
  1479. Exit;
  1480. end;
  1481. end;
  1482. { This folds shifterops into following instructions
  1483. mov r0, r1, lsl #8
  1484. add r2, r3, r0
  1485. to
  1486. add r2, r3, r1, lsl #8
  1487. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1488. }
  1489. if (taicpu(p).oper[1]^.typ = top_reg) and
  1490. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1491. (taicpu(p).oppostfix = PF_NONE) and
  1492. MatchInstruction(hpfar1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1493. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1494. A_CMP, A_CMN],
  1495. [taicpu(p).condition], [PF_None]) and
  1496. (not ((GenerateThumb2Code) and
  1497. (taicpu(hpfar1).opcode in [A_SBC]) and
  1498. (((taicpu(hpfar1).ops=3) and
  1499. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^.reg)) or
  1500. ((taicpu(hpfar1).ops=2) and
  1501. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^.reg))))) and
  1502. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) and
  1503. (taicpu(hpfar1).ops >= 2) and
  1504. {Currently we can't fold into another shifterop}
  1505. (taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^.typ = top_reg) and
  1506. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1507. NR_DEFAULTFLAGS for modification}
  1508. (
  1509. {Everything is fine if we don't use RRX}
  1510. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1511. (
  1512. {If it is RRX, then check if we're just accessing the next instruction}
  1513. Assigned(hp1) and
  1514. (hpfar1 = hp1)
  1515. )
  1516. ) and
{ reg1 must not be modified in between }
  1518. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
{ The shifterop can contain a register, which must not be modified }
  1520. (
  1521. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1522. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hpfar1))
  1523. ) and
  1524. (
  1525. {Only ONE of the two src operands is allowed to match}
  1526. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-2]^) xor
  1527. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^)
  1528. ) then
  1529. begin
  1530. if taicpu(hpfar1).opcode in [A_TST, A_TEQ, A_CMN] then
  1531. I2:=0
  1532. else
  1533. I2:=1;
  1534. for I:=I2 to taicpu(hpfar1).ops-1 do
  1535. if MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[I]^.reg) then
  1536. begin
{ If the parameter matched on the second operand from the RIGHT,
we have to switch the parameters; this will not happen for CMP,
where we're only evaluating the rightmost parameter }
  1541. if I <> taicpu(hpfar1).ops-1 then
  1542. begin
  1543. {The SUB operators need to be changed when we swap parameters}
  1544. case taicpu(hpfar1).opcode of
  1545. A_SUB: tempop:=A_RSB;
  1546. A_SBC: tempop:=A_RSC;
  1547. A_RSB: tempop:=A_SUB;
  1548. A_RSC: tempop:=A_SBC;
  1549. else tempop:=taicpu(hpfar1).opcode;
  1550. end;
  1551. if taicpu(hpfar1).ops = 3 then
  1552. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1553. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[2]^.reg,
  1554. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1555. else
  1556. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1557. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1558. taicpu(p).oper[2]^.shifterop^);
  1559. end
  1560. else
  1561. if taicpu(hpfar1).ops = 3 then
  1562. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1563. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[1]^.reg,
  1564. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1565. else
  1566. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1567. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1568. taicpu(p).oper[2]^.shifterop^);
  1569. if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
  1570. AllocRegBetween(taicpu(p).oper[2]^.shifterop^.rs,p,hpfar1,UsedRegs);
  1571. AllocRegBetween(taicpu(p).oper[1]^.reg,p,hpfar1,UsedRegs);
  1572. asml.insertbefore(hp2, hpfar1);
  1573. asml.remove(hpfar1);
  1574. hpfar1.free;
  1575. DebugMsg(SPeepholeOptimization + 'FoldShiftProcess done', hp2);
  1576. if not Assigned(hp1) then
  1577. GetNextInstruction(p, hp1)
  1578. else if hp1 = hpfar1 then
  1579. { If hp1 = hpfar1, then it's a dangling pointer }
  1580. hp1 := hp2;
  1581. RemoveCurrentP(p, hp1);
  1582. Result:=true;
  1583. Exit;
  1584. end;
  1585. end;
  1586. {
  1587. Fold
  1588. mov r1, r1, lsl #2
  1589. ldr/ldrb r0, [r0, r1]
  1590. to
  1591. ldr/ldrb r0, [r0, r1, lsl #2]
  1592. XXX: This still needs some work, as we quite often encounter something like
  1593. mov r1, r2, lsl #2
  1594. add r2, r3, #imm
  1595. ldr r0, [r2, r1]
  1596. which can't be folded because r2 is overwritten between the shift and the ldr.
We could try to shuffle the registers around and fold it into:
  1598. add r1, r3, #imm
  1599. ldr r0, [r1, r2, lsl #2]
  1600. }
  1601. if (not(GenerateThumbCode)) and
  1602. { thumb2 allows only lsl #0..#3 }
  1603. (not(GenerateThumb2Code) or
  1604. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1605. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1606. )
  1607. ) and
  1608. (taicpu(p).oper[1]^.typ = top_reg) and
  1609. (taicpu(p).oper[2]^.typ = top_shifterop) and
{ RRX is tough to handle because it requires tracking the C flag;
it is also extremely unlikely to be emitted this way }
  1612. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1613. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1614. (taicpu(p).oppostfix = PF_NONE) and
  1615. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1616. (MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1617. (GenerateThumb2Code and
  1618. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1619. ) and
  1620. (
  1621. {If this is address by offset, one of the two registers can be used}
  1622. ((taicpu(hpfar1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1623. (
  1624. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1625. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1626. )
  1627. ) or
  1628. {For post and preindexed only the index register can be used}
  1629. ((taicpu(hpfar1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1630. (
  1631. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1632. (taicpu(hpfar1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1633. ) and
  1634. (not GenerateThumb2Code)
  1635. )
  1636. ) and
  1637. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1638. (taicpu(hpfar1).oper[1]^.ref^.index<>NR_NO) and
  1639. (taicpu(hpfar1).oper[1]^.ref^.base<>NR_NO) and
  1640. { Only fold if there isn't another shifterop already, and offset is zero. }
  1641. (taicpu(hpfar1).oper[1]^.ref^.offset = 0) and
  1642. (taicpu(hpfar1).oper[1]^.ref^.shiftmode = SM_None) and
  1643. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1644. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1645. begin
  1646. { If the register we want to do the shift for resides in base, we need to swap that}
  1647. if (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1648. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(hpfar1).oper[1]^.ref^.index;
  1649. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1650. taicpu(hpfar1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1651. taicpu(hpfar1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1652. DebugMsg(SPeepholeOptimization + 'FoldShiftLdrStr done', hpfar1);
  1653. RemoveCurrentP(p);
  1654. Result:=true;
  1655. Exit;
  1656. end;
  1657. end;
  1658. {
Often we see shifts and then a superfluous mov to another register.
In the future this might be handled in RedundantMovProcess when it uses RegisterTracking.
  1661. }
  1662. if RemoveSuperfluousMove(p, hpfar1, 'MovMov2Mov') then
  1663. Result:=true;
  1664. Exit;
  1665. end;
  1666. end;
  1667. function TCpuAsmOptimizer.OptPass1MOVW(var p: tai): Boolean;
  1668. var
  1669. ThisReg: TRegister;
  1670. a: aint;
  1671. imm_shift: byte;
  1672. hp1, hp2: tai;
  1673. begin
  1674. Result := False;
  1675. ThisReg := taicpu(p).oper[0]^.reg;
  1676. if GetNextInstruction(p, hp1) then
  1677. begin
  1678. { Can the MOVW/MOVT pair be represented by a single MOV instruction? }
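{ Illustrative example (assumed encoding): movw r0,#0 / movt r0,#1 -> mov r0,#65536,
since $10000 is a valid shifter constant }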
  1679. if MatchInstruction(hp1, A_MOVT, [taicpu(p).condition], []) and
  1680. (taicpu(hp1).oper[0]^.reg = ThisReg) then
  1681. begin
  1682. a := (aint(taicpu(p).oper[1]^.val) and $FFFF) or aint(taicpu(hp1).oper[1]^.val shl 16);
  1683. if is_shifter_const(a,imm_shift) then
  1684. begin
  1685. DebugMsg(SPeepholeOptimization + 'MOVW/MOVT pair can encode value as a single MOV instruction (MovwMovT2Mov)', p);
  1686. taicpu(p).opcode := A_MOV;
  1687. taicpu(p).oper[1]^.val := a;
  1688. RemoveInstruction(hp1);
  1689. Result := True;
  1690. Exit;
  1691. end
  1692. else if is_shifter_const(not(a),imm_shift) then
  1693. begin
  1694. DebugMsg(SPeepholeOptimization + 'MOVW/MOVT pair can encode value as a single MVN instruction (MovwMovT2Mvn)', p);
  1695. taicpu(p).opcode := A_MVN;
  1696. taicpu(p).oper[1]^.val := not(a);
  1697. RemoveInstruction(hp1);
  1698. Result := True;
  1699. Exit;
  1700. end;
  1701. end;
  1702. if (
  1703. (
  1704. MatchInstruction(hp1, A_STR, [taicpu(p).condition], [PF_H]) and
  1705. (taicpu(hp1).oper[0]^.reg = ThisReg)
  1706. )
  1707. ) and
  1708. TryConstMerge(p, hp1) then
  1709. begin
  1710. Result := True;
  1711. Exit;
  1712. end;
  1713. end;
  1714. end;
  1715. function TCpuAsmOptimizer.OptPass1MVN(var p: tai): Boolean;
  1716. var
  1717. hp1: tai;
  1718. begin
  1719. {
  1720. change
  1721. mvn reg2,reg1
  1722. and reg3,reg4,reg2
  1723. dealloc reg2
  1724. to
  1725. bic reg3,reg4,reg1
  1726. }
  1727. Result := False;
  1728. if (taicpu(p).oper[1]^.typ = top_reg) and
  1729. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1730. MatchInstruction(hp1,A_AND,[],[]) and
  1731. (((taicpu(hp1).ops=3) and
  1732. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1733. (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
  1734. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
  1735. ((taicpu(hp1).ops=2) and
  1736. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1737. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1738. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
{ reg1 must not be modified in between }
  1740. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1741. begin
  1742. DebugMsg(SPeepholeOptimization + 'MvnAnd2Bic done', p);
  1743. taicpu(hp1).opcode:=A_BIC;
  1744. if taicpu(hp1).ops=3 then
  1745. begin
  1746. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1747. taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
  1748. taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
  1749. end
  1750. else
  1751. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1752. RemoveCurrentp(p);
  1753. Result := True;
  1754. Exit;
  1755. end;
  1756. end;
  1757. function TCpuAsmOptimizer.OptPass1VMov(var p: tai): Boolean;
  1758. var
  1759. hp1: tai;
  1760. begin
  1761. {
  1762. change
  1763. vmov reg0,reg1,reg2
  1764. vmov reg1,reg2,reg0
  1765. into
  1766. vmov reg0,reg1,reg2
can be applied regardless of whether reg0 or reg2 is the VFP register
  1768. }
  1769. Result := False;
  1770. if (taicpu(p).ops = 3) then
  1771. while GetNextInstruction(p, hp1) and
  1772. MatchInstruction(hp1, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1773. (taicpu(hp1).ops = 3) and
  1774. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[2]^) and
  1775. MatchOperand(taicpu(p).oper[1]^, taicpu(hp1).oper[0]^) and
  1776. MatchOperand(taicpu(p).oper[2]^, taicpu(hp1).oper[1]^) do
  1777. begin
  1778. asml.Remove(hp1);
  1779. hp1.free;
  1780. DebugMsg(SPeepholeOptimization + 'VMovVMov2VMov done', p);
  1781. { Can we do it again? }
  1782. end;
  1783. end;
  1784. function TCpuAsmOptimizer.OptPass1VOp(var p: tai): Boolean;
  1785. var
  1786. hp1: tai;
  1787. begin
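{ Let a VFP operation write its result directly to the destination of a following vmov and
remove the vmov when the intermediate register is no longer needed (VOpVMov2VOp);
the actual rewriting is done by RemoveSuperfluousVMov }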
  1788. Result := GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1789. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp');
  1790. end;
  1791. function TCpuAsmOptimizer.OptPass1Push(var p: tai): Boolean;
  1792. var
  1793. hp1: tai;
  1794. begin
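{ push {lr} directly followed by pop {pc} is just a return:
rewrite it as mov pc,lr, or as bx lr on CPUs that have BX }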
  1795. Result:=false;
  1796. if (taicpu(p).oper[0]^.regset^=[RS_R14]) and
  1797. GetNextInstruction(p,hp1) and
  1798. MatchInstruction(hp1,A_POP,[C_None],[PF_None]) and
  1799. (taicpu(hp1).oper[0]^.regset^=[RS_R15]) then
  1800. begin
  1801. if not(CPUARM_HAS_BX in cpu_capabilities[current_settings.cputype]) then
  1802. begin
  1803. DebugMsg('Peephole Optimization: PushPop2Mov done', p);
  1804. taicpu(p).ops:=2;
  1805. taicpu(p).loadreg(1, NR_R14);
  1806. taicpu(p).loadreg(0, NR_R15);
  1807. taicpu(p).opcode:=A_MOV;
  1808. end
  1809. else
  1810. begin
  1811. DebugMsg('Peephole Optimization: PushPop2Bx done', p);
  1812. taicpu(p).loadreg(0, NR_R14);
  1813. taicpu(p).opcode:=A_BX;
  1814. end;
  1815. RemoveInstruction(hp1);
  1816. Result:=true;
  1817. Exit;
  1818. end;
  1819. end;
  1820. function TCpuAsmOptimizer.OptPass2Bcc(var p: tai): Boolean;
  1821. var
  1822. hp1,hp2,hp3,after_p: tai;
  1823. l : longint;
  1824. WasLast: boolean;
  1825. Label_X, Label_Y: TASmLabel;
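{ Make the instructions starting at this_hp conditional with newcond, stopping after an
instruction that must be the last of such a block or when an instruction cannot be
executed conditionally }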
  1826. procedure ConvertInstructins(this_hp: tai; newcond: tasmcond);
  1827. var
  1828. next_hp: tai;
  1829. begin
  1830. repeat
  1831. if this_hp.typ=ait_instruction then
  1832. taicpu(this_hp).condition := newcond;
  1833. GetNextInstruction(this_hp, next_hp);
  1834. if MustBeLast(this_hp) then
  1835. Break;
  1836. this_hp := next_hp
  1837. until not(assigned(this_hp)) or
  1838. not(CanBeCond(this_hp)) or
  1839. ((hp1.typ = ait_instruction) and (taicpu(hp1).opcode = A_B)) or
  1840. (this_hp.typ = ait_label);
  1841. end;
  1842. begin
  1843. Result := False;
  1844. if (taicpu(p).condition<>C_None) and
  1845. not(GenerateThumbCode) then
  1846. begin
  1847. { check for
  1848. Bxx xxx
  1849. <several instructions>
  1850. xxx:
  1851. }
  1852. Label_X := TAsmLabel(taicpu(p).oper[0]^.ref^.symbol);
  1853. l:=0;
  1854. WasLast:=False;
  1855. GetNextInstruction(p, hp1);
  1856. after_p := hp1;
  1857. while assigned(hp1) and
  1858. (l<=4) and
  1859. CanBeCond(hp1) and
  1860. { stop on labels }
  1861. not(hp1.typ=ait_label) and
{ make sure we can still recognize the BccB2Cond case }
  1863. not((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B)) do
  1864. begin
  1865. inc(l);
  1866. if MustBeLast(hp1) then
  1867. begin
  1868. WasLast:=True;
  1869. GetNextInstruction(hp1,hp1);
  1870. break;
  1871. end
  1872. else
  1873. GetNextInstruction(hp1,hp1);
  1874. end;
  1875. if assigned(hp1) then
  1876. begin
  1877. if FindLabel(Label_X, hp1) then
  1878. begin
  1879. if (l<=4) and (l>0) then
  1880. begin
  1881. ConvertInstructins(after_p, inverse_cond(taicpu(p).condition));
  1882. DebugMsg(SPeepholeOptimization + 'Bcc2Cond done', p);
{ wait with removing, otherwise GetNextInstruction could
ignore the label if its only use was in the jump
that was moved away }
  1886. Label_X.decrefs;
  1887. RemoveCurrentP(p, after_p);
  1888. Result := True;
  1889. Exit;
  1890. end;
  1891. end
  1892. else
  1893. { do not perform further optimizations if there is an instruction
  1894. in block #1 which cannot be optimized.
  1895. }
  1896. if not WasLast then
  1897. begin
  1898. { check further for
  1899. Bcc xxx
  1900. <several instructions 1>
  1901. B yyy
  1902. xxx:
  1903. <several instructions 2>
  1904. yyy:
  1905. }
  1906. { hp2 points to jmp yyy }
  1907. hp2:=hp1;
  1908. { skip hp2 to xxx }
  1909. if assigned(hp2) and
  1910. (l<=3) and
  1911. (hp2.typ=ait_instruction) and
  1912. (taicpu(hp2).is_jmp) and
  1913. (taicpu(hp2).condition=C_None) and
  1914. GetNextInstruction(hp2, hp1) and
  1915. { real label and jump, no further references to the
  1916. label are allowed }
  1917. (Label_X.getrefs = 1) and
  1918. FindLabel(Label_X, hp1) then
  1919. begin
  1920. Label_Y := TAsmLabel(taicpu(hp2).oper[0]^.ref^.symbol);
  1921. l:=0;
{ skip hp1 and hp3 to <several instructions 2> }
  1923. GetNextInstruction(hp1, hp1);
  1924. hp3 := hp1;
  1925. while assigned(hp1) and
  1926. CanBeCond(hp1) and
  1927. (l<=3) do
  1928. begin
  1929. inc(l);
  1930. if MustBeLast(hp1) then
  1931. begin
  1932. GetNextInstruction(hp1, hp1);
  1933. break;
  1934. end
  1935. else
  1936. GetNextInstruction(hp1, hp1);
  1937. end;
  1938. { hp1 points to yyy: }
  1939. if assigned(hp1) and
  1940. FindLabel(Label_Y, hp1) then
  1941. begin
  1942. ConvertInstructins(after_p, inverse_cond(taicpu(p).condition));
  1943. ConvertInstructins(hp3, taicpu(p).condition);
  1944. DebugMsg(SPeepholeOptimization + 'BccB2Cond done', after_p);
  1945. { remove B }
  1946. Label_Y.decrefs;
  1947. RemoveInstruction(hp2);
  1948. { remove Bcc }
  1949. Label_X.decrefs;
  1950. RemoveCurrentP(p, after_p);
  1951. Result := True;
  1952. Exit;
  1953. end;
  1954. end;
  1955. end;
  1956. end;
  1957. end;
  1958. end;
  1959. function TCpuAsmOptimizer.OptPass2CMP(var p: tai): Boolean;
  1960. var
  1961. hp1, hp_last: tai;
  1962. begin
  1963. Result := False;
  1964. if not GetNextInstructionUsingReg(p, hp1, NR_DEFAULTFLAGS) then
  1965. Exit;
  1966. if (hp1.typ = ait_label) or
  1967. (
  1968. (hp1.typ = ait_instruction) and
  1969. (taicpu(hp1).condition = C_None) and
  1970. (
  1971. RegModifiedByInstruction(NR_DEFAULTFLAGS, hp1) or
  1972. is_calljmp(taicpu(hp1).opcode)
  1973. )
  1974. ) then
  1975. begin
  1976. { The comparison is a null operation }
  1977. DebugMsg(SPeepholeOptimization + 'CMP -> nop', p);
  1978. RemoveCurrentP(p);
  1979. Result := True;
  1980. Exit;
  1981. end;
  1982. {
  1983. change
  1984. <op> reg,x,y
  1985. cmp reg,#0
  1986. into
  1987. <op>s reg,x,y
  1988. }
  1989. if (taicpu(p).oppostfix = PF_None) and
  1990. (taicpu(p).oper[1]^.val = 0) and
{ be careful here: following instructions could use other flags;
however, after a jump FPC never depends on the value of the flags }
  1993. { All above instructions set Z and N according to the following
  1994. Z := result = 0;
  1995. N := result[31];
  1996. EQ = Z=1; NE = Z=0;
  1997. MI = N=1; PL = N=0; }
  1998. (MatchInstruction(hp1, [A_B, A_CMP, A_CMN, A_TST, A_TEQ], [C_EQ,C_NE,C_MI,C_PL], []) or
{ mov is also possible, but only if there is no shifter operand: it could be an rrx,
and we are too lazy to check whether it is rrx or something else }
  2001. (MatchInstruction(hp1, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp1).ops=2))) and
  2002. GetLastInstruction(p, hp_last) and
  2003. MatchInstruction(hp_last, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,
  2004. A_EOR,A_AND,A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
  2005. (
  2006. { mlas is only allowed in arm mode }
  2007. (taicpu(hp_last).opcode<>A_MLA) or
  2008. (current_settings.instructionset<>is_thumb)
  2009. ) and
  2010. (taicpu(hp_last).oper[0]^.reg = taicpu(p).oper[0]^.reg) and
  2011. assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp1.Next))) then
  2012. begin
  2013. DebugMsg(SPeepholeOptimization + 'OpCmp2OpS done', hp_last);
  2014. taicpu(hp_last).oppostfix:=PF_S;
  2015. { move flag allocation if possible }
  2016. hp1:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp_last.Next));
  2017. if assigned(hp1) then
  2018. begin
  2019. asml.Remove(hp1);
  2020. asml.insertbefore(hp1, hp_last);
  2021. end;
  2022. RemoveCurrentP(p);
  2023. Result:=true;
  2024. end;
  2025. end;
  2026. function TCpuAsmOptimizer.OptPass2STR(var p: tai): Boolean;
  2027. var
  2028. hp1: tai;
  2029. Postfix: TOpPostfix;
  2030. OpcodeStr: shortstring;
  2031. begin
  2032. Result := False;
  2033. { Try to merge two STRs into an STM instruction }
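{ Illustrative example: str r0,[r2] / str r1,[r2,#4] -> stm r2,{r0,r1};
descending offsets become STMDA }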
  2034. if not(GenerateThumbCode) and (taicpu(p).oper[1]^.typ = top_ref) and
  2035. (taicpu(p).oper[1]^.ref^.addressmode = AM_OFFSET) and
  2036. (
  2037. (taicpu(p).oper[1]^.ref^.base = NR_NO) or
  2038. (taicpu(p).oper[1]^.ref^.index = NR_NO)
  2039. ) and
  2040. (taicpu(p).oppostfix = PF_None) and
  2041. (getregtype(taicpu(p).oper[0]^.reg) = R_INTREGISTER) then
  2042. begin
  2043. hp1 := p;
  2044. while GetNextInstruction(hp1, hp1) and (hp1.typ = ait_instruction) and
  2045. (taicpu(hp1).opcode = A_STR) do
  2046. if (taicpu(hp1).condition = taicpu(p).condition) and
  2047. (taicpu(hp1).oppostfix = PF_None) and
  2048. (getregtype(taicpu(hp1).oper[0]^.reg) = R_INTREGISTER) and
  2049. (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  2050. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[1]^.ref^.base) and
  2051. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[1]^.ref^.index) and
  2052. (
  2053. (
  2054. (taicpu(p).oper[1]^.ref^.offset = 0) and
  2055. (getsupreg(taicpu(hp1).oper[0]^.reg) > getsupreg(taicpu(p).oper[0]^.reg)) and
  2056. (abs(taicpu(hp1).oper[1]^.ref^.offset) = 4)
  2057. ) or (
  2058. (taicpu(hp1).oper[1]^.ref^.offset = 0) and
  2059. (getsupreg(taicpu(hp1).oper[0]^.reg) < getsupreg(taicpu(p).oper[0]^.reg)) and
  2060. (abs(taicpu(p).oper[1]^.ref^.offset) = 4)
  2061. )
  2062. ) then
  2063. begin
  2064. if (getsupreg(taicpu(hp1).oper[0]^.reg) < getsupreg(taicpu(p).oper[0]^.reg)) xor
  2065. (taicpu(hp1).oper[1]^.ref^.offset < taicpu(p).oper[1]^.ref^.offset) then
  2066. begin
  2067. Postfix := PF_DA;
  2068. OpcodeStr := 'DA';
  2069. end
  2070. else
  2071. begin
  2072. Postfix := PF_None;
  2073. OpcodeStr := '';
  2074. end;
  2075. taicpu(hp1).oper[1]^.ref^.offset := 0;
  2076. if taicpu(hp1).oper[1]^.ref^.index = NR_NO then
  2077. begin
  2078. taicpu(hp1).oper[1]^.ref^.index := taicpu(hp1).oper[1]^.ref^.base;
  2079. taicpu(hp1).oper[1]^.ref^.base := NR_NO;
  2080. end;
  2081. taicpu(p).opcode := A_STM;
  2082. taicpu(p).loadregset(1, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg), getsupreg(taicpu(hp1).oper[0]^.reg)]);
  2083. taicpu(p).loadref(0, taicpu(hp1).oper[1]^.ref^);
  2084. taicpu(p).oppostfix := Postfix;
  2085. RemoveInstruction(hp1);
  2086. DebugMsg(SPeepholeOptimization + 'Merging stores: STR/STR -> STM' + OpcodeStr, p);
  2087. Result := True;
  2088. Exit;
  2089. end;
  2090. end;
  2091. end;
  2092. function TCpuAsmOptimizer.OptPass2STM(var p: tai): Boolean;
  2093. var
  2094. hp1: tai;
  2095. CorrectOffset:ASizeInt;
  2096. i, LastReg: TSuperRegister;
  2097. Postfix: TOpPostfix;
  2098. OpcodeStr: shortstring;
  2099. basereg : tregister;
  2100. begin
  2101. Result := False;
  2102. { See if STM/STR can be merged into a single STM }
  2103. { taicpu(p).opcode is A_STM, so first operand is a memory reference }
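{ Illustrative example: stm r2,{r0,r1} / str r3,[r2,#8] -> stm r2,{r0,r1,r3} }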
  2104. if (taicpu(p).oper[0]^.ref^.addressmode = AM_OFFSET) then
  2105. begin
  2106. { Only try to handle simple base reg, without index }
  2107. if (taicpu(p).oper[0]^.ref^.index = NR_NO) then
  2108. basereg:=taicpu(p).oper[0]^.ref^.base
  2109. else if (taicpu(p).oper[0]^.ref^.base = NR_NO) and
  2110. (taicpu(p).oper[0]^.ref^.shiftmode = SM_NONE) then
  2111. basereg:=taicpu(p).oper[0]^.ref^.index
  2112. else
  2113. exit;
  2114. CorrectOffset := 0;
  2115. LastReg := RS_NO;
  2116. for i in taicpu(p).oper[1]^.regset^ do
  2117. begin
  2118. LastReg := i;
  2119. Inc(CorrectOffset, 4);
  2120. end;
{ This while loop effectively does a selection sort on any STR
instructions that follow }
  2123. hp1 := p;
  2124. while (LastReg < maxcpuregister) and
  2125. GetNextInstruction(hp1, hp1) and (hp1.typ = ait_instruction) and
  2126. (taicpu(hp1).opcode = A_STR) and
  2127. (taicpu(hp1).oper[1]^.typ = top_ref) do
  2128. if (taicpu(hp1).condition = taicpu(p).condition) and
  2129. (taicpu(hp1).oppostfix = PF_None) and
  2130. (getregtype(taicpu(hp1).oper[0]^.reg) = R_INTREGISTER) and
  2131. (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  2132. (taicpu(hp1).oper[1]^.ref^.shiftmode = SM_NONE) and
  2133. (
  2134. (
  2135. (taicpu(hp1).oper[1]^.ref^.base = NR_NO) and
  2136. (taicpu(hp1).oper[1]^.ref^.index = basereg)
  2137. ) or (
  2138. (taicpu(hp1).oper[1]^.ref^.index = NR_NO) and
  2139. (taicpu(hp1).oper[1]^.ref^.base = basereg)
  2140. )
  2141. ) and
  2142. { Next register must be later in the set }
  2143. (getsupreg(taicpu(hp1).oper[0]^.reg) > LastReg) and
  2144. (
  2145. (
  2146. (taicpu(p).oppostfix = PF_None) and
  2147. (taicpu(hp1).oper[1]^.ref^.offset = CorrectOffset)
  2148. ) or (
  2149. (taicpu(p).oppostfix = PF_DA) and
  2150. (taicpu(hp1).oper[1]^.ref^.offset = -CorrectOffset)
  2151. )
  2152. ) then
  2153. begin
  2154. { Increment the reference values ready for the next STR instruction to find }
  2155. LastReg := getsupreg(taicpu(hp1).oper[0]^.reg);
  2156. Inc(CorrectOffset, 4);
  2157. if (taicpu(p).oppostfix = PF_DA) then
  2158. OpcodeStr := 'DA'
  2159. else
  2160. OpcodeStr := '';
  2161. Include(taicpu(p).oper[1]^.regset^, LastReg);
  2162. DebugMsg(SPeepholeOptimization + 'Merging stores: STM' + OpcodeStr + '/STR -> STM' + OpcodeStr, hp1);
  2163. RemoveInstruction(hp1);
  2164. Result := True;
  2165. { See if we can find another one to merge }
  2166. hp1 := p;
  2167. Continue;
  2168. end;
  2169. end;
  2170. end;
  2171. function TCpuAsmOptimizer.PrePeepHoleOptsCpu(var p: tai): Boolean;
  2172. begin
  2173. result := false;
  2174. if p.typ=ait_instruction then
  2175. begin
  2176. case taicpu(p).opcode of
  2177. A_SBFX,
  2178. A_UBFX:
  2179. Result:=OptPreSBFXUBFX(p);
  2180. else
  2181. ;
  2182. end;
  2183. end;
  2184. end;
  2185. function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2186. begin
  2187. result := false;
  2188. if p.typ = ait_instruction then
  2189. begin
  2190. case taicpu(p).opcode of
  2191. A_CMP:
  2192. Result := OptPass1CMP(p);
  2193. A_STR:
  2194. Result := OptPass1STR(p);
  2195. A_LDR:
  2196. Result := OptPass1LDR(p);
  2197. A_MOV:
  2198. Result := OptPass1MOV(p);
  2199. A_MOVW:
  2200. Result := OptPass1MOVW(p);
  2201. A_AND:
  2202. Result := OptPass1And(p);
  2203. A_ADD,
  2204. A_SUB:
  2205. Result := OptPass1ADDSUB(p);
  2206. A_MUL:
Result := OptPass1MUL(p);
  2208. A_ADC,
  2209. A_RSB,
  2210. A_RSC,
  2211. A_SBC,
  2212. A_BIC,
  2213. A_EOR,
  2214. A_ORR,
  2215. A_MLA,
  2216. A_MLS,
  2217. A_QADD,A_QADD16,A_QADD8,
  2218. A_QSUB,A_QSUB16,A_QSUB8,
  2219. A_QDADD,A_QDSUB,A_QASX,A_QSAX,
  2220. A_SHADD16,A_SHADD8,A_UHADD16,A_UHADD8,
  2221. A_SHSUB16,A_SHSUB8,A_UHSUB16,A_UHSUB8,
  2222. A_PKHTB,A_PKHBT,
  2223. A_SMUAD,A_SMUSD:
  2224. Result := OptPass1DataCheckMov(p);
  2225. {$ifdef dummy}
  2226. A_MVN:
Result := OptPass1MVN(p);
  2228. {$endif dummy}
  2229. A_UXTB:
  2230. Result := OptPass1UXTB(p);
  2231. A_UXTH:
  2232. Result := OptPass1UXTH(p);
  2233. A_SXTB:
  2234. Result := OptPass1SXTB(p);
  2235. A_SXTH:
  2236. Result := OptPass1SXTH(p);
  2237. A_STM:
  2238. Result := OptPass1STM(p);
  2239. A_VMOV:
  2240. Result := OptPass1VMov(p);
  2241. A_VLDR,
  2242. A_VADD,
  2243. A_VMUL,
  2244. A_VDIV,
  2245. A_VSUB,
  2246. A_VSQRT,
  2247. A_VNEG,
  2248. A_VCVT,
  2249. A_VABS:
  2250. Result := OptPass1VOp(p);
  2251. A_PUSH:
  2252. Result := OptPass1Push(p);
  2253. else
  2254. ;
  2255. end;
  2256. end;
  2257. end;
  2258. function TCpuAsmOptimizer.PeepHoleOptPass2Cpu(var p: tai): boolean;
  2259. begin
  2260. result := False;
  2261. if p.typ = ait_instruction then
  2262. begin
  2263. case taicpu(p).opcode of
  2264. A_AND,
  2265. A_ORR,
  2266. A_EOR,
  2267. A_BIC,
  2268. A_ORN:
  2269. Result := OptPass2Bitwise(p);
  2270. A_CMP:
  2271. Result := OptPass2CMP(p);
  2272. A_B:
  2273. Result := OptPass2Bcc(p);
  2274. A_STM:
  2275. Result := OptPass2STM(p);
  2276. A_STR:
  2277. Result := OptPass2STR(p);
  2278. A_TST:
  2279. Result := OptPass2TST(p);
  2280. else
  2281. ;
  2282. end;
  2283. end;
  2284. end;
{ instructions modifying the CPSR can only be the last instruction }
  2286. function MustBeLast(p : tai) : boolean;
  2287. begin
  2288. Result:=(p.typ=ait_instruction) and
  2289. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  2290. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  2291. (taicpu(p).oppostfix=PF_S));
  2292. end;
  2293. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  2294. begin
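{ conservatively assume that a BL uses every register }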
  2295. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  2296. Result:=true
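{ LDRD/STRD (PF_D) implicitly use the register following oper[0], so count that one as used too }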
  2297. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  2298. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  2299. Result:=true
  2300. else
  2301. begin
  2302. if SuperRegistersEqual(Reg, NR_DEFAULTFLAGS) and (p1.typ = ait_instruction) then
  2303. begin
  2304. { Conditional instruction reads CPSR register }
  2305. if (taicpu(p1).condition <> C_None) then
  2306. Exit(True);
  2307. { Comparison instructions (and procedural jump) }
  2308. if (taicpu(p1).opcode in [A_BL, A_CMP, A_CMN, A_TST, A_TEQ]) then
  2309. Exit(True);
{ Instruction sets the CPSR register due to the S suffix (floating-point
instructions won't raise false positives) }
  2312. if (taicpu(p1).oppostfix = PF_S) then
  2313. Exit(True)
  2314. end;
  2315. Result:=inherited RegInInstruction(Reg, p1);
  2316. end;
  2317. end;
  2318. const
{ set of opcodes which might or do write to memory }
  2320. { TODO : extend armins.dat to contain r/w info }
  2321. opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
  2322. A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
  2323. { adjust the register live information when swapping the two instructions p and hp1,
  2324. they must follow one after the other }
  2325. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
  2326. procedure CheckLiveEnd(reg : tregister);
  2327. var
  2328. supreg : TSuperRegister;
  2329. regtype : TRegisterType;
  2330. begin
  2331. if reg=NR_NO then
  2332. exit;
  2333. regtype:=getregtype(reg);
  2334. supreg:=getsupreg(reg);
  2335. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_end[supreg]=hp1) and
  2336. RegInInstruction(reg,p) then
  2337. cg.rg[regtype].live_end[supreg]:=p;
  2338. end;
  2339. procedure CheckLiveStart(reg : TRegister);
  2340. var
  2341. supreg : TSuperRegister;
  2342. regtype : TRegisterType;
  2343. begin
  2344. if reg=NR_NO then
  2345. exit;
  2346. regtype:=getregtype(reg);
  2347. supreg:=getsupreg(reg);
  2348. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_start[supreg]=p) and
  2349. RegInInstruction(reg,hp1) then
  2350. cg.rg[regtype].live_start[supreg]:=hp1;
  2351. end;
  2352. var
  2353. i : longint;
  2354. r : TSuperRegister;
  2355. begin
  2356. { assumption: p is directly followed by hp1 }
  2357. { if live of any reg used by p starts at p and hp1 uses this register then
  2358. set live start to hp1 }
  2359. for i:=0 to p.ops-1 do
  2360. case p.oper[i]^.typ of
  2361. Top_Reg:
  2362. CheckLiveStart(p.oper[i]^.reg);
  2363. Top_Ref:
  2364. begin
  2365. CheckLiveStart(p.oper[i]^.ref^.base);
  2366. CheckLiveStart(p.oper[i]^.ref^.index);
  2367. end;
  2368. Top_Shifterop:
  2369. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2370. Top_RegSet:
  2371. for r:=RS_R0 to RS_R15 do
  2372. if r in p.oper[i]^.regset^ then
  2373. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2374. else
  2375. ;
  2376. end;
  2377. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2378. set live end to p }
  2379. for i:=0 to hp1.ops-1 do
  2380. case hp1.oper[i]^.typ of
  2381. Top_Reg:
  2382. CheckLiveEnd(hp1.oper[i]^.reg);
  2383. Top_Ref:
  2384. begin
  2385. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2386. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2387. end;
  2388. Top_Shifterop:
CheckLiveEnd(hp1.oper[i]^.shifterop^.rs);
  2390. Top_RegSet:
  2391. for r:=RS_R0 to RS_R15 do
  2392. if r in hp1.oper[i]^.regset^ then
  2393. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2394. else
  2395. ;
  2396. end;
  2397. end;
  2398. function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
  2399. { TODO : schedule also forward }
  2400. { TODO : schedule distance > 1 }
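{ Try to move a load one instruction earlier (swap it with the preceding instruction p)
when its result is needed by the instruction that follows the load; this hides part of
the load-use latency }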
  2401. { returns true if p might be a load of a pc relative tls offset }
  2402. function PossibleTLSLoad(const p: tai) : boolean;
  2403. begin
  2404. Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
  2405. (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
  2406. (taicpu(p).oper[1]^.ref^.index=NR_PC)));
  2407. end;
  2408. var
  2409. hp1,hp2,hp3,hp4,hp5,insertpos : tai;
  2410. list : TAsmList;
  2411. begin
  2412. result:=true;
  2413. list:=TAsmList.create;
  2414. p:=BlockStart;
  2415. while p<>BlockEnd Do
  2416. begin
  2417. if (p.typ=ait_instruction) and
  2418. GetNextInstruction(p,hp1) and
  2419. (hp1.typ=ait_instruction) and
  2420. (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
  2421. (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
{ for now we don't reschedule if the previous instruction potentially changes a memory location }
  2423. ( (not(taicpu(p).opcode in opcode_could_mem_write) and
  2424. not(RegModifiedByInstruction(NR_PC,p))
  2425. ) or
  2426. ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
  2427. ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
  2428. (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
  2429. (taicpu(hp1).oper[1]^.ref^.offset=0)
  2430. )
  2431. ) or
{ try to prove that the memory accesses don't overlap }
  2433. ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
  2434. (taicpu(p).oper[1]^.typ = top_ref) and
  2435. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  2436. (taicpu(p).oppostfix=PF_None) and
  2437. (taicpu(hp1).oppostfix=PF_None) and
  2438. (taicpu(p).oper[1]^.ref^.index=NR_NO) and
  2439. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
{ get operand sizes and check if the offset distance is large enough to ensure no overlap }
  2441. (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
  2442. )
  2443. )
  2444. ) and
  2445. GetNextInstruction(hp1,hp2) and
  2446. (hp2.typ=ait_instruction) and
{ loaded register used by next instruction?
if we ever support labels here (they could be skipped in theory), the gnu2 tls general-dynamic
code could get broken (the ldr before the bl may not be scheduled away from the bl),
so this case would need to be taken care of }
  2451. (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
  2452. { loaded register not used by previous instruction? }
  2453. not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
  2454. { same condition? }
  2455. (taicpu(p).condition=taicpu(hp1).condition) and
  2456. { first instruction might not change the register used as base }
  2457. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
  2458. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
  2459. ) and
  2460. { first instruction might not change the register used as index }
  2461. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
  2462. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
  2463. ) and
  2464. { if we modify the basereg AND the first instruction used that reg, we can not schedule }
  2465. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
  2466. not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
  2467. not(PossibleTLSLoad(p)) and
  2468. not(PossibleTLSLoad(hp1)) then
  2469. begin
  2470. hp3:=tai(p.Previous);
  2471. hp5:=tai(p.next);
  2472. asml.Remove(p);
{ if there are reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
associated with p, move them together with p }
  2475. { before the instruction? }
  2476. { find reg allocs,deallocs and PIC labels }
  2477. while assigned(hp3) and (hp3.typ<>ait_instruction) do
  2478. begin
  2479. if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
  2480. RegInInstruction(tai_regalloc(hp3).reg,p) )
  2481. or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
  2482. then
  2483. begin
  2484. hp4:=hp3;
  2485. hp3:=tai(hp3.Previous);
  2486. asml.Remove(hp4);
  2487. list.Insert(hp4);
  2488. end
  2489. else
  2490. hp3:=tai(hp3.Previous);
  2491. end;
  2492. list.Concat(p);
  2493. SwapRegLive(taicpu(p),taicpu(hp1));
  2494. { after the instruction? }
  2495. { find reg deallocs and reg syncs }
  2496. while assigned(hp5) and (hp5.typ<>ait_instruction) do
  2497. begin
  2498. if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
  2499. RegInInstruction(tai_regalloc(hp5).reg,p) then
  2500. begin
  2501. hp4:=hp5;
  2502. hp5:=tai(hp5.next);
  2503. asml.Remove(hp4);
  2504. list.Concat(hp4);
  2505. end
  2506. else
  2507. hp5:=tai(hp5.Next);
  2508. end;
  2509. asml.Remove(hp1);
  2510. { if there are address labels associated with hp2, those must
  2511. stay with hp2 (e.g. for GOT-less PIC) }
  2512. insertpos:=hp2;
  2513. while assigned(hp2.previous) and
  2514. (tai(hp2.previous).typ<>ait_instruction) do
  2515. begin
  2516. hp2:=tai(hp2.previous);
  2517. if (hp2.typ=ait_label) and
  2518. (tai_label(hp2).labsym.typ=AT_ADDR) then
  2519. insertpos:=hp2;
  2520. end;
  2521. {$ifdef DEBUG_PREREGSCHEDULER}
  2522. asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
  2523. {$endif DEBUG_PREREGSCHEDULER}
  2524. asml.InsertBefore(hp1,insertpos);
  2525. asml.InsertListBefore(insertpos,list);
  2526. p:=tai(p.next);
  2527. end
  2528. else if p.typ=ait_instruction then
  2529. p:=hp1
  2530. else
  2531. p:=tai(p.next);
  2532. end;
  2533. list.Free;
  2534. end;
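{ If the instruction at p is the last one covered by a preceding Thumb-2 IT block, shorten
that block by one slot (an IT that covers only p is removed entirely); used when that
instruction is removed or replaced }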
  2535. procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  2536. var
  2537. hp : tai;
  2538. l : longint;
  2539. begin
  2540. hp := tai(p.Previous);
  2541. l := 1;
  2542. while assigned(hp) and
  2543. (l <= 4) do
  2544. begin
  2545. if hp.typ=ait_instruction then
  2546. begin
  2547. if (taicpu(hp).opcode>=A_IT) and
  2548. (taicpu(hp).opcode <= A_ITTTT) then
  2549. begin
  2550. if (taicpu(hp).opcode = A_IT) and
  2551. (l=1) then
  2552. list.Remove(hp)
  2553. else
  2554. case taicpu(hp).opcode of
  2555. A_ITE:
  2556. if l=2 then taicpu(hp).opcode := A_IT;
  2557. A_ITT:
  2558. if l=2 then taicpu(hp).opcode := A_IT;
  2559. A_ITEE:
  2560. if l=3 then taicpu(hp).opcode := A_ITE;
  2561. A_ITTE:
  2562. if l=3 then taicpu(hp).opcode := A_ITT;
  2563. A_ITET:
  2564. if l=3 then taicpu(hp).opcode := A_ITE;
  2565. A_ITTT:
  2566. if l=3 then taicpu(hp).opcode := A_ITT;
  2567. A_ITEEE:
  2568. if l=4 then taicpu(hp).opcode := A_ITEE;
  2569. A_ITTEE:
  2570. if l=4 then taicpu(hp).opcode := A_ITTE;
  2571. A_ITETE:
  2572. if l=4 then taicpu(hp).opcode := A_ITET;
  2573. A_ITTTE:
  2574. if l=4 then taicpu(hp).opcode := A_ITTT;
  2575. A_ITEET:
  2576. if l=4 then taicpu(hp).opcode := A_ITEE;
  2577. A_ITTET:
  2578. if l=4 then taicpu(hp).opcode := A_ITTE;
  2579. A_ITETT:
  2580. if l=4 then taicpu(hp).opcode := A_ITET;
  2581. A_ITTTT:
  2582. begin
  2583. if l=4 then taicpu(hp).opcode := A_ITTT;
  2584. end
  2585. else
  2586. ;
  2587. end;
  2588. break;
  2589. end;
  2590. {else if (taicpu(hp).condition<>taicpu(p).condition) or
  2591. (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
  2592. break;}
  2593. inc(l);
  2594. end;
  2595. hp := tai(hp.Previous);
  2596. end;
  2597. end;
  2598. function TCpuThumb2AsmOptimizer.OptPass1STM(var p: tai): boolean;
  2599. var
  2600. hp : taicpu;
  2601. begin
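{ stmfd/stmdb sp!,{r0..r7,lr} -> push {r0..r7,lr}:
PUSH has a shorter 16-bit encoding for the low registers and lr }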
  2602. result:=false;
  2603. if MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2604. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2605. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2606. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2607. begin
  2608. DebugMsg('Peephole Stm2Push done', p);
  2609. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2610. AsmL.InsertAfter(hp, p);
  2611. asml.Remove(p);
  2612. p:=hp;
  2613. result:=true;
  2614. end;
  2615. end;
  2616. function TCpuThumb2AsmOptimizer.OptPass1LDM(var p: tai): boolean;
  2617. var
  2618. hp : taicpu;
  2619. begin
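{ ldmfd/ldmia sp!,{r0..r7,pc} -> pop {r0..r7,pc}:
POP has a shorter 16-bit encoding for the low registers and pc }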
  2620. result:=false;
  2621. if MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2622. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2623. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2624. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2625. begin
  2626. DebugMsg('Peephole Ldm2Pop done', p);
  2627. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2628. asml.InsertBefore(hp, p);
  2629. asml.Remove(p);
  2630. p.Free;
  2631. p:=hp;
  2632. result:=true;
  2633. end;
  2634. end;
  2635. function TCpuThumb2AsmOptimizer.OptPass1AndThumb2(var p : tai) : boolean;
  2636. begin
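{ and rX,#255 -> uxtb rX,rX and and rX,#65535 -> uxth rX,rX;
the three-operand form is rewritten the same way }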
  2637. result:=false;
  2638. if MatchInstruction(p, [A_AND], [], [PF_None]) and
  2639. (taicpu(p).ops = 2) and
  2640. (taicpu(p).oper[1]^.typ=top_const) and
  2641. ((taicpu(p).oper[1]^.val=255) or
  2642. (taicpu(p).oper[1]^.val=65535)) then
  2643. begin
  2644. DebugMsg('Peephole AndR2Uxt done', p);
  2645. if taicpu(p).oper[1]^.val=255 then
  2646. taicpu(p).opcode:=A_UXTB
  2647. else
  2648. taicpu(p).opcode:=A_UXTH;
  2649. taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
  2650. result := true;
  2651. end
  2652. else if MatchInstruction(p, [A_AND], [], [PF_None]) and
  2653. (taicpu(p).ops = 3) and
  2654. (taicpu(p).oper[2]^.typ=top_const) and
  2655. ((taicpu(p).oper[2]^.val=255) or
  2656. (taicpu(p).oper[2]^.val=65535)) then
  2657. begin
  2658. DebugMsg('Peephole AndRR2Uxt done', p);
  2659. if taicpu(p).oper[2]^.val=255 then
  2660. taicpu(p).opcode:=A_UXTB
  2661. else
  2662. taicpu(p).opcode:=A_UXTH;
  2663. taicpu(p).ops:=2;
  2664. result := true;
  2665. end;
  2666. end;
  2667. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2668. begin
  2669. result:=false;
  2670. if inherited PeepHoleOptPass1Cpu(p) then
  2671. result:=true
  2672. else if p.typ=ait_instruction then
  2673. case taicpu(p).opcode of
  2674. A_STM:
  2675. result:=OptPass1STM(p);
  2676. A_LDM:
  2677. result:=OptPass1LDM(p);
  2678. A_AND:
  2679. result:=OptPass1AndThumb2(p);
  2680. else
  2681. ;
  2682. end;
  2683. end;
  2684. procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
  2685. var
  2686. p,hp1,hp2: tai;
  2687. l : longint;
  2688. condition : tasmcond;
  2689. { UsedRegs, TmpUsedRegs: TRegSet; }
  2690. begin
  2691. p := BlockStart;
  2692. { UsedRegs := []; }
  2693. while (p <> BlockEnd) Do
  2694. begin
  2695. { UpdateUsedRegs(UsedRegs, tai(p.next)); }
  2696. case p.Typ Of
  2697. Ait_Instruction:
  2698. begin
  2699. case taicpu(p).opcode Of
  2700. A_B:
  2701. if taicpu(p).condition<>C_None then
  2702. begin
  2703. { check for
  2704. Bxx xxx
  2705. <several instructions>
  2706. xxx:
  2707. }
  2708. l:=0;
  2709. GetNextInstruction(p, hp1);
  2710. while assigned(hp1) and
  2711. (l<=4) and
  2712. CanBeCond(hp1) and
  2713. { stop on labels }
  2714. not(hp1.typ=ait_label) do
  2715. begin
  2716. inc(l);
  2717. if MustBeLast(hp1) then
  2718. begin
  2719. //hp1:=nil;
  2720. GetNextInstruction(hp1,hp1);
  2721. break;
  2722. end
  2723. else
  2724. GetNextInstruction(hp1,hp1);
  2725. end;
  2726. if assigned(hp1) then
  2727. begin
  2728. if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2729. begin
  2730. if (l<=4) and (l>0) then
  2731. begin
  2732. condition:=inverse_cond(taicpu(p).condition);
  2733. hp2:=p;
  2734. GetNextInstruction(p,hp1);
  2735. p:=hp1;
  2736. repeat
  2737. if hp1.typ=ait_instruction then
  2738. taicpu(hp1).condition:=condition;
  2739. if MustBeLast(hp1) then
  2740. begin
  2741. GetNextInstruction(hp1,hp1);
  2742. break;
  2743. end
  2744. else
  2745. GetNextInstruction(hp1,hp1);
  2746. until not(assigned(hp1)) or
  2747. not(CanBeCond(hp1)) or
  2748. (hp1.typ=ait_label);
{ wait with removing, otherwise GetNextInstruction could
ignore the label if its only use was in the jump
that was moved away }
  2752. asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
  2753. DecrementPreceedingIT(asml, hp2);
  2754. case l of
  2755. 1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
  2756. 2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
  2757. 3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
  2758. 4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
  2759. end;
  2760. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2761. asml.remove(hp2);
  2762. hp2.free;
  2763. continue;
  2764. end;
  2765. end;
  2766. end;
  2767. end;
  2768. else
  2769. ;
  2770. end;
  2771. end;
  2772. else
  2773. ;
  2774. end;
  2775. p := tai(p.next)
  2776. end;
  2777. end;
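
  { Post pass: switch instructions to their flag-setting (S) forms where the
    flags register is not live; for Thumb-2 this typically allows the
    narrower 16-bit encodings to be selected. }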
  function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
    begin
      result:=false;
      if p.typ = ait_instruction then
        begin
          if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
            (taicpu(p).oper[1]^.typ=top_const) and
            (taicpu(p).oper[1]^.val >= 0) and
            (taicpu(p).oper[1]^.val < 256) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mov2Movs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
            (taicpu(p).oper[1]^.typ=top_reg) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mvn2Mvns done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            (taicpu(p).oper[2]^.typ=top_const) and
            (taicpu(p).oper[2]^.val=0) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Rsb2Rsbs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
            (taicpu(p).oper[2]^.typ=top_const) and
            (taicpu(p).oper[2]^.val >= 0) and
            (taicpu(p).oper[2]^.val < 256) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
            (taicpu(p).ops = 2) and
            (taicpu(p).oper[1]^.typ=top_reg) and
            (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
            (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ=top_reg) then
            begin
              DebugMsg('Peephole AddRRR2AddRR done', p);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              result:=true;
            end
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ=top_reg) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole opXXY2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
            begin
              DebugMsg('Peephole opXXY2opXY done', p);
              taicpu(p).ops := 2;
              if taicpu(p).oper[2]^.typ=top_reg then
                taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
              else
                taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              result:=true;
            end
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole opXYX2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
          else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
            (taicpu(p).ops=3) and
            (taicpu(p).oper[2]^.typ=top_shifterop) and
            (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
            //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mov2Shift done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              case taicpu(p).oper[2]^.shifterop^.shiftmode of
                SM_LSL: taicpu(p).opcode:=A_LSL;
                SM_LSR: taicpu(p).opcode:=A_LSR;
                SM_ASR: taicpu(p).opcode:=A_ASR;
                SM_ROR: taicpu(p).opcode:=A_ROR;
                else
                  internalerror(2019050912);
              end;
              if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
                taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
              else
                taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
              result:=true;
            end
        end;
    end;
begin
  casmoptimizer:=TCpuAsmOptimizer;
  cpreregallocscheduler:=TCpuPreRegallocScheduler;
End.