{ NOTE(review): removed several lines of line-number/pagination residue left
  over from an HTML export of this file; they were not part of the source. }
  1. {
  2. Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
  3. Development Team
  4. This unit implements the ARM optimizer object
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 2 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program; if not, write to the Free Software
  15. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  16. ****************************************************************************
  17. }
  18. Unit aoptcpu;
  19. {$i fpcdefs.inc}
  20. { $define DEBUG_PREREGSCHEDULER}
  21. { $define DEBUG_AOPTCPU}
  22. Interface
  23. uses
  24. cgbase, cgutils, cpubase, aasmtai,
  25. aasmcpu,
  26. aopt, aoptobj, aoptarm;
  27. Type
  28. { TCpuAsmOptimizer }
{ ARM-specific peephole optimizer.  Adds ARM/Thumb pattern matching on top of
  the generic TARMAsmOptimizer passes. }
  29. TCpuAsmOptimizer = class(TARMAsmOptimizer)
  30. { Can't be done in some cases due to the limited range of jumps }
  31. function CanDoJumpOpts: Boolean; override;
  32. { uses the same constructor as TAopObj }
  33. function PrePeepHoleOptsCpu(var p: tai): Boolean; override;
  34. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  35. function PeepHoleOptPass2Cpu(var p: tai): boolean; override;
  36. Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
{ Removes a vmov following p when the register copy is provably redundant;
  see the implementation for the exact matching rules. }
  37. function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;
  38. { gets the next tai object after current that contains info relevant
  39. to the optimizer in p1 which used the given register or does a
  40. change in program flow.
  41. If there is none, it returns false and
  42. sets p1 to nil }
  43. Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  44. { outputs a debug message into the assembler file }
  45. procedure DebugMsg(const s: string; p: tai);
  46. function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
  47. function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
  48. { With these routines, there's optimisation code that's general for all ARM platforms }
  49. function OptPass1And(var p: tai): Boolean; override;
  50. function OptPass1LDR(var p: tai): Boolean; override;
  51. function OptPass1STR(var p: tai): Boolean; override;
  52. protected
{ Fold a preceding add/sub of the base register into a following ldr/str as a
  pre-indexed access, resp. a following add/sub into a preceding ldr/str as a
  post-indexed access. }
  53. function LookForPreindexedPattern(p: taicpu): boolean;
  54. function LookForPostindexedPattern(p: taicpu): boolean;
  55. { Individual optimisation routines }
  56. function OptPass1DataCheckMov(var p: tai): Boolean;
  57. function OptPass1ADDSUB(var p: tai): Boolean;
  58. function OptPass1CMP(var p: tai): Boolean;
  59. function OptPass1STM(var p: tai): Boolean;
  60. function OptPass1MOV(var p: tai): Boolean;
  61. function OptPass1MUL(var p: tai): Boolean;
  62. function OptPass1MVN(var p: tai): Boolean;
  63. function OptPass1VMov(var p: tai): Boolean;
  64. function OptPass1VOp(var p: tai): Boolean;
  65. function OptPass1Push(var p: tai): Boolean;
  66. function OptPass2Bcc(var p: tai): Boolean;
  67. function OptPass2CMP(var p: tai): Boolean;
  68. function OptPass2STM(var p: tai): Boolean;
  69. function OptPass2STR(var p: tai): Boolean;
  70. End;
{ Instruction scheduler that runs before register allocation. }
  71. TCpuPreRegallocScheduler = class(TAsmScheduler)
  72. function SchedulerPass1Cpu(var p: tai): boolean;override;
  73. procedure SwapRegLive(p, hp1: taicpu);
  74. end;
{ Thumb-2 variant: reuses the ARM optimizer and adds Thumb-2-only passes. }
  75. TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
  76. { uses the same constructor as TAopObj }
  77. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  78. procedure PeepHoleOptPass2;override;
  79. function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  80. protected
  81. function OptPass1AndThumb2(var p : tai) : boolean;
  82. function OptPass1LDM(var p : tai) : boolean;
  83. function OptPass1STM(var p : tai) : boolean;
  84. End;
{ NOTE(review): implementation not visible in this chunk - presumably reports
  instructions that must stay last in their block; confirm against the body. }
  85. function MustBeLast(p : tai) : boolean;
  86. Implementation
  87. uses
  88. cutils,verbose,globtype,globals,
  89. systems,
  90. cpuinfo,
  91. cgobj,procinfo,
  92. aasmbase,aasmdata,
  93. aoptutils;
  94. { Range check must be disabled explicitly as conversions between signed and unsigned
  95. 32-bit values are done without explicit typecasts }
  96. {$R-}
  function CanBeCond(p : tai) : boolean;
    begin
      { An instruction can be given a condition only in ARM mode, when it is
        not yet conditional and its opcode has a condition field. }
      Result:=false;
      if GenerateThumbCode then
        exit;
      if (p.typ<>ait_instruction) or
         (taicpu(p).condition<>C_None) then
        exit;
      { IT..ITTTT blocks, compare-and-branch and preload hints never take a
        condition }
      if ((taicpu(p).opcode>=A_IT) and (taicpu(p).opcode<=A_ITTTT)) or
         (taicpu(p).opcode=A_CBZ) or
         (taicpu(p).opcode=A_CBNZ) or
         (taicpu(p).opcode=A_PLD) then
        exit;
      { BLX with an immediate target cannot be made conditional.
        BL may need to be converted into BLX by the linker -- could possibly
        be allowed in case it's to a local symbol of which we know that it
        uses the same instruction set as the current one; so only accept BL
        and BLX when the target is a register. }
      if ((taicpu(p).opcode=A_BLX) or (taicpu(p).opcode=A_BL)) and
         (taicpu(p).oper[0]^.typ<>top_reg) then
        exit;
      Result:=true;
    end;
  function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
    begin
      { After "cmp reg,const", a "moveq reg,const" with the same register and
        the same constant is a no-op: the move only executes when reg already
        holds that value.  Remove it in that case. }
      Result:=
        (taicpu(movp).condition=C_EQ) and
        (taicpu(cmpp).oper[0]^.reg=taicpu(movp).oper[0]^.reg) and
        (taicpu(cmpp).oper[1]^.val=taicpu(movp).oper[1]^.val);
      if Result then
        begin
          asml.insertafter(tai_comment.Create(strpnew('Peephole Optimization: CmpMovMov - Removed redundant moveq')), movp);
          asml.remove(movp);
          movp.free;
        end;
    end;
  function AlignedToQWord(const ref : treference) : boolean;
    begin
      { (safe) heuristics to ensure alignment }
      Result:=false;
      if not(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) then
        exit;
      { positive offsets relative to the stack pointer: aligned when the
        offset is a multiple of 8 }
      if (ref.offset>=0) and
         ((ref.offset mod 8)=0) and
         ((ref.base=NR_R13) or
          (ref.index=NR_R13)) then
        Result:=true
      else
        { when using NR_R11, it has always a value of <qword align>+4 }
        Result:=
          (ref.offset<=0) and
          ((abs(ref.offset+4) mod 8)=0) and
          (current_procinfo.framepointer=NR_R11) and
          ((ref.base=NR_R11) or
           (ref.index=NR_R11));
    end;
  function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
    begin
      { Thumb-2 allows 0..4095 positive and -255..-1 negative immediates;
        ARM allows 12-bit immediates for plain and byte accesses and 8-bit
        immediates otherwise. }
      if GenerateThumb2Code then
        Result:=(aoffset<4096) and (aoffset>-256)
      else if pf in [PF_None,PF_B] then
        Result:=abs(aoffset)<4096
      else
        Result:=abs(aoffset)<256;
    end;
  154. function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
{ Returns true if instruction hp reads register reg through any of its
  operands: directly, via a register set, via a shifter operand, or as part
  of a memory reference (base or index). }
  155. var
  156. p: taicpu;
  157. i: longint;
  158. begin
  159. instructionLoadsFromReg := false;
  160. if not (assigned(hp) and (hp.typ = ait_instruction)) then
  161. exit;
  162. p:=taicpu(hp);
  163. i:=1;
  164. {For these instructions we have to start on oper[0]}
  165. if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
  166. A_CMP, A_CMN, A_TST, A_TEQ,
  167. A_B, A_BL, A_BX, A_BLX,
  168. A_SMLAL, A_UMLAL, A_VSTM, A_VLDM]) then i:=0;
  169. while(i<p.ops) do
  170. begin
  171. case p.oper[I]^.typ of
  172. top_reg:
{ Plain register read; for STRD the implicit second register (supreg+1)
  is also read. }
  173. instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
  174. { STRD }
  175. ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
  176. top_regset:
  177. instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
  178. top_shifterop:
  179. instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
  180. top_ref:
  181. instructionLoadsFromReg :=
  182. (p.oper[I]^.ref^.base = reg) or
  183. (p.oper[I]^.ref^.index = reg);
  184. else
  185. ;
  186. end;
{ LDM/VLDM only read their first operand (the base); the remaining operand
  is the register set being written, so stop after oper[0]. }
  187. if (i=0) and (p.opcode in [A_LDM,A_VLDM]) then
  188. exit;
  189. if instructionLoadsFromReg then
  190. exit; {Bailout if we found something}
  191. Inc(I);
  192. end;
  193. end;
  194. function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
{ Returns true if instruction hp overwrites register reg with a new value
  (as opposed to merely reading it, or adjusting it as a pre/post-indexed
  base register). }
  195. var
  196. p: taicpu;
  197. begin
  198. Result := false;
  199. if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
  200. exit;
  201. p := taicpu(hp);
  202. case p.opcode of
  203. { These operands do not write into a register at all }
  204. A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
  205. A_VCMP:
  206. exit;
  207. {Take care of post/preincremented store and loads, they will change their base register}
  208. A_STR, A_LDR:
  209. begin
  210. Result := false;
  211. { actually, this does not apply here because post-/preindexed does not mean that a register
  212. is loaded with a new value, it is only modified
  213. (taicpu(p).oper[1]^.typ=top_ref) and
  214. (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  215. (taicpu(p).oper[1]^.ref^.base = reg);
  216. }
  217. { STR does not load into its first register }
  218. if p.opcode = A_STR then
  219. exit;
{ A_LDR falls through to the generic oper[0] check below. }
  220. end;
  221. A_VSTR:
  222. begin
  223. Result := false;
  224. exit;
  225. end;
  226. { These four are writing into the first 2 register, UMLAL and SMLAL will also read from them }
  227. A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
  228. Result :=
  229. (p.oper[1]^.typ = top_reg) and
  230. (p.oper[1]^.reg = reg);
  231. {Loads to oper2 from coprocessor}
  232. {
  233. MCR/MRC is currently not supported in FPC
  234. A_MRC:
  235. Result :=
  236. (p.oper[2]^.typ = top_reg) and
  237. (p.oper[2]^.reg = reg);
  238. }
  239. {Loads to all register in the registerset}
  240. A_LDM, A_VLDM:
  241. Result := (getsupreg(reg) in p.oper[1]^.regset^);
  242. A_POP:
{ POP writes every register in its set and always adjusts the stack pointer. }
  243. Result := (getsupreg(reg) in p.oper[0]^.regset^) or
  244. (reg=NR_STACK_POINTER_REG);
  245. else
  246. ;
  247. end;
  248. if Result then
  249. exit;
{ Generic fallback: most remaining instructions write their first operand. }
  250. case p.oper[0]^.typ of
  251. {This is the case}
  252. top_reg:
  253. Result := (p.oper[0]^.reg = reg) or
  254. { LDRD also writes the implicit second register (supreg+1) }
  255. (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
  256. {LDM/STM might write a new value to their index register}
  257. top_ref:
  258. Result :=
  259. (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  260. (taicpu(p).oper[0]^.ref^.base = reg);
  261. else
  262. ;
  263. end;
  264. end;
  265. function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai; out
  266. Next: tai; const ref: TReference; StopOnStore: Boolean): Boolean;
{ Scans forward from Current for the next LDR/STR (or LDRD/STRD) whose memory
  operand equals ref.  Returns true with Next pointing at that instruction.
  The search aborts (returning false) at non-instructions, call/jump
  instructions, PC modifications, any store when StopOnStore is set, or -
  below optimization level 3 - after a single instruction. }
  267. begin
  268. Next:=Current;
  269. repeat
  270. Result:=GetNextInstruction(Next,Next);
  271. if Result and
  272. (Next.typ=ait_instruction) and
  273. (taicpu(Next).opcode in [A_LDR, A_STR]) and
  274. (
  275. ((taicpu(Next).ops = 2) and
  276. (taicpu(Next).oper[1]^.typ = top_ref) and
  277. RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
  278. ((taicpu(Next).ops = 3) and { LDRD/STRD }
  279. (taicpu(Next).oper[2]^.typ = top_ref) and
  280. RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
  281. ) then
  282. {We've found an instruction LDR or STR with the same reference}
  283. exit;
  284. until not(Result) or
  285. (Next.typ<>ait_instruction) or
  286. not(cs_opt_level3 in current_settings.optimizerswitches) or
  287. is_calljmp(taicpu(Next).opcode) or
  288. (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
  289. RegModifiedByInstruction(NR_PC,Next);
  290. Result:=false;
  291. end;
{ Debug helper: with DEBUG_AOPTCPU defined, DebugMsg inserts s as an
  assembler comment before p and SPeepholeOptimization carries the message
  prefix; otherwise DebugMsg is an empty inline no-op and the prefix is the
  empty string, so release builds pay no cost. }
  292. {$ifdef DEBUG_AOPTCPU}
  293. const
  294. SPeepholeOptimization: shortstring = 'Peephole Optimization: ';
  295. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
  296. begin
  297. asml.insertbefore(tai_comment.Create(strpnew(s)), p);
  298. end;
  299. {$else DEBUG_AOPTCPU}
  300. const
  301. SPeepholeOptimization = '';
  302. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
  303. begin
  304. end;
  305. {$endif DEBUG_AOPTCPU}
  function TCpuAsmOptimizer.CanDoJumpOpts: Boolean;
    begin
      { Cannot perform these jump optimisations when generating 16-bit Thumb
        code without Thumb-2 support, because of the limited branch range. }
      if (current_settings.instructionset = is_thumb) and
         not (CPUARM_HAS_THUMB2 in cpu_capabilities[current_settings.cputype]) then
        Result:=false
      else
        Result:=true;
    end;
  313. function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
{ If movp is a vmov that merely copies the result of p into another register
  and the source register dies immediately afterwards, retarget p to write
  the vmov destination directly and delete the vmov.  The register alloc/
  dealloc markers are moved accordingly.  Returns true when the vmov was
  removed; optimizer names the calling pass for the debug message. }
  314. var
  315. alloc,
  316. dealloc : tai_regalloc;
  317. hp1 : tai;
  318. begin
  319. Result:=false;
{ The vmov must have a compatible condition and postfix: either a plain copy
  in the same precision (or p is a VLDR), or p produces a double/single
  result (by postfix or by destination subregister) matched by a PF_F64/
  PF_F32 vmov respectively. }
  320. if ((MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  321. ((getregtype(taicpu(movp).oper[0]^.reg)=R_MMREGISTER) or (taicpu(p).opcode=A_VLDR))
  322. ) or
  323. (((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFD)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
  324. (((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFS)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
  325. ) and
  326. (taicpu(movp).ops=2) and
  327. MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
  328. { the destination register of the mov might not be used between p and movp }
  329. not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
  330. { Take care to only do this for instructions which REALLY load to the first register.
  331. Otherwise
  332. vstr reg0, [reg1]
  333. vmov reg2, reg0
  334. will be optimized to
  335. vstr reg2, [reg1]
  336. }
  337. regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
  338. begin
{ Only proceed when the copied register is deallocated right after the vmov,
  i.e. the copy was its last use. }
  339. dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
  340. if assigned(dealloc) then
  341. begin
  342. DebugMsg(SPeepholeOptimization + optimizer + ' removed superfluous vmov', movp);
  343. result:=true;
  344. { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
  345. and remove it if possible }
  346. asml.Remove(dealloc);
  347. alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
  348. if assigned(alloc) then
  349. begin
  350. asml.Remove(alloc);
  351. alloc.free;
  352. dealloc.free;
  353. end
  354. else
{ No matching allocation found: keep the deallocation, but move it to just
  after p since the register's life now ends there. }
  355. asml.InsertAfter(dealloc,p);
  356. { try to move the allocation of the target register }
  357. GetLastInstruction(movp,hp1);
  358. alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
  359. if assigned(alloc) then
  360. begin
  361. asml.Remove(alloc);
  362. asml.InsertBefore(alloc,p);
  363. { adjust used regs }
  364. IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
  365. end;
  366. { change
  367. vldr reg0,[reg1]
  368. vmov reg2,reg0
  369. into
  370. ldr reg2,[reg1]
  371. if reg2 is an int register
  372. }
  373. if (taicpu(p).opcode=A_VLDR) and (getregtype(taicpu(movp).oper[0]^.reg)=R_INTREGISTER) then
  374. taicpu(p).opcode:=A_LDR;
  375. { finally get rid of the mov }
  376. taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
  377. asml.remove(movp);
  378. movp.free;
  379. end;
  380. end;
  381. end;
  {
    optimize
      add/sub reg1,reg1,regY/const
      ...
      ldr/str regX,[reg1]
    into
      ldr/str regX,[reg1, regY/const]!

    Returns true when the ldr/str was rewritten to a pre-indexed access; the
    caller is then responsible for removing the now-redundant add/sub p.
  }
  function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
    var
      hp1: tai;
    begin
      if GenerateARMCode and
        (p.ops=3) and
        MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
        GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
        (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
        MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
        (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
        (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
        { don't fold when the base register is also the transferred register }
        (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
        (taicpu(hp1).oper[1]^.ref^.offset=0) and
        (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
        { a register index must stay unmodified inbetween; constant offsets
          are limited to 8 bits, or 12 bits for plain/byte accesses }
        (((p.oper[2]^.typ=top_reg) and
          (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
         ((p.oper[2]^.typ=top_const) and
          ((abs(p.oper[2]^.val) < 256) or
           ((abs(p.oper[2]^.val) < 4096) and
            (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
        begin
          taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;
          if p.oper[2]^.typ=top_reg then
            begin
              taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
              if p.opcode=A_ADD then
                taicpu(hp1).oper[1]^.ref^.signindex:=1
              else
                taicpu(hp1).oper[1]^.ref^.signindex:=-1;
            end
          else
            begin
              if p.opcode=A_ADD then
                taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
              else
                taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
            end;
          { emit a debug note, for consistency with LookForPostindexedPattern }
          DebugMsg(SPeepholeOptimization + 'Add/SubStr/Ldr2Str/Ldr Preindex done', hp1);
          result:=true;
        end
      else
        result:=false;
    end;
  433. {
  434. optimize
  435. ldr/str regX,[reg1]
  436. ...
  437. add/sub reg1,reg1,regY/const
  438. into
  439. ldr/str regX,[reg1], regY/const
  440. }
  441. function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
{ Rewrites p (an ldr/str with a plain zero-offset reference) into a
  post-indexed access by folding in a later add/sub of the base register,
  which is then removed.  Returns true on success. }
  442. var
  443. hp1 : tai;
  444. begin
  445. Result:=false;
  446. if (p.oper[1]^.typ = top_ref) and
  447. (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
  448. (p.oper[1]^.ref^.index=NR_NO) and
  449. (p.oper[1]^.ref^.offset=0) and
  450. GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
  451. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  452. MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
  453. (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
  454. (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
  455. (
  456. (taicpu(hp1).oper[2]^.typ=top_reg) or
  457. { valid offset? }
  458. ((taicpu(hp1).oper[2]^.typ=top_const) and
  459. ((abs(taicpu(hp1).oper[2]^.val)<256) or
  460. ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
  461. )
  462. )
  463. ) and
  464. { don't apply the optimization if the base register is loaded }
  465. (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
  466. not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
  467. { don't apply the optimization if the (new) index register is loaded }
{ NOTE(review): the two checks below read oper[2]^.reg even when oper[2] is
  top_const (the variant record then reinterprets the constant's bits as a
  register).  That can only reject the optimization spuriously, never accept
  it wrongly, but confirm this is intentional. }
  468. (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
  469. not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
  470. GenerateARMCode then
  471. begin
  472. DebugMsg(SPeepholeOptimization + 'Str/LdrAdd/Sub2Str/Ldr Postindex done', p);
  473. p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
  474. if taicpu(hp1).oper[2]^.typ=top_const then
  475. begin
  476. if taicpu(hp1).opcode=A_ADD then
  477. p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
  478. else
  479. p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
  480. end
  481. else
  482. begin
  483. p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
  484. if taicpu(hp1).opcode=A_ADD then
  485. p.oper[1]^.ref^.signindex:=1
  486. else
  487. p.oper[1]^.ref^.signindex:=-1;
  488. end;
{ The add/sub has been folded into the addressing mode; delete it. }
  489. asml.Remove(hp1);
  490. hp1.Free;
  491. Result:=true;
  492. end;
  493. end;
  { Pass-1 peephole for ADD/SUB with a constant.  Performs, in order:
      1. the shared "op reg1,... ; mov reg2,reg1" -> "op reg2,..." cleanup
         (OptPass1DataCheckMov),
      2. folding of an add/sub of a constant into the offsets of the
         following ldr/str instructions that address through the result,
      3. merging of two consecutive add/sub-with-constant into one
         (or a mov / removal when the combined constant is 0),
      4. conversion of add/sub followed by a matching load/store into a
         pre-indexed addressing mode (LookForPreindexedPattern).
    Returns True when any rewrite was applied; p may be removed/replaced. }
  function TCpuAsmOptimizer.OptPass1ADDSUB(var p: tai): Boolean;
    var
      hp1,hp2: tai;
      sign: Integer;      { +1 when both ops add the same way, -1 when they cancel }
      newvalue: TCGInt;   { merged immediate of the two add/sub instructions }
      b: byte;            { receives the rotate position from is_shifter_const }
    begin
      Result := OptPass1DataCheckMov(p);
      {
        change
        add/sub reg2,reg1,const1
        str/ldr reg3,[reg2,const2]
        dealloc reg2
        to
        str/ldr reg3,[reg1,const2+/-const1]
      }
      if (not GenerateThumbCode) and
         (taicpu(p).ops>2) and
         (taicpu(p).oper[1]^.typ = top_reg) and
         (taicpu(p).oper[2]^.typ = top_const) then
        begin
          hp1:=p;
          { Walk forward over every ldr/str that bases on reg2; the loop body
            decides whether the whole chain can be rewritten. }
          while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
            { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
            MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
            (taicpu(hp1).oper[1]^.typ = top_ref) and
            (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
            { don't optimize if the register is stored/overwritten }
            (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
            (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
            (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
            { new offset must be valid: either in the range of 8 or 12 bit, depend on the
              ldr postfix }
            (((taicpu(p).opcode=A_ADD) and
              isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
             ) or
             ((taicpu(p).opcode=A_SUB) and
              isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
             )
            ) do
            begin
              { neither reg1 nor reg2 might be changed inbetween }
              if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
                RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
                break;
              { reg2 must be either overwritten by the ldr or it is deallocated afterwards }
              if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
                assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
                begin
                  { remember last instruction }
                  hp2:=hp1;
                  DebugMsg(SPeepholeOptimization + 'Add/SubLdr2Ldr done', p);
                  hp1:=p;
                  { fix all ldr/str: rebase every load/store in the chain onto
                    reg1 and fold the add/sub constant into its offset }
                  while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
                    begin
                      taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
                      if taicpu(p).opcode=A_ADD then
                        inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
                      else
                        dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
                      if hp1=hp2 then
                        break;
                    end;
                  RemoveCurrentP(p);
                  result:=true;
                  Exit;
                end;
            end;
        end;
      {
        optimize
        add/sub rx,ry,const1
        add/sub rx,rx,const2
        into
        add/sub rx,ry,const1+/-const
        or
        mov rx,ry if const1+/-const=0
        or
        remove it, if rx=ry and const1+/-const=0
        check if the first operation has no postfix and condition
      }
      if MatchInstruction(p,[A_ADD,A_SUB],[C_None],[PF_None]) and
        MatchOptype(taicpu(p),top_reg,top_reg,top_const) and
        GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
        MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
        MatchOptype(taicpu(hp1),top_reg,top_reg,top_const) and
        MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[0]^) and
        MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[1]^) then
        begin
          { opposite opcodes cancel, equal opcodes accumulate }
          sign:=1;
          if (taicpu(p).opcode=A_SUB) xor (taicpu(hp1).opcode=A_SUB) then
            sign:=-1;
          newvalue:=taicpu(p).oper[2]^.val+sign*taicpu(hp1).oper[2]^.val;
          { the merged constant must still be encodable as an immediate
            for the target instruction set }
          if (not(GenerateThumbCode) and is_shifter_const(newvalue,b)) or
            (GenerateThumbCode and is_thumb_imm(newvalue)) then
            begin
              DebugMsg(SPeepholeOptimization + 'Merge Add/Sub done', p);
              taicpu(p).oper[2]^.val:=newvalue;
              RemoveInstruction(hp1);
              Result:=true;
              if newvalue=0 then
                begin
                  if MatchOperand(taicpu(p).oper[0]^,taicpu(p).oper[1]^) then
                    RemoveCurrentP(p)
                  else
                    begin
                      { add/sub rx,ry,#0 -> mov rx,ry }
                      taicpu(p).opcode:=A_MOV;
                      taicpu(p).ops:=2;
                    end;
                  Exit;
                end;
            end;
        end;
      { fold the add/sub into a later load/store as a pre-indexed write-back }
      if (taicpu(p).condition = C_None) and
        (taicpu(p).oppostfix = PF_None) and
        LookForPreindexedPattern(taicpu(p)) then
        begin
          DebugMsg(SPeepholeOptimization + 'Add/Sub to Preindexed done', p);
          RemoveCurrentP(p);
          Result:=true;
          Exit;
        end;
    end;
  { Pass-1 peephole for MUL: after the shared mov cleanup, fuses a multiply
    whose result dies in a following add/sub into a single MLA/MLS
    (multiply-accumulate / multiply-subtract) instruction.
    Returns True when a rewrite was applied; on fusion p is removed and the
    add/sub instruction is rewritten in place. }
  function TCpuAsmOptimizer.OptPass1MUL(var p: tai): Boolean;
    var
      hp1: tai;
      oldreg: tregister;  { the accumulator operand of the original add }
    begin
      Result := OptPass1DataCheckMov(p);
      {
        Turn
        mul reg0, z,w
        sub/add x, y, reg0
        dealloc reg0
        into
        mls/mla x,z,w,y
      }
      if (taicpu(p).condition = C_None) and
         (taicpu(p).oppostfix = PF_None) and
         (taicpu(p).ops=3) and
         (taicpu(p).oper[0]^.typ = top_reg) and
         (taicpu(p).oper[1]^.typ = top_reg) and
         (taicpu(p).oper[2]^.typ = top_reg) and
         GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
         MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
         { the multiply operands must still hold their values at hp1 }
         (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
         (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
         { MLA needs >= ARMv4, MLS only exists from ARMv6T2/ARMv7 on }
         (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
          ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
         // CPUs before ARMv6 don't recommend having the same Rd and Rm for MLA.
         // TODO: A workaround would be to swap Rm and Rs
         (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
         { the mul result must feed the add/sub: either as the third operand,
           or (add only, since sub is not commutative) as the second; the
           2-operand form "add x,reg0" is also accepted }
         (((taicpu(hp1).ops=3) and
           (taicpu(hp1).oper[2]^.typ=top_reg) and
           ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
             (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
            ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
             (taicpu(hp1).opcode=A_ADD) and
             (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
          ((taicpu(hp1).ops=2) and
           (taicpu(hp1).oper[1]^.typ=top_reg) and
           MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
         { reg0 must not be live after the add/sub }
         (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
        begin
          if taicpu(hp1).opcode=A_ADD then
            begin
              taicpu(hp1).opcode:=A_MLA;
              { pick whichever add operand is NOT the mul result as accumulator }
              if taicpu(hp1).ops=3 then
                begin
                  if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
                    oldreg:=taicpu(hp1).oper[2]^.reg
                  else
                    oldreg:=taicpu(hp1).oper[1]^.reg;
                end
              else
                oldreg:=taicpu(hp1).oper[0]^.reg;
              { MLA x,z,w,y: operands 1/2 are the multiply factors, 3 the accumulator }
              taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
              taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
              taicpu(hp1).loadreg(3,oldreg);
              DebugMsg(SPeepholeOptimization + 'MulAdd2MLA done', p);
            end
          else
            begin
              taicpu(hp1).opcode:=A_MLS;
              { MLS x,z,w,y computes x := y - z*w; the minuend goes to operand 3.
                NOTE: operand 3 is loaded before operand 1 is overwritten below }
              taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
              if taicpu(hp1).ops=2 then
                taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
              else
                taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
              taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
              DebugMsg(SPeepholeOptimization + 'MulSub2MLS done', p);
              { extend the liveness of all source registers up to the MLS }
              AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,UsedRegs);
              AllocRegBetween(taicpu(hp1).oper[2]^.reg,p,hp1,UsedRegs);
              AllocRegBetween(taicpu(hp1).oper[3]^.reg,p,hp1,UsedRegs);
            end;
          taicpu(hp1).ops:=4;
          RemoveCurrentP(p);
          Result := True;
          Exit;
        end
    end;
  { Pass-1 peephole for AND: run the generic "op reg1,... ; mov reg2,reg1"
    cleanup first, then the inherited AND optimisations; report True when
    either of them changed the code. }
  function TCpuAsmOptimizer.OptPass1And(var p: tai): Boolean;
    var
      MovRemoved: Boolean;
    begin
      MovRemoved := OptPass1DataCheckMov(p);
      Result := inherited OptPass1And(p);
      if MovRemoved then
        Result := True;
    end;
  { Shared helper for data-processing opcodes:
      change
        op  reg1, ...
        mov reg2, reg1
      to
        op  reg2, ...
    by delegating to RemoveSuperfluousMove once the next user of reg1 is found.
    Only applies to instructions with at least three operands. }
  function TCpuAsmOptimizer.OptPass1DataCheckMov(var p: tai): Boolean;
    var
      NextInstr: tai;
    begin
      Result := False;
      if (taicpu(p).ops >= 3) and
         GetNextInstructionUsingReg(p, NextInstr, taicpu(p).oper[0]^.reg) then
        Result := RemoveSuperfluousMove(p, NextInstr, 'DataMov2Data');
    end;
  { Pass-1 peephole for CMP with a constant operand:
      change
        cmp   reg,const1
        moveq reg,const1
        movne reg,const2
      to
        cmp   reg,const1
        movne reg,const2
    i.e. drop each conditional mov that merely re-writes the value the
    compare already established (delegated to RemoveRedundantMove).
    Returns True when at least one mov was removed.
    Fix: removed the local variable hp_last, which was declared but never
    used anywhere in this function. }
  function TCpuAsmOptimizer.OptPass1CMP(var p: tai): Boolean;
    var
      hp1, hp2: tai;
      MovRem1, MovRem2: Boolean;
    begin
      Result := False;
      { These optimizations can be applied only to the currently enabled operations because
        the other operations do not update all flags and FPC does not track flag usage }
      if (taicpu(p).condition = C_None) and
        (taicpu(p).oper[1]^.typ = top_const) and
        GetNextInstruction(p, hp1) then
        begin
          if MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
            (taicpu(hp1).oper[1]^.typ = top_const) and
            GetNextInstruction(hp1, hp2) and
            MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
            (taicpu(hp2).oper[1]^.typ = top_const) then
            begin
              MovRem1 := RemoveRedundantMove(p, hp1, asml);
              MovRem2 := RemoveRedundantMove(p, hp2, asml);
              Result := MovRem1 or MovRem2;
              { Make sure that hp1 is still the next instruction after p }
              if MovRem1 then
                if MovRem2 then
                  begin
                    { both movs gone - refetch the successor of p }
                    if not GetNextInstruction(p, hp1) then
                      Exit;
                  end
                else
                  { only the first mov was removed, so hp2 now follows p }
                  hp1 := hp2;
            end;
        end;
    end;
  { Pass-1 peephole for LDR.  After the inherited LDR pass, tries in order:
      1. ldr reg1,ref / ldr reg2,same-ref  -> ldr reg1,ref (+ mov reg2,reg1),
      2. ldr reg,[r,#ofs] / ldr reg+1,[r,#ofs+4] -> ldrd (even/odd pair),
      3. ldrb dst1 / and dst2,dst1,#255 -> ldrb dst2 (the byte load already
         zero-extends),
      4. post-indexed addressing conversion (LookForPostindexedPattern),
      5. removal of a superfluous mov after the ldr.
    Returns True when any rewrite was applied. }
  function TCpuAsmOptimizer.OptPass1LDR(var p: tai): Boolean;
    var
      hp1: tai;
    begin
      Result := inherited OptPass1LDR(p);
      if Result then
        Exit;
      { change
        ldr reg1,ref
        ldr reg2,ref
        into ...
      }
      if (taicpu(p).oper[1]^.typ = top_ref) and
         (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
         GetNextInstruction(p,hp1) and
         { ldrd is not allowed here }
         MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
        begin
          {
            ...
            ldr reg1,ref
            mov reg2,reg1
          }
          if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
             RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
             { the first load must not clobber a register the shared reference uses }
             (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
             (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
             (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
            begin
              if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
                begin
                  { same target register: the second load is a pure duplicate }
                  DebugMsg(SPeepholeOptimization + 'LdrLdr2Ldr done', hp1);
                  asml.remove(hp1);
                  hp1.free;
                end
              else
                begin
                  { different target: re-use the loaded value with a mov }
                  DebugMsg(SPeepholeOptimization + 'LdrLdr2LdrMov done', hp1);
                  taicpu(hp1).opcode:=A_MOV;
                  taicpu(hp1).oppostfix:=PF_None;
                  taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
                end;
              result := true;
            end
          {
            ...
            ldrd reg1,reg1+1,ref
          }
          else if (GenerateARMCode or GenerateThumb2Code) and
             (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
             { ldrd does not allow any postfixes ... }
             (taicpu(p).oppostfix=PF_None) and
             { ldrd needs an even-numbered first register and its successor }
             not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
             (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
             { ldr ensures that either base or index contain no register, else ldr wouldn't
               use an offset either
             }
             (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
             (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
             { the two loads must address adjacent words }
             (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
             (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
             AlignedToQWord(taicpu(p).oper[1]^.ref^) then
            begin
              DebugMsg(SPeepholeOptimization + 'LdrLdr2Ldrd done', p);
              taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
              taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
              taicpu(p).ops:=3;
              taicpu(p).oppostfix:=PF_D;
              asml.remove(hp1);
              hp1.free;
              result:=true;
            end;
        end;
      {
        Change
        ldrb dst1, [REF]
        and  dst2, dst1, #255
        into
        ldrb dst2, [ref]
      }
      if not(GenerateThumbCode) and
         (taicpu(p).oppostfix=PF_B) and
         GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
         MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
         (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
         (taicpu(hp1).oper[2]^.typ = top_const) and
         (taicpu(hp1).oper[2]^.val = $FF) and
         not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
         RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
        begin
          DebugMsg(SPeepholeOptimization + 'LdrbAnd2Ldrb done', p);
          taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
          asml.remove(hp1);
          hp1.free;
          result:=true;
        end;
      Result:=LookForPostindexedPattern(taicpu(p)) or Result;
      { Remove superfluous mov after ldr
        changes
        ldr reg1, ref
        mov reg2, reg1
        to
        ldr reg2, ref
        conditions are:
          * no ldrd usage
          * reg1 must be released after mov
          * mov can not contain shifterops
          * ldr+mov have the same conditions
          * mov does not set flags
      }
      if (taicpu(p).oppostfix<>PF_D) and
         GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
         RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
        Result:=true;
    end;
  { Pass-1 peephole for STM: collapses a full save-LR / call / restore-PC
    sequence around a single BL into a plain branch (tail-call style):
        stmfd r13!,[r14]
        sub   r13,r13,#n
        bl    abc
        add   r13,r13,#n
        ldmfd r13!,[r15]
      ->
        b     abc
    Only done when thumb interworking is off (a plain B cannot switch
    instruction sets).  Returns True when the rewrite was applied; p is
    replaced by the branch instruction. }
  function TCpuAsmOptimizer.OptPass1STM(var p: tai): Boolean;
    var
      hp1, hp2, hp3, hp4: tai;
    begin
      Result := False;
      {
        change
        stmfd r13!,[r14]
        sub r13,r13,#4
        bl abc
        add r13,r13,#4
        ldmfd r13!,[r15]
        into
        b abc
      }
      if not(ts_thumb_interworking in current_settings.targetswitches) and
        { p must be exactly "stmfd sp!,[lr]" }
        (taicpu(p).condition = C_None) and
        (taicpu(p).oppostfix = PF_FD) and
        (taicpu(p).oper[0]^.typ = top_ref) and
        (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
        (taicpu(p).oper[0]^.ref^.base=NR_NO) and
        (taicpu(p).oper[0]^.ref^.offset=0) and
        (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
        (taicpu(p).oper[1]^.typ = top_regset) and
        (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
        { hp1: "sub sp,sp,#const" }
        GetNextInstruction(p, hp1) and
        MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
        (taicpu(hp1).oper[0]^.typ = top_reg) and
        (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
        MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
        (taicpu(hp1).oper[2]^.typ = top_const) and
        { hp2: the call itself (entry/exit markers may sit inbetween) }
        GetNextInstruction(hp1, hp2) and
        SkipEntryExitMarker(hp2, hp2) and
        MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
        (taicpu(hp2).oper[0]^.typ = top_ref) and
        { hp3: "add sp,sp,#const" with the same constant as the sub }
        GetNextInstruction(hp2, hp3) and
        SkipEntryExitMarker(hp3, hp3) and
        MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
        MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
        MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
        MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
        { hp4: "ldmfd sp!,[pc]" - the return through the saved LR }
        GetNextInstruction(hp3, hp4) and
        MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
        MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
        (taicpu(hp4).oper[1]^.typ = top_regset) and
        (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
        begin
          { drop the stack bookkeeping and turn the call into a branch }
          asml.Remove(hp1);
          asml.Remove(hp3);
          asml.Remove(hp4);
          taicpu(hp2).opcode:=A_B;
          hp1.free;
          hp3.free;
          hp4.free;
          RemoveCurrentp(p, hp2);
          DebugMsg(SPeepholeOptimization + 'Bl2B done', p);
          Result := True;
        end;
    end;
  { Pass-1 peephole for STR.  After the inherited STR pass, tries in order:
      1. str reg1,ref / ldr reg2,same-ref -> str reg1,ref (+ mov reg2,reg1),
         i.e. forward the stored value instead of reloading it,
      2. str reg,[r,#ofs] / str reg+1,[r,#ofs+4] -> strd (even/odd pair),
      3. post-indexed addressing conversion (LookForPostindexedPattern).
    Returns True when any rewrite was applied. }
  function TCpuAsmOptimizer.OptPass1STR(var p: tai): Boolean;
    var
      hp1: tai;
    begin
      Result := inherited OptPass1STR(p);
      if Result then
        Exit;
      { Common conditions }
      if (taicpu(p).oper[1]^.typ = top_ref) and
         (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
         (taicpu(p).oppostfix=PF_None) then
        begin
          { change
            str reg1,ref
            ldr reg2,ref
            into
            str reg1,ref
            mov reg2,reg1
          }
          if (taicpu(p).condition=C_None) and
             GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
             MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
             (taicpu(hp1).oper[1]^.typ=top_ref) and
             (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
             { the stored value and the registers forming the address must
               still be unchanged when the ldr is reached }
             not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
             ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
             ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
            begin
              if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
                begin
                  { reloading into the same register: the ldr is a no-op }
                  DebugMsg(SPeepholeOptimization + 'StrLdr2StrMov 1 done', hp1);
                  asml.remove(hp1);
                  hp1.free;
                end
              else
                begin
                  { different register: replace the reload with a mov }
                  taicpu(hp1).opcode:=A_MOV;
                  taicpu(hp1).oppostfix:=PF_None;
                  taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
                  DebugMsg(SPeepholeOptimization + 'StrLdr2StrMov 2 done', hp1);
                end;
              result := True;
            end
          { change
            str reg1,ref
            str reg2,ref
            into
            strd reg1,reg2,ref
          }
          else if (GenerateARMCode or GenerateThumb2Code) and
             (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
             { strd needs an even-numbered first register and its successor }
             not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
             (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
             AlignedToQWord(taicpu(p).oper[1]^.ref^) and
             GetNextInstruction(p,hp1) and
             MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
             (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
             { str ensures that either base or index contain no register, else ldr wouldn't
               use an offset either
             }
             (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
             (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
             { the two stores must address adjacent words }
             (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) then
            begin
              DebugMsg(SPeepholeOptimization + 'StrStr2Strd done', p);
              taicpu(p).oppostfix:=PF_D;
              taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
              taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
              taicpu(p).ops:=3;
              asml.remove(hp1);
              hp1.free;
              result:=true;
            end;
        end;
      Result:=LookForPostindexedPattern(taicpu(p)) or Result;
    end;
  1008. function TCpuAsmOptimizer.OptPass1MOV(var p: tai): Boolean;
  1009. var
  1010. hp1, hpfar1, hp2: tai;
  1011. i, i2: longint;
  1012. tempop: tasmop;
  1013. dealloc: tai_regalloc;
  1014. begin
  1015. Result := False;
  1016. hp1 := nil;
  1017. { fold
  1018. mov reg1,reg0, shift imm1
  1019. mov reg1,reg1, shift imm2
  1020. }
  1021. if (taicpu(p).ops=3) and
  1022. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1023. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1024. getnextinstruction(p,hp1) and
  1025. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1026. (taicpu(hp1).ops=3) and
  1027. MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
  1028. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1029. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1030. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
  1031. begin
  1032. { fold
  1033. mov reg1,reg0, lsl 16
  1034. mov reg1,reg1, lsr 16
  1035. strh reg1, ...
  1036. dealloc reg1
  1037. to
  1038. strh reg1, ...
  1039. dealloc reg1
  1040. }
  1041. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1042. (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
  1043. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
  1044. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
  1045. getnextinstruction(hp1,hp2) and
  1046. MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
  1047. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
  1048. begin
  1049. TransferUsedRegs(TmpUsedRegs);
  1050. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1051. UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
  1052. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
  1053. begin
  1054. DebugMsg(SPeepholeOptimization + 'Removed superfluous 16 Bit zero extension', hp1);
  1055. taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
  1056. asml.remove(hp1);
  1057. hp1.free;
  1058. RemoveCurrentP(p, hp2);
  1059. Result:=true;
  1060. Exit;
  1061. end;
  1062. end
  1063. { fold
  1064. mov reg1,reg0, shift imm1
  1065. mov reg1,reg1, shift imm2
  1066. to
  1067. mov reg1,reg0, shift imm1+imm2
  1068. }
  1069. else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
  1070. { asr makes no use after a lsr, the asr can be foled into the lsr }
  1071. ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR) ) then
  1072. begin
  1073. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1074. { avoid overflows }
  1075. if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
  1076. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  1077. SM_ROR:
  1078. taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
  1079. SM_ASR:
  1080. taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
  1081. SM_LSR,
  1082. SM_LSL:
  1083. begin
  1084. hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
  1085. InsertLLItem(p.previous, p.next, hp2);
  1086. p.free;
  1087. p:=hp2;
  1088. end;
  1089. else
  1090. internalerror(2008072803);
  1091. end;
  1092. DebugMsg(SPeepholeOptimization + 'ShiftShift2Shift 1 done', p);
  1093. asml.remove(hp1);
  1094. hp1.free;
  1095. hp1 := nil;
  1096. result := true;
  1097. end
  1098. { fold
  1099. mov reg1,reg0, shift imm1
  1100. mov reg1,reg1, shift imm2
  1101. mov reg1,reg1, shift imm3 ...
  1102. mov reg2,reg1, shift imm3 ...
  1103. }
  1104. else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
  1105. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1106. (taicpu(hp2).ops=3) and
  1107. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1108. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
  1109. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1110. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
  1111. begin
  1112. { mov reg1,reg0, lsl imm1
  1113. mov reg1,reg1, lsr/asr imm2
  1114. mov reg2,reg1, lsl imm3 ...
  1115. to
  1116. mov reg1,reg0, lsl imm1
  1117. mov reg2,reg1, lsr/asr imm2-imm3
  1118. if
  1119. imm1>=imm2
  1120. }
  1121. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1122. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1123. (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1124. begin
  1125. if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1126. begin
  1127. if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
  1128. not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1129. begin
  1130. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 1a done', p);
  1131. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1132. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1133. asml.remove(hp1);
  1134. asml.remove(hp2);
  1135. hp1.free;
  1136. hp2.free;
  1137. if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
  1138. begin
  1139. taicpu(p).freeop(1);
  1140. taicpu(p).freeop(2);
  1141. taicpu(p).loadconst(1,0);
  1142. end;
  1143. result := true;
  1144. Exit;
  1145. end;
  1146. end
  1147. else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1148. begin
  1149. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 1b done', p);
  1150. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
  1151. taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1152. asml.remove(hp2);
  1153. hp2.free;
  1154. result := true;
  1155. Exit;
  1156. end;
  1157. end
  1158. { mov reg1,reg0, lsr/asr imm1
  1159. mov reg1,reg1, lsl imm2
  1160. mov reg1,reg1, lsr/asr imm3 ...
  1161. if imm3>=imm1 and imm2>=imm1
  1162. to
  1163. mov reg1,reg0, lsl imm2-imm1
  1164. mov reg1,reg1, lsr/asr imm3 ...
  1165. }
  1166. else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1167. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1168. (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
  1169. (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1170. begin
  1171. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
  1172. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1173. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 2 done', p);
  1174. if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
  1175. begin
  1176. taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
  1177. asml.remove(hp1);
  1178. hp1.free;
  1179. end;
  1180. RemoveCurrentp(p);
  1181. result := true;
  1182. Exit;
  1183. end;
  1184. end;
  1185. end;
  1186. { All the optimisations from this point on require GetNextInstructionUsingReg
  1187. to return True }
  1188. while (
  1189. GetNextInstructionUsingReg(p, hpfar1, taicpu(p).oper[0]^.reg) and
  1190. (hpfar1.typ = ait_instruction)
  1191. ) do
  1192. begin
  1193. { Change the common
  1194. mov r0, r0, lsr #xxx
  1195. and r0, r0, #yyy/bic r0, r0, #xxx
  1196. and remove the superfluous and/bic if possible
  1197. This could be extended to handle more cases.
  1198. }
  1199. { Change
  1200. mov rx, ry, lsr/ror #xxx
  1201. uxtb/uxth rz,rx/and rz,rx,0xFF
  1202. dealloc rx
  1203. to
  1204. uxtb/uxth rz,ry,ror #xxx
  1205. }
  1206. if (GenerateThumb2Code) and
  1207. (taicpu(p).ops=3) and
  1208. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1209. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1210. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
  1211. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1212. begin
  1213. if MatchInstruction(hpfar1, A_UXTB, [C_None], [PF_None]) and
  1214. (taicpu(hpfar1).ops = 2) and
  1215. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1216. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1217. begin
  1218. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1219. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1220. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1221. taicpu(hpfar1).ops := 3;
  1222. if not Assigned(hp1) then
  1223. GetNextInstruction(p,hp1);
  1224. RemoveCurrentP(p, hp1);
  1225. result:=true;
  1226. exit;
  1227. end
  1228. else if MatchInstruction(hpfar1, A_UXTH, [C_None], [PF_None]) and
  1229. (taicpu(hpfar1).ops=2) and
  1230. (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
  1231. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1232. begin
  1233. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1234. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1235. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1236. taicpu(hpfar1).ops := 3;
  1237. if not Assigned(hp1) then
  1238. GetNextInstruction(p,hp1);
  1239. RemoveCurrentP(p, hp1);
  1240. result:=true;
  1241. exit;
  1242. end
  1243. else if MatchInstruction(hpfar1, A_AND, [C_None], [PF_None]) and
  1244. (taicpu(hpfar1).ops = 3) and
  1245. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1246. (taicpu(hpfar1).oper[2]^.val = $FF) and
  1247. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1248. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1249. begin
  1250. taicpu(hpfar1).ops := 3;
  1251. taicpu(hpfar1).opcode := A_UXTB;
  1252. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1253. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1254. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1255. if not Assigned(hp1) then
  1256. GetNextInstruction(p,hp1);
  1257. RemoveCurrentP(p, hp1);
  1258. result:=true;
  1259. exit;
  1260. end;
  1261. end;
  1262. { 2-operald mov optimisations }
  1263. if (taicpu(p).ops = 2) then
  1264. begin
  1265. {
  1266. This removes the mul from
  1267. mov rX,0
  1268. ...
  1269. mul ...,rX,...
  1270. }
  1271. if (taicpu(p).oper[1]^.typ = top_const) then
  1272. begin
  1273. (* if false and
  1274. (taicpu(p).oper[1]^.val=0) and
  1275. MatchInstruction(hpfar1, [A_MUL,A_MLA], [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1276. (((taicpu(hpfar1).oper[1]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^)) or
  1277. ((taicpu(hpfar1).oper[2]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[2]^))) then
  1278. begin
  1279. TransferUsedRegs(TmpUsedRegs);
  1280. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1281. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1282. DebugMsg(SPeepholeOptimization + 'MovMUL/MLA2Mov0 done', p);
  1283. if taicpu(hpfar1).opcode=A_MUL then
  1284. taicpu(hpfar1).loadconst(1,0)
  1285. else
  1286. taicpu(hpfar1).loadreg(1,taicpu(hpfar1).oper[3]^.reg);
  1287. taicpu(hpfar1).ops:=2;
  1288. taicpu(hpfar1).opcode:=A_MOV;
  1289. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1290. RemoveCurrentP(p);
  1291. Result:=true;
  1292. exit;
  1293. end
  1294. else*) if (taicpu(p).oper[1]^.val=0) and
  1295. MatchInstruction(hpfar1, A_MLA, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1296. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[3]^) then
  1297. begin
  1298. TransferUsedRegs(TmpUsedRegs);
  1299. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1300. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1301. DebugMsg(SPeepholeOptimization + 'MovMLA2MUL 1 done', p);
  1302. taicpu(hpfar1).ops:=3;
  1303. taicpu(hpfar1).opcode:=A_MUL;
  1304. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1305. begin
  1306. RemoveCurrentP(p);
  1307. Result:=true;
  1308. end;
  1309. exit;
  1310. end
  1311. {
  1312. This changes the very common
  1313. mov r0, #0
  1314. str r0, [...]
  1315. mov r0, #0
  1316. str r0, [...]
  1317. and removes all superfluous mov instructions
  1318. }
  1319. else if (taicpu(hpfar1).opcode=A_STR) then
  1320. begin
  1321. hp1 := hpfar1;
  1322. while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
  1323. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1324. GetNextInstruction(hp1, hp2) and
  1325. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1326. (taicpu(hp2).ops = 2) and
  1327. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
  1328. MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
  1329. begin
  1330. DebugMsg(SPeepholeOptimization + 'MovStrMov done', hp2);
  1331. GetNextInstruction(hp2,hp1);
  1332. asml.remove(hp2);
  1333. hp2.free;
  1334. result:=true;
  1335. if not assigned(hp1) then break;
  1336. end;
  1337. if Result then
  1338. Exit;
  1339. end;
  1340. end;
  1341. {
  1342. This removes the first mov from
  1343. mov rX,...
  1344. mov rX,...
  1345. }
  1346. if taicpu(hpfar1).opcode=A_MOV then
  1347. begin
  1348. hp1 := p;
  1349. while MatchInstruction(hpfar1, A_MOV, [taicpu(hp1).condition], [taicpu(hp1).oppostfix]) and
  1350. (taicpu(hpfar1).ops = 2) and
  1351. MatchOperand(taicpu(hp1).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1352. { don't remove the first mov if the second is a mov rX,rX }
  1353. not(MatchOperand(taicpu(hpfar1).oper[0]^, taicpu(hpfar1).oper[1]^)) do
  1354. begin
  1355. { Defer removing the first p until after the while loop }
  1356. if p <> hp1 then
  1357. begin
  1358. DebugMsg(SPeepholeOptimization + 'MovMov done', hp1);
  1359. asml.remove(hp1);
  1360. hp1.free;
  1361. end;
  1362. hp1:=hpfar1;
  1363. GetNextInstruction(hpfar1,hpfar1);
  1364. result:=true;
  1365. if not assigned(hpfar1) then
  1366. Break;
  1367. end;
  1368. if Result then
  1369. begin
  1370. DebugMsg(SPeepholeOptimization + 'MovMov done', p);
  1371. RemoveCurrentp(p);
  1372. Exit;
  1373. end;
  1374. end;
  1375. if RedundantMovProcess(p,hpfar1) then
  1376. begin
  1377. Result:=true;
  1378. { p might not point at a mov anymore }
  1379. exit;
  1380. end;
  1381. { If hpfar1 is nil after the call to RedundantMovProcess, it is
  1382. because it would have become a dangling pointer, so reinitialise it. }
  1383. if not Assigned(hpfar1) then
  1384. Continue;
  1385. { Fold the very common sequence
  1386. mov regA, regB
  1387. ldr* regA, [regA]
  1388. to
  1389. ldr* regA, [regB]
  1390. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1391. }
  1392. if
  1393. // Make sure that Thumb code doesn't propagate a high register into a reference
  1394. (
  1395. (
  1396. GenerateThumbCode and
  1397. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)
  1398. ) or (not GenerateThumbCode)
  1399. ) and
  1400. (taicpu(p).oper[1]^.typ = top_reg) and
  1401. (taicpu(p).oppostfix = PF_NONE) and
  1402. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1403. (taicpu(hpfar1).oper[1]^.typ = top_ref) and
  1404. { We can change the base register only when the instruction uses AM_OFFSET }
  1405. ((taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1406. ((taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1407. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1408. ) and
  1409. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1410. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1411. begin
  1412. DebugMsg(SPeepholeOptimization + 'MovLdr2Ldr done', hpfar1);
  1413. if (taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1414. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1415. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1416. if taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1417. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1418. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1419. if Assigned(dealloc) then
  1420. begin
  1421. asml.remove(dealloc);
  1422. asml.InsertAfter(dealloc,hpfar1);
  1423. end;
  1424. if (not Assigned(hp1)) or (p=hp1) then
  1425. GetNextInstruction(p, hp1);
  1426. RemoveCurrentP(p, hp1);
  1427. result:=true;
  1428. Exit;
  1429. end
  1430. end
  1431. { 3-operald mov optimisations }
  1432. else if (taicpu(p).ops = 3) then
  1433. begin
  1434. if (taicpu(p).oper[2]^.typ = top_shifterop) and
  1435. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1436. (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
  1437. (taicpu(hpfar1).ops>=1) and
  1438. (taicpu(hpfar1).oper[0]^.typ=top_reg) and
  1439. (not RegModifiedBetween(taicpu(hpfar1).oper[0]^.reg, p, hpfar1)) and
  1440. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1441. begin
  1442. if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24 ) and
  1443. MatchInstruction(hpfar1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1444. (taicpu(hpfar1).ops=3) and
  1445. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1446. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1447. { Check if the AND actually would only mask out bits being already zero because of the shift
  1448. }
  1449. ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hpfar1).oper[2]^.val) =
  1450. ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
  1451. begin
  1452. DebugMsg(SPeepholeOptimization + 'LsrAnd2Lsr done', hpfar1);
  1453. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1454. asml.remove(hpfar1);
  1455. hpfar1.free;
  1456. result:=true;
  1457. Exit;
  1458. end
  1459. else if MatchInstruction(hpfar1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1460. (taicpu(hpfar1).ops=3) and
  1461. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1462. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1463. { Check if the BIC actually would only mask out bits beeing already zero because of the shift }
  1464. (taicpu(hpfar1).oper[2]^.val<>0) and
  1465. (BsfDWord(taicpu(hpfar1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1466. begin
  1467. DebugMsg(SPeepholeOptimization + 'LsrBic2Lsr done', hpfar1);
  1468. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1469. asml.remove(hpfar1);
  1470. hpfar1.free;
  1471. result:=true;
  1472. Exit;
  1473. end;
  1474. end;
  1475. { This folds shifterops into following instructions
  1476. mov r0, r1, lsl #8
  1477. add r2, r3, r0
  1478. to
  1479. add r2, r3, r1, lsl #8
  1480. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1481. }
  1482. if (taicpu(p).oper[1]^.typ = top_reg) and
  1483. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1484. (taicpu(p).oppostfix = PF_NONE) and
  1485. MatchInstruction(hpfar1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1486. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1487. A_CMP, A_CMN],
  1488. [taicpu(p).condition], [PF_None]) and
  1489. (not ((GenerateThumb2Code) and
  1490. (taicpu(hpfar1).opcode in [A_SBC]) and
  1491. (((taicpu(hpfar1).ops=3) and
  1492. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^.reg)) or
  1493. ((taicpu(hpfar1).ops=2) and
  1494. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^.reg))))) and
  1495. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) and
  1496. (taicpu(hpfar1).ops >= 2) and
  1497. {Currently we can't fold into another shifterop}
  1498. (taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^.typ = top_reg) and
  1499. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1500. NR_DEFAULTFLAGS for modification}
  1501. (
  1502. {Everything is fine if we don't use RRX}
  1503. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1504. (
  1505. {If it is RRX, then check if we're just accessing the next instruction}
  1506. Assigned(hp1) and
  1507. (hpfar1 = hp1)
  1508. )
  1509. ) and
  1510. { reg1 might not be modified inbetween }
  1511. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1512. { The shifterop can contain a register, might not be modified}
  1513. (
  1514. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1515. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hpfar1))
  1516. ) and
  1517. (
  1518. {Only ONE of the two src operands is allowed to match}
  1519. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-2]^) xor
  1520. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^)
  1521. ) then
  1522. begin
  1523. if taicpu(hpfar1).opcode in [A_TST, A_TEQ, A_CMN] then
  1524. I2:=0
  1525. else
  1526. I2:=1;
  1527. for I:=I2 to taicpu(hpfar1).ops-1 do
  1528. if MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[I]^.reg) then
  1529. begin
  1530. { If the parameter matched on the second op from the RIGHT
  1531. we have to switch the parameters, this will not happen for CMP
  1532. were we're only evaluating the most right parameter
  1533. }
  1534. if I <> taicpu(hpfar1).ops-1 then
  1535. begin
  1536. {The SUB operators need to be changed when we swap parameters}
  1537. case taicpu(hpfar1).opcode of
  1538. A_SUB: tempop:=A_RSB;
  1539. A_SBC: tempop:=A_RSC;
  1540. A_RSB: tempop:=A_SUB;
  1541. A_RSC: tempop:=A_SBC;
  1542. else tempop:=taicpu(hpfar1).opcode;
  1543. end;
  1544. if taicpu(hpfar1).ops = 3 then
  1545. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1546. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[2]^.reg,
  1547. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1548. else
  1549. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1550. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1551. taicpu(p).oper[2]^.shifterop^);
  1552. end
  1553. else
  1554. if taicpu(hpfar1).ops = 3 then
  1555. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1556. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[1]^.reg,
  1557. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1558. else
  1559. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1560. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1561. taicpu(p).oper[2]^.shifterop^);
  1562. if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
  1563. AllocRegBetween(taicpu(p).oper[2]^.shifterop^.rs,p,hpfar1,UsedRegs);
  1564. AllocRegBetween(taicpu(p).oper[1]^.reg,p,hpfar1,UsedRegs);
  1565. asml.insertbefore(hp2, hpfar1);
  1566. asml.remove(hpfar1);
  1567. hpfar1.free;
  1568. DebugMsg(SPeepholeOptimization + 'FoldShiftProcess done', hp2);
  1569. if not Assigned(hp1) then
  1570. GetNextInstruction(p, hp1)
  1571. else if hp1 = hpfar1 then
  1572. { If hp1 = hpfar1, then it's a dangling pointer }
  1573. hp1 := hp2;
  1574. RemoveCurrentP(p, hp1);
  1575. Result:=true;
  1576. Exit;
  1577. end;
  1578. end;
  1579. {
  1580. Fold
  1581. mov r1, r1, lsl #2
  1582. ldr/ldrb r0, [r0, r1]
  1583. to
  1584. ldr/ldrb r0, [r0, r1, lsl #2]
  1585. XXX: This still needs some work, as we quite often encounter something like
  1586. mov r1, r2, lsl #2
  1587. add r2, r3, #imm
  1588. ldr r0, [r2, r1]
  1589. which can't be folded because r2 is overwritten between the shift and the ldr.
  1590. We could try to shuffle the registers around and fold it into.
  1591. add r1, r3, #imm
  1592. ldr r0, [r1, r2, lsl #2]
  1593. }
  1594. if (not(GenerateThumbCode)) and
  1595. { thumb2 allows only lsl #0..#3 }
  1596. (not(GenerateThumb2Code) or
  1597. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1598. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1599. )
  1600. ) and
  1601. (taicpu(p).oper[1]^.typ = top_reg) and
  1602. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1603. { RRX is tough to handle, because it requires tracking the C-Flag,
  1604. it is also extremly unlikely to be emitted this way}
  1605. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1606. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1607. (taicpu(p).oppostfix = PF_NONE) and
  1608. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1609. (MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1610. (GenerateThumb2Code and
  1611. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1612. ) and
  1613. (
  1614. {If this is address by offset, one of the two registers can be used}
  1615. ((taicpu(hpfar1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1616. (
  1617. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1618. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1619. )
  1620. ) or
  1621. {For post and preindexed only the index register can be used}
  1622. ((taicpu(hpfar1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1623. (
  1624. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1625. (taicpu(hpfar1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1626. ) and
  1627. (not GenerateThumb2Code)
  1628. )
  1629. ) and
  1630. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1631. (taicpu(hpfar1).oper[1]^.ref^.index<>NR_NO) and
  1632. (taicpu(hpfar1).oper[1]^.ref^.base<>NR_NO) and
  1633. { Only fold if there isn't another shifterop already, and offset is zero. }
  1634. (taicpu(hpfar1).oper[1]^.ref^.offset = 0) and
  1635. (taicpu(hpfar1).oper[1]^.ref^.shiftmode = SM_None) and
  1636. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1637. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1638. begin
  1639. { If the register we want to do the shift for resides in base, we need to swap that}
  1640. if (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1641. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(hpfar1).oper[1]^.ref^.index;
  1642. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1643. taicpu(hpfar1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1644. taicpu(hpfar1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1645. DebugMsg(SPeepholeOptimization + 'FoldShiftLdrStr done', hpfar1);
  1646. RemoveCurrentP(p);
  1647. Result:=true;
  1648. Exit;
  1649. end;
  1650. end;
  1651. {
  1652. Often we see shifts and then a superfluous mov to another register
  1653. In the future this might be handled in RedundantMovProcess when it uses RegisterTracking
  1654. }
  1655. if RemoveSuperfluousMove(p, hpfar1, 'MovMov2Mov') then
  1656. Result:=true;
  1657. Exit;
  1658. end;
  1659. end;
{ Folds a bitwise NOT into a following AND by turning the AND into a BIC:
    mvn reg2,reg1
    and reg3,reg4,reg2
    dealloc reg2
  to
    bic reg3,reg4,reg1
  Requires that reg2 is deallocated directly after the AND and that reg1 is
  not modified between the two instructions.  Returns True and removes the
  MVN when the transformation was applied. }
function TCpuAsmOptimizer.OptPass1MVN(var p: tai): Boolean;
  var
    hp1: tai;
  begin
    {
      change
      mvn reg2,reg1
      and reg3,reg4,reg2
      dealloc reg2
      to
      bic reg3,reg4,reg1
    }
    Result := False;
    if (taicpu(p).oper[1]^.typ = top_reg) and
      GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
      MatchInstruction(hp1,A_AND,[],[]) and
      { the MVN result may appear as either source operand of a 3-operand
        AND, or as the single source of a 2-operand AND }
      (((taicpu(hp1).ops=3) and
        (taicpu(hp1).oper[2]^.typ=top_reg) and
        (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
         MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
       ((taicpu(hp1).ops=2) and
        (taicpu(hp1).oper[1]^.typ=top_reg) and
        MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
      { the MVN result must die at the AND, otherwise it is still needed }
      assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
      { reg1 might not be modified inbetween }
      not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
      begin
        DebugMsg(SPeepholeOptimization + 'MvnAnd2Bic done', p);
        taicpu(hp1).opcode:=A_BIC;
        if taicpu(hp1).ops=3 then
          begin
            { BIC inverts only its LAST operand, so the inverted register
              must end up in operand 2 }
            if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
              taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
            taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
          end
        else
          taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
        RemoveCurrentp(p);
        Result := True;
        Exit;
      end;
  end;
  1702. function TCpuAsmOptimizer.OptPass1VMov(var p: tai): Boolean;
  1703. var
  1704. hp1: tai;
  1705. begin
  1706. {
  1707. change
  1708. vmov reg0,reg1,reg2
  1709. vmov reg1,reg2,reg0
  1710. into
  1711. vmov reg0,reg1,reg2
  1712. can be applied regardless if reg0 or reg2 is the vfp register
  1713. }
  1714. Result := False;
  1715. if (taicpu(p).ops = 3) then
  1716. while GetNextInstruction(p, hp1) and
  1717. MatchInstruction(hp1, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1718. (taicpu(hp1).ops = 3) and
  1719. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[2]^) and
  1720. MatchOperand(taicpu(p).oper[1]^, taicpu(hp1).oper[0]^) and
  1721. MatchOperand(taicpu(p).oper[2]^, taicpu(hp1).oper[1]^) do
  1722. begin
  1723. asml.Remove(hp1);
  1724. hp1.free;
  1725. DebugMsg(SPeepholeOptimization + 'VMovVMov2VMov done', p);
  1726. { Can we do it again? }
  1727. end;
  1728. end;
  1729. function TCpuAsmOptimizer.OptPass1VOp(var p: tai): Boolean;
  1730. var
  1731. hp1: tai;
  1732. begin
  1733. Result := GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1734. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp');
  1735. end;
  1736. function TCpuAsmOptimizer.OptPass1Push(var p: tai): Boolean;
  1737. var
  1738. hp1: tai;
  1739. begin
  1740. Result:=false;
  1741. if (taicpu(p).oper[0]^.regset^=[RS_R14]) and
  1742. GetNextInstruction(p,hp1) and
  1743. MatchInstruction(hp1,A_POP,[C_None],[PF_None]) and
  1744. (taicpu(hp1).oper[0]^.regset^=[RS_R15]) then
  1745. begin
  1746. if not(CPUARM_HAS_BX in cpu_capabilities[current_settings.cputype]) then
  1747. begin
  1748. DebugMsg('Peephole Optimization: PushPop2Mov done', p);
  1749. taicpu(p).ops:=2;
  1750. taicpu(p).loadreg(1, NR_R14);
  1751. taicpu(p).loadreg(0, NR_R15);
  1752. taicpu(p).opcode:=A_MOV;
  1753. end
  1754. else
  1755. begin
  1756. DebugMsg('Peephole Optimization: PushPop2Bx done', p);
  1757. taicpu(p).loadreg(0, NR_R14);
  1758. taicpu(p).opcode:=A_BX;
  1759. end;
  1760. RemoveInstruction(hp1);
  1761. Result:=true;
  1762. Exit;
  1763. end;
  1764. end;
{ Replaces short forward conditional branches by conditionally executed
  instructions (ARM mode only):
    Bcc2Cond:   Bcc xxx / <block> / xxx:          -> <block> executed under
                                                     the inverse condition
    BccB2Cond:  Bcc xxx / <block1> / B yyy /
                xxx: <block2> / yyy:              -> <block1> under the
                                                     inverse condition,
                                                     <block2> under the
                                                     original one }
function TCpuAsmOptimizer.OptPass2Bcc(var p: tai): Boolean;
  var
    hp1,hp2,hp3,after_p: tai;
    l : longint;
    WasLast: boolean;
    Label_X, Label_Y: TASmLabel;

    { Makes the instructions starting at this_hp conditional with newcond.
      Conversion stops after an instruction that must be the last one
      (MustBeLast), or on a label / an instruction CanBeCond rejects. }
    procedure ConvertInstructins(this_hp: tai; newcond: tasmcond);
      var
        next_hp: tai;
      begin
        repeat
          if this_hp.typ=ait_instruction then
            taicpu(this_hp).condition := newcond;
          GetNextInstruction(this_hp, next_hp);
          if MustBeLast(this_hp) then
            Break;
          this_hp := next_hp
        until not(assigned(this_hp)) or
          not(CanBeCond(this_hp)) or
          { NOTE(review): this clause tests the outer variable hp1, not
            this_hp; hp1 points at a label at both call sites, so the test
            never fires.  Do not blindly "fix" it to this_hp: block #2 of
            BccB2Cond may legitimately contain an unconditional B that must
            be made conditional by this loop — confirm intent upstream. }
          ((hp1.typ = ait_instruction) and (taicpu(hp1).opcode = A_B)) or
          (this_hp.typ = ait_label);
      end;

  begin
    Result := False;
    if (taicpu(p).condition<>C_None) and
      not(GenerateThumbCode) then
      begin
        { check for
          Bxx   xxx
          <several instructions>
          xxx:
        }
        Label_X := TAsmLabel(taicpu(p).oper[0]^.ref^.symbol);
        l:=0;
        WasLast:=False;
        GetNextInstruction(p, hp1);
        after_p := hp1;
        { scan and count the instructions between the branch and the next
          label / branch / non-convertible instruction (at most 5) }
        while assigned(hp1) and
          (l<=4) and
          CanBeCond(hp1) and
          { stop on labels }
          not(hp1.typ=ait_label) and
          { avoid that we cannot recognize the case BccB2Cond }
          not((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B)) do
          begin
            inc(l);
            if MustBeLast(hp1) then
              begin
                WasLast:=True;
                GetNextInstruction(hp1,hp1);
                break;
              end
            else
              GetNextInstruction(hp1,hp1);
          end;
        if assigned(hp1) then
          begin
            if FindLabel(Label_X, hp1) then
              begin
                { the branch jumps just past the scanned block: drop the
                  branch and predicate the block on the inverse condition }
                if (l<=4) and (l>0) then
                  begin
                    ConvertInstructins(after_p, inverse_cond(taicpu(p).condition));
                    DebugMsg(SPeepholeOptimization + 'Bcc2Cond done', p);
                    { wait with removing else GetNextInstruction could
                      ignore the label if it was the only usage in the
                      jump moved away }
                    Label_X.decrefs;
                    RemoveCurrentP(p, after_p);
                    Result := True;
                    Exit;
                  end;
              end
            else
              { do not perform further optimizations if there is an instruction
                in block #1 which cannot be optimized.
              }
              if not WasLast then
                begin
                  { check further for
                        Bcc   xxx
                        <several instructions 1>
                        B   yyy
                    xxx:
                        <several instructions 2>
                    yyy:
                  }
                  { hp2 points to jmp yyy }
                  hp2:=hp1;
                  { skip hp2 to xxx }
                  if assigned(hp2) and
                    (l<=3) and
                    (hp2.typ=ait_instruction) and
                    (taicpu(hp2).is_jmp) and
                    (taicpu(hp2).condition=C_None) and
                    GetNextInstruction(hp2, hp1) and
                    { real label and jump, no further references to the
                      label are allowed }
                    (Label_X.getrefs = 1) and
                    FindLabel(Label_X, hp1) then
                    begin
                      Label_Y := TAsmLabel(taicpu(hp2).oper[0]^.ref^.symbol);
                      l:=0;
                      { skip hp1 and hp3 to <several moves 2> }
                      GetNextInstruction(hp1, hp1);
                      hp3 := hp1;
                      { scan block #2 up to yyy (at most 4 instructions) }
                      while assigned(hp1) and
                        CanBeCond(hp1) and
                        (l<=3) do
                        begin
                          inc(l);
                          if MustBeLast(hp1) then
                            begin
                              GetNextInstruction(hp1, hp1);
                              break;
                            end
                          else
                            GetNextInstruction(hp1, hp1);
                        end;
                      { hp1 points to yyy: }
                      if assigned(hp1) and
                        FindLabel(Label_Y, hp1) then
                        begin
                          { predicate block #1 on the inverse condition,
                            block #2 on the original one, drop both branches }
                          ConvertInstructins(after_p, inverse_cond(taicpu(p).condition));
                          ConvertInstructins(hp3, taicpu(p).condition);
                          DebugMsg(SPeepholeOptimization + 'BccB2Cond done', after_p);
                          { remove B }
                          Label_Y.decrefs;
                          RemoveInstruction(hp2);
                          { remove Bcc }
                          Label_X.decrefs;
                          RemoveCurrentP(p, after_p);
                          Result := True;
                          Exit;
                        end;
                    end;
                end;
          end;
      end;
  end;
  1904. function TCpuAsmOptimizer.OptPass2CMP(var p: tai): Boolean;
  1905. var
  1906. hp1, hp_last: tai;
  1907. begin
  1908. Result := False;
  1909. if not GetNextInstructionUsingReg(p, hp1, NR_DEFAULTFLAGS) then
  1910. Exit;
  1911. if (hp1.typ = ait_label) or
  1912. (
  1913. (hp1.typ = ait_instruction) and
  1914. (taicpu(hp1).condition = C_None) and
  1915. (
  1916. RegModifiedByInstruction(NR_DEFAULTFLAGS, hp1) or
  1917. is_calljmp(taicpu(hp1).opcode)
  1918. )
  1919. ) then
  1920. begin
  1921. { The comparison is a null operation }
  1922. DebugMsg(SPeepholeOptimization + 'CMP -> nop', p);
  1923. RemoveCurrentP(p);
  1924. Result := True;
  1925. Exit;
  1926. end;
  1927. {
  1928. change
  1929. <op> reg,x,y
  1930. cmp reg,#0
  1931. into
  1932. <op>s reg,x,y
  1933. }
  1934. if (taicpu(p).oppostfix = PF_None) and
  1935. (taicpu(p).oper[1]^.val = 0) and
  1936. { be careful here, following instructions could use other flags
  1937. however after a jump fpc never depends on the value of flags }
  1938. { All above instructions set Z and N according to the following
  1939. Z := result = 0;
  1940. N := result[31];
  1941. EQ = Z=1; NE = Z=0;
  1942. MI = N=1; PL = N=0; }
  1943. (MatchInstruction(hp1, [A_B, A_CMP, A_CMN, A_TST, A_TEQ], [C_EQ,C_NE,C_MI,C_PL], []) or
  1944. { mov is also possible, but only if there is no shifter operand, it could be an rxx,
  1945. we are too lazy to check if it is rxx or something else }
  1946. (MatchInstruction(hp1, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp1).ops=2))) and
  1947. GetLastInstruction(p, hp_last) and
  1948. MatchInstruction(hp_last, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,
  1949. A_EOR,A_AND,A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
  1950. (
  1951. { mlas is only allowed in arm mode }
  1952. (taicpu(hp_last).opcode<>A_MLA) or
  1953. (current_settings.instructionset<>is_thumb)
  1954. ) and
  1955. (taicpu(hp_last).oper[0]^.reg = taicpu(p).oper[0]^.reg) and
  1956. assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp1.Next))) then
  1957. begin
  1958. DebugMsg(SPeepholeOptimization + 'OpCmp2OpS done', hp_last);
  1959. taicpu(hp_last).oppostfix:=PF_S;
  1960. { move flag allocation if possible }
  1961. hp1:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp_last.Next));
  1962. if assigned(hp1) then
  1963. begin
  1964. asml.Remove(hp1);
  1965. asml.insertbefore(hp1, hp_last);
  1966. end;
  1967. RemoveCurrentP(p);
  1968. Result:=true;
  1969. end;
  1970. end;
  1971. function TCpuAsmOptimizer.OptPass2STR(var p: tai): Boolean;
  1972. var
  1973. hp1: tai;
  1974. Postfix: TOpPostfix;
  1975. OpcodeStr: shortstring;
  1976. begin
  1977. Result := False;
  1978. { Try to merge two STRs into an STM instruction }
  1979. if not(GenerateThumbCode) and (taicpu(p).oper[1]^.typ = top_ref) and
  1980. (taicpu(p).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1981. (
  1982. (taicpu(p).oper[1]^.ref^.base = NR_NO) or
  1983. (taicpu(p).oper[1]^.ref^.index = NR_NO)
  1984. ) and
  1985. (taicpu(p).oppostfix = PF_None) and
  1986. (getregtype(taicpu(p).oper[0]^.reg) = R_INTREGISTER) then
  1987. begin
  1988. hp1 := p;
  1989. while GetNextInstruction(hp1, hp1) and (hp1.typ = ait_instruction) and
  1990. (taicpu(hp1).opcode = A_STR) do
  1991. if (taicpu(hp1).condition = taicpu(p).condition) and
  1992. (taicpu(hp1).oppostfix = PF_None) and
  1993. (getregtype(taicpu(hp1).oper[0]^.reg) = R_INTREGISTER) and
  1994. (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1995. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[1]^.ref^.base) and
  1996. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[1]^.ref^.index) and
  1997. (
  1998. (
  1999. (taicpu(p).oper[1]^.ref^.offset = 0) and
  2000. (getsupreg(taicpu(hp1).oper[0]^.reg) > getsupreg(taicpu(p).oper[0]^.reg)) and
  2001. (abs(taicpu(hp1).oper[1]^.ref^.offset) = 4)
  2002. ) or (
  2003. (taicpu(hp1).oper[1]^.ref^.offset = 0) and
  2004. (getsupreg(taicpu(hp1).oper[0]^.reg) < getsupreg(taicpu(p).oper[0]^.reg)) and
  2005. (abs(taicpu(p).oper[1]^.ref^.offset) = 4)
  2006. )
  2007. ) then
  2008. begin
  2009. if (getsupreg(taicpu(hp1).oper[0]^.reg) < getsupreg(taicpu(p).oper[0]^.reg)) xor
  2010. (taicpu(hp1).oper[1]^.ref^.offset < taicpu(p).oper[1]^.ref^.offset) then
  2011. begin
  2012. Postfix := PF_DA;
  2013. OpcodeStr := 'DA';
  2014. end
  2015. else
  2016. begin
  2017. Postfix := PF_None;
  2018. OpcodeStr := '';
  2019. end;
  2020. taicpu(hp1).oper[1]^.ref^.offset := 0;
  2021. if taicpu(hp1).oper[1]^.ref^.index = NR_NO then
  2022. begin
  2023. taicpu(hp1).oper[1]^.ref^.index := taicpu(hp1).oper[1]^.ref^.base;
  2024. taicpu(hp1).oper[1]^.ref^.base := NR_NO;
  2025. end;
  2026. taicpu(p).opcode := A_STM;
  2027. taicpu(p).loadregset(1, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg), getsupreg(taicpu(hp1).oper[0]^.reg)]);
  2028. taicpu(p).loadref(0, taicpu(hp1).oper[1]^.ref^);
  2029. taicpu(p).oppostfix := Postfix;
  2030. RemoveInstruction(hp1);
  2031. DebugMsg(SPeepholeOptimization + 'Merging stores: STR/STR -> STM' + OpcodeStr, p);
  2032. Result := True;
  2033. Exit;
  2034. end;
  2035. end;
  2036. end;
  2037. function TCpuAsmOptimizer.OptPass2STM(var p: tai): Boolean;
  2038. var
  2039. hp1: tai;
  2040. CorrectOffset:ASizeInt;
  2041. i, LastReg: TSuperRegister;
  2042. Postfix: TOpPostfix;
  2043. OpcodeStr: shortstring;
  2044. basereg : tregister;
  2045. begin
  2046. Result := False;
  2047. { See if STM/STR can be merged into a single STM }
  2048. { taicpu(p).opcode is A_STM, so first operand is a memory reference }
  2049. if (taicpu(p).oper[0]^.ref^.addressmode = AM_OFFSET) then
  2050. begin
  2051. { Only try to handle simple base reg, without index }
  2052. if (taicpu(p).oper[0]^.ref^.index = NR_NO) then
  2053. basereg:=taicpu(p).oper[0]^.ref^.base
  2054. else if (taicpu(p).oper[0]^.ref^.base = NR_NO) and
  2055. (taicpu(p).oper[0]^.ref^.shiftmode = SM_NONE) then
  2056. basereg:=taicpu(p).oper[0]^.ref^.index
  2057. else
  2058. exit;
  2059. CorrectOffset := 0;
  2060. LastReg := RS_NO;
  2061. for i in taicpu(p).oper[1]^.regset^ do
  2062. begin
  2063. LastReg := i;
  2064. Inc(CorrectOffset, 4);
  2065. end;
  2066. { This while loop effectively doea a Selection Sort on any STR
  2067. instructions that follow }
  2068. hp1 := p;
  2069. while (LastReg < maxcpuregister) and
  2070. GetNextInstruction(hp1, hp1) and (hp1.typ = ait_instruction) and
  2071. (taicpu(hp1).opcode = A_STR) and
  2072. (taicpu(hp1).oper[1]^.typ = top_ref) do
  2073. if (taicpu(hp1).condition = taicpu(p).condition) and
  2074. (taicpu(hp1).oppostfix = PF_None) and
  2075. (getregtype(taicpu(hp1).oper[0]^.reg) = R_INTREGISTER) and
  2076. (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  2077. (taicpu(hp1).oper[1]^.ref^.shiftmode = SM_NONE) and
  2078. (
  2079. (
  2080. (taicpu(hp1).oper[1]^.ref^.base = NR_NO) and
  2081. (taicpu(hp1).oper[1]^.ref^.index = basereg)
  2082. ) or (
  2083. (taicpu(hp1).oper[1]^.ref^.index = NR_NO) and
  2084. (taicpu(hp1).oper[1]^.ref^.base = basereg)
  2085. )
  2086. ) and
  2087. { Next register must be later in the set }
  2088. (getsupreg(taicpu(hp1).oper[0]^.reg) > LastReg) and
  2089. (
  2090. (
  2091. (taicpu(p).oppostfix = PF_None) and
  2092. (taicpu(hp1).oper[1]^.ref^.offset = CorrectOffset)
  2093. ) or (
  2094. (taicpu(p).oppostfix = PF_DA) and
  2095. (taicpu(hp1).oper[1]^.ref^.offset = -CorrectOffset)
  2096. )
  2097. ) then
  2098. begin
  2099. { Increment the reference values ready for the next STR instruction to find }
  2100. LastReg := getsupreg(taicpu(hp1).oper[0]^.reg);
  2101. Inc(CorrectOffset, 4);
  2102. if (taicpu(p).oppostfix = PF_DA) then
  2103. OpcodeStr := 'DA'
  2104. else
  2105. OpcodeStr := '';
  2106. Include(taicpu(p).oper[1]^.regset^, LastReg);
  2107. DebugMsg(SPeepholeOptimization + 'Merging stores: STM' + OpcodeStr + '/STR -> STM' + OpcodeStr, hp1);
  2108. RemoveInstruction(hp1);
  2109. Result := True;
  2110. { See if we can find another one to merge }
  2111. hp1 := p;
  2112. Continue;
  2113. end;
  2114. end;
  2115. end;
  2116. function TCpuAsmOptimizer.PrePeepHoleOptsCpu(var p: tai): Boolean;
  2117. begin
  2118. result := false;
  2119. if p.typ=ait_instruction then
  2120. begin
  2121. case taicpu(p).opcode of
  2122. A_SBFX,
  2123. A_UBFX:
  2124. Result:=OptPreSBFXUBFX(p);
  2125. else
  2126. ;
  2127. end;
  2128. end;
  2129. end;
  2130. function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2131. begin
  2132. result := false;
  2133. if p.typ = ait_instruction then
  2134. begin
  2135. case taicpu(p).opcode of
  2136. A_CMP:
  2137. Result := OptPass1CMP(p);
  2138. A_STR:
  2139. Result := OptPass1STR(p);
  2140. A_LDR:
  2141. Result := OptPass1LDR(p);
  2142. A_MOV:
  2143. Result := OptPass1MOV(p);
  2144. A_AND:
  2145. Result := OptPass1And(p);
  2146. A_ADD,
  2147. A_SUB:
  2148. Result := OptPass1ADDSUB(p);
  2149. A_MUL:
  2150. REsult := OptPass1MUL(p);
  2151. A_ADC,
  2152. A_RSB,
  2153. A_RSC,
  2154. A_SBC,
  2155. A_BIC,
  2156. A_EOR,
  2157. A_ORR,
  2158. A_MLA,
  2159. A_MLS,
  2160. A_QADD,A_QADD16,A_QADD8,
  2161. A_QSUB,A_QSUB16,A_QSUB8,
  2162. A_QDADD,A_QDSUB,A_QASX,A_QSAX,
  2163. A_SHADD16,A_SHADD8,A_UHADD16,A_UHADD8,
  2164. A_SHSUB16,A_SHSUB8,A_UHSUB16,A_UHSUB8,
  2165. A_PKHTB,A_PKHBT,
  2166. A_SMUAD,A_SMUSD:
  2167. Result := OptPass1DataCheckMov(p);
  2168. {$ifdef dummy}
  2169. A_MVN:
  2170. Result := OPtPass1MVN(p);
  2171. {$endif dummy}
  2172. A_UXTB:
  2173. Result := OptPass1UXTB(p);
  2174. A_UXTH:
  2175. Result := OptPass1UXTH(p);
  2176. A_SXTB:
  2177. Result := OptPass1SXTB(p);
  2178. A_SXTH:
  2179. Result := OptPass1SXTH(p);
  2180. A_STM:
  2181. Result := OptPass1STM(p);
  2182. A_VMOV:
  2183. Result := OptPass1VMov(p);
  2184. A_VLDR,
  2185. A_VADD,
  2186. A_VMUL,
  2187. A_VDIV,
  2188. A_VSUB,
  2189. A_VSQRT,
  2190. A_VNEG,
  2191. A_VCVT,
  2192. A_VABS:
  2193. Result := OptPass1VOp(p);
  2194. A_PUSH:
  2195. Result := OptPass1Push(p);
  2196. else
  2197. ;
  2198. end;
  2199. end;
  2200. end;
  2201. function TCpuAsmOptimizer.PeepHoleOptPass2Cpu(var p: tai): boolean;
  2202. begin
  2203. result := False;
  2204. if p.typ = ait_instruction then
  2205. begin
  2206. case taicpu(p).opcode of
  2207. A_AND:
  2208. Result := OptPass2AND(p);
  2209. A_CMP:
  2210. Result := OptPass2CMP(p);
  2211. A_B:
  2212. Result := OptPass2Bcc(p);
  2213. A_STM:
  2214. Result := OptPass2STM(p);
  2215. A_STR:
  2216. Result := OptPass2STR(p);
  2217. A_TST:
  2218. Result := OptPass2TST(p);
  2219. else
  2220. ;
  2221. end;
  2222. end;
  2223. end;
  2224. { instructions modifying the CPSR can be only the last instruction }
  2225. function MustBeLast(p : tai) : boolean;
  2226. begin
  2227. Result:=(p.typ=ait_instruction) and
  2228. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  2229. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  2230. (taicpu(p).oppostfix=PF_S));
  2231. end;
  2232. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  2233. begin
  2234. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  2235. Result:=true
  2236. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  2237. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  2238. Result:=true
  2239. else
  2240. begin
  2241. if SuperRegistersEqual(Reg, NR_DEFAULTFLAGS) and (p1.typ = ait_instruction) then
  2242. begin
  2243. { Conditional instruction reads CPSR register }
  2244. if (taicpu(p1).condition <> C_None) then
  2245. Exit(True);
  2246. { Comparison instructions (and procedural jump) }
  2247. if (taicpu(p1).opcode in [A_BL, A_CMP, A_CMN, A_TST, A_TEQ]) then
  2248. Exit(True);
  2249. { Instruction sets CPSR register due to S suffix (floating-point
  2250. instructios won't raise false positives) }
  2251. if (taicpu(p1).oppostfix = PF_S) then
  2252. Exit(True)
  2253. end;
  2254. Result:=inherited RegInInstruction(Reg, p1);
  2255. end;
  2256. end;
const
  { set of opcodes which might or do write to memory; used by the scheduler
    to avoid moving a load across a possible store }
  { TODO : extend armins.dat to contain r/w info }
  opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
                            A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
  2262. { adjust the register live information when swapping the two instructions p and hp1,
  2263. they must follow one after the other }
  2264. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
  2265. procedure CheckLiveEnd(reg : tregister);
  2266. var
  2267. supreg : TSuperRegister;
  2268. regtype : TRegisterType;
  2269. begin
  2270. if reg=NR_NO then
  2271. exit;
  2272. regtype:=getregtype(reg);
  2273. supreg:=getsupreg(reg);
  2274. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_end[supreg]=hp1) and
  2275. RegInInstruction(reg,p) then
  2276. cg.rg[regtype].live_end[supreg]:=p;
  2277. end;
  2278. procedure CheckLiveStart(reg : TRegister);
  2279. var
  2280. supreg : TSuperRegister;
  2281. regtype : TRegisterType;
  2282. begin
  2283. if reg=NR_NO then
  2284. exit;
  2285. regtype:=getregtype(reg);
  2286. supreg:=getsupreg(reg);
  2287. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_start[supreg]=p) and
  2288. RegInInstruction(reg,hp1) then
  2289. cg.rg[regtype].live_start[supreg]:=hp1;
  2290. end;
  2291. var
  2292. i : longint;
  2293. r : TSuperRegister;
  2294. begin
  2295. { assumption: p is directly followed by hp1 }
  2296. { if live of any reg used by p starts at p and hp1 uses this register then
  2297. set live start to hp1 }
  2298. for i:=0 to p.ops-1 do
  2299. case p.oper[i]^.typ of
  2300. Top_Reg:
  2301. CheckLiveStart(p.oper[i]^.reg);
  2302. Top_Ref:
  2303. begin
  2304. CheckLiveStart(p.oper[i]^.ref^.base);
  2305. CheckLiveStart(p.oper[i]^.ref^.index);
  2306. end;
  2307. Top_Shifterop:
  2308. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2309. Top_RegSet:
  2310. for r:=RS_R0 to RS_R15 do
  2311. if r in p.oper[i]^.regset^ then
  2312. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2313. else
  2314. ;
  2315. end;
  2316. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2317. set live end to p }
  2318. for i:=0 to hp1.ops-1 do
  2319. case hp1.oper[i]^.typ of
  2320. Top_Reg:
  2321. CheckLiveEnd(hp1.oper[i]^.reg);
  2322. Top_Ref:
  2323. begin
  2324. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2325. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2326. end;
  2327. Top_Shifterop:
  2328. CheckLiveStart(hp1.oper[i]^.shifterop^.rs);
  2329. Top_RegSet:
  2330. for r:=RS_R0 to RS_R15 do
  2331. if r in hp1.oper[i]^.regset^ then
  2332. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2333. else
  2334. ;
  2335. end;
  2336. end;
{ Pre-register-allocation scheduler: when an instruction p is directly
  followed by a load hp1 whose result is needed by the next instruction hp2,
  try to move the load in front of p so its latency can be hidden.  Only
  backward scheduling over a distance of one instruction is performed. }
function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
{ TODO : schedule also forward }
{ TODO : schedule distance > 1 }

  { returns true if p might be a load of a pc relative tls offset }
  function PossibleTLSLoad(const p: tai) : boolean;
    begin
      { an LDR whose reference combines PC with another register }
      Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
        (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
        (taicpu(p).oper[1]^.ref^.index=NR_PC)));
    end;

  var
    hp1,hp2,hp3,hp4,hp5,insertpos : tai;
    list : TAsmList;
  begin
    result:=true;
    { temporary list receiving p plus its associated regalloc/label tais }
    list:=TAsmList.create;
    p:=BlockStart;
    while p<>BlockEnd Do
      begin
        if (p.typ=ait_instruction) and
          GetNextInstruction(p,hp1) and
          (hp1.typ=ait_instruction) and
          (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
          (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
          { for now we don't reschedule if the previous instruction changes potentially a memory location }
          ( (not(taicpu(p).opcode in opcode_could_mem_write) and
             not(RegModifiedByInstruction(NR_PC,p))
            ) or
            ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
             ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
              (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
              (taicpu(hp1).oper[1]^.ref^.offset=0)
              )
             ) or
             { try to prove that the memory accesses don't overlapp }
             ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
              (taicpu(p).oper[1]^.typ = top_ref) and
              (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
              (taicpu(p).oppostfix=PF_None) and
              (taicpu(hp1).oppostfix=PF_None) and
              (taicpu(p).oper[1]^.ref^.index=NR_NO) and
              (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
              { get operand sizes and check if the offset distance is large enough to ensure no overlapp }
              (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
             )
            )
          ) and
          GetNextInstruction(hp1,hp2) and
          (hp2.typ=ait_instruction) and
          { loaded register used by next instruction?
            if we ever support labels (they could be skipped in theory) here, the gnu2 tls general-dynamic code could get broken (the ldr before
            the bl may not be scheduled away from the bl) and it needs to be taken care of this case
          }
          (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
          { loaded register not used by previous instruction? }
          not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
          { same condition? }
          (taicpu(p).condition=taicpu(hp1).condition) and
          { first instruction might not change the register used as base }
          ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
           not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
          ) and
          { first instruction might not change the register used as index }
          ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
           not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
          ) and
          { if we modify the basereg AND the first instruction used that reg, we can not schedule }
          ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
           not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
          not(PossibleTLSLoad(p)) and
          not(PossibleTLSLoad(hp1)) then
          begin
            { remember the neighbours of p before unlinking it }
            hp3:=tai(p.Previous);
            hp5:=tai(p.next);
            asml.Remove(p);
            { if there is a reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
              associated with p, move it together with p }

            { before the instruction? }
            { find reg allocs,deallocs and PIC labels }
            while assigned(hp3) and (hp3.typ<>ait_instruction) do
              begin
                if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
                  RegInInstruction(tai_regalloc(hp3).reg,p) )
                or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
                then
                  begin
                    hp4:=hp3;
                    hp3:=tai(hp3.Previous);
                    asml.Remove(hp4);
                    { Insert keeps the original relative order of these tais }
                    list.Insert(hp4);
                  end
                else
                  hp3:=tai(hp3.Previous);
              end;
            list.Concat(p);
            SwapRegLive(taicpu(p),taicpu(hp1));
            { after the instruction? }
            { find reg deallocs and reg syncs }
            while assigned(hp5) and (hp5.typ<>ait_instruction) do
              begin
                if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
                  RegInInstruction(tai_regalloc(hp5).reg,p) then
                  begin
                    hp4:=hp5;
                    hp5:=tai(hp5.next);
                    asml.Remove(hp4);
                    list.Concat(hp4);
                  end
                else
                  hp5:=tai(hp5.Next);
              end;
            asml.Remove(hp1);
            { if there are address labels associated with hp2, those must
              stay with hp2 (e.g. for GOT-less PIC) }
            insertpos:=hp2;
            while assigned(hp2.previous) and
              (tai(hp2.previous).typ<>ait_instruction) do
              begin
                hp2:=tai(hp2.previous);
                if (hp2.typ=ait_label) and
                  (tai_label(hp2).labsym.typ=AT_ADDR) then
                  insertpos:=hp2;
              end;
{$ifdef DEBUG_PREREGSCHEDULER}
            asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
{$endif DEBUG_PREREGSCHEDULER}
            { re-insert: first the load, then p (and its companions) after it }
            asml.InsertBefore(hp1,insertpos);
            asml.InsertListBefore(insertpos,list);
            p:=tai(p.next);
          end
        else if p.typ=ait_instruction then
          p:=hp1
        else
          p:=tai(p.next);
      end;
    list.Free;
  end;
{ The conditional instruction at p (covered by a preceding IT block) is
  being folded away: shorten the IT instruction found up to 4 entries
  before p by one slot, or remove it entirely if it covered only p.
  l counts p's position (1..4) inside the IT block while scanning back. }
procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  var
    hp : tai;
    l : longint;
  begin
    hp := tai(p.Previous);
    l := 1;
    { an IT block covers at most 4 instructions, so look back at most 4 }
    while assigned(hp) and
      (l <= 4) do
      begin
        if hp.typ=ait_instruction then
          begin
            if (taicpu(hp).opcode>=A_IT) and
              (taicpu(hp).opcode <= A_ITTTT) then
              begin
                { IT covering only p: the whole IT becomes superfluous }
                if (taicpu(hp).opcode = A_IT) and
                  (l=1) then
                  list.Remove(hp)
                else
                  { otherwise drop the last then/else slot of the mask, but
                    only when p really is the last instruction it covers }
                  case taicpu(hp).opcode of
                    A_ITE:
                      if l=2 then taicpu(hp).opcode := A_IT;
                    A_ITT:
                      if l=2 then taicpu(hp).opcode := A_IT;
                    A_ITEE:
                      if l=3 then taicpu(hp).opcode := A_ITE;
                    A_ITTE:
                      if l=3 then taicpu(hp).opcode := A_ITT;
                    A_ITET:
                      if l=3 then taicpu(hp).opcode := A_ITE;
                    A_ITTT:
                      if l=3 then taicpu(hp).opcode := A_ITT;
                    A_ITEEE:
                      if l=4 then taicpu(hp).opcode := A_ITEE;
                    A_ITTEE:
                      if l=4 then taicpu(hp).opcode := A_ITTE;
                    A_ITETE:
                      if l=4 then taicpu(hp).opcode := A_ITET;
                    A_ITTTE:
                      if l=4 then taicpu(hp).opcode := A_ITTT;
                    A_ITEET:
                      if l=4 then taicpu(hp).opcode := A_ITEE;
                    A_ITTET:
                      if l=4 then taicpu(hp).opcode := A_ITTE;
                    A_ITETT:
                      if l=4 then taicpu(hp).opcode := A_ITET;
                    A_ITTTT:
                      begin
                        if l=4 then taicpu(hp).opcode := A_ITTT;
                      end
                    else
                      ;
                  end;
                { the nearest IT block has been handled; stop scanning }
                break;
              end;
            {else if (taicpu(hp).condition<>taicpu(p).condition) or
              (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
              break;}
            inc(l);
          end;
        hp := tai(hp.Previous);
      end;
  end;
  2537. function TCpuThumb2AsmOptimizer.OptPass1STM(var p: tai): boolean;
  2538. var
  2539. hp : taicpu;
  2540. begin
  2541. result:=false;
  2542. if MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2543. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2544. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2545. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2546. begin
  2547. DebugMsg('Peephole Stm2Push done', p);
  2548. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2549. AsmL.InsertAfter(hp, p);
  2550. asml.Remove(p);
  2551. p:=hp;
  2552. result:=true;
  2553. end;
  2554. end;
  2555. function TCpuThumb2AsmOptimizer.OptPass1LDM(var p: tai): boolean;
  2556. var
  2557. hp : taicpu;
  2558. begin
  2559. result:=false;
  2560. if MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2561. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2562. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2563. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2564. begin
  2565. DebugMsg('Peephole Ldm2Pop done', p);
  2566. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2567. asml.InsertBefore(hp, p);
  2568. asml.Remove(p);
  2569. p.Free;
  2570. p:=hp;
  2571. result:=true;
  2572. end;
  2573. end;
  2574. function TCpuThumb2AsmOptimizer.OptPass1AndThumb2(var p : tai) : boolean;
  2575. begin
  2576. result:=false;
  2577. if MatchInstruction(p, [A_AND], [], [PF_None]) and
  2578. (taicpu(p).ops = 2) and
  2579. (taicpu(p).oper[1]^.typ=top_const) and
  2580. ((taicpu(p).oper[1]^.val=255) or
  2581. (taicpu(p).oper[1]^.val=65535)) then
  2582. begin
  2583. DebugMsg('Peephole AndR2Uxt done', p);
  2584. if taicpu(p).oper[1]^.val=255 then
  2585. taicpu(p).opcode:=A_UXTB
  2586. else
  2587. taicpu(p).opcode:=A_UXTH;
  2588. taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
  2589. result := true;
  2590. end
  2591. else if MatchInstruction(p, [A_AND], [], [PF_None]) and
  2592. (taicpu(p).ops = 3) and
  2593. (taicpu(p).oper[2]^.typ=top_const) and
  2594. ((taicpu(p).oper[2]^.val=255) or
  2595. (taicpu(p).oper[2]^.val=65535)) then
  2596. begin
  2597. DebugMsg('Peephole AndRR2Uxt done', p);
  2598. if taicpu(p).oper[2]^.val=255 then
  2599. taicpu(p).opcode:=A_UXTB
  2600. else
  2601. taicpu(p).opcode:=A_UXTH;
  2602. taicpu(p).ops:=2;
  2603. result := true;
  2604. end;
  2605. end;
  2606. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2607. begin
  2608. result:=false;
  2609. if inherited PeepHoleOptPass1Cpu(p) then
  2610. result:=true
  2611. else if p.typ=ait_instruction then
  2612. case taicpu(p).opcode of
  2613. A_STM:
  2614. result:=OptPass1STM(p);
  2615. A_LDM:
  2616. result:=OptPass1LDM(p);
  2617. A_AND:
  2618. result:=OptPass1AndThumb2(p);
  2619. else
  2620. ;
  2621. end;
  2622. end;
{ Second Thumb-2 peephole pass: collapses a conditional branch over a short
  sequence (Bcc lbl; <up to 4 instructions>; lbl:) into an IT block that
  conditionally executes those instructions with the inverted condition. }
procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
  var
    p,hp1,hp2: tai;
    l : longint;
    condition : tasmcond;
    { UsedRegs, TmpUsedRegs: TRegSet; }
  begin
    p := BlockStart;
    { UsedRegs := []; }
    while (p <> BlockEnd) Do
      begin
        { UpdateUsedRegs(UsedRegs, tai(p.next)); }
        case p.Typ Of
          Ait_Instruction:
            begin
              case taicpu(p).opcode Of
                A_B:
                  if taicpu(p).condition<>C_None then
                    begin
                      { check for
                        Bxx   xxx
                        <several instructions>
                        xxx:
                      }
                      { count candidate instructions; an IT block covers at most 4 }
                      l:=0;
                      GetNextInstruction(p, hp1);
                      while assigned(hp1) and
                        (l<=4) and
                        CanBeCond(hp1) and
                        { stop on labels }
                        not(hp1.typ=ait_label) do
                        begin
                          inc(l);
                          if MustBeLast(hp1) then
                            begin
                              //hp1:=nil;
                              GetNextInstruction(hp1,hp1);
                              break;
                            end
                          else
                            GetNextInstruction(hp1,hp1);
                        end;
                      if assigned(hp1) then
                        begin
                          { does the branch target immediately follow the sequence? }
                          if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
                            begin
                              if (l<=4) and (l>0) then
                                begin
                                  { the skipped instructions run when the
                                    branch is NOT taken -> invert }
                                  condition:=inverse_cond(taicpu(p).condition);
                                  hp2:=p;
                                  GetNextInstruction(p,hp1);
                                  p:=hp1;
                                  repeat
                                    if hp1.typ=ait_instruction then
                                      taicpu(hp1).condition:=condition;
                                    if MustBeLast(hp1) then
                                      begin
                                        GetNextInstruction(hp1,hp1);
                                        break;
                                      end
                                    else
                                      GetNextInstruction(hp1,hp1);
                                  until not(assigned(hp1)) or
                                    not(CanBeCond(hp1)) or
                                    (hp1.typ=ait_label);
                                  { wait with removing else GetNextInstruction could
                                    ignore the label if it was the only usage in the
                                    jump moved away }
                                  asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
                                  { shorten/remove any IT block that covered the branch }
                                  DecrementPreceedingIT(asml, hp2);
                                  case l of
                                    1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
                                    2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
                                    3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
                                    4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
                                  end;
                                  { now the branch (hp2) can be dropped }
                                  tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                  asml.remove(hp2);
                                  hp2.free;
                                  continue;
                                end;
                            end;
                        end;
                    end;
                else
                  ;
              end;
            end;
          else
            ;
        end;
        p := tai(p.next)
      end;
  end;
{ Post peephole pass for Thumb-2: rewrites instructions into flag-setting
  and/or 2-operand forms (only when the flags are not live, so setting them
  is harmless), which allows narrower encodings to be selected later. }
function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
  begin
    result:=false;
    if p.typ = ait_instruction then
      begin
        { mov reg,#imm8 -> movs reg,#imm8 when flags are dead }
        if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
          (taicpu(p).oper[1]^.typ=top_const) and
          (taicpu(p).oper[1]^.val >= 0) and
          (taicpu(p).oper[1]^.val < 256) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole Mov2Movs done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            result:=true;
          end
        { mvn rd,rm -> mvns rd,rm when flags are dead }
        else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
          (taicpu(p).oper[1]^.typ=top_reg) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole Mvn2Mvns done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            result:=true;
          end
        { rsb rd,rn,#0 -> rsbs rd,rn,#0 when flags are dead }
        else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
          (taicpu(p).ops = 3) and
          (taicpu(p).oper[2]^.typ=top_const) and
          (taicpu(p).oper[2]^.val=0) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole Rsb2Rsbs done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            result:=true;
          end
        { add/sub rd,rd,#imm8 -> adds/subs rd,#imm8 when flags are dead }
        else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
          (taicpu(p).ops = 3) and
          MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
          (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
          (taicpu(p).oper[2]^.typ=top_const) and
          (taicpu(p).oper[2]^.val >= 0) and
          (taicpu(p).oper[2]^.val < 256) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole AddSub2*s done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
            taicpu(p).oppostfix:=PF_S;
            taicpu(p).ops := 2;
            result:=true;
          end
        { 2-operand add/sub (no SP involved) -> flag-setting form }
        else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
          (taicpu(p).ops = 2) and
          (taicpu(p).oper[1]^.typ=top_reg) and
          (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
          (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole AddSub2*s done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            result:=true;
          end
        { add rd,rd,rm -> add rd,rm }
        else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
          (taicpu(p).ops = 3) and
          MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
          (taicpu(p).oper[2]^.typ=top_reg) then
          begin
            DebugMsg('Peephole AddRRR2AddRR done', p);
            taicpu(p).ops := 2;
            taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
            result:=true;
          end
        { op rd,rd,rm -> ops rd,rm when flags are dead }
        else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
          (taicpu(p).ops = 3) and
          MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
          (taicpu(p).oper[2]^.typ=top_reg) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole opXXY2opsXY done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).ops := 2;
            taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
            taicpu(p).oppostfix:=PF_S;
            result:=true;
          end
        { already flag-setting: ops rd,rd,op2 -> ops rd,op2 }
        else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
          (taicpu(p).ops = 3) and
          MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
          (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
          begin
            DebugMsg('Peephole opXXY2opXY done', p);
            taicpu(p).ops := 2;
            if taicpu(p).oper[2]^.typ=top_reg then
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
            else
              taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
            result:=true;
          end
        { commutative op rd,rn,rd -> ops rd,rn when flags are dead }
        else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
          (taicpu(p).ops = 3) and
          MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole opXYX2opsXY done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            taicpu(p).ops := 2;
            result:=true;
          end
        { mov rd,rm,<shift> -> lsls/lsrs/asrs/rors rd,rm,... when flags are dead }
        else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
          (taicpu(p).ops=3) and
          (taicpu(p).oper[2]^.typ=top_shifterop) and
          (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
          //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole Mov2Shift done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            case taicpu(p).oper[2]^.shifterop^.shiftmode of
              SM_LSL: taicpu(p).opcode:=A_LSL;
              SM_LSR: taicpu(p).opcode:=A_LSR;
              SM_ASR: taicpu(p).opcode:=A_ASR;
              SM_ROR: taicpu(p).opcode:=A_ROR;
              else
                internalerror(2019050912);
            end;
            { shift amount comes either from a register or an immediate }
            if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
              taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
            else
              taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
            result:=true;
          end
      end;
  end;
{ unit initialization: register the ARM-specific peephole optimizer and
  pre-register-allocation scheduler classes }
begin
  casmoptimizer:=TCpuAsmOptimizer;
  cpreregallocscheduler:=TCpuPreRegallocScheduler;
End.