aoptcpu.pas 119 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078
  1. {
  2. Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
  3. Development Team
  4. This unit implements the ARM optimizer object
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 2 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program; if not, write to the Free Software
  15. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  16. ****************************************************************************
  17. }
  18. Unit aoptcpu;
  19. {$i fpcdefs.inc}
  20. { $define DEBUG_PREREGSCHEDULER}
  21. { $define DEBUG_AOPTCPU}
  22. Interface
  23. uses
  24. cgbase, cgutils, cpubase, aasmtai,
  25. aasmcpu,
  26. aopt, aoptobj, aoptarm;
  27. Type
  28. { TCpuAsmOptimizer }
  29. TCpuAsmOptimizer = class(TARMAsmOptimizer)
  30. { Can't be done in some cases due to the limited range of jumps }
  31. function CanDoJumpOpts: Boolean; override;
  32. { uses the same constructor as TAopObj }
  33. function PrePeepHoleOptsCpu(var p: tai): Boolean; override;
  34. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  35. function PeepHoleOptPass2Cpu(var p: tai): boolean; override;
  36. Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
  37. function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;
  38. { gets the next tai object after current that contains info relevant
  39. to the optimizer in p1 which used the given register or does a
  40. change in program flow.
  41. If there is none, it returns false and
  42. sets p1 to nil }
  43. Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  44. { outputs a debug message into the assembler file }
  45. procedure DebugMsg(const s: string; p: tai);
  46. function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
  47. function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
  48. { With these routines, there's optimisation code that's general for all ARM platforms }
  49. function OptPass1And(var p: tai): Boolean; override;
  50. function OptPass1LDR(var p: tai): Boolean; override;
  51. function OptPass1STR(var p: tai): Boolean; override;
  52. protected
  53. function LookForPreindexedPattern(p: taicpu): boolean;
  54. function LookForPostindexedPattern(p: taicpu): boolean;
  55. { Individual optimisation routines }
  56. function OptPass1DataCheckMov(var p: tai): Boolean;
  57. function OptPass1ADDSUB(var p: tai): Boolean;
  58. function OptPass1CMP(var p: tai): Boolean;
  59. function OptPass1STM(var p: tai): Boolean;
  60. function OptPass1MOV(var p: tai): Boolean;
  61. function OptPass1MUL(var p: tai): Boolean;
  62. function OptPass1MVN(var p: tai): Boolean;
  63. function OptPass1VMov(var p: tai): Boolean;
  64. function OptPass1VOp(var p: tai): Boolean;
  65. function OptPass1Push(var p: tai): Boolean;
  66. function OptPass2Bcc(var p: tai): Boolean;
  67. function OptPass2STM(var p: tai): Boolean;
  68. function OptPass2STR(var p: tai): Boolean;
  69. End;
  70. TCpuPreRegallocScheduler = class(TAsmScheduler)
  71. function SchedulerPass1Cpu(var p: tai): boolean;override;
  72. procedure SwapRegLive(p, hp1: taicpu);
  73. end;
  74. TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
  75. { uses the same constructor as TAopObj }
  76. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  77. procedure PeepHoleOptPass2;override;
  78. function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  79. protected
  80. function OptPass1AndThumb2(var p : tai) : boolean;
  81. function OptPass1LDM(var p : tai) : boolean;
  82. function OptPass1STM(var p : tai) : boolean;
  83. End;
  84. function MustBeLast(p : tai) : boolean;
  85. Implementation
  86. uses
  87. cutils,verbose,globtype,globals,
  88. systems,
  89. cpuinfo,
  90. cgobj,procinfo,
  91. aasmbase,aasmdata,
  92. aoptutils;
  93. { Range check must be disabled explicitly as conversions between signed and unsigned
  94. 32-bit values are done without explicit typecasts }
  95. {$R-}
  96. function CanBeCond(p : tai) : boolean;
  97. begin
  98. result:=
  99. not(GenerateThumbCode) and
  100. (p.typ=ait_instruction) and
  101. (taicpu(p).condition=C_None) and
  102. ((taicpu(p).opcode<A_IT) or (taicpu(p).opcode>A_ITTTT)) and
  103. (taicpu(p).opcode<>A_CBZ) and
  104. (taicpu(p).opcode<>A_CBNZ) and
  105. (taicpu(p).opcode<>A_PLD) and
  106. (((taicpu(p).opcode<>A_BLX) and
  107. { BL may need to be converted into BLX by the linker -- could possibly
  108. be allowed in case it's to a local symbol of which we know that it
  109. uses the same instruction set as the current one }
  110. (taicpu(p).opcode<>A_BL)) or
  111. (taicpu(p).oper[0]^.typ=top_reg));
  112. end;
  113. function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
  114. begin
  115. Result:=false;
  116. if (taicpu(movp).condition = C_EQ) and
  117. (taicpu(cmpp).oper[0]^.reg = taicpu(movp).oper[0]^.reg) and
  118. (taicpu(cmpp).oper[1]^.val = taicpu(movp).oper[1]^.val) then
  119. begin
  120. asml.insertafter(tai_comment.Create(strpnew('Peephole Optimization: CmpMovMov - Removed redundant moveq')), movp);
  121. asml.remove(movp);
  122. movp.free;
  123. Result:=true;
  124. end;
  125. end;
  126. function AlignedToQWord(const ref : treference) : boolean;
  127. begin
  128. { (safe) heuristics to ensure alignment }
  129. result:=(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) and
  130. (((ref.offset>=0) and
  131. ((ref.offset mod 8)=0) and
  132. ((ref.base=NR_R13) or
  133. (ref.index=NR_R13))
  134. ) or
  135. ((ref.offset<=0) and
  136. { when using NR_R11, it has always a value of <qword align>+4 }
  137. ((abs(ref.offset+4) mod 8)=0) and
  138. (current_procinfo.framepointer=NR_R11) and
  139. ((ref.base=NR_R11) or
  140. (ref.index=NR_R11))
  141. )
  142. );
  143. end;
  144. function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
  145. begin
  146. if GenerateThumb2Code then
  147. result := (aoffset<4096) and (aoffset>-256)
  148. else
  149. result := ((pf in [PF_None,PF_B]) and
  150. (abs(aoffset)<4096)) or
  151. (abs(aoffset)<256);
  152. end;
{ Returns True if the instruction hp reads register reg: as a source
  operand, through a register set, as a shifter-operand register, or as
  part of a memory reference. }
function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
  var
    p: taicpu;
    i: longint;
  begin
    instructionLoadsFromReg := false;
    if not (assigned(hp) and (hp.typ = ait_instruction)) then
      exit;
    p:=taicpu(hp);
    { oper[0] is normally the destination, so start scanning at oper[1] }
    i:=1;
    {For these instructions we have to start on oper[0]}
    if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
                     A_CMP, A_CMN, A_TST, A_TEQ,
                     A_B, A_BL, A_BX, A_BLX,
                     A_SMLAL, A_UMLAL, A_VSTM, A_VLDM]) then i:=0;
    while(i<p.ops) do
      begin
        case p.oper[I]^.typ of
          top_reg:
            instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
              { STRD also stores the register numbered one above oper[0] }
              ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
          top_regset:
            instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
          top_shifterop:
            instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
          top_ref:
            instructionLoadsFromReg :=
              (p.oper[I]^.ref^.base = reg) or
              (p.oper[I]^.ref^.index = reg);
          else
            ;
        end;
        { for LDM/VLDM only the base register operand (oper[0]) is read;
          the register set is written, so stop after the first operand }
        if (i=0) and (p.opcode in [A_LDM,A_VLDM]) then
          exit;
        if instructionLoadsFromReg then
          exit; {Bailout if we found something}
        Inc(I);
      end;
  end;
{ Returns True if instruction hp overwrites reg with a new value (as
  opposed to merely reading it or modifying it via pre-/post-indexing). }
function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
  var
    p: taicpu;
  begin
    Result := false;
    if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
      exit;
    p := taicpu(hp);
    case p.opcode of
      { These operands do not write into a register at all }
      A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
      A_VCMP:
        exit;
      {Take care of post/preincremented store and loads, they will change their base register}
      A_STR, A_LDR:
        begin
          Result := false;
          { actually, this does not apply here because post-/preindexed does not mean that a register
            is loaded with a new value, it is only modified
            (taicpu(p).oper[1]^.typ=top_ref) and
            (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
            (taicpu(p).oper[1]^.ref^.base = reg);
          }
          { STR does not load into it's first register }
          if p.opcode = A_STR then
            exit;
          { LDR falls through to the oper[0] check below }
        end;
      A_VSTR:
        begin
          Result := false;
          exit;
        end;
      { These four are writing into the first 2 register, UMLAL and SMLAL will also read from them }
      A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
        Result :=
          (p.oper[1]^.typ = top_reg) and
          (p.oper[1]^.reg = reg);
      {Loads to oper2 from coprocessor}
      {
        MCR/MRC is currently not supported in FPC
        A_MRC:
          Result :=
            (p.oper[2]^.typ = top_reg) and
            (p.oper[2]^.reg = reg);
      }
      {Loads to all register in the registerset}
      A_LDM, A_VLDM:
        Result := (getsupreg(reg) in p.oper[1]^.regset^);
      A_POP:
        { POP writes every register in its set and always updates SP }
        Result := (getsupreg(reg) in p.oper[0]^.regset^) or
                  (reg=NR_STACK_POINTER_REG);
      else
        ;
    end;
    if Result then
      exit;
    { generic check on the first operand for the remaining opcodes }
    case p.oper[0]^.typ of
      {This is the case}
      top_reg:
        Result := (p.oper[0]^.reg = reg) or
          { LDRD also writes the register numbered one above oper[0] }
          (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
      {LDM/STM might write a new value to their index register}
      top_ref:
        Result :=
          (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
          (taicpu(p).oper[0]^.ref^.base = reg);
      else
        ;
    end;
  end;
{ Finds the next LDR or STR after Current that uses the same memory
  reference ref, returning it in Next.  The search gives up (Result=false)
  at the end of the list, at non-instruction tai objects, at control-flow
  changes, at writes to PC, or -- when StopOnStore is True -- at any
  STR/STM; it only walks past the first instruction when -O3
  (cs_opt_level3) is enabled. }
function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai; out
  Next: tai; const ref: TReference; StopOnStore: Boolean): Boolean;
  begin
    Next:=Current;
    repeat
      Result:=GetNextInstruction(Next,Next);
      if Result and
         (Next.typ=ait_instruction) and
         (taicpu(Next).opcode in [A_LDR, A_STR]) and
         (
           { plain LDR/STR keep the reference in oper[1]... }
           ((taicpu(Next).ops = 2) and
            (taicpu(Next).oper[1]^.typ = top_ref) and
            RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
           ((taicpu(Next).ops = 3) and { LDRD/STRD }
            { ...while the 3-operand doubleword forms keep it in oper[2] }
            (taicpu(Next).oper[2]^.typ = top_ref) and
            RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
         ) then
        {We've found an instruction LDR or STR with the same reference}
        exit;
    until not(Result) or
          (Next.typ<>ait_instruction) or
          not(cs_opt_level3 in current_settings.optimizerswitches) or
          is_calljmp(taicpu(Next).opcode) or
          (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
          RegModifiedByInstruction(NR_PC,Next);
    Result:=false;
  end;
{$ifdef DEBUG_AOPTCPU}
  const
    SPeepholeOptimization: shortstring = 'Peephole Optimization: ';

  { outputs a debug message into the assembler file, directly before p }
  procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
    begin
      asml.insertbefore(tai_comment.Create(strpnew(s)), p);
    end;
{$else DEBUG_AOPTCPU}
  const
    { empty so that "SPeepholeOptimization + msg" compiles to nothing extra }
    SPeepholeOptimization = '';

  { no-op when peephole debugging is disabled }
  procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
    begin
    end;
{$endif DEBUG_AOPTCPU}
  305. function TCpuAsmOptimizer.CanDoJumpOpts: Boolean;
  306. begin
  307. { Cannot perform these jump optimisations if the ARM architecture has 16-bit thumb codes }
  308. Result := not (
  309. (current_settings.instructionset = is_thumb) and not (CPUARM_HAS_THUMB2 in cpu_capabilities[current_settings.cputype])
  310. );
  311. end;
{ Folds
    vldr/vop reg0,...
    vmov     reg2,reg0
  into a single instruction targeting reg2 directly, provided reg0 is
  deallocated right after the vmov and reg2 is unused in between.
  optimizer names the calling optimisation for the debug output.
  Returns True when the vmov was removed. }
function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
  var
    alloc,
    dealloc : tai_regalloc;
    hp1 : tai;
  begin
    Result:=false;
    { the vmov must have the same condition/postfix as p (or be the
      matching f32/f64 move for a converting instruction) }
    if ((MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
         ((getregtype(taicpu(movp).oper[0]^.reg)=R_MMREGISTER) or (taicpu(p).opcode=A_VLDR))
        ) or
        { p produces a double-precision value, followed by vmov.f64 }
        (((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFD)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
        { p produces a single-precision value, followed by vmov.f32 }
        (((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFS)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
       ) and
       (taicpu(movp).ops=2) and
       MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
       { the destination register of the mov might not be used beween p and movp }
       not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
       { Take care to only do this for instructions which REALLY load to the first register.
         Otherwise
           vstr reg0, [reg1]
           vmov reg2, reg0
         will be optimized to
           vstr reg2, [reg1]
       }
       regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
      begin
        { reg0 must die immediately after the vmov }
        dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
        if assigned(dealloc) then
          begin
            DebugMsg(SPeepholeOptimization + optimizer + ' removed superfluous vmov', movp);
            result:=true;
            { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
              and remove it if possible }
            asml.Remove(dealloc);
            alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
            if assigned(alloc) then
              begin
                asml.Remove(alloc);
                alloc.free;
                dealloc.free;
              end
            else
              { no matching allocation found: keep the deallocation, now
                placed directly after p }
              asml.InsertAfter(dealloc,p);
            { try to move the allocation of the target register }
            GetLastInstruction(movp,hp1);
            alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
            if assigned(alloc) then
              begin
                asml.Remove(alloc);
                asml.InsertBefore(alloc,p);
                { adjust used regs }
                IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
              end;
            { change
                vldr reg0,[reg1]
                vmov reg2,reg0
              into
                ldr reg2,[reg1]
              if reg2 is an int register
            }
            if (taicpu(p).opcode=A_VLDR) and (getregtype(taicpu(movp).oper[0]^.reg)=R_INTREGISTER) then
              taicpu(p).opcode:=A_LDR;
            { finally get rid of the mov }
            taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
            asml.remove(movp);
            movp.free;
          end;
      end;
  end;
  381. {
  382. optimize
  383. add/sub reg1,reg1,regY/const
  384. ...
  385. ldr/str regX,[reg1]
  386. into
  387. ldr/str regX,[reg1, regY/const]!
  388. }
{ Rewrites a later "ldr/str regX,[reg1]" into the pre-indexed form
  "ldr/str regX,[reg1, regY/const]!" so that it absorbs a preceding
  "add/sub reg1,reg1,regY/const" (p); returns True when hp1 was rewritten
  (p itself is left in place). }
function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
  var
    hp1: tai;
  begin
    if GenerateARMCode and
       (p.ops=3) and
       { add/sub must modify its own source register: reg1,reg1,... }
       MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
       GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
       (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
       { only unconditional loads/stores without exotic postfixes }
       MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
       (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
       (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
       { a load into the base register would clobber the written-back value }
       (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
       (taicpu(hp1).oper[1]^.ref^.offset=0) and
       (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
       (((p.oper[2]^.typ=top_reg) and
         (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
        { immediate must fit: 8 bits generally, 12 bits for PF_None/PF_B }
        ((p.oper[2]^.typ=top_const) and
         ((abs(p.oper[2]^.val) < 256) or
          ((abs(p.oper[2]^.val) < 4096) and
           (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
      begin
        taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;
        if p.oper[2]^.typ=top_reg then
          begin
            taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
            { ADD adds the index, SUB subtracts it }
            if p.opcode=A_ADD then
              taicpu(hp1).oper[1]^.ref^.signindex:=1
            else
              taicpu(hp1).oper[1]^.ref^.signindex:=-1;
          end
        else
          begin
            if p.opcode=A_ADD then
              taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
            else
              taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
          end;
        result:=true;
      end
    else
      result:=false;
  end;
  432. {
  433. optimize
  434. ldr/str regX,[reg1]
  435. ...
  436. add/sub reg1,reg1,regY/const
  437. into
  438. ldr/str regX,[reg1], regY/const
  439. }
{ Rewrites "ldr/str regX,[reg1]" (p) into the post-indexed form
  "ldr/str regX,[reg1], regY/const" by absorbing a later
  "add/sub reg1,reg1,regY/const" (hp1), which is then removed.
  Returns True when the rewrite was done. }
function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
  var
    hp1 : tai;
  begin
    Result:=false;
    if (p.oper[1]^.typ = top_ref) and
       (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
       (p.oper[1]^.ref^.index=NR_NO) and
       (p.oper[1]^.ref^.offset=0) and
       GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
       { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
       MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
       { the add/sub must modify exactly the base register: reg1,reg1,... }
       (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
       (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
       (
        (taicpu(hp1).oper[2]^.typ=top_reg) or
        { valid offset? }
        ((taicpu(hp1).oper[2]^.typ=top_const) and
         ((abs(taicpu(hp1).oper[2]^.val)<256) or
          ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
         )
        )
       ) and
       { don't apply the optimization if the base register is loaded }
       (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
       not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
       { don't apply the optimization if the (new) index register is loaded }
       (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
       not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
       GenerateARMCode then
      begin
        DebugMsg(SPeepholeOptimization + 'Str/LdrAdd/Sub2Str/Ldr Postindex done', p);
        p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
        if taicpu(hp1).oper[2]^.typ=top_const then
          begin
            { ADD adds the constant after the access, SUB subtracts it }
            if taicpu(hp1).opcode=A_ADD then
              p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
            else
              p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
          end
        else
          begin
            p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
            if taicpu(hp1).opcode=A_ADD then
              p.oper[1]^.ref^.signindex:=1
            else
              p.oper[1]^.ref^.signindex:=-1;
          end;
        { the add/sub is now folded into p and can go }
        asml.Remove(hp1);
        hp1.Free;
        Result:=true;
      end;
  end;
  { Peephole pass-1 handler for ADD/SUB with an immediate operand:
    1. folds the immediate into the offsets of following LDR/STR instructions,
    2. merges two chained ADD/SUB immediates into one,
    3. converts an add/sub+ldr/str pair into pre-indexed addressing.
    Returns True when anything was changed; p may point at a different
    (or rewritten) instruction afterwards. }
  function TCpuAsmOptimizer.OptPass1ADDSUB(var p: tai): Boolean;
    var
      hp1,hp2: tai;
      sign: Integer;     { +1 when both add/subs go the same direction, -1 when they cancel }
      newvalue: TCGInt;  { merged immediate of the two add/sub instructions }
      b: byte;           { receives the rotation found by is_shifter_const; value unused here }
    begin
      { First try to fold a following "mov reg2,reg1" into this instruction }
      Result := OptPass1DataCheckMov(p);
      {
        change
        add/sub reg2,reg1,const1
        str/ldr reg3,[reg2,const2]
        dealloc reg2
        to
        str/ldr reg3,[reg1,const2+/-const1]
      }
      if (not GenerateThumbCode) and
         (taicpu(p).ops>2) and
         (taicpu(p).oper[1]^.typ = top_reg) and
         (taicpu(p).oper[2]^.typ = top_const) then
        begin
          hp1:=p;
          { scan forward over every ldr/str that addresses memory via reg2 }
          while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
            { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
            MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
            (taicpu(hp1).oper[1]^.typ = top_ref) and
            (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
            { don't optimize if the register is stored/overwritten }
            (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
            (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
            (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
            { new offset must be valid: either in the range of 8 or 12 bit, depend on the
              ldr postfix }
            (((taicpu(p).opcode=A_ADD) and
             isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
             ) or
             ((taicpu(p).opcode=A_SUB) and
              isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
             )
            ) do
            begin
              { neither reg1 nor reg2 might be changed inbetween }
              if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
                RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
                break;
              { reg2 must be either overwritten by the ldr or it is deallocated afterwards }
              if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
                assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
                begin
                  { remember last instruction }
                  hp2:=hp1;
                  DebugMsg(SPeepholeOptimization + 'Add/SubLdr2Ldr done', p);
                  hp1:=p;
                  { fix all ldr/str }
                  while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
                    begin
                      { rebase on reg1 and fold the add/sub constant into the offset }
                      taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
                      if taicpu(p).opcode=A_ADD then
                        inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
                      else
                        dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
                      if hp1=hp2 then
                        break;
                    end;
                  RemoveCurrentP(p);
                  result:=true;
                  Exit;
                end;
            end;
        end;
      {
        optimize
        add/sub rx,ry,const1
        add/sub rx,rx,const2
        into
        add/sub rx,ry,const1+/-const
        or
        mov rx,ry if const1+/-const=0
        or
        remove it, if rx=ry and const1+/-const=0
        check if the first operation has no postfix and condition
      }
      if MatchInstruction(p,[A_ADD,A_SUB],[C_None],[PF_None]) and
        MatchOptype(taicpu(p),top_reg,top_reg,top_const) and
        GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
        MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
        MatchOptype(taicpu(hp1),top_reg,top_reg,top_const) and
        MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[0]^) and
        MatchOperand(taicpu(p).oper[0]^,taicpu(hp1).oper[1]^) then
        begin
          { opposite opcodes cancel, identical opcodes accumulate }
          sign:=1;
          if (taicpu(p).opcode=A_SUB) xor (taicpu(hp1).opcode=A_SUB) then
            sign:=-1;
          newvalue:=taicpu(p).oper[2]^.val+sign*taicpu(hp1).oper[2]^.val;
          { merge only when the combined constant is still encodable as an immediate }
          if (not(GenerateThumbCode) and is_shifter_const(newvalue,b)) or
            (GenerateThumbCode and is_thumb_imm(newvalue)) then
            begin
              DebugMsg(SPeepholeOptimization + 'Merge Add/Sub done', p);
              taicpu(p).oper[2]^.val:=newvalue;
              RemoveInstruction(hp1);
              Result:=true;
              if newvalue=0 then
                begin
                  if MatchOperand(taicpu(p).oper[0]^,taicpu(p).oper[1]^) then
                    RemoveCurrentP(p)
                  else
                    begin
                      { add/sub of 0 degenerates to a plain register move }
                      taicpu(p).opcode:=A_MOV;
                      taicpu(p).ops:=2;
                    end;
                  Exit;
                end;
            end;
        end;
      { finally, try to fold this add/sub into a following ldr/str as a
        pre-indexed addressing mode }
      if (taicpu(p).condition = C_None) and
        (taicpu(p).oppostfix = PF_None) and
        LookForPreindexedPattern(taicpu(p)) then
        begin
          DebugMsg(SPeepholeOptimization + 'Add/Sub to Preindexed done', p);
          RemoveCurrentP(p);
          Result:=true;
          Exit;
        end;
    end;
  { Peephole pass-1 handler for MUL: merges a mul whose result only feeds a
    following add/sub into a single mla (multiply-accumulate) or mls
    (multiply-subtract) instruction. }
  function TCpuAsmOptimizer.OptPass1MUL(var p: tai): Boolean;
    var
      hp1: tai;
      oldreg: tregister;  { the accumulator register taken from the add }
    begin
      { First try to fold a following "mov reg2,reg1" into this instruction }
      Result := OptPass1DataCheckMov(p);
      {
        Turn
        mul reg0, z,w
        sub/add x, y, reg0
        dealloc reg0
        into
        mls/mla x,z,w,y
      }
      if (taicpu(p).condition = C_None) and
        (taicpu(p).oppostfix = PF_None) and
        (taicpu(p).ops=3) and
        (taicpu(p).oper[0]^.typ = top_reg) and
        (taicpu(p).oper[1]^.typ = top_reg) and
        (taicpu(p).oper[2]^.typ = top_reg) and
        GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
        MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
        { both mul source operands must survive unchanged up to the add/sub }
        (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
        (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
        { mla needs ARMv4+, mls only exists on the listed ARMv6T2/v7 variants }
        (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
         ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
        // CPUs before ARMv6 don't recommend having the same Rd and Rm for MLA.
        // TODO: A workaround would be to swap Rm and Rs
        (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
        { the mul result may appear as either source of a 3-operand add (addition
          commutes) but the other source register must then stay unmodified;
          a 2-operand form reads the destination as implicit first source }
        (((taicpu(hp1).ops=3) and
          (taicpu(hp1).oper[2]^.typ=top_reg) and
          ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
            (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
           ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
            (taicpu(hp1).opcode=A_ADD) and
            (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
         ((taicpu(hp1).ops=2) and
          (taicpu(hp1).oper[1]^.typ=top_reg) and
          MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
        { the intermediate mul result must not be live after the add/sub }
        (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
        begin
          if taicpu(hp1).opcode=A_ADD then
            begin
              taicpu(hp1).opcode:=A_MLA;
              { the accumulator is whichever add source is NOT the mul result }
              if taicpu(hp1).ops=3 then
                begin
                  if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
                    oldreg:=taicpu(hp1).oper[2]^.reg
                  else
                    oldreg:=taicpu(hp1).oper[1]^.reg;
                end
              else
                oldreg:=taicpu(hp1).oper[0]^.reg;
              { build "mla x,z,w,y" from the mul sources plus the accumulator }
              taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
              taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
              taicpu(hp1).loadreg(3,oldreg);
              DebugMsg(SPeepholeOptimization + 'MulAdd2MLA done', p);
            end
          else
            begin
              taicpu(hp1).opcode:=A_MLS;
              { build "mls x,z,w,y"; note: operand 3 must be loaded before
                operand 1 overwrites the slot it is read from }
              taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
              if taicpu(hp1).ops=2 then
                taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
              else
                taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
              taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
              DebugMsg(SPeepholeOptimization + 'MulSub2MLS done', p);
              { extend register allocations over the span of the removed mul }
              AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,UsedRegs);
              AllocRegBetween(taicpu(hp1).oper[2]^.reg,p,hp1,UsedRegs);
              AllocRegBetween(taicpu(hp1).oper[3]^.reg,p,hp1,UsedRegs);
            end;
          taicpu(hp1).ops:=4;
          RemoveCurrentP(p);
          Result := True;
          Exit;
        end
    end;
  { Peephole pass-1 handler for AND: first attempts to fold a trailing
    "mov reg2,reg1" into the and, then runs the generic (inherited) AND
    optimisations. Returns True when either step changed something. }
  function TCpuAsmOptimizer.OptPass1And(var p: tai): Boolean;
    var
      MovFolded: Boolean;
    begin
      { Both steps always run; their order is significant because
        OptPass1DataCheckMov may rewrite p's destination register first. }
      MovFolded := OptPass1DataCheckMov(p);
      if inherited OptPass1And(p) then
        Result := True
      else
        Result := MovFolded;
    end;
  { Shared helper for data-processing instructions:
      op  reg1, ...
      mov reg2, reg1
    becomes
      op  reg2, ...
    when RemoveSuperfluousMove deems the mov redundant.
    Returns True when the mov was folded away. }
  function TCpuAsmOptimizer.OptPass1DataCheckMov(var p: tai): Boolean;
    var
      hp1: tai;
    begin
      Result := False;
      { Only instructions with at least three operands are plain data ops
        writing a separate destination register. }
      if taicpu(p).ops < 3 then
        Exit;
      { Look for the next instruction that uses the destination; if it is a
        superfluous register move, retarget p and drop the mov. }
      if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) then
        Result := RemoveSuperfluousMove(p, hp1, 'DataMov2Data');
    end;
  { Peephole pass-1 handler for CMP against a constant:
    1. removes a conditional mov that re-materialises the compared constant,
    2. folds "cmp reg,#0" into the S suffix of the preceding data instruction. }
  function TCpuAsmOptimizer.OptPass1CMP(var p: tai): Boolean;
    var
      hp1, hp2, hp_last: tai;
      MovRem1, MovRem2: Boolean;  { whether the first/second conditional mov was removed }
    begin
      Result := False;
      { These optimizations can be applied only to the currently enabled operations because
        the other operations do not update all flags and FPC does not track flag usage }
      if (taicpu(p).condition = C_None) and
        (taicpu(p).oper[1]^.typ = top_const) and
        GetNextInstruction(p, hp1) then
        begin
          {
            change
            cmp   reg,const1
            moveq reg,const1
            movne reg,const2
            to
            cmp   reg,const1
            movne reg,const2
          }
          if MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
            (taicpu(hp1).oper[1]^.typ = top_const) and
            GetNextInstruction(hp1, hp2) and
            MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
            (taicpu(hp2).oper[1]^.typ = top_const) then
            begin
              MovRem1 := RemoveRedundantMove(p, hp1, asml);
              MovRem2 := RemoveRedundantMove(p, hp2, asml);
              Result:= MovRem1 or MovRem2;
              { Make sure that hp1 is still the next instruction after p }
              if MovRem1 then
                if MovRem2 then
                  begin
                    { both movs gone: refetch the successor; bail out if none }
                    if not GetNextInstruction(p, hp1) then
                      Exit;
                  end
                else
                  hp1 := hp2;
            end;
          {
            change
            <op> reg,x,y
            cmp reg,#0
            into
            <op>s reg,x,y
          }
          if (taicpu(p).oppostfix = PF_None) and
            (taicpu(p).oper[1]^.val = 0) and
            { be careful here, following instructions could use other flags
              however after a jump fpc never depends on the value of flags }
            { All above instructions set Z and N according to the following
              Z := result = 0;
              N := result[31];
              EQ = Z=1; NE = Z=0;
              MI = N=1; PL = N=0; }
            (MatchInstruction(hp1, A_B, [C_EQ,C_NE,C_MI,C_PL], []) or
             { mov is also possible, but only if there is no shifter operand, it could be an rxx,
               we are too lazy to check if it is rxx or something else }
             (MatchInstruction(hp1, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp1).ops=2))) and
            GetLastInstruction(p, hp_last) and
            MatchInstruction(hp_last, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,
              A_EOR,A_AND,A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
            (
              { mlas is only allowed in arm mode }
              (taicpu(hp_last).opcode<>A_MLA) or
              (current_settings.instructionset<>is_thumb)
            ) and
            { the preceding op must write the very register being compared }
            (taicpu(hp_last).oper[0]^.reg = taicpu(p).oper[0]^.reg) and
            { the flags must die right after the conditional consumer }
            assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp1.Next))) then
            begin
              DebugMsg(SPeepholeOptimization + 'OpCmp2OpS done', hp_last);
              taicpu(hp_last).oppostfix:=PF_S;
              { move flag allocation if possible }
              hp1:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp_last.Next));
              if assigned(hp1) then
                begin
                  asml.Remove(hp1);
                  asml.insertbefore(hp1, hp_last);
                end;
              RemoveCurrentP(p);
              Result:=true;
            end;
        end;
    end;
  { Peephole pass-1 handler for LDR: merges duplicate loads from the same
    reference, pairs two adjacent word loads into one ldrd, strips a
    redundant "and #255" after ldrb, folds post-indexed addressing and
    removes a superfluous trailing mov. }
  function TCpuAsmOptimizer.OptPass1LDR(var p: tai): Boolean;
    var
      hp1: tai;
    begin
      Result := inherited OptPass1LDR(p);
      if Result then
        Exit;
      { change
        ldr reg1,ref
        ldr reg2,ref
        into ...
      }
      if (taicpu(p).oper[1]^.typ = top_ref) and
        (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
        GetNextInstruction(p,hp1) and
        { ldrd is not allowed here }
        MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
        begin
          {
            ...
            ldr reg1,ref
            mov reg2,reg1
          }
          if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
             RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
             { the first load must not clobber the second load's address registers }
             (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
             (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
             (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
            begin
              if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
                begin
                  { identical destination: the second load is a pure duplicate }
                  DebugMsg(SPeepholeOptimization + 'LdrLdr2Ldr done', hp1);
                  asml.remove(hp1);
                  hp1.free;
                end
              else
                begin
                  { different destination: replace the reload by a register move }
                  DebugMsg(SPeepholeOptimization + 'LdrLdr2LdrMov done', hp1);
                  taicpu(hp1).opcode:=A_MOV;
                  taicpu(hp1).oppostfix:=PF_None;
                  taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
                end;
              result := true;
            end
          {
            ...
            ldrd reg1,reg1+1,ref
          }
          else if (GenerateARMCode or GenerateThumb2Code) and
            (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
            { ldrd does not allow any postfixes ... }
            (taicpu(p).oppostfix=PF_None) and
            { ldrd needs an even-numbered first register and its successor }
            not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
            (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
            { ldr ensures that either base or index contain no register, else ldr wouldn't
              use an offset either
            }
            (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
            (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
            { the two loads must cover adjacent words within ldrd's offset range }
            (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
            (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
            AlignedToQWord(taicpu(p).oper[1]^.ref^) then
            begin
              DebugMsg(SPeepholeOptimization + 'LdrLdr2Ldrd done', p);
              taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
              taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
              taicpu(p).ops:=3;
              taicpu(p).oppostfix:=PF_D;
              asml.remove(hp1);
              hp1.free;
              result:=true;
            end;
        end;
      {
        Change
        ldrb dst1, [REF]
        and  dst2, dst1, #255
        into
        ldrb dst2, [ref]
      }
      if not(GenerateThumbCode) and
         (taicpu(p).oppostfix=PF_B) and
         GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
         MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
         (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
         (taicpu(hp1).oper[2]^.typ = top_const) and
         { ldrb already zero-extends, so masking with $FF is a no-op }
         (taicpu(hp1).oper[2]^.val = $FF) and
         not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
         RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
        begin
          DebugMsg(SPeepholeOptimization + 'LdrbAnd2Ldrb done', p);
          taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
          asml.remove(hp1);
          hp1.free;
          result:=true;
        end;
      Result:=LookForPostindexedPattern(taicpu(p)) or Result;
      { Remove superfluous mov after ldr
        changes
        ldr reg1, ref
        mov reg2, reg1
        to
        ldr reg2, ref
        conditions are:
          * no ldrd usage
          * reg1 must be released after mov
          * mov can not contain shifterops
          * ldr+mov have the same conditions
          * mov does not set flags
      }
      if (taicpu(p).oppostfix<>PF_D) and
        GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
        RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
        Result:=true;
    end;
  { Peephole pass-1 handler for STM: collapses the standard
    push-lr / adjust-sp / bl / readjust-sp / pop-pc call frame around a
    single bl into a plain tail-call branch. }
  function TCpuAsmOptimizer.OptPass1STM(var p: tai): Boolean;
    var
      hp1, hp2, hp3, hp4: tai;
    begin
      Result := False;
      {
        change
        stmfd r13!,[r14]
        sub r13,r13,#4
        bl abc
        add r13,r13,#4
        ldmfd r13!,[r15]
        into
        b abc
      }
      { not safe with interworking: popping into pc would need bx semantics }
      if not(ts_thumb_interworking in current_settings.targetswitches) and
        (taicpu(p).condition = C_None) and
        (taicpu(p).oppostfix = PF_FD) and
        { p must be exactly "stmfd sp!,[lr]" }
        (taicpu(p).oper[0]^.typ = top_ref) and
        (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
        (taicpu(p).oper[0]^.ref^.base=NR_NO) and
        (taicpu(p).oper[0]^.ref^.offset=0) and
        (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
        (taicpu(p).oper[1]^.typ = top_regset) and
        (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
        { hp1 must be "sub sp,sp,#const" }
        GetNextInstruction(p, hp1) and
        MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
        (taicpu(hp1).oper[0]^.typ = top_reg) and
        (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
        MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
        (taicpu(hp1).oper[2]^.typ = top_const) and
        { hp2 must be the call itself }
        GetNextInstruction(hp1, hp2) and
        SkipEntryExitMarker(hp2, hp2) and
        MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
        (taicpu(hp2).oper[0]^.typ = top_ref) and
        { hp3 must undo the sub with the same constant }
        GetNextInstruction(hp2, hp3) and
        SkipEntryExitMarker(hp3, hp3) and
        MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
        MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
        MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
        MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
        { hp4 must be "ldmfd sp!,[pc]", i.e. the return }
        GetNextInstruction(hp3, hp4) and
        MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
        MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
        (taicpu(hp4).oper[1]^.typ = top_regset) and
        (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
        begin
          { drop the frame setup/teardown and turn the call into a branch }
          asml.Remove(hp1);
          asml.Remove(hp3);
          asml.Remove(hp4);
          taicpu(hp2).opcode:=A_B;
          hp1.free;
          hp3.free;
          hp4.free;
          RemoveCurrentp(p, hp2);
          DebugMsg(SPeepholeOptimization + 'Bl2B done', p);
          Result := True;
        end;
    end;
  { Peephole pass-1 handler for STR: forwards a stored value to a following
    load from the same reference, pairs two adjacent word stores into one
    strd, and folds post-indexed addressing. }
  function TCpuAsmOptimizer.OptPass1STR(var p: tai): Boolean;
    var
      hp1: tai;
    begin
      Result := inherited OptPass1STR(p);
      if Result then
        Exit;
      { Common conditions }
      if (taicpu(p).oper[1]^.typ = top_ref) and
        (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
        (taicpu(p).oppostfix=PF_None) then
        begin
          { change
            str reg1,ref
            ldr reg2,ref
            into
            str reg1,ref
            mov reg2,reg1
          }
          if (taicpu(p).condition=C_None) and
            GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
            MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
            (taicpu(hp1).oper[1]^.typ=top_ref) and
            (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
            { the stored register and the load's address registers must be
              unchanged between the str and the ldr }
            not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
            ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
            ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
            begin
              if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
                begin
                  { reloading into the same register: the ldr is a no-op }
                  DebugMsg(SPeepholeOptimization + 'StrLdr2StrMov 1 done', hp1);
                  asml.remove(hp1);
                  hp1.free;
                end
              else
                begin
                  { otherwise forward the stored value via a register move }
                  taicpu(hp1).opcode:=A_MOV;
                  taicpu(hp1).oppostfix:=PF_None;
                  taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
                  DebugMsg(SPeepholeOptimization + 'StrLdr2StrMov 2 done', hp1);
                end;
              result := True;
            end
          { change
            str reg1,ref
            str reg2,ref
            into
            strd reg1,reg2,ref
          }
          else if (GenerateARMCode or GenerateThumb2Code) and
            (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
            { strd needs an even-numbered first register and its successor }
            not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
            (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
            AlignedToQWord(taicpu(p).oper[1]^.ref^) and
            GetNextInstruction(p,hp1) and
            MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
            (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
            { str ensures that either base or index contain no register, else ldr wouldn't
              use an offset either
            }
            (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
            (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
            { the two stores must target adjacent words }
            (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) then
            begin
              DebugMsg(SPeepholeOptimization + 'StrStr2Strd done', p);
              taicpu(p).oppostfix:=PF_D;
              taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
              taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
              taicpu(p).ops:=3;
              asml.remove(hp1);
              hp1.free;
              result:=true;
            end;
        end;
      Result:=LookForPostindexedPattern(taicpu(p)) or Result;
    end;
  1050. function TCpuAsmOptimizer.OptPass1MOV(var p: tai): Boolean;
  1051. var
  1052. hp1, hpfar1, hp2: tai;
  1053. i, i2: longint;
  1054. tempop: tasmop;
  1055. dealloc: tai_regalloc;
  1056. begin
  1057. Result := False;
  1058. hp1 := nil;
  1059. { fold
  1060. mov reg1,reg0, shift imm1
  1061. mov reg1,reg1, shift imm2
  1062. }
  1063. if (taicpu(p).ops=3) and
  1064. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1065. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1066. getnextinstruction(p,hp1) and
  1067. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1068. (taicpu(hp1).ops=3) and
  1069. MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
  1070. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1071. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1072. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
  1073. begin
  1074. { fold
  1075. mov reg1,reg0, lsl 16
  1076. mov reg1,reg1, lsr 16
  1077. strh reg1, ...
  1078. dealloc reg1
  1079. to
  1080. strh reg1, ...
  1081. dealloc reg1
  1082. }
  1083. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1084. (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
  1085. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
  1086. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
  1087. getnextinstruction(hp1,hp2) and
  1088. MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
  1089. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
  1090. begin
  1091. TransferUsedRegs(TmpUsedRegs);
  1092. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1093. UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
  1094. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
  1095. begin
  1096. DebugMsg(SPeepholeOptimization + 'Removed superfluous 16 Bit zero extension', hp1);
  1097. taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
  1098. asml.remove(hp1);
  1099. hp1.free;
  1100. RemoveCurrentP(p, hp2);
  1101. Result:=true;
  1102. Exit;
  1103. end;
  1104. end
  1105. { fold
  1106. mov reg1,reg0, shift imm1
  1107. mov reg1,reg1, shift imm2
  1108. to
  1109. mov reg1,reg0, shift imm1+imm2
  1110. }
  1111. else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
  1112. { asr makes no use after a lsr, the asr can be foled into the lsr }
  1113. ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR) ) then
  1114. begin
  1115. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1116. { avoid overflows }
  1117. if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
  1118. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  1119. SM_ROR:
  1120. taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
  1121. SM_ASR:
  1122. taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
  1123. SM_LSR,
  1124. SM_LSL:
  1125. begin
  1126. hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
  1127. InsertLLItem(p.previous, p.next, hp2);
  1128. p.free;
  1129. p:=hp2;
  1130. end;
  1131. else
  1132. internalerror(2008072803);
  1133. end;
  1134. DebugMsg(SPeepholeOptimization + 'ShiftShift2Shift 1 done', p);
  1135. asml.remove(hp1);
  1136. hp1.free;
  1137. hp1 := nil;
  1138. result := true;
  1139. end
  1140. { fold
  1141. mov reg1,reg0, shift imm1
  1142. mov reg1,reg1, shift imm2
  1143. mov reg1,reg1, shift imm3 ...
  1144. mov reg2,reg1, shift imm3 ...
  1145. }
  1146. else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
  1147. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1148. (taicpu(hp2).ops=3) and
  1149. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1150. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
  1151. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1152. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
  1153. begin
  1154. { mov reg1,reg0, lsl imm1
  1155. mov reg1,reg1, lsr/asr imm2
  1156. mov reg2,reg1, lsl imm3 ...
  1157. to
  1158. mov reg1,reg0, lsl imm1
  1159. mov reg2,reg1, lsr/asr imm2-imm3
  1160. if
  1161. imm1>=imm2
  1162. }
  1163. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1164. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1165. (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1166. begin
  1167. if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1168. begin
  1169. if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
  1170. not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1171. begin
  1172. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 1a done', p);
  1173. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1174. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1175. asml.remove(hp1);
  1176. asml.remove(hp2);
  1177. hp1.free;
  1178. hp2.free;
  1179. if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
  1180. begin
  1181. taicpu(p).freeop(1);
  1182. taicpu(p).freeop(2);
  1183. taicpu(p).loadconst(1,0);
  1184. end;
  1185. result := true;
  1186. Exit;
  1187. end;
  1188. end
  1189. else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1190. begin
  1191. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 1b done', p);
  1192. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
  1193. taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1194. asml.remove(hp2);
  1195. hp2.free;
  1196. result := true;
  1197. Exit;
  1198. end;
  1199. end
  1200. { mov reg1,reg0, lsr/asr imm1
  1201. mov reg1,reg1, lsl imm2
  1202. mov reg1,reg1, lsr/asr imm3 ...
  1203. if imm3>=imm1 and imm2>=imm1
  1204. to
  1205. mov reg1,reg0, lsl imm2-imm1
  1206. mov reg1,reg1, lsr/asr imm3 ...
  1207. }
  1208. else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1209. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1210. (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
  1211. (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1212. begin
  1213. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
  1214. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1215. DebugMsg(SPeepholeOptimization + 'ShiftShiftShift2ShiftShift 2 done', p);
  1216. if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
  1217. begin
  1218. taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
  1219. asml.remove(hp1);
  1220. hp1.free;
  1221. end;
  1222. RemoveCurrentp(p);
  1223. result := true;
  1224. Exit;
  1225. end;
  1226. end;
  1227. end;
  1228. { All the optimisations from this point on require GetNextInstructionUsingReg
  1229. to return True }
  1230. while (
  1231. GetNextInstructionUsingReg(p, hpfar1, taicpu(p).oper[0]^.reg) and
  1232. (hpfar1.typ = ait_instruction)
  1233. ) do
  1234. begin
  1235. { Change the common
  1236. mov r0, r0, lsr #xxx
  1237. and r0, r0, #yyy/bic r0, r0, #xxx
  1238. and remove the superfluous and/bic if possible
  1239. This could be extended to handle more cases.
  1240. }
  1241. { Change
  1242. mov rx, ry, lsr/ror #xxx
  1243. uxtb/uxth rz,rx/and rz,rx,0xFF
  1244. dealloc rx
  1245. to
  1246. uxtb/uxth rz,ry,ror #xxx
  1247. }
  1248. if (GenerateThumb2Code) and
  1249. (taicpu(p).ops=3) and
  1250. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1251. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1252. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
  1253. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1254. begin
  1255. if MatchInstruction(hpfar1, A_UXTB, [C_None], [PF_None]) and
  1256. (taicpu(hpfar1).ops = 2) and
  1257. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1258. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1259. begin
  1260. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1261. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1262. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1263. taicpu(hpfar1).ops := 3;
  1264. if not Assigned(hp1) then
  1265. GetNextInstruction(p,hp1);
  1266. RemoveCurrentP(p, hp1);
  1267. result:=true;
  1268. exit;
  1269. end
  1270. else if MatchInstruction(hpfar1, A_UXTH, [C_None], [PF_None]) and
  1271. (taicpu(hpfar1).ops=2) and
  1272. (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
  1273. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1274. begin
  1275. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1276. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1277. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1278. taicpu(hpfar1).ops := 3;
  1279. if not Assigned(hp1) then
  1280. GetNextInstruction(p,hp1);
  1281. RemoveCurrentP(p, hp1);
  1282. result:=true;
  1283. exit;
  1284. end
  1285. else if MatchInstruction(hpfar1, A_AND, [C_None], [PF_None]) and
  1286. (taicpu(hpfar1).ops = 3) and
  1287. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1288. (taicpu(hpfar1).oper[2]^.val = $FF) and
  1289. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1290. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1291. begin
  1292. taicpu(hpfar1).ops := 3;
  1293. taicpu(hpfar1).opcode := A_UXTB;
  1294. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1295. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1296. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1297. if not Assigned(hp1) then
  1298. GetNextInstruction(p,hp1);
  1299. RemoveCurrentP(p, hp1);
  1300. result:=true;
  1301. exit;
  1302. end;
  1303. end;
  1304. { 2-operald mov optimisations }
  1305. if (taicpu(p).ops = 2) then
  1306. begin
  1307. {
  1308. This removes the mul from
  1309. mov rX,0
  1310. ...
  1311. mul ...,rX,...
  1312. }
  1313. if (taicpu(p).oper[1]^.typ = top_const) then
  1314. begin
  1315. (* if false and
  1316. (taicpu(p).oper[1]^.val=0) and
  1317. MatchInstruction(hpfar1, [A_MUL,A_MLA], [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1318. (((taicpu(hpfar1).oper[1]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^)) or
  1319. ((taicpu(hpfar1).oper[2]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[2]^))) then
  1320. begin
  1321. TransferUsedRegs(TmpUsedRegs);
  1322. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1323. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1324. DebugMsg(SPeepholeOptimization + 'MovMUL/MLA2Mov0 done', p);
  1325. if taicpu(hpfar1).opcode=A_MUL then
  1326. taicpu(hpfar1).loadconst(1,0)
  1327. else
  1328. taicpu(hpfar1).loadreg(1,taicpu(hpfar1).oper[3]^.reg);
  1329. taicpu(hpfar1).ops:=2;
  1330. taicpu(hpfar1).opcode:=A_MOV;
  1331. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1332. RemoveCurrentP(p);
  1333. Result:=true;
  1334. exit;
  1335. end
  1336. else*) if (taicpu(p).oper[1]^.val=0) and
  1337. MatchInstruction(hpfar1, A_MLA, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1338. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[3]^) then
  1339. begin
  1340. TransferUsedRegs(TmpUsedRegs);
  1341. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1342. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1343. DebugMsg(SPeepholeOptimization + 'MovMLA2MUL 1 done', p);
  1344. taicpu(hpfar1).ops:=3;
  1345. taicpu(hpfar1).opcode:=A_MUL;
  1346. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1347. begin
  1348. RemoveCurrentP(p);
  1349. Result:=true;
  1350. end;
  1351. exit;
  1352. end
  1353. {
  1354. This changes the very common
  1355. mov r0, #0
  1356. str r0, [...]
  1357. mov r0, #0
  1358. str r0, [...]
  1359. and removes all superfluous mov instructions
  1360. }
  1361. else if (taicpu(hpfar1).opcode=A_STR) then
  1362. begin
  1363. hp1 := hpfar1;
  1364. while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
  1365. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1366. GetNextInstruction(hp1, hp2) and
  1367. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1368. (taicpu(hp2).ops = 2) and
  1369. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
  1370. MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
  1371. begin
  1372. DebugMsg(SPeepholeOptimization + 'MovStrMov done', hp2);
  1373. GetNextInstruction(hp2,hp1);
  1374. asml.remove(hp2);
  1375. hp2.free;
  1376. result:=true;
  1377. if not assigned(hp1) then break;
  1378. end;
  1379. if Result then
  1380. Exit;
  1381. end;
  1382. end;
  1383. {
  1384. This removes the first mov from
  1385. mov rX,...
  1386. mov rX,...
  1387. }
  1388. if taicpu(hpfar1).opcode=A_MOV then
  1389. begin
  1390. hp1 := p;
  1391. while MatchInstruction(hpfar1, A_MOV, [taicpu(hp1).condition], [taicpu(hp1).oppostfix]) and
  1392. (taicpu(hpfar1).ops = 2) and
  1393. MatchOperand(taicpu(hp1).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1394. { don't remove the first mov if the second is a mov rX,rX }
  1395. not(MatchOperand(taicpu(hpfar1).oper[0]^, taicpu(hpfar1).oper[1]^)) do
  1396. begin
  1397. { Defer removing the first p until after the while loop }
  1398. if p <> hp1 then
  1399. begin
  1400. DebugMsg(SPeepholeOptimization + 'MovMov done', hp1);
  1401. asml.remove(hp1);
  1402. hp1.free;
  1403. end;
  1404. hp1:=hpfar1;
  1405. GetNextInstruction(hpfar1,hpfar1);
  1406. result:=true;
  1407. if not assigned(hpfar1) then
  1408. Break;
  1409. end;
  1410. if Result then
  1411. begin
  1412. DebugMsg(SPeepholeOptimization + 'MovMov done', p);
  1413. RemoveCurrentp(p);
  1414. Exit;
  1415. end;
  1416. end;
  1417. if RedundantMovProcess(p,hpfar1) then
  1418. begin
  1419. Result:=true;
  1420. { p might not point at a mov anymore }
  1421. exit;
  1422. end;
  1423. { If hpfar1 is nil after the call to RedundantMovProcess, it is
  1424. because it would have become a dangling pointer, so reinitialise it. }
  1425. if not Assigned(hpfar1) then
  1426. Continue;
  1427. { Fold the very common sequence
  1428. mov regA, regB
  1429. ldr* regA, [regA]
  1430. to
  1431. ldr* regA, [regB]
  1432. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1433. }
  1434. if
  1435. // Make sure that Thumb code doesn't propagate a high register into a reference
  1436. (
  1437. (
  1438. GenerateThumbCode and
  1439. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)
  1440. ) or (not GenerateThumbCode)
  1441. ) and
  1442. (taicpu(p).oper[1]^.typ = top_reg) and
  1443. (taicpu(p).oppostfix = PF_NONE) and
  1444. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1445. (taicpu(hpfar1).oper[1]^.typ = top_ref) and
  1446. { We can change the base register only when the instruction uses AM_OFFSET }
  1447. ((taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1448. ((taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1449. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1450. ) and
  1451. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1452. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1453. begin
  1454. DebugMsg(SPeepholeOptimization + 'MovLdr2Ldr done', hpfar1);
  1455. if (taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1456. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1457. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1458. if taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1459. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1460. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1461. if Assigned(dealloc) then
  1462. begin
  1463. asml.remove(dealloc);
  1464. asml.InsertAfter(dealloc,hpfar1);
  1465. end;
  1466. if (not Assigned(hp1)) or (p=hp1) then
  1467. GetNextInstruction(p, hp1);
  1468. RemoveCurrentP(p, hp1);
  1469. result:=true;
  1470. Exit;
  1471. end
  1472. end
{ 3-operand mov optimisations }
  1474. else if (taicpu(p).ops = 3) then
  1475. begin
  1476. if (taicpu(p).oper[2]^.typ = top_shifterop) and
  1477. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1478. (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
  1479. (taicpu(hpfar1).ops>=1) and
  1480. (taicpu(hpfar1).oper[0]^.typ=top_reg) and
  1481. (not RegModifiedBetween(taicpu(hpfar1).oper[0]^.reg, p, hpfar1)) and
  1482. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1483. begin
  1484. if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24 ) and
  1485. MatchInstruction(hpfar1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1486. (taicpu(hpfar1).ops=3) and
  1487. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1488. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1489. { Check if the AND actually would only mask out bits being already zero because of the shift
  1490. }
  1491. ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hpfar1).oper[2]^.val) =
  1492. ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
  1493. begin
  1494. DebugMsg(SPeepholeOptimization + 'LsrAnd2Lsr done', hpfar1);
  1495. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1496. asml.remove(hpfar1);
  1497. hpfar1.free;
  1498. result:=true;
  1499. Exit;
  1500. end
  1501. else if MatchInstruction(hpfar1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1502. (taicpu(hpfar1).ops=3) and
  1503. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1504. (taicpu(hpfar1).oper[2]^.typ = top_const) and
{ Check if the BIC actually would only mask out bits being already zero because of the shift }
  1506. (taicpu(hpfar1).oper[2]^.val<>0) and
  1507. (BsfDWord(taicpu(hpfar1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1508. begin
  1509. DebugMsg(SPeepholeOptimization + 'LsrBic2Lsr done', hpfar1);
  1510. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1511. asml.remove(hpfar1);
  1512. hpfar1.free;
  1513. result:=true;
  1514. Exit;
  1515. end;
  1516. end;
  1517. { This folds shifterops into following instructions
  1518. mov r0, r1, lsl #8
  1519. add r2, r3, r0
  1520. to
  1521. add r2, r3, r1, lsl #8
  1522. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1523. }
  1524. if (taicpu(p).oper[1]^.typ = top_reg) and
  1525. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1526. (taicpu(p).oppostfix = PF_NONE) and
  1527. MatchInstruction(hpfar1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1528. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1529. A_CMP, A_CMN],
  1530. [taicpu(p).condition], [PF_None]) and
  1531. (not ((GenerateThumb2Code) and
  1532. (taicpu(hpfar1).opcode in [A_SBC]) and
  1533. (((taicpu(hpfar1).ops=3) and
  1534. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^.reg)) or
  1535. ((taicpu(hpfar1).ops=2) and
  1536. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^.reg))))) and
  1537. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) and
  1538. (taicpu(hpfar1).ops >= 2) and
  1539. {Currently we can't fold into another shifterop}
  1540. (taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^.typ = top_reg) and
  1541. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1542. NR_DEFAULTFLAGS for modification}
  1543. (
  1544. {Everything is fine if we don't use RRX}
  1545. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1546. (
  1547. {If it is RRX, then check if we're just accessing the next instruction}
  1548. Assigned(hp1) and
  1549. (hpfar1 = hp1)
  1550. )
  1551. ) and
  1552. { reg1 might not be modified inbetween }
  1553. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1554. { The shifterop can contain a register, might not be modified}
  1555. (
  1556. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1557. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hpfar1))
  1558. ) and
  1559. (
  1560. {Only ONE of the two src operands is allowed to match}
  1561. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-2]^) xor
  1562. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^)
  1563. ) then
  1564. begin
  1565. if taicpu(hpfar1).opcode in [A_TST, A_TEQ, A_CMN] then
  1566. I2:=0
  1567. else
  1568. I2:=1;
  1569. for I:=I2 to taicpu(hpfar1).ops-1 do
  1570. if MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[I]^.reg) then
  1571. begin
  1572. { If the parameter matched on the second op from the RIGHT
  1573. we have to switch the parameters, this will not happen for CMP
  1574. were we're only evaluating the most right parameter
  1575. }
  1576. if I <> taicpu(hpfar1).ops-1 then
  1577. begin
  1578. {The SUB operators need to be changed when we swap parameters}
  1579. case taicpu(hpfar1).opcode of
  1580. A_SUB: tempop:=A_RSB;
  1581. A_SBC: tempop:=A_RSC;
  1582. A_RSB: tempop:=A_SUB;
  1583. A_RSC: tempop:=A_SBC;
  1584. else tempop:=taicpu(hpfar1).opcode;
  1585. end;
  1586. if taicpu(hpfar1).ops = 3 then
  1587. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1588. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[2]^.reg,
  1589. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1590. else
  1591. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1592. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1593. taicpu(p).oper[2]^.shifterop^);
  1594. end
  1595. else
  1596. if taicpu(hpfar1).ops = 3 then
  1597. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1598. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[1]^.reg,
  1599. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1600. else
  1601. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1602. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1603. taicpu(p).oper[2]^.shifterop^);
  1604. if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
  1605. AllocRegBetween(taicpu(p).oper[2]^.shifterop^.rs,p,hpfar1,UsedRegs);
  1606. AllocRegBetween(taicpu(p).oper[1]^.reg,p,hpfar1,UsedRegs);
  1607. asml.insertbefore(hp2, hpfar1);
  1608. asml.remove(hpfar1);
  1609. hpfar1.free;
  1610. DebugMsg(SPeepholeOptimization + 'FoldShiftProcess done', hp2);
  1611. if not Assigned(hp1) then
  1612. GetNextInstruction(p, hp1)
  1613. else if hp1 = hpfar1 then
  1614. { If hp1 = hpfar1, then it's a dangling pointer }
  1615. hp1 := hp2;
  1616. RemoveCurrentP(p, hp1);
  1617. Result:=true;
  1618. Exit;
  1619. end;
  1620. end;
  1621. {
  1622. Fold
  1623. mov r1, r1, lsl #2
  1624. ldr/ldrb r0, [r0, r1]
  1625. to
  1626. ldr/ldrb r0, [r0, r1, lsl #2]
  1627. XXX: This still needs some work, as we quite often encounter something like
  1628. mov r1, r2, lsl #2
  1629. add r2, r3, #imm
  1630. ldr r0, [r2, r1]
  1631. which can't be folded because r2 is overwritten between the shift and the ldr.
  1632. We could try to shuffle the registers around and fold it into.
  1633. add r1, r3, #imm
  1634. ldr r0, [r1, r2, lsl #2]
  1635. }
  1636. if (not(GenerateThumbCode)) and
  1637. { thumb2 allows only lsl #0..#3 }
  1638. (not(GenerateThumb2Code) or
  1639. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1640. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1641. )
  1642. ) and
  1643. (taicpu(p).oper[1]^.typ = top_reg) and
  1644. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1645. { RRX is tough to handle, because it requires tracking the C-Flag,
  1646. it is also extremly unlikely to be emitted this way}
  1647. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1648. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1649. (taicpu(p).oppostfix = PF_NONE) and
  1650. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1651. (MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1652. (GenerateThumb2Code and
  1653. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1654. ) and
  1655. (
  1656. {If this is address by offset, one of the two registers can be used}
  1657. ((taicpu(hpfar1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1658. (
  1659. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1660. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1661. )
  1662. ) or
  1663. {For post and preindexed only the index register can be used}
  1664. ((taicpu(hpfar1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1665. (
  1666. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1667. (taicpu(hpfar1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1668. ) and
  1669. (not GenerateThumb2Code)
  1670. )
  1671. ) and
  1672. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1673. (taicpu(hpfar1).oper[1]^.ref^.index<>NR_NO) and
  1674. (taicpu(hpfar1).oper[1]^.ref^.base<>NR_NO) and
  1675. { Only fold if there isn't another shifterop already, and offset is zero. }
  1676. (taicpu(hpfar1).oper[1]^.ref^.offset = 0) and
  1677. (taicpu(hpfar1).oper[1]^.ref^.shiftmode = SM_None) and
  1678. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1679. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1680. begin
  1681. { If the register we want to do the shift for resides in base, we need to swap that}
  1682. if (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1683. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(hpfar1).oper[1]^.ref^.index;
  1684. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1685. taicpu(hpfar1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1686. taicpu(hpfar1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1687. DebugMsg(SPeepholeOptimization + 'FoldShiftLdrStr done', hpfar1);
  1688. RemoveCurrentP(p);
  1689. Result:=true;
  1690. Exit;
  1691. end;
  1692. end;
  1693. {
  1694. Often we see shifts and then a superfluous mov to another register
  1695. In the future this might be handled in RedundantMovProcess when it uses RegisterTracking
  1696. }
  1697. if RemoveSuperfluousMove(p, hpfar1, 'MovMov2Mov') then
  1698. Result:=true;
  1699. Exit;
  1700. end;
  1701. end;
function TCpuAsmOptimizer.OptPass1MVN(var p: tai): Boolean;
  var
    hp1: tai;
  begin
    {
      change
      mvn reg2,reg1
      and reg3,reg4,reg2
      dealloc reg2
      to
      bic reg3,reg4,reg1

      AND with a bit-inverted register is exactly what BIC computes; the
      rewrite is valid because reg2 dies at the AND and reg1 is not
      modified in between (both checked below).
    }
    Result := False;
    { the mvn source must be a plain register (not a shifter operand) }
    if (taicpu(p).oper[1]^.typ = top_reg) and
      { find the next instruction that uses the mvn destination }
      GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
      MatchInstruction(hp1,A_AND,[],[]) and
      { the AND may consume the mvn result either as one of the two source
        operands of the 3-operand form, or as the source of the 2-operand form }
      (((taicpu(hp1).ops=3) and
        (taicpu(hp1).oper[2]^.typ=top_reg) and
        (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
         MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
       ((taicpu(hp1).ops=2) and
        (taicpu(hp1).oper[1]^.typ=top_reg) and
        MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
      { the mvn destination must be deallocated right after the AND,
        i.e. it is dead afterwards }
      assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
      { reg1 might not be modified inbetween }
      not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
      begin
        DebugMsg(SPeepholeOptimization + 'MvnAnd2Bic done', p);
        taicpu(hp1).opcode:=A_BIC;
        if taicpu(hp1).ops=3 then
          begin
            { BIC only inverts its last operand, so if the mvn'ed register
              sits in position 1 it has to be moved to position 2 }
            if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
              taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
            taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
          end
        else
          taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
        { the mvn itself is now superfluous }
        RemoveCurrentp(p);
        Result := True;
        Exit;
      end;
  end;
  1744. function TCpuAsmOptimizer.OptPass1VMov(var p: tai): Boolean;
  1745. var
  1746. hp1: tai;
  1747. begin
  1748. {
  1749. change
  1750. vmov reg0,reg1,reg2
  1751. vmov reg1,reg2,reg0
  1752. into
  1753. vmov reg0,reg1,reg2
  1754. can be applied regardless if reg0 or reg2 is the vfp register
  1755. }
  1756. Result := False;
  1757. if (taicpu(p).ops = 3) then
  1758. while GetNextInstruction(p, hp1) and
  1759. MatchInstruction(hp1, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1760. (taicpu(hp1).ops = 3) and
  1761. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[2]^) and
  1762. MatchOperand(taicpu(p).oper[1]^, taicpu(hp1).oper[0]^) and
  1763. MatchOperand(taicpu(p).oper[2]^, taicpu(hp1).oper[1]^) do
  1764. begin
  1765. asml.Remove(hp1);
  1766. hp1.free;
  1767. DebugMsg(SPeepholeOptimization + 'VMovVMov2VMov done', p);
  1768. { Can we do it again? }
  1769. end;
  1770. end;
  1771. function TCpuAsmOptimizer.OptPass1VOp(var p: tai): Boolean;
  1772. var
  1773. hp1: tai;
  1774. begin
  1775. Result := GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1776. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp');
  1777. end;
  1778. function TCpuAsmOptimizer.OptPass1Push(var p: tai): Boolean;
  1779. var
  1780. hp1: tai;
  1781. begin
  1782. Result:=false;
  1783. if (taicpu(p).oper[0]^.regset^=[RS_R14]) and
  1784. GetNextInstruction(p,hp1) and
  1785. MatchInstruction(hp1,A_POP,[C_None],[PF_None]) and
  1786. (taicpu(hp1).oper[0]^.regset^=[RS_R15]) then
  1787. begin
  1788. if not(CPUARM_HAS_BX in cpu_capabilities[current_settings.cputype]) then
  1789. begin
  1790. DebugMsg('Peephole Optimization: PushPop2Mov done', p);
  1791. taicpu(p).ops:=2;
  1792. taicpu(p).loadreg(1, NR_R14);
  1793. taicpu(p).loadreg(0, NR_R15);
  1794. taicpu(p).opcode:=A_MOV;
  1795. end
  1796. else
  1797. begin
  1798. DebugMsg('Peephole Optimization: PushPop2Bx done', p);
  1799. taicpu(p).loadreg(0, NR_R14);
  1800. taicpu(p).opcode:=A_BX;
  1801. end;
  1802. RemoveInstruction(hp1);
  1803. Result:=true;
  1804. Exit;
  1805. end;
  1806. end;
  1807. function TCpuAsmOptimizer.OptPass2Bcc(var p: tai): Boolean;
  1808. var
  1809. hp1,hp2,hp3,after_p: tai;
  1810. l : longint;
  1811. WasLast: boolean;
  1812. Label_X, Label_Y: TASmLabel;
  1813. procedure ConvertInstructins(this_hp: tai; newcond: tasmcond);
  1814. var
  1815. next_hp: tai;
  1816. begin
  1817. repeat
  1818. if this_hp.typ=ait_instruction then
  1819. taicpu(this_hp).condition := newcond;
  1820. GetNextInstruction(this_hp, next_hp);
  1821. if MustBeLast(this_hp) then
  1822. Break;
  1823. this_hp := next_hp
  1824. until not(assigned(this_hp)) or
  1825. not(CanBeCond(this_hp)) or
  1826. ((hp1.typ = ait_instruction) and (taicpu(hp1).opcode = A_B)) or
  1827. (this_hp.typ = ait_label);
  1828. end;
  1829. begin
  1830. Result := False;
  1831. if (taicpu(p).condition<>C_None) and
  1832. not(GenerateThumbCode) then
  1833. begin
  1834. { check for
  1835. Bxx xxx
  1836. <several instructions>
  1837. xxx:
  1838. }
  1839. Label_X := TAsmLabel(taicpu(p).oper[0]^.ref^.symbol);
  1840. l:=0;
  1841. WasLast:=False;
  1842. GetNextInstruction(p, hp1);
  1843. after_p := hp1;
  1844. while assigned(hp1) and
  1845. (l<=4) and
  1846. CanBeCond(hp1) and
  1847. { stop on labels }
  1848. not(hp1.typ=ait_label) and
  1849. { avoid that we cannot recognize the case BccB2Cond }
  1850. not((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B)) do
  1851. begin
  1852. inc(l);
  1853. if MustBeLast(hp1) then
  1854. begin
  1855. WasLast:=True;
  1856. GetNextInstruction(hp1,hp1);
  1857. break;
  1858. end
  1859. else
  1860. GetNextInstruction(hp1,hp1);
  1861. end;
  1862. if assigned(hp1) then
  1863. begin
  1864. if FindLabel(Label_X, hp1) then
  1865. begin
  1866. if (l<=4) and (l>0) then
  1867. begin
  1868. ConvertInstructins(after_p, inverse_cond(taicpu(p).condition));
  1869. DebugMsg(SPeepholeOptimization + 'Bcc2Cond done', p);
  1870. { wait with removing else GetNextInstruction could
  1871. ignore the label if it was the only usage in the
  1872. jump moved away }
  1873. Label_X.decrefs;
  1874. RemoveCurrentP(p, after_p);
  1875. Result := True;
  1876. Exit;
  1877. end;
  1878. end
  1879. else
  1880. { do not perform further optimizations if there is an instruction
  1881. in block #1 which cannot be optimized.
  1882. }
  1883. if not WasLast then
  1884. begin
  1885. { check further for
  1886. Bcc xxx
  1887. <several instructions 1>
  1888. B yyy
  1889. xxx:
  1890. <several instructions 2>
  1891. yyy:
  1892. }
  1893. { hp2 points to jmp yyy }
  1894. hp2:=hp1;
  1895. { skip hp2 to xxx }
  1896. if assigned(hp2) and
  1897. (l<=3) and
  1898. (hp2.typ=ait_instruction) and
  1899. (taicpu(hp2).is_jmp) and
  1900. (taicpu(hp2).condition=C_None) and
  1901. GetNextInstruction(hp2, hp1) and
  1902. { real label and jump, no further references to the
  1903. label are allowed }
  1904. (Label_X.getrefs = 1) and
  1905. FindLabel(Label_X, hp1) then
  1906. begin
  1907. Label_Y := TAsmLabel(taicpu(hp2).oper[0]^.ref^.symbol);
  1908. l:=0;
  1909. { skip hp1 and hp3 to <several moves 2> }
  1910. GetNextInstruction(hp1, hp1);
  1911. hp3 := hp1;
  1912. while assigned(hp1) and
  1913. CanBeCond(hp1) and
  1914. (l<=3) do
  1915. begin
  1916. inc(l);
  1917. if MustBeLast(hp1) then
  1918. begin
  1919. GetNextInstruction(hp1, hp1);
  1920. break;
  1921. end
  1922. else
  1923. GetNextInstruction(hp1, hp1);
  1924. end;
  1925. { hp1 points to yyy: }
  1926. if assigned(hp1) and
  1927. FindLabel(Label_Y, hp1) then
  1928. begin
  1929. ConvertInstructins(after_p, inverse_cond(taicpu(p).condition));
  1930. ConvertInstructins(hp3, taicpu(p).condition);
  1931. DebugMsg(SPeepholeOptimization + 'BccB2Cond done', after_p);
  1932. { remove B }
  1933. Label_Y.decrefs;
  1934. RemoveInstruction(hp2);
  1935. { remove Bcc }
  1936. Label_X.decrefs;
  1937. RemoveCurrentP(p, after_p);
  1938. Result := True;
  1939. Exit;
  1940. end;
  1941. end;
  1942. end;
  1943. end;
  1944. end;
  1945. end;
function TCpuAsmOptimizer.OptPass2STR(var p: tai): Boolean;
  var
    hp1: tai;
    Postfix: TOpPostfix;
    OpcodeStr: shortstring;
  begin
    Result := False;
    { Try to merge two STRs into an STM instruction }
    { Only plain ARM word stores of integer registers through a single
      base-or-index register with offset addressing are candidates. }
    if not(GenerateThumbCode) and (taicpu(p).oper[1]^.typ = top_ref) and
      (taicpu(p).oper[1]^.ref^.addressmode = AM_OFFSET) and
      (
        (taicpu(p).oper[1]^.ref^.base = NR_NO) or
        (taicpu(p).oper[1]^.ref^.index = NR_NO)
      ) and
      (taicpu(p).oppostfix = PF_None) and
      (getregtype(taicpu(p).oper[0]^.reg) = R_INTREGISTER) then
      begin
        hp1 := p;
        { scan forward over following STR instructions for one storing the
          adjacent word through the same register }
        while GetNextInstruction(hp1, hp1) and (hp1.typ = ait_instruction) and
          (taicpu(hp1).opcode = A_STR) do
          if (taicpu(hp1).condition = taicpu(p).condition) and
            (taicpu(hp1).oppostfix = PF_None) and
            (getregtype(taicpu(hp1).oper[0]^.reg) = R_INTREGISTER) and
            (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
            (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[1]^.ref^.base) and
            (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[1]^.ref^.index) and
            (
              { one store is at offset 0, the other at +/-4; which register
                is the higher-numbered one determines the STM direction }
              (
                (taicpu(p).oper[1]^.ref^.offset = 0) and
                (getsupreg(taicpu(hp1).oper[0]^.reg) > getsupreg(taicpu(p).oper[0]^.reg)) and
                (abs(taicpu(hp1).oper[1]^.ref^.offset) = 4)
              ) or (
                (taicpu(hp1).oper[1]^.ref^.offset = 0) and
                (getsupreg(taicpu(hp1).oper[0]^.reg) < getsupreg(taicpu(p).oper[0]^.reg)) and
                (abs(taicpu(p).oper[1]^.ref^.offset) = 4)
              )
            ) then
            begin
              { if register order and address order disagree, store with
                decreasing addresses (STMDA), otherwise plain STM }
              if (getsupreg(taicpu(hp1).oper[0]^.reg) < getsupreg(taicpu(p).oper[0]^.reg)) xor
                (taicpu(hp1).oper[1]^.ref^.offset < taicpu(p).oper[1]^.ref^.offset) then
                begin
                  Postfix := PF_DA;
                  OpcodeStr := 'DA';
                end
              else
                begin
                  Postfix := PF_None;
                  OpcodeStr := '';
                end;
              { normalise hp1's reference to a bare register with no offset;
                the register is moved into the index slot — presumably the
                form the STM writer expects (TODO confirm) }
              taicpu(hp1).oper[1]^.ref^.offset := 0;
              if taicpu(hp1).oper[1]^.ref^.index = NR_NO then
                begin
                  taicpu(hp1).oper[1]^.ref^.index := taicpu(hp1).oper[1]^.ref^.base;
                  taicpu(hp1).oper[1]^.ref^.base := NR_NO;
                end;
              { turn p into the STM with both registers and drop the second store }
              taicpu(p).opcode := A_STM;
              taicpu(p).loadregset(1, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg), getsupreg(taicpu(hp1).oper[0]^.reg)]);
              taicpu(p).loadref(0, taicpu(hp1).oper[1]^.ref^);
              taicpu(p).oppostfix := Postfix;
              RemoveInstruction(hp1);
              DebugMsg(SPeepholeOptimization + 'Merging stores: STR/STR -> STM' + OpcodeStr, p);
              Result := True;
              Exit;
            end;
      end;
  end;
  2012. function TCpuAsmOptimizer.OptPass2STM(var p: tai): Boolean;
  2013. var
  2014. hp1: tai;
  2015. CorrectOffset:ASizeInt;
  2016. i, LastReg: TSuperRegister;
  2017. Postfix: TOpPostfix;
  2018. OpcodeStr: shortstring;
  2019. begin
  2020. Result := False;
  2021. { See if STM/STR can be merged into a single STM }
  2022. if (taicpu(p).oper[0]^.ref^.addressmode = AM_OFFSET) then
  2023. begin
  2024. CorrectOffset := 0;
  2025. LastReg := RS_NO;
  2026. for i in taicpu(p).oper[1]^.regset^ do
  2027. begin
  2028. LastReg := i;
  2029. Inc(CorrectOffset, 4);
  2030. end;
  2031. { This while loop effectively doea a Selection Sort on any STR
  2032. instructions that follow }
  2033. hp1 := p;
  2034. while (LastReg < maxcpuregister) and
  2035. GetNextInstruction(hp1, hp1) and (hp1.typ = ait_instruction) and
  2036. (taicpu(hp1).opcode = A_STR) do
  2037. if (taicpu(hp1).condition = taicpu(p).condition) and
  2038. (taicpu(hp1).oppostfix = PF_None) and
  2039. (getregtype(taicpu(hp1).oper[0]^.reg) = R_INTREGISTER) and
  2040. (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  2041. (
  2042. (
  2043. (taicpu(p).oper[1]^.ref^.base = NR_NO) and
  2044. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.ref^.index)
  2045. ) or (
  2046. (taicpu(p).oper[1]^.ref^.index = NR_NO) and
  2047. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.ref^.base)
  2048. )
  2049. ) and
  2050. { Next register must be later in the set }
  2051. (getsupreg(taicpu(hp1).oper[0]^.reg) > LastReg) and
  2052. (
  2053. (
  2054. (taicpu(p).oppostfix = PF_None) and
  2055. (taicpu(hp1).oper[1]^.ref^.offset = CorrectOffset)
  2056. ) or (
  2057. (taicpu(p).oppostfix = PF_DA) and
  2058. (taicpu(hp1).oper[1]^.ref^.offset = -CorrectOffset)
  2059. )
  2060. ) then
  2061. begin
  2062. { Increment the reference values ready for the next STR instruction to find }
  2063. LastReg := getsupreg(taicpu(hp1).oper[0]^.reg);
  2064. Inc(CorrectOffset, 4);
  2065. if (taicpu(p).oppostfix = PF_DA) then
  2066. OpcodeStr := 'DA'
  2067. else
  2068. OpcodeStr := '';
  2069. Include(taicpu(p).oper[1]^.regset^, LastReg);
  2070. DebugMsg(SPeepholeOptimization + 'Merging stores: STM' + OpcodeStr + '/STR -> STM' + OpcodeStr, hp1);
  2071. RemoveInstruction(hp1);
  2072. Result := True;
  2073. { See if we can find another one to merge }
  2074. hp1 := p;
  2075. Continue;
  2076. end;
  2077. end;
  2078. end;
  2079. function TCpuAsmOptimizer.PrePeepHoleOptsCpu(var p: tai): Boolean;
  2080. begin
  2081. result := false;
  2082. if p.typ=ait_instruction then
  2083. begin
  2084. case taicpu(p).opcode of
  2085. A_SBFX,
  2086. A_UBFX:
  2087. Result:=OptPreSBFXUBFX(p);
  2088. else
  2089. ;
  2090. end;
  2091. end;
  2092. end;
  2093. function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2094. begin
  2095. result := false;
  2096. if p.typ = ait_instruction then
  2097. begin
  2098. case taicpu(p).opcode of
  2099. A_CMP:
  2100. Result := OptPass1CMP(p);
  2101. A_STR:
  2102. Result := OptPass1STR(p);
  2103. A_LDR:
  2104. Result := OptPass1LDR(p);
  2105. A_MOV:
  2106. Result := OptPass1MOV(p);
  2107. A_AND:
  2108. Result := OptPass1And(p);
  2109. A_ADD,
  2110. A_SUB:
  2111. Result := OptPass1ADDSUB(p);
  2112. A_MUL:
  2113. REsult := OptPass1MUL(p);
  2114. A_ADC,
  2115. A_RSB,
  2116. A_RSC,
  2117. A_SBC,
  2118. A_BIC,
  2119. A_EOR,
  2120. A_ORR,
  2121. A_MLA,
  2122. A_MLS,
  2123. A_QADD,A_QADD16,A_QADD8,
  2124. A_QSUB,A_QSUB16,A_QSUB8,
  2125. A_QDADD,A_QDSUB,A_QASX,A_QSAX,
  2126. A_SHADD16,A_SHADD8,A_UHADD16,A_UHADD8,
  2127. A_SHSUB16,A_SHSUB8,A_UHSUB16,A_UHSUB8,
  2128. A_PKHTB,A_PKHBT,
  2129. A_SMUAD,A_SMUSD:
  2130. Result := OptPass1DataCheckMov(p);
  2131. {$ifdef dummy}
  2132. A_MVN:
  2133. Result := OPtPass1MVN(p);
  2134. {$endif dummy}
  2135. A_UXTB:
  2136. Result := OptPass1UXTB(p);
  2137. A_UXTH:
  2138. Result := OptPass1UXTH(p);
  2139. A_SXTB:
  2140. Result := OptPass1SXTB(p);
  2141. A_SXTH:
  2142. Result := OptPass1SXTH(p);
  2143. A_STM:
  2144. Result := OptPass1STM(p);
  2145. A_VMOV:
  2146. Result := OptPass1VMov(p);
  2147. A_VLDR,
  2148. A_VADD,
  2149. A_VMUL,
  2150. A_VDIV,
  2151. A_VSUB,
  2152. A_VSQRT,
  2153. A_VNEG,
  2154. A_VCVT,
  2155. A_VABS:
  2156. Result := OptPass1VOp(p);
  2157. A_PUSH:
  2158. Result := OptPass1Push(p);
  2159. else
  2160. ;
  2161. end;
  2162. end;
  2163. end;
  2164. function TCpuAsmOptimizer.PeepHoleOptPass2Cpu(var p: tai): boolean;
  2165. begin
  2166. result := False;
  2167. if p.typ = ait_instruction then
  2168. begin
  2169. case taicpu(p).opcode of
  2170. A_B:
  2171. Result := OptPass2Bcc(p);
  2172. A_STM:
  2173. Result := OptPass2STM(p);
  2174. A_STR:
  2175. Result := OptPass2STR(p);
  2176. else
  2177. ;
  2178. end;
  2179. end;
  2180. end;
  2181. { instructions modifying the CPSR can be only the last instruction }
  2182. function MustBeLast(p : tai) : boolean;
  2183. begin
  2184. Result:=(p.typ=ait_instruction) and
  2185. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  2186. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  2187. (taicpu(p).oppostfix=PF_S));
  2188. end;
  2189. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  2190. begin
  2191. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  2192. Result:=true
  2193. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  2194. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  2195. Result:=true
  2196. else
  2197. Result:=inherited RegInInstruction(Reg, p1);
  2198. end;
const
  { set of opcodes which might or do write to memory; used by the scheduler
    below to avoid moving a load across a potential store }
  { TODO : extend armins.dat to contain r/w info }
  opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
    A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
  2204. { adjust the register live information when swapping the two instructions p and hp1,
  2205. they must follow one after the other }
  2206. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
  2207. procedure CheckLiveEnd(reg : tregister);
  2208. var
  2209. supreg : TSuperRegister;
  2210. regtype : TRegisterType;
  2211. begin
  2212. if reg=NR_NO then
  2213. exit;
  2214. regtype:=getregtype(reg);
  2215. supreg:=getsupreg(reg);
  2216. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_end[supreg]=hp1) and
  2217. RegInInstruction(reg,p) then
  2218. cg.rg[regtype].live_end[supreg]:=p;
  2219. end;
  2220. procedure CheckLiveStart(reg : TRegister);
  2221. var
  2222. supreg : TSuperRegister;
  2223. regtype : TRegisterType;
  2224. begin
  2225. if reg=NR_NO then
  2226. exit;
  2227. regtype:=getregtype(reg);
  2228. supreg:=getsupreg(reg);
  2229. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_start[supreg]=p) and
  2230. RegInInstruction(reg,hp1) then
  2231. cg.rg[regtype].live_start[supreg]:=hp1;
  2232. end;
  2233. var
  2234. i : longint;
  2235. r : TSuperRegister;
  2236. begin
  2237. { assumption: p is directly followed by hp1 }
  2238. { if live of any reg used by p starts at p and hp1 uses this register then
  2239. set live start to hp1 }
  2240. for i:=0 to p.ops-1 do
  2241. case p.oper[i]^.typ of
  2242. Top_Reg:
  2243. CheckLiveStart(p.oper[i]^.reg);
  2244. Top_Ref:
  2245. begin
  2246. CheckLiveStart(p.oper[i]^.ref^.base);
  2247. CheckLiveStart(p.oper[i]^.ref^.index);
  2248. end;
  2249. Top_Shifterop:
  2250. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2251. Top_RegSet:
  2252. for r:=RS_R0 to RS_R15 do
  2253. if r in p.oper[i]^.regset^ then
  2254. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2255. else
  2256. ;
  2257. end;
  2258. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2259. set live end to p }
  2260. for i:=0 to hp1.ops-1 do
  2261. case hp1.oper[i]^.typ of
  2262. Top_Reg:
  2263. CheckLiveEnd(hp1.oper[i]^.reg);
  2264. Top_Ref:
  2265. begin
  2266. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2267. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2268. end;
  2269. Top_Shifterop:
  2270. CheckLiveStart(hp1.oper[i]^.shifterop^.rs);
  2271. Top_RegSet:
  2272. for r:=RS_R0 to RS_R15 do
  2273. if r in hp1.oper[i]^.regset^ then
  2274. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2275. else
  2276. ;
  2277. end;
  2278. end;
    { Pre-register-allocation scheduler: tries to move a load (hp1) in front of
      the directly preceding instruction (p) to hide load latency, when the
      swap is provably safe (no memory aliasing, no register dependencies,
      same condition). Returns true unconditionally; the list is modified in
      place. }
    function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
    { TODO : schedule also forward }
    { TODO : schedule distance > 1 }

      { returns true if p might be a load of a pc relative tls offset }
      function PossibleTLSLoad(const p: tai) : boolean;
        begin
          Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
            (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
            (taicpu(p).oper[1]^.ref^.index=NR_PC)));
        end;

      var
        hp1,hp2,hp3,hp4,hp5,insertpos : tai;
        list : TAsmList;
      begin
        result:=true;
        { temporary holding list for p and its associated regalloc/label tai's
          while they are moved past hp1 }
        list:=TAsmList.create;
        p:=BlockStart;
        while p<>BlockEnd Do
          begin
            { candidate pattern: p (any instruction) directly followed by a
              plain integer load hp1, whose result is consumed by the next
              instruction hp2 but not by p itself }
            if (p.typ=ait_instruction) and
              GetNextInstruction(p,hp1) and
              (hp1.typ=ait_instruction) and
              (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
              (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
              { for now we don't reschedule if the previous instruction changes potentially a memory location }
              ( (not(taicpu(p).opcode in opcode_could_mem_write) and
                 not(RegModifiedByInstruction(NR_PC,p))
                ) or
                ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
                 ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
                  (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
                  (taicpu(hp1).oper[1]^.ref^.offset=0)
                  )
                 ) or
                 { try to prove that the memory accesses don't overlapp }
                 ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
                  (taicpu(p).oper[1]^.typ = top_ref) and
                  (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
                  (taicpu(p).oppostfix=PF_None) and
                  (taicpu(hp1).oppostfix=PF_None) and
                  (taicpu(p).oper[1]^.ref^.index=NR_NO) and
                  (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
                  { get operand sizes and check if the offset distance is large enough to ensure no overlapp }
                  (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
                 )
                )
              ) and
              GetNextInstruction(hp1,hp2) and
              (hp2.typ=ait_instruction) and
              { loaded register used by next instruction?
                if we ever support labels (they could be skipped in theory) here, the gnu2 tls general-dynamic code could get broken (the ldr before
                the bl may not be scheduled away from the bl) and it needs to be taken care of this case
              }
              (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
              { loaded register not used by previous instruction? }
              not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
              { same condition? }
              (taicpu(p).condition=taicpu(hp1).condition) and
              { first instruction might not change the register used as base }
              ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
               not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
              ) and
              { first instruction might not change the register used as index }
              ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
               not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
              ) and
              { if we modify the basereg AND the first instruction used that reg, we can not schedule }
              ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
               not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
              not(PossibleTLSLoad(p)) and
              not(PossibleTLSLoad(hp1)) then
              begin
                hp3:=tai(p.Previous);
                hp5:=tai(p.next);
                asml.Remove(p);
                { if there is a reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
                  associated with p, move it together with p }

                { before the instruction? }
                { find reg allocs,deallocs and PIC labels }
                while assigned(hp3) and (hp3.typ<>ait_instruction) do
                  begin
                    if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
                      RegInInstruction(tai_regalloc(hp3).reg,p) )
                      or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
                    then
                      begin
                        { note: list.Insert prepends, so walking backwards
                          preserves the original relative order in list }
                        hp4:=hp3;
                        hp3:=tai(hp3.Previous);
                        asml.Remove(hp4);
                        list.Insert(hp4);
                      end
                    else
                      hp3:=tai(hp3.Previous);
                  end;
                list.Concat(p);
                { update live_start/live_end information for the swapped pair }
                SwapRegLive(taicpu(p),taicpu(hp1));

                { after the instruction? }
                { find reg deallocs and reg syncs }
                while assigned(hp5) and (hp5.typ<>ait_instruction) do
                  begin
                    if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
                      RegInInstruction(tai_regalloc(hp5).reg,p) then
                      begin
                        hp4:=hp5;
                        hp5:=tai(hp5.next);
                        asml.Remove(hp4);
                        list.Concat(hp4);
                      end
                    else
                      hp5:=tai(hp5.Next);
                  end;

                asml.Remove(hp1);
                { if there are address labels associated with hp2, those must
                  stay with hp2 (e.g. for GOT-less PIC) }
                insertpos:=hp2;
                while assigned(hp2.previous) and
                  (tai(hp2.previous).typ<>ait_instruction) do
                  begin
                    hp2:=tai(hp2.previous);
                    if (hp2.typ=ait_label) and
                      (tai_label(hp2).labsym.typ=AT_ADDR) then
                      insertpos:=hp2;
                  end;
{$ifdef DEBUG_PREREGSCHEDULER}
                asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
{$endif DEBUG_PREREGSCHEDULER}
                { final order: ... hp1 (the load), p + its companions, hp2 ... }
                asml.InsertBefore(hp1,insertpos);
                asml.InsertListBefore(insertpos,list);
                p:=tai(p.next);
              end
            else if p.typ=ait_instruction then
              p:=hp1
            else
              p:=tai(p.next);
          end;
        list.Free;
      end;
  2416. procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  2417. var
  2418. hp : tai;
  2419. l : longint;
  2420. begin
  2421. hp := tai(p.Previous);
  2422. l := 1;
  2423. while assigned(hp) and
  2424. (l <= 4) do
  2425. begin
  2426. if hp.typ=ait_instruction then
  2427. begin
  2428. if (taicpu(hp).opcode>=A_IT) and
  2429. (taicpu(hp).opcode <= A_ITTTT) then
  2430. begin
  2431. if (taicpu(hp).opcode = A_IT) and
  2432. (l=1) then
  2433. list.Remove(hp)
  2434. else
  2435. case taicpu(hp).opcode of
  2436. A_ITE:
  2437. if l=2 then taicpu(hp).opcode := A_IT;
  2438. A_ITT:
  2439. if l=2 then taicpu(hp).opcode := A_IT;
  2440. A_ITEE:
  2441. if l=3 then taicpu(hp).opcode := A_ITE;
  2442. A_ITTE:
  2443. if l=3 then taicpu(hp).opcode := A_ITT;
  2444. A_ITET:
  2445. if l=3 then taicpu(hp).opcode := A_ITE;
  2446. A_ITTT:
  2447. if l=3 then taicpu(hp).opcode := A_ITT;
  2448. A_ITEEE:
  2449. if l=4 then taicpu(hp).opcode := A_ITEE;
  2450. A_ITTEE:
  2451. if l=4 then taicpu(hp).opcode := A_ITTE;
  2452. A_ITETE:
  2453. if l=4 then taicpu(hp).opcode := A_ITET;
  2454. A_ITTTE:
  2455. if l=4 then taicpu(hp).opcode := A_ITTT;
  2456. A_ITEET:
  2457. if l=4 then taicpu(hp).opcode := A_ITEE;
  2458. A_ITTET:
  2459. if l=4 then taicpu(hp).opcode := A_ITTE;
  2460. A_ITETT:
  2461. if l=4 then taicpu(hp).opcode := A_ITET;
  2462. A_ITTTT:
  2463. begin
  2464. if l=4 then taicpu(hp).opcode := A_ITTT;
  2465. end
  2466. else
  2467. ;
  2468. end;
  2469. break;
  2470. end;
  2471. {else if (taicpu(hp).condition<>taicpu(p).condition) or
  2472. (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
  2473. break;}
  2474. inc(l);
  2475. end;
  2476. hp := tai(hp.Previous);
  2477. end;
  2478. end;
  2479. function TCpuThumb2AsmOptimizer.OptPass1STM(var p: tai): boolean;
  2480. var
  2481. hp : taicpu;
  2482. begin
  2483. result:=false;
  2484. if MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2485. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2486. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2487. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2488. begin
  2489. DebugMsg('Peephole Stm2Push done', p);
  2490. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2491. AsmL.InsertAfter(hp, p);
  2492. asml.Remove(p);
  2493. p:=hp;
  2494. result:=true;
  2495. end;
  2496. end;
  2497. function TCpuThumb2AsmOptimizer.OptPass1LDM(var p: tai): boolean;
  2498. var
  2499. hp : taicpu;
  2500. begin
  2501. result:=false;
  2502. if MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2503. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2504. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2505. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2506. begin
  2507. DebugMsg('Peephole Ldm2Pop done', p);
  2508. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2509. asml.InsertBefore(hp, p);
  2510. asml.Remove(p);
  2511. p.Free;
  2512. p:=hp;
  2513. result:=true;
  2514. end;
  2515. end;
  2516. function TCpuThumb2AsmOptimizer.OptPass1AndThumb2(var p : tai) : boolean;
  2517. begin
  2518. result:=false;
  2519. if MatchInstruction(p, [A_AND], [], [PF_None]) and
  2520. (taicpu(p).ops = 2) and
  2521. (taicpu(p).oper[1]^.typ=top_const) and
  2522. ((taicpu(p).oper[1]^.val=255) or
  2523. (taicpu(p).oper[1]^.val=65535)) then
  2524. begin
  2525. DebugMsg('Peephole AndR2Uxt done', p);
  2526. if taicpu(p).oper[1]^.val=255 then
  2527. taicpu(p).opcode:=A_UXTB
  2528. else
  2529. taicpu(p).opcode:=A_UXTH;
  2530. taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
  2531. result := true;
  2532. end
  2533. else if MatchInstruction(p, [A_AND], [], [PF_None]) and
  2534. (taicpu(p).ops = 3) and
  2535. (taicpu(p).oper[2]^.typ=top_const) and
  2536. ((taicpu(p).oper[2]^.val=255) or
  2537. (taicpu(p).oper[2]^.val=65535)) then
  2538. begin
  2539. DebugMsg('Peephole AndRR2Uxt done', p);
  2540. if taicpu(p).oper[2]^.val=255 then
  2541. taicpu(p).opcode:=A_UXTB
  2542. else
  2543. taicpu(p).opcode:=A_UXTH;
  2544. taicpu(p).ops:=2;
  2545. result := true;
  2546. end;
  2547. end;
  2548. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2549. begin
  2550. result:=false;
  2551. if inherited PeepHoleOptPass1Cpu(p) then
  2552. result:=true
  2553. else if p.typ=ait_instruction then
  2554. case taicpu(p).opcode of
  2555. A_STM:
  2556. result:=OptPass1STM(p);
  2557. A_LDM:
  2558. result:=OptPass1LDM(p);
  2559. A_AND:
  2560. result:=OptPass1AndThumb2(p);
  2561. else
  2562. ;
  2563. end;
  2564. end;
    { Pass 2: collapse a short conditional branch over up to 4 conditionable
      instructions into a Thumb-2 IT block with the inverted condition, then
      remove the branch. }
    procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
      var
        p,hp1,hp2: tai;
        l : longint;
        condition : tasmcond;
        { UsedRegs, TmpUsedRegs: TRegSet; }
      begin
        p := BlockStart;
        { UsedRegs := []; }
        while (p <> BlockEnd) Do
          begin
            { UpdateUsedRegs(UsedRegs, tai(p.next)); }
            case p.Typ Of
              Ait_Instruction:
                begin
                  case taicpu(p).opcode Of
                    A_B:
                      if taicpu(p).condition<>C_None then
                        begin
                          { check for
                            Bxx  xxx
                            <several instructions>
                            xxx:
                          }
                          l:=0;
                          GetNextInstruction(p, hp1);
                          { count how many instructions can be predicated
                            before the branch target (IT covers at most 4) }
                          while assigned(hp1) and
                            (l<=4) and
                            CanBeCond(hp1) and
                            { stop on labels }
                            not(hp1.typ=ait_label) do
                            begin
                              inc(l);
                              if MustBeLast(hp1) then
                                begin
                                  //hp1:=nil;
                                  GetNextInstruction(hp1,hp1);
                                  break;
                                end
                              else
                                GetNextInstruction(hp1,hp1);
                            end;
                          if assigned(hp1) then
                            begin
                              { hp1 must now be the branch target label }
                              if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
                                begin
                                  if (l<=4) and (l>0) then
                                    begin
                                      { predicate the skipped instructions with
                                        the inverse of the branch condition }
                                      condition:=inverse_cond(taicpu(p).condition);
                                      hp2:=p;
                                      GetNextInstruction(p,hp1);
                                      p:=hp1;
                                      repeat
                                        if hp1.typ=ait_instruction then
                                          taicpu(hp1).condition:=condition;
                                        if MustBeLast(hp1) then
                                          begin
                                            GetNextInstruction(hp1,hp1);
                                            break;
                                          end
                                        else
                                          GetNextInstruction(hp1,hp1);
                                      until not(assigned(hp1)) or
                                        not(CanBeCond(hp1)) or
                                        (hp1.typ=ait_label);
                                      { wait with removing else GetNextInstruction could
                                        ignore the label if it was the only usage in the
                                        jump moved away }
                                      asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
                                      { if the removed branch itself lived in an IT
                                        block, shrink that block by one slot }
                                      DecrementPreceedingIT(asml, hp2);
                                      case l of
                                        1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
                                        2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
                                        3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
                                        4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
                                      end;
                                      tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                      asml.remove(hp2);
                                      hp2.free;
                                      continue;
                                    end;
                                end;
                            end;
                        end;
                    else
                      ;
                  end;
                end;
              else
                ;
            end;
            p := tai(p.next)
          end;
      end;
    { Post-peephole pass for Thumb-2: rewrites instructions into shorter
      flag-setting (…S) 16-bit encodings when the flags are not live, and
      folds 3-operand forms into the 2-operand forms Thumb prefers.
      NOTE(review): branch order matters - the PF_None 3-operand cases must be
      checked before the 2-operand ones since several branches reduce ops. }
    function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
      begin
        result:=false;
        if p.typ = ait_instruction then
          begin
            { mov reg,#0..255 -> movs reg,#imm8 (16-bit encoding) }
            if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
              (taicpu(p).oper[1]^.typ=top_const) and
              (taicpu(p).oper[1]^.val >= 0) and
              (taicpu(p).oper[1]^.val < 256) and
              (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
              begin
                DebugMsg('Peephole Mov2Movs done', p);
                { mark the flags as clobbered by the new …S form }
                asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
                asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
                IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
                taicpu(p).oppostfix:=PF_S;
                result:=true;
              end
            { mvn reg,reg -> mvns reg,reg }
            else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
              (taicpu(p).oper[1]^.typ=top_reg) and
              (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
              begin
                DebugMsg('Peephole Mvn2Mvns done', p);
                asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
                asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
                IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
                taicpu(p).oppostfix:=PF_S;
                result:=true;
              end
            { rsb reg1,reg2,#0 -> rsbs (negate, 16-bit encoding) }
            else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
              (taicpu(p).ops = 3) and
              (taicpu(p).oper[2]^.typ=top_const) and
              (taicpu(p).oper[2]^.val=0) and
              (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
              begin
                DebugMsg('Peephole Rsb2Rsbs done', p);
                asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
                asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
                IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
                taicpu(p).oppostfix:=PF_S;
                result:=true;
              end
            { add/sub reg,reg,#0..255 -> adds/subs reg,#imm8 }
            else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
              (taicpu(p).ops = 3) and
              MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
              (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
              (taicpu(p).oper[2]^.typ=top_const) and
              (taicpu(p).oper[2]^.val >= 0) and
              (taicpu(p).oper[2]^.val < 256) and
              (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
              begin
                DebugMsg('Peephole AddSub2*s done', p);
                asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
                asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
                IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
                { fold to 2-operand form: move the constant into slot 1 }
                taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
                taicpu(p).oppostfix:=PF_S;
                taicpu(p).ops := 2;
                result:=true;
              end
            { add/sub reg1,reg2 -> adds/subs reg1,reg2 (sp excluded) }
            else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
              (taicpu(p).ops = 2) and
              (taicpu(p).oper[1]^.typ=top_reg) and
              (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
              (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
              (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
              begin
                DebugMsg('Peephole AddSub2*s done', p);
                asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
                asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
                IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
                taicpu(p).oppostfix:=PF_S;
                result:=true;
              end
            { add reg,reg,reg2 -> add reg,reg2 (no flag change needed) }
            else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
              (taicpu(p).ops = 3) and
              MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
              (taicpu(p).oper[2]^.typ=top_reg) then
              begin
                DebugMsg('Peephole AddRRR2AddRR done', p);
                taicpu(p).ops := 2;
                taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
                result:=true;
              end
            { op reg,reg,reg2 -> ops reg,reg2 for logic/shift ops }
            else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
              (taicpu(p).ops = 3) and
              MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
              (taicpu(p).oper[2]^.typ=top_reg) and
              (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
              begin
                DebugMsg('Peephole opXXY2opsXY done', p);
                asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
                asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
                IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
                taicpu(p).ops := 2;
                taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
                taicpu(p).oppostfix:=PF_S;
                result:=true;
              end
            { ops reg,reg,op2 -> ops reg,op2 (already flag-setting) }
            else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
              (taicpu(p).ops = 3) and
              MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
              (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
              begin
                DebugMsg('Peephole opXXY2opXY done', p);
                taicpu(p).ops := 2;
                if taicpu(p).oper[2]^.typ=top_reg then
                  taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
                else
                  taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
                result:=true;
              end
            { op reg,reg2,reg -> ops reg,reg2 for commutative ops }
            else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
              (taicpu(p).ops = 3) and
              MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
              (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
              begin
                DebugMsg('Peephole opXYX2opsXY done', p);
                asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
                asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
                IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
                taicpu(p).oppostfix:=PF_S;
                taicpu(p).ops := 2;
                result:=true;
              end
            { mov reg1,reg2,shift -> lsls/lsrs/asrs/rors (dedicated 16-bit
              shift instructions) }
            else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
              (taicpu(p).ops=3) and
              (taicpu(p).oper[2]^.typ=top_shifterop) and
              (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
              //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
              (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
              begin
                DebugMsg('Peephole Mov2Shift done', p);
                asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
                asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
                IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
                taicpu(p).oppostfix:=PF_S;
                case taicpu(p).oper[2]^.shifterop^.shiftmode of
                  SM_LSL: taicpu(p).opcode:=A_LSL;
                  SM_LSR: taicpu(p).opcode:=A_LSR;
                  SM_ASR: taicpu(p).opcode:=A_ASR;
                  SM_ROR: taicpu(p).opcode:=A_ROR;
                  else
                    internalerror(2019050912);
                end;
                { replace the shifter operand by its register or immediate }
                if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
                  taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
                else
                  taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
                result:=true;
              end
          end;
      end;
{ unit initialization: register the ARM-specific optimizer and pre-regalloc
  scheduler classes with the generic optimizer framework }
begin
  casmoptimizer:=TCpuAsmOptimizer;
  cpreregallocscheduler:=TCpuPreRegallocScheduler;
End.