aoptcpu.pas 112 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847
  1. {
  2. Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
  3. Development Team
  4. This unit implements the ARM optimizer object
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 2 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program; if not, write to the Free Software
  15. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  16. ****************************************************************************
  17. }
  18. Unit aoptcpu;
  19. {$i fpcdefs.inc}
  20. { $define DEBUG_PREREGSCHEDULER}
  21. { $define DEBUG_AOPTCPU}
  22. Interface
  23. uses
  24. cgbase, cgutils, cpubase, aasmtai,
  25. aasmcpu,
  26. aopt, aoptobj, aoptarm;
Type

  { ARM-specific peephole optimizer; extends the generic ARM optimizer
    with ARM32-only transformations }
  TCpuAsmOptimizer = class(TARMAsmOptimizer)
    { Can't be done in some cases due to the limited range of jumps }
    function CanDoJumpOpts: Boolean; override;
    { uses the same constructor as TAopObj }
    function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
    procedure PeepHoleOptPass2;override;
    Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
    { removes a vmov that only copies the result register of p into another
      register, by making p target that register directly }
    function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;
    { gets the next tai object after current that contains info relevant
      to the optimizer in p1 which used the given register or does a
      change in program flow.
      If there is none, it returns false and
      sets p1 to nil }
    Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
    { outputs a debug message into the assembler file }
    procedure DebugMsg(const s: string; p: tai);
    function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
    function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
  protected
    { fold a preceding add/sub of a load/store base register into a
      pre-/postindexed addressing mode }
    function LookForPreindexedPattern(p: taicpu): boolean;
    function LookForPostindexedPattern(p: taicpu): boolean;
    { Individual optimisation routines }
    function OptPass1DataCheckMov(var p: tai): Boolean;
    function OptPass1ADDSUB(var p: tai): Boolean;
    function OptPass1And(var p: tai): Boolean; override; { There's optimisation code that's general for all ARM platforms }
    function OptPass1CMP(var p: tai): Boolean;
    function OptPass1LDR(var p: tai): Boolean;
    function OptPass1STM(var p: tai): Boolean;
    function OptPass1STR(var p: tai): Boolean;
    function OptPass1MOV(var p: tai): Boolean;
    function OptPass1MUL(var p: tai): Boolean;
    function OptPass1MVN(var p: tai): Boolean;
    function OptPass1VMov(var p: tai): Boolean;
    function OptPass1VOp(var p: tai): Boolean;
  End;

  { instruction scheduler that runs before register allocation }
  TCpuPreRegallocScheduler = class(TAsmScheduler)
    function SchedulerPass1Cpu(var p: tai): boolean;override;
    procedure SwapRegLive(p, hp1: taicpu);
  end;

  { Thumb-2 specific peephole optimizer, built on the ARM one }
  TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
    { uses the same constructor as TAopObj }
    function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
    procedure PeepHoleOptPass2;override;
    function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  End;

function MustBeLast(p : tai) : boolean;
  74. Implementation
  75. uses
  76. cutils,verbose,globtype,globals,
  77. systems,
  78. cpuinfo,
  79. cgobj,procinfo,
  80. aasmbase,aasmdata;
  81. { Range check must be disabled explicitly as conversions between signed and unsigned
  82. 32-bit values are done without explicit typecasts }
  83. {$R-}
  84. function CanBeCond(p : tai) : boolean;
  85. begin
  86. result:=
  87. not(GenerateThumbCode) and
  88. (p.typ=ait_instruction) and
  89. (taicpu(p).condition=C_None) and
  90. ((taicpu(p).opcode<A_IT) or (taicpu(p).opcode>A_ITTTT)) and
  91. (taicpu(p).opcode<>A_CBZ) and
  92. (taicpu(p).opcode<>A_CBNZ) and
  93. (taicpu(p).opcode<>A_PLD) and
  94. (((taicpu(p).opcode<>A_BLX) and
  95. { BL may need to be converted into BLX by the linker -- could possibly
  96. be allowed in case it's to a local symbol of which we know that it
  97. uses the same instruction set as the current one }
  98. (taicpu(p).opcode<>A_BL)) or
  99. (taicpu(p).oper[0]^.typ=top_reg));
  100. end;
  101. function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
  102. begin
  103. Result:=false;
  104. if (taicpu(movp).condition = C_EQ) and
  105. (taicpu(cmpp).oper[0]^.reg = taicpu(movp).oper[0]^.reg) and
  106. (taicpu(cmpp).oper[1]^.val = taicpu(movp).oper[1]^.val) then
  107. begin
  108. asml.insertafter(tai_comment.Create(strpnew('Peephole Optimization: CmpMovMov - Removed redundant moveq')), movp);
  109. asml.remove(movp);
  110. movp.free;
  111. Result:=true;
  112. end;
  113. end;
  114. function AlignedToQWord(const ref : treference) : boolean;
  115. begin
  116. { (safe) heuristics to ensure alignment }
  117. result:=(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) and
  118. (((ref.offset>=0) and
  119. ((ref.offset mod 8)=0) and
  120. ((ref.base=NR_R13) or
  121. (ref.index=NR_R13))
  122. ) or
  123. ((ref.offset<=0) and
  124. { when using NR_R11, it has always a value of <qword align>+4 }
  125. ((abs(ref.offset+4) mod 8)=0) and
  126. (current_procinfo.framepointer=NR_R11) and
  127. ((ref.base=NR_R11) or
  128. (ref.index=NR_R11))
  129. )
  130. );
  131. end;
  132. function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
  133. begin
  134. if GenerateThumb2Code then
  135. result := (aoffset<4096) and (aoffset>-256)
  136. else
  137. result := ((pf in [PF_None,PF_B]) and
  138. (abs(aoffset)<4096)) or
  139. (abs(aoffset)<256);
  140. end;
{ Returns true when instruction hp reads register reg -- directly as an
  operand, via a register set, via a shifter operand, or as part of a
  memory reference (base/index). }
function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
  var
    p: taicpu;
    i: longint;
  begin
    instructionLoadsFromReg := false;
    if not (assigned(hp) and (hp.typ = ait_instruction)) then
      exit;
    p:=taicpu(hp);
    { operand 0 is normally the destination, so reading starts at oper[1] }
    i:=1;
    {For these instructions we have to start on oper[0]}
    if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
                     A_CMP, A_CMN, A_TST, A_TEQ,
                     A_B, A_BL, A_BX, A_BLX,
                     A_SMLAL, A_UMLAL, A_VSTM, A_VLDM]) then
      i:=0;
    while(i<p.ops) do
      begin
        case p.oper[I]^.typ of
          top_reg:
            instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
              { STRD also stores the register with the next supreg number }
              { STRD }
              ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
          top_regset:
            instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
          top_shifterop:
            instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
          top_ref:
            instructionLoadsFromReg :=
              (p.oper[I]^.ref^.base = reg) or
              (p.oper[I]^.ref^.index = reg);
          else
            ;
        end;
        { for LDM/VLDM only the base operand (oper[0]) is read; the
          register set is written, so stop after checking oper[0] }
        if (i=0) and (p.opcode in [A_LDM,A_VLDM]) then
          exit;
        if instructionLoadsFromReg then
          exit; {Bailout if we found something}
        Inc(I);
      end;
  end;
{ Returns true when instruction hp overwrites register reg with a new
  value (as opposed to merely reading it or modifying it in place, as
  pre-/postindexed addressing does). }
function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
  var
    p: taicpu;
  begin
    { note: the typecast before the assigned check is safe, it does not
      dereference hp }
    p := taicpu(hp);
    Result := false;
    if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
      exit;
    case p.opcode of
      { These operands do not write into a register at all }
      A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
      A_VCMP:
        exit;
      {Take care of post/preincremented store and loads, they will change their base register}
      A_STR, A_LDR:
        begin
          Result := false;
          { actually, this does not apply here because post-/preindexed does not mean that a register
            is loaded with a new value, it is only modified
            (taicpu(p).oper[1]^.typ=top_ref) and
            (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
            (taicpu(p).oper[1]^.ref^.base = reg);
          }
          { STR does not load into it's first register }
          if p.opcode = A_STR then
            exit;
          { LDR falls through to the oper[0] destination check below }
        end;
      A_VSTR:
        begin
          Result := false;
          exit;
        end;
      { These four are writing into the first 2 register, UMLAL and SMLAL will also read from them }
      A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
        Result :=
          (p.oper[1]^.typ = top_reg) and
          (p.oper[1]^.reg = reg);
      {Loads to oper2 from coprocessor}
      {
      MCR/MRC is currently not supported in FPC
      A_MRC:
        Result :=
          (p.oper[2]^.typ = top_reg) and
          (p.oper[2]^.reg = reg);
      }
      {Loads to all register in the registerset}
      A_LDM, A_VLDM:
        Result := (getsupreg(reg) in p.oper[1]^.regset^);
      A_POP:
        Result := (getsupreg(reg) in p.oper[0]^.regset^) or
                  (reg=NR_STACK_POINTER_REG);
      else
        ;
    end;
    if Result then
      exit;
    { fall through: most remaining instructions write their first operand }
    case p.oper[0]^.typ of
      {This is the case}
      top_reg:
        Result := (p.oper[0]^.reg = reg) or
          { LDRD also writes the register with the next supreg number }
          { LDRD }
          (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
      {LDM/STM might write a new value to their index register}
      top_ref:
        Result :=
          (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
          (taicpu(p).oper[0]^.ref^.base = reg);
      else
        ;
    end;
  end;
{ Searches forward from Current for the next LDR or STR accessing the same
  memory reference ref; returns true with Next set to that instruction.
  The search stops (returning false) at non-instructions, at control flow
  changes, at any store when StopOnStore is set, when PC is modified, or
  immediately when -O3 is not enabled. }
function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai;
  Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  begin
    Next:=Current;
    repeat
      Result:=GetNextInstruction(Next,Next);
      if Result and
         (Next.typ=ait_instruction) and
         (taicpu(Next).opcode in [A_LDR, A_STR]) and
         (
          ((taicpu(Next).ops = 2) and
           (taicpu(Next).oper[1]^.typ = top_ref) and
           RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
          ((taicpu(Next).ops = 3) and { LDRD/STRD }
           (taicpu(Next).oper[2]^.typ = top_ref) and
           RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
         ) then
        {We've found an instruction LDR or STR with the same reference}
        exit;
    until not(Result) or
          (Next.typ<>ait_instruction) or
          { only keep scanning across instructions at -O3 and above }
          not(cs_opt_level3 in current_settings.optimizerswitches) or
          is_calljmp(taicpu(Next).opcode) or
          (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
          RegModifiedByInstruction(NR_PC,Next);
    Result:=false;
  end;
{$ifdef DEBUG_AOPTCPU}
{ Emits s as a comment into the assembler list, immediately before p
  (only when the optimizer debug define is active). }
procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
  begin
    asml.insertbefore(tai_comment.Create(strpnew(s)), p);
  end;
{$else DEBUG_AOPTCPU}
{ No-op in normal builds; declared inline so calls vanish entirely. }
procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
  begin
  end;
{$endif DEBUG_AOPTCPU}
  289. function TCpuAsmOptimizer.CanDoJumpOpts: Boolean;
  290. begin
  291. { Cannot perform these jump optimisations if the ARM architecture has 16-bit thumb codes }
  292. Result := not (
  293. (current_settings.instructionset = is_thumb) and not (CPUARM_HAS_THUMB2 in cpu_capabilities[current_settings.cputype])
  294. );
  295. end;
{ If movp is a vmov that merely copies the result register of p into
  another register, and p's result register is deallocated right after
  movp, rewrite p to produce its result directly in the vmov's
  destination and delete the vmov. The optimizer string is used in the
  debug message. Returns true when the move was removed. }
function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
  var
    alloc,
    dealloc : tai_regalloc;
    hp1 : tai;
  begin
    Result:=false;
    { the vmov must match p's condition and precision: either a plain vmov
      between mm registers (or p is a vldr), or p produces a double/single
      value (directly or via a conversion postfix) and the vmov moves a
      value of that same precision }
    if ((MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
         ((getregtype(taicpu(movp).oper[0]^.reg)=R_MMREGISTER) or (taicpu(p).opcode=A_VLDR))
        ) or
        (((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFD)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
        (((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFS)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
       ) and
       (taicpu(movp).ops=2) and
       MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
       { the destination register of the mov might not be used beween p and movp }
       not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
       { Take care to only do this for instructions which REALLY load to the first register.
         Otherwise
           vstr reg0, [reg1]
           vmov reg2, reg0
         will be optimized to
           vstr reg2, [reg1]
       }
       regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
      begin
        dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
        if assigned(dealloc) then
          begin
            DebugMsg('Peephole Optimization: '+optimizer+' removed superfluous vmov', movp);
            result:=true;
            { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
              and remove it if possible }
            asml.Remove(dealloc);
            alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
            if assigned(alloc) then
              begin
                asml.Remove(alloc);
                alloc.free;
                dealloc.free;
              end
            else
              { allocation not found: keep the deallocation, but move it
                right after p }
              asml.InsertAfter(dealloc,p);
            { try to move the allocation of the target register }
            GetLastInstruction(movp,hp1);
            alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
            if assigned(alloc) then
              begin
                asml.Remove(alloc);
                asml.InsertBefore(alloc,p);
                { adjust used regs }
                IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
              end;
            { change
              vldr reg0,[reg1]
              vmov reg2,reg0
              into
              ldr reg2,[reg1]
              if reg2 is an int register
            }
            if (taicpu(p).opcode=A_VLDR) and (getregtype(taicpu(movp).oper[0]^.reg)=R_INTREGISTER) then
              taicpu(p).opcode:=A_LDR;
            { finally get rid of the mov }
            taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
            asml.remove(movp);
            movp.free;
          end;
      end;
  end;
{
  optimize
  add/sub reg1,reg1,regY/const
  ...
  ldr/str regX,[reg1]
  into
  ldr/str regX,[reg1, regY/const]!
}
{ Rewrites the load/store that follows p into preindexed form and
  returns true; the caller is then expected to remove p itself. }
function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
  var
    hp1: tai;
  begin
    if GenerateARMCode and
       (p.ops=3) and
       { the add/sub must modify its own source register (reg1) }
       MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
       GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
       (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
       MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
       (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
       (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
       { the loaded/stored register must not be the base itself }
       (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
       (taicpu(hp1).oper[1]^.ref^.offset=0) and
       (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
       { the added value is either a register (unmodified inbetween) or an
         immediate small enough for the addressing mode of hp1 }
       (((p.oper[2]^.typ=top_reg) and
         (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
        ((p.oper[2]^.typ=top_const) and
         ((abs(p.oper[2]^.val) < 256) or
          ((abs(p.oper[2]^.val) < 4096) and
           (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
      begin
        taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;
        if p.oper[2]^.typ=top_reg then
          begin
            taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
            { the sign of the index reflects add vs sub }
            if p.opcode=A_ADD then
              taicpu(hp1).oper[1]^.ref^.signindex:=1
            else
              taicpu(hp1).oper[1]^.ref^.signindex:=-1;
          end
        else
          begin
            if p.opcode=A_ADD then
              taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
            else
              taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
          end;
        result:=true;
      end
    else
      result:=false;
  end;
{
  optimize
  ldr/str regX,[reg1]
  ...
  add/sub reg1,reg1,regY/const
  into
  ldr/str regX,[reg1], regY/const
}
{ Rewrites p into postindexed form and removes the following add/sub;
  returns true when the transformation was applied. }
function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
  var
    hp1 : tai;
  begin
    Result:=false;
    if (p.oper[1]^.typ = top_ref) and
       (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
       (p.oper[1]^.ref^.index=NR_NO) and
       (p.oper[1]^.ref^.offset=0) and
       GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
       { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
       MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
       { the add/sub must modify the base register in place }
       (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
       (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
       (
        (taicpu(hp1).oper[2]^.typ=top_reg) or
        { valid offset? }
        ((taicpu(hp1).oper[2]^.typ=top_const) and
         ((abs(taicpu(hp1).oper[2]^.val)<256) or
          ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
         )
        )
       ) and
       { don't apply the optimization if the base register is loaded }
       (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
       not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
       { don't apply the optimization if the (new) index register is loaded }
       { NOTE(review): oper[2]^.reg is read here even when oper[2] is a
         constant; presumably reading the unused variant field is harmless
         on this record layout -- confirm }
       (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
       not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
       GenerateARMCode then
      begin
        DebugMsg('Peephole Optimization: Str/LdrAdd/Sub2Str/Ldr Postindex done', p);
        p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
        if taicpu(hp1).oper[2]^.typ=top_const then
          begin
            if taicpu(hp1).opcode=A_ADD then
              p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
            else
              p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
          end
        else
          begin
            p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
            { the sign of the index reflects add vs sub }
            if taicpu(hp1).opcode=A_ADD then
              p.oper[1]^.ref^.signindex:=1
            else
              p.oper[1]^.ref^.signindex:=-1;
          end;
        { the add/sub is now folded into p, so drop it }
        asml.Remove(hp1);
        hp1.Free;
        Result:=true;
      end;
  end;
  477. function TCpuAsmOptimizer.OptPass1ADDSUB(var p: tai): Boolean;
  478. var
  479. hp1,hp2: tai;
  480. oldreg: tregister;
  481. begin
  482. Result := OptPass1DataCheckMov(p);
  483. {
  484. change
  485. add/sub reg2,reg1,const1
  486. str/ldr reg3,[reg2,const2]
  487. dealloc reg2
  488. to
  489. str/ldr reg3,[reg1,const2+/-const1]
  490. }
  491. if (not GenerateThumbCode) and
  492. (taicpu(p).ops>2) and
  493. (taicpu(p).oper[1]^.typ = top_reg) and
  494. (taicpu(p).oper[2]^.typ = top_const) then
  495. begin
  496. hp1:=p;
  497. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
  498. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  499. MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
  500. (taicpu(hp1).oper[1]^.typ = top_ref) and
  501. (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
  502. { don't optimize if the register is stored/overwritten }
  503. (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
  504. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  505. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  506. { new offset must be valid: either in the range of 8 or 12 bit, depend on the
  507. ldr postfix }
  508. (((taicpu(p).opcode=A_ADD) and
  509. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  510. ) or
  511. ((taicpu(p).opcode=A_SUB) and
  512. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  513. )
  514. ) do
  515. begin
  516. { neither reg1 nor reg2 might be changed inbetween }
  517. if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
  518. RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
  519. break;
  520. { reg2 must be either overwritten by the ldr or it is deallocated afterwards }
  521. if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
  522. assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
  523. begin
  524. { remember last instruction }
  525. hp2:=hp1;
  526. DebugMsg('Peephole Optimization: Add/SubLdr2Ldr done', p);
  527. hp1:=p;
  528. { fix all ldr/str }
  529. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
  530. begin
  531. taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
  532. if taicpu(p).opcode=A_ADD then
  533. inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
  534. else
  535. dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
  536. if hp1=hp2 then
  537. break;
  538. end;
  539. RemoveCurrentP(p);
  540. result:=true;
  541. Exit;
  542. end;
  543. end;
  544. end;
  545. if (taicpu(p).condition = C_None) and
  546. (taicpu(p).oppostfix = PF_None) and
  547. LookForPreindexedPattern(taicpu(p)) then
  548. begin
  549. DebugMsg('Peephole Optimization: Add/Sub to Preindexed done', p);
  550. RemoveCurrentP(p);
  551. Result:=true;
  552. Exit;
  553. end;
  554. end;
{ Pass-1 optimizer for MUL: first forwards the result register into a trailing
  plain mov (OptPass1DataCheckMov), then tries to fuse the multiply with a
  following add/sub into a single MLA/MLS instruction. }
function TCpuAsmOptimizer.OptPass1MUL(var p: tai): Boolean;
  var
    hp1,hp2: tai;
    oldreg: tregister;
  begin
    Result := OptPass1DataCheckMov(p);
    {
      Turn
      mul reg0, z,w
      sub/add x, y, reg0
      dealloc reg0
      into
      mls/mla x,z,w,y
    }
    if (taicpu(p).condition = C_None) and
      (taicpu(p).oppostfix = PF_None) and
      (taicpu(p).ops=3) and
      (taicpu(p).oper[0]^.typ = top_reg) and
      (taicpu(p).oper[1]^.typ = top_reg) and
      (taicpu(p).oper[2]^.typ = top_reg) and
      GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
      MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
      { both multiplier inputs must still hold their values at the add/sub }
      (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
      (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
      { MLA needs ARMv4+; MLS only exists from ARMv6T2/ARMv7 variants on }
      (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
       ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
      // CPUs before ARMv6 don't recommend having the same Rd and Rm for MLA.
      // TODO: A workaround would be to swap Rm and Rs
      (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
      { the mul result must feed the add/sub: either as the last source of a
        3-operand form (add/sub x,y,reg0), as the middle source of a 3-operand
        add (add x,reg0,y — only commutative for add), or as the single source
        of a 2-operand form (add/sub x,reg0) }
      (((taicpu(hp1).ops=3) and
        (taicpu(hp1).oper[2]^.typ=top_reg) and
        ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
          (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
         ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
           (taicpu(hp1).opcode=A_ADD) and
           (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
       ((taicpu(hp1).ops=2) and
        (taicpu(hp1).oper[1]^.typ=top_reg) and
        MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
      { and the mul result must die at the add/sub, otherwise it is still needed }
      (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
      begin
        if taicpu(hp1).opcode=A_ADD then
          begin
            taicpu(hp1).opcode:=A_MLA;
            { pick the accumulator: whichever add source is NOT the mul result }
            if taicpu(hp1).ops=3 then
              begin
                if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
                  oldreg:=taicpu(hp1).oper[2]^.reg
                else
                  oldreg:=taicpu(hp1).oper[1]^.reg;
              end
            else
              { 2-operand add x,reg0: x accumulates into itself }
              oldreg:=taicpu(hp1).oper[0]^.reg;
            taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
            taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
            taicpu(hp1).loadreg(3,oldreg);
            DebugMsg('Peephole Optimization: MulAdd2MLA done', p);
            { NOTE(review): unlike the MLS branch below, no AllocRegBetween calls
              are made here — verify the register allocations survive without them }
          end
        else
          begin
            taicpu(hp1).opcode:=A_MLS;
            { operand 3 (accumulator) is taken from oper[1] BEFORE oper[1] is
              overwritten below — the statement order is significant here }
            taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
            if taicpu(hp1).ops=2 then
              { NOTE(review): for the 2-operand sub form (x := x - reg0) this
                makes the accumulator the old mul result and Rn the destination;
                verify the resulting operand order is the intended one }
              taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
            else
              taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
            taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
            DebugMsg('Peephole Optimization: MulSub2MLS done', p);
            AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,UsedRegs);
            AllocRegBetween(taicpu(hp1).oper[2]^.reg,p,hp1,UsedRegs);
            AllocRegBetween(taicpu(hp1).oper[3]^.reg,p,hp1,UsedRegs);
          end;
        taicpu(hp1).ops:=4;
        { the mul itself is no longer needed }
        RemoveCurrentP(p);
        Result := True;
        Exit;
      end
  end;
  633. function TCpuAsmOptimizer.OptPass1And(var p: tai): Boolean;
  634. begin
  635. Result := OptPass1DataCheckMov(p);
  636. Result := inherited OptPass1And(p) or Result;
  637. end;
  638. function TCpuAsmOptimizer.OptPass1DataCheckMov(var p: tai): Boolean;
  639. var
  640. hp1: tai;
  641. begin
  642. {
  643. change
  644. op reg1, ...
  645. mov reg2, reg1
  646. to
  647. op reg2, ...
  648. }
  649. Result := (taicpu(p).ops >= 3) and
  650. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  651. RemoveSuperfluousMove(p, hp1, 'DataMov2Data');
  652. end;
{ Pass-1 optimizer for CMP against a constant:
  1) removes a conditional mov that just re-materialises the compared constant
     (RemoveRedundantMove), and
  2) for "cmp reg,#0" preceded by a flag-capable data op on the same register,
     folds the compare into the op's S-suffix (OpCmp2OpS). }
function TCpuAsmOptimizer.OptPass1CMP(var p: tai): Boolean;
  var
    hp1, hp2, hp_last: tai;
    MovRem1, MovRem2: Boolean;
  begin
    Result := False;
    { These optimizations can be applied only to the currently enabled operations because
      the other operations do not update all flags and FPC does not track flag usage }
    if (taicpu(p).condition = C_None) and
      (taicpu(p).oper[1]^.typ = top_const) and
      GetNextInstruction(p, hp1) then
      begin
        {
          change
          cmp reg,const1
          moveq reg,const1
          movne reg,const2
          to
          cmp reg,const1
          movne reg,const2
        }
        if MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
          (taicpu(hp1).oper[1]^.typ = top_const) and
          GetNextInstruction(hp1, hp2) and
          MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
          (taicpu(hp2).oper[1]^.typ = top_const) then
          begin
            MovRem1 := RemoveRedundantMove(p, hp1, asml);
            MovRem2 := RemoveRedundantMove(p, hp2, asml);
            Result:= MovRem1 or MovRem2;
            { Make sure that hp1 is still the next instruction after p }
            if MovRem1 then
              if MovRem2 then
                begin
                  { both movs gone: re-fetch the successor; nothing left to do
                    below if there is none }
                  if not GetNextInstruction(p, hp1) then
                    Exit;
                end
              else
                { only the first mov was removed: hp2 is now the successor }
                hp1 := hp2;
          end;
        {
          change
          <op> reg,x,y
          cmp reg,#0
          into
          <op>s reg,x,y
        }
        if (taicpu(p).oppostfix = PF_None) and
          (taicpu(p).oper[1]^.val = 0) and
          { be careful here, following instructions could use other flags
            however after a jump fpc never depends on the value of flags }
          { All above instructions set Z and N according to the following
            Z := result = 0;
            N := result[31];
            EQ = Z=1; NE = Z=0;
            MI = N=1; PL = N=0; }
          (MatchInstruction(hp1, A_B, [C_EQ,C_NE,C_MI,C_PL], []) or
           { mov is also possible, but only if there is no shifter operand, it could be an rxx,
             we are too lazy to check if it is rxx or something else }
           (MatchInstruction(hp1, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp1).ops=2))) and
          GetLastInstruction(p, hp_last) and
          MatchInstruction(hp_last, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,
            A_EOR,A_AND,A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
          (
            { mlas is only allowed in arm mode }
            (taicpu(hp_last).opcode<>A_MLA) or
            (current_settings.instructionset<>is_thumb)
          ) and
          { the preceding op must write exactly the compared register }
          (taicpu(hp_last).oper[0]^.reg = taicpu(p).oper[0]^.reg) and
          { and the flags must be dead after the consumer at hp1 }
          assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp1.Next))) then
          begin
            DebugMsg('Peephole Optimization: OpCmp2OpS done', hp_last);
            taicpu(hp_last).oppostfix:=PF_S;
            { move flag allocation if possible }
            hp1:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp_last.Next));
            if assigned(hp1) then
              begin
                asml.Remove(hp1);
                asml.insertbefore(hp1, hp_last);
              end;
            { the cmp is now folded into the S-suffixed op }
            RemoveCurrentP(p);
            Result:=true;
          end;
      end;
  end;
{ Pass-1 optimizer for LDR. Applies, in order:
  - ldr/ldr from the same reference -> single ldr (+ optional mov), or a pair
    of adjacent loads -> one ldrd when the CPU supports it;
  - ldrb followed by "and ...,#255" -> ldrb into the and's destination;
  - post-indexed addressing pattern folding (LookForPostindexedPattern);
  - removal of a superfluous mov after the load (RemoveSuperfluousMove). }
function TCpuAsmOptimizer.OptPass1LDR(var p: tai): Boolean;
  var
    hp1: tai;
  begin
    Result := False;
    { change
      ldr reg1,ref
      ldr reg2,ref
      into ...
    }
    if (taicpu(p).oper[1]^.typ = top_ref) and
      (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
      GetNextInstruction(p,hp1) and
      { ldrd is not allowed here }
      MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
      begin
        {
          ...
          ldr reg1,ref
          mov reg2,reg1
        }
        if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
          RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
          { reg1 must not be part of the second load's address, otherwise the
            first load already changed what the second one reads }
          (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
          (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
          (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
          begin
            if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
              begin
                { identical load into the same register: drop the duplicate }
                DebugMsg('Peephole Optimization: LdrLdr2Ldr done', hp1);
                asml.remove(hp1);
                hp1.free;
              end
            else
              begin
                { same source, different register: replace the reload by a mov }
                DebugMsg('Peephole Optimization: LdrLdr2LdrMov done', hp1);
                taicpu(hp1).opcode:=A_MOV;
                taicpu(hp1).oppostfix:=PF_None;
                taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
              end;
            result := true;
          end
        {
          ...
          ldrd reg1,reg1+1,ref
        }
        else if (GenerateARMCode or GenerateThumb2Code) and
          (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
          { ldrd does not allow any postfixes ... }
          (taicpu(p).oppostfix=PF_None) and
          { ldrd needs an even/odd register pair starting on an even register }
          not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
          (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
          { ldr ensures that either base or index contain no register, else ldr wouldn't
            use an offset either
          }
          (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
          (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
          (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
          { ldrd only encodes an 8-bit offset }
          (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
          AlignedToQWord(taicpu(p).oper[1]^.ref^) then
          begin
            DebugMsg('Peephole Optimization: LdrLdr2Ldrd done', p);
            taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
            taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
            taicpu(p).ops:=3;
            taicpu(p).oppostfix:=PF_D;
            asml.remove(hp1);
            hp1.free;
            result:=true;
          end;
      end;
    {
      Change
        ldrb dst1, [REF]
        and  dst2, dst1, #255
      into
        ldrb dst2, [ref]
    }
    if not(GenerateThumbCode) and
      (taicpu(p).oppostfix=PF_B) and
      GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
      MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
      (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
      (taicpu(hp1).oper[2]^.typ = top_const) and
      { ldrb already zero-extends, so masking with $FF is a no-op }
      (taicpu(hp1).oper[2]^.val = $FF) and
      not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
      RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
      begin
        DebugMsg('Peephole Optimization: LdrbAnd2Ldrb done', p);
        taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
        asml.remove(hp1);
        hp1.free;
        result:=true;
      end;
    Result:=LookForPostindexedPattern(taicpu(p)) or Result;
    { Remove superfluous mov after ldr
      changes
      ldr reg1, ref
      mov reg2, reg1
      to
      ldr reg2, ref
      conditions are:
        * no ldrd usage
        * reg1 must be released after mov
        * mov can not contain shifterops
        * ldr+mov have the same conditions
        * mov does not set flags
    }
    if (taicpu(p).oppostfix<>PF_D) and
      GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
      RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
      Result:=true;
  end;
{ Pass-1 optimizer for STM: recognises the 5-instruction call frame around a
  tail call (push lr / make room / bl / release room / pop into pc) and
  replaces the whole sequence by a plain branch (Bl2B). Only valid when no
  Thumb interworking is required, since "b" does not switch instruction sets. }
function TCpuAsmOptimizer.OptPass1STM(var p: tai): Boolean;
  var
    hp1, hp2, hp3, hp4: tai;
  begin
    Result := False;
    {
      change
      stmfd r13!,[r14]
      sub r13,r13,#4
      bl abc
      add r13,r13,#4
      ldmfd r13!,[r15]
      into
      b abc
    }
    if not(ts_thumb_interworking in current_settings.targetswitches) and
      (taicpu(p).condition = C_None) and
      (taicpu(p).oppostfix = PF_FD) and
      { the stm must be a "stmfd sp!,[lr]" exactly }
      (taicpu(p).oper[0]^.typ = top_ref) and
      (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
      (taicpu(p).oper[0]^.ref^.base=NR_NO) and
      (taicpu(p).oper[0]^.ref^.offset=0) and
      (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
      (taicpu(p).oper[1]^.typ = top_regset) and
      (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
      { followed by "sub sp,sp,#const" }
      GetNextInstruction(p, hp1) and
      MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
      (taicpu(hp1).oper[0]^.typ = top_reg) and
      (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
      MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
      (taicpu(hp1).oper[2]^.typ = top_const) and
      { followed by the call itself (entry/exit markers may intervene) }
      GetNextInstruction(hp1, hp2) and
      SkipEntryExitMarker(hp2, hp2) and
      MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
      (taicpu(hp2).oper[0]^.typ = top_ref) and
      { followed by the matching "add sp,sp,#const" with the same constant }
      GetNextInstruction(hp2, hp3) and
      SkipEntryExitMarker(hp3, hp3) and
      MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
      MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
      MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
      MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
      { followed by "ldmfd sp!,[pc]" popping straight into the program counter }
      GetNextInstruction(hp3, hp4) and
      MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
      MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
      (taicpu(hp4).oper[1]^.typ = top_regset) and
      (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
      begin
        { drop push, sub, add and pop; turn the call into a plain branch }
        asml.Remove(hp1);
        asml.Remove(hp3);
        asml.Remove(hp4);
        taicpu(hp2).opcode:=A_B;
        hp1.free;
        hp3.free;
        hp4.free;
        RemoveCurrentp(p, hp2);
        DebugMsg('Peephole Optimization: Bl2B done', p);
        Result := True;
      end;
  end;
{ Pass-1 optimizer for STR. Applies, in order:
  - str followed by a ldr from the same reference -> keep the str and turn the
    reload into a mov (or drop it when source and target register coincide);
  - two adjacent stores of an even/odd register pair -> one strd when the CPU
    supports it;
  - post-indexed addressing pattern folding (LookForPostindexedPattern). }
function TCpuAsmOptimizer.OptPass1STR(var p: tai): Boolean;
  var
    hp1: tai;
  begin
    Result := False;
    { Common conditions }
    if (taicpu(p).oper[1]^.typ = top_ref) and
      (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
      (taicpu(p).oppostfix=PF_None) then
      begin
        { change
          str reg1,ref
          ldr reg2,ref
          into
          str reg1,ref
          mov reg2,reg1
        }
        if (taicpu(p).condition=C_None) and
          GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
          MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
          (taicpu(hp1).oper[1]^.typ=top_ref) and
          (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
          { neither the stored value nor the address registers may change
            between the store and the reload }
          not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
          ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
          ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
          begin
            if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
              begin
                { reload into the same register: the value is already there }
                DebugMsg('Peephole Optimization: StrLdr2StrMov 1 done', hp1);
                asml.remove(hp1);
                hp1.free;
              end
            else
              begin
                { reload into a different register: replace by a mov }
                taicpu(hp1).opcode:=A_MOV;
                taicpu(hp1).oppostfix:=PF_None;
                taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
                DebugMsg('Peephole Optimization: StrLdr2StrMov 2 done', hp1);
              end;
            result := True;
          end
        { change
          str reg1,ref
          str reg2,ref
          into
          strd reg1,reg2,ref
        }
        else if (GenerateARMCode or GenerateThumb2Code) and
          (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
          { strd needs an even/odd register pair starting on an even register }
          not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
          { strd only encodes an 8-bit offset and needs qword alignment }
          (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
          AlignedToQWord(taicpu(p).oper[1]^.ref^) and
          GetNextInstruction(p,hp1) and
          { NOTE(review): C_None is accepted for the second str even when p is
            conditional; the merged strd then carries p's condition — confirm
            this is intended }
          MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
          (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
          { str ensures that either base or index contain no register, else ldr wouldn't
            use an offset either
          }
          (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
          (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
          (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) then
          begin
            DebugMsg('Peephole Optimization: StrStr2Strd done', p);
            taicpu(p).oppostfix:=PF_D;
            taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
            taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
            taicpu(p).ops:=3;
            asml.remove(hp1);
            hp1.free;
            result:=true;
          end;
      end;
    Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  end;
  984. function TCpuAsmOptimizer.OptPass1MOV(var p: tai): Boolean;
  985. var
  986. hp1, hpfar1, hp2, hp3: tai;
  987. i, i2: longint;
  988. tempop: tasmop;
  989. dealloc: tai_regalloc;
  990. begin
  991. Result := False;
  992. hp1 := nil;
  993. { fold
  994. mov reg1,reg0, shift imm1
  995. mov reg1,reg1, shift imm2
  996. }
  997. if (taicpu(p).ops=3) and
  998. (taicpu(p).oper[2]^.typ = top_shifterop) and
  999. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1000. getnextinstruction(p,hp1) and
  1001. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1002. (taicpu(hp1).ops=3) and
  1003. MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
  1004. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1005. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1006. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
  1007. begin
  1008. { fold
  1009. mov reg1,reg0, lsl 16
  1010. mov reg1,reg1, lsr 16
  1011. strh reg1, ...
  1012. dealloc reg1
  1013. to
  1014. strh reg1, ...
  1015. dealloc reg1
  1016. }
  1017. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1018. (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
  1019. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
  1020. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
  1021. getnextinstruction(hp1,hp2) and
  1022. MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
  1023. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
  1024. begin
  1025. TransferUsedRegs(TmpUsedRegs);
  1026. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1027. UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
  1028. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
  1029. begin
  1030. DebugMsg('Peephole Optimization: removed superfluous 16 Bit zero extension', hp1);
  1031. taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
  1032. asml.remove(hp1);
  1033. hp1.free;
  1034. RemoveCurrentP(p, hp2);
  1035. Result:=true;
  1036. Exit;
  1037. end;
  1038. end
  1039. { fold
  1040. mov reg1,reg0, shift imm1
  1041. mov reg1,reg1, shift imm2
  1042. to
  1043. mov reg1,reg0, shift imm1+imm2
  1044. }
  1045. else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
  1046. { asr makes no use after a lsr, the asr can be foled into the lsr }
  1047. ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR) ) then
  1048. begin
  1049. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1050. { avoid overflows }
  1051. if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
  1052. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  1053. SM_ROR:
  1054. taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
  1055. SM_ASR:
  1056. taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
  1057. SM_LSR,
  1058. SM_LSL:
  1059. begin
  1060. hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
  1061. InsertLLItem(p.previous, p.next, hp2);
  1062. p.free;
  1063. p:=hp2;
  1064. end;
  1065. else
  1066. internalerror(2008072803);
  1067. end;
  1068. DebugMsg('Peephole Optimization: ShiftShift2Shift 1 done', p);
  1069. asml.remove(hp1);
  1070. hp1.free;
  1071. hp1 := nil;
  1072. result := true;
  1073. end
  1074. { fold
  1075. mov reg1,reg0, shift imm1
  1076. mov reg1,reg1, shift imm2
  1077. mov reg1,reg1, shift imm3 ...
  1078. mov reg2,reg1, shift imm3 ...
  1079. }
  1080. else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
  1081. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1082. (taicpu(hp2).ops=3) and
  1083. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1084. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
  1085. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1086. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
  1087. begin
  1088. { mov reg1,reg0, lsl imm1
  1089. mov reg1,reg1, lsr/asr imm2
  1090. mov reg2,reg1, lsl imm3 ...
  1091. to
  1092. mov reg1,reg0, lsl imm1
  1093. mov reg2,reg1, lsr/asr imm2-imm3
  1094. if
  1095. imm1>=imm2
  1096. }
  1097. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1098. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1099. (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1100. begin
  1101. if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1102. begin
  1103. if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
  1104. not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1105. begin
  1106. DebugMsg('Peephole Optimization: ShiftShiftShift2ShiftShift 1a done', p);
  1107. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1108. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1109. asml.remove(hp1);
  1110. asml.remove(hp2);
  1111. hp1.free;
  1112. hp2.free;
  1113. if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
  1114. begin
  1115. taicpu(p).freeop(1);
  1116. taicpu(p).freeop(2);
  1117. taicpu(p).loadconst(1,0);
  1118. end;
  1119. result := true;
  1120. Exit;
  1121. end;
  1122. end
  1123. else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1124. begin
  1125. DebugMsg('Peephole Optimization: ShiftShiftShift2ShiftShift 1b done', p);
  1126. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
  1127. taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1128. asml.remove(hp2);
  1129. hp2.free;
  1130. result := true;
  1131. Exit;
  1132. end;
  1133. end
  1134. { mov reg1,reg0, lsr/asr imm1
  1135. mov reg1,reg1, lsl imm2
  1136. mov reg1,reg1, lsr/asr imm3 ...
  1137. if imm3>=imm1 and imm2>=imm1
  1138. to
  1139. mov reg1,reg0, lsl imm2-imm1
  1140. mov reg1,reg1, lsr/asr imm3 ...
  1141. }
  1142. else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1143. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1144. (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
  1145. (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1146. begin
  1147. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
  1148. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1149. DebugMsg('Peephole Optimization: ShiftShiftShift2ShiftShift 2 done', p);
  1150. if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
  1151. begin
  1152. taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
  1153. asml.remove(hp1);
  1154. hp1.free;
  1155. end;
  1156. RemoveCurrentp(p);
  1157. result := true;
  1158. Exit;
  1159. end;
  1160. end;
  1161. end;
  1162. { All the optimisations from this point on require GetNextInstructionUsingReg
  1163. to return True }
  1164. if not (
  1165. GetNextInstructionUsingReg(p, hpfar1, taicpu(p).oper[0]^.reg) and
  1166. (hpfar1.typ = ait_instruction)
  1167. ) then
  1168. Exit;
  1169. { Change the common
  1170. mov r0, r0, lsr #xxx
  1171. and r0, r0, #yyy/bic r0, r0, #xxx
  1172. and remove the superfluous and/bic if possible
  1173. This could be extended to handle more cases.
  1174. }
  1175. { Change
  1176. mov rx, ry, lsr/ror #xxx
  1177. uxtb/uxth rz,rx/and rz,rx,0xFF
  1178. dealloc rx
  1179. to
  1180. uxtb/uxth rz,ry,ror #xxx
  1181. }
  1182. if (GenerateThumb2Code) and
  1183. (taicpu(p).ops=3) and
  1184. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1185. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1186. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
  1187. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1188. begin
  1189. if MatchInstruction(hpfar1, A_UXTB, [C_None], [PF_None]) and
  1190. (taicpu(hpfar1).ops = 2) and
  1191. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1192. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1193. begin
  1194. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1195. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1196. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1197. taicpu(hpfar1).ops := 3;
  1198. if not Assigned(hp1) then
  1199. GetNextInstruction(p,hp1);
  1200. RemoveCurrentP(p, hp1);
  1201. result:=true;
  1202. exit;
  1203. end
  1204. else if MatchInstruction(hpfar1, A_UXTH, [C_None], [PF_None]) and
  1205. (taicpu(hpfar1).ops=2) and
  1206. (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
  1207. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1208. begin
  1209. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1210. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1211. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1212. taicpu(hpfar1).ops := 3;
  1213. if not Assigned(hp1) then
  1214. GetNextInstruction(p,hp1);
  1215. RemoveCurrentP(p, hp1);
  1216. result:=true;
  1217. exit;
  1218. end
  1219. else if MatchInstruction(hpfar1, A_AND, [C_None], [PF_None]) and
  1220. (taicpu(hpfar1).ops = 3) and
  1221. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1222. (taicpu(hpfar1).oper[2]^.val = $FF) and
  1223. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1224. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1225. begin
  1226. taicpu(hpfar1).ops := 3;
  1227. taicpu(hpfar1).opcode := A_UXTB;
  1228. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1229. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1230. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1231. if not Assigned(hp1) then
  1232. GetNextInstruction(p,hp1);
  1233. RemoveCurrentP(p, hp1);
  1234. result:=true;
  1235. exit;
  1236. end;
  1237. end;
  1238. { 2-operald mov optimisations }
  1239. if (taicpu(p).ops = 2) then
  1240. begin
  1241. {
  1242. This removes the mul from
  1243. mov rX,0
  1244. ...
  1245. mul ...,rX,...
  1246. }
  1247. if (taicpu(p).oper[1]^.typ = top_const) then
  1248. begin
  1249. (* if false and
  1250. (taicpu(p).oper[1]^.val=0) and
  1251. MatchInstruction(hpfar1, [A_MUL,A_MLA], [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1252. (((taicpu(hpfar1).oper[1]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^)) or
  1253. ((taicpu(hpfar1).oper[2]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[2]^))) then
  1254. begin
  1255. TransferUsedRegs(TmpUsedRegs);
  1256. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1257. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1258. DebugMsg('Peephole Optimization: MovMUL/MLA2Mov0 done', p);
  1259. if taicpu(hpfar1).opcode=A_MUL then
  1260. taicpu(hpfar1).loadconst(1,0)
  1261. else
  1262. taicpu(hpfar1).loadreg(1,taicpu(hpfar1).oper[3]^.reg);
  1263. taicpu(hpfar1).ops:=2;
  1264. taicpu(hpfar1).opcode:=A_MOV;
  1265. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1266. RemoveCurrentP(p);
  1267. Result:=true;
  1268. exit;
  1269. end
  1270. else*) if (taicpu(p).oper[1]^.val=0) and
  1271. MatchInstruction(hpfar1, A_MLA, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1272. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[3]^) then
  1273. begin
  1274. TransferUsedRegs(TmpUsedRegs);
  1275. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1276. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1277. DebugMsg('Peephole Optimization: MovMLA2MUL 1 done', p);
  1278. taicpu(hpfar1).ops:=3;
  1279. taicpu(hpfar1).opcode:=A_MUL;
  1280. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1281. begin
  1282. RemoveCurrentP(p);
  1283. Result:=true;
  1284. end;
  1285. exit;
  1286. end
  1287. {
  1288. This changes the very common
  1289. mov r0, #0
  1290. str r0, [...]
  1291. mov r0, #0
  1292. str r0, [...]
  1293. and removes all superfluous mov instructions
  1294. }
  1295. else if (taicpu(hpfar1).opcode=A_STR) then
  1296. begin
  1297. hp1 := hpfar1;
  1298. while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
  1299. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1300. GetNextInstruction(hp1, hp2) and
  1301. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1302. (taicpu(hp2).ops = 2) and
  1303. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
  1304. MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
  1305. begin
  1306. DebugMsg('Peephole Optimization: MovStrMov done', hp2);
  1307. GetNextInstruction(hp2,hp1);
  1308. asml.remove(hp2);
  1309. hp2.free;
  1310. result:=true;
  1311. if not assigned(hp1) then break;
  1312. end;
  1313. if Result then
  1314. Exit;
  1315. end;
  1316. end;
  1317. {
  1318. This removes the first mov from
  1319. mov rX,...
  1320. mov rX,...
  1321. }
  1322. if taicpu(hpfar1).opcode=A_MOV then
  1323. begin
  1324. hp1 := p;
  1325. while MatchInstruction(hpfar1, A_MOV, [taicpu(hp1).condition], [taicpu(hp1).oppostfix]) and
  1326. (taicpu(hpfar1).ops = 2) and
  1327. MatchOperand(taicpu(hp1).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1328. { don't remove the first mov if the second is a mov rX,rX }
  1329. not(MatchOperand(taicpu(hpfar1).oper[0]^, taicpu(hpfar1).oper[1]^)) do
  1330. begin
  1331. { Defer removing the first p until after the while loop }
  1332. if p <> hp1 then
  1333. begin
  1334. DebugMsg('Peephole Optimization: MovMov done', hp1);
  1335. asml.remove(hp1);
  1336. hp1.free;
  1337. end;
  1338. hp1:=hpfar1;
  1339. GetNextInstruction(hpfar1,hpfar1);
  1340. result:=true;
  1341. if not assigned(hpfar1) then
  1342. Break;
  1343. end;
  1344. if Result then
  1345. begin
  1346. DebugMsg('Peephole Optimization: MovMov done', p);
  1347. RemoveCurrentp(p);
  1348. Exit;
  1349. end;
  1350. end;
  1351. if RedundantMovProcess(p,hpfar1) then
  1352. begin
  1353. Result:=true;
  1354. { p might not point at a mov anymore }
  1355. exit;
  1356. end;
  1357. { Fold the very common sequence
  1358. mov regA, regB
  1359. ldr* regA, [regA]
  1360. to
  1361. ldr* regA, [regB]
  1362. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1363. }
  1364. if
  1365. // Make sure that Thumb code doesn't propagate a high register into a reference
  1366. (
  1367. (
  1368. GenerateThumbCode and
  1369. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)
  1370. ) or (not GenerateThumbCode)
  1371. ) and
  1372. (taicpu(p).oper[1]^.typ = top_reg) and
  1373. (taicpu(p).oppostfix = PF_NONE) and
  1374. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1375. (taicpu(hpfar1).oper[1]^.typ = top_ref) and
  1376. { We can change the base register only when the instruction uses AM_OFFSET }
  1377. ((taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1378. ((taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1379. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1380. ) and
  1381. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1382. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1383. begin
  1384. DebugMsg('Peephole Optimization: MovLdr2Ldr done', hpfar1);
  1385. if (taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1386. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1387. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1388. if taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1389. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1390. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1391. if Assigned(dealloc) then
  1392. begin
  1393. asml.remove(dealloc);
  1394. asml.InsertAfter(dealloc,hpfar1);
  1395. end;
  1396. if not Assigned(hp1) then
  1397. GetNextInstruction(p, hp1);
  1398. RemoveCurrentP(p, hp1);
  1399. result:=true;
  1400. Exit;
  1401. end
  1402. end
  1403. { 3-operald mov optimisations }
  1404. else if (taicpu(p).ops = 3) then
  1405. begin
  1406. if (taicpu(p).oper[2]^.typ = top_shifterop) and
  1407. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1408. (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
  1409. (taicpu(hpfar1).ops>=1) and
  1410. (taicpu(hpfar1).oper[0]^.typ=top_reg) and
  1411. (not RegModifiedBetween(taicpu(hpfar1).oper[0]^.reg, p, hpfar1)) and
  1412. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1413. begin
  1414. if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24 ) and
  1415. MatchInstruction(hpfar1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1416. (taicpu(hpfar1).ops=3) and
  1417. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1418. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1419. { Check if the AND actually would only mask out bits being already zero because of the shift
  1420. }
  1421. ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hpfar1).oper[2]^.val) =
  1422. ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
  1423. begin
  1424. DebugMsg('Peephole Optimization: LsrAnd2Lsr done', hpfar1);
  1425. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1426. asml.remove(hpfar1);
  1427. hpfar1.free;
  1428. result:=true;
  1429. Exit;
  1430. end
  1431. else if MatchInstruction(hpfar1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1432. (taicpu(hpfar1).ops=3) and
  1433. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1434. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1435. { Check if the BIC actually would only mask out bits beeing already zero because of the shift }
  1436. (taicpu(hpfar1).oper[2]^.val<>0) and
  1437. (BsfDWord(taicpu(hpfar1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1438. begin
  1439. DebugMsg('Peephole Optimization: LsrBic2Lsr done', hpfar1);
  1440. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1441. asml.remove(hpfar1);
  1442. hpfar1.free;
  1443. result:=true;
  1444. Exit;
  1445. end;
  1446. end;
  1447. { This folds shifterops into following instructions
  1448. mov r0, r1, lsl #8
  1449. add r2, r3, r0
  1450. to
  1451. add r2, r3, r1, lsl #8
  1452. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1453. }
  1454. if (taicpu(p).oper[1]^.typ = top_reg) and
  1455. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1456. (taicpu(p).oppostfix = PF_NONE) and
  1457. MatchInstruction(hpfar1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1458. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1459. A_CMP, A_CMN],
  1460. [taicpu(p).condition], [PF_None]) and
  1461. (not ((GenerateThumb2Code) and
  1462. (taicpu(hpfar1).opcode in [A_SBC]) and
  1463. (((taicpu(hpfar1).ops=3) and
  1464. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^.reg)) or
  1465. ((taicpu(hpfar1).ops=2) and
  1466. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^.reg))))) and
  1467. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) and
  1468. (taicpu(hpfar1).ops >= 2) and
  1469. {Currently we can't fold into another shifterop}
  1470. (taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^.typ = top_reg) and
  1471. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1472. NR_DEFAULTFLAGS for modification}
  1473. (
  1474. {Everything is fine if we don't use RRX}
  1475. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1476. (
  1477. {If it is RRX, then check if we're just accessing the next instruction}
  1478. Assigned(hp1) and
  1479. (hpfar1 = hp1)
  1480. )
  1481. ) and
  1482. { reg1 might not be modified inbetween }
  1483. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1484. { The shifterop can contain a register, might not be modified}
  1485. (
  1486. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1487. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hpfar1))
  1488. ) and
  1489. (
  1490. {Only ONE of the two src operands is allowed to match}
  1491. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-2]^) xor
  1492. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^)
  1493. ) then
  1494. begin
  1495. if taicpu(hpfar1).opcode in [A_TST, A_TEQ, A_CMN] then
  1496. I2:=0
  1497. else
  1498. I2:=1;
  1499. for I:=I2 to taicpu(hpfar1).ops-1 do
  1500. if MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[I]^.reg) then
  1501. begin
  1502. { If the parameter matched on the second op from the RIGHT
  1503. we have to switch the parameters, this will not happen for CMP
  1504. were we're only evaluating the most right parameter
  1505. }
  1506. if I <> taicpu(hpfar1).ops-1 then
  1507. begin
  1508. {The SUB operators need to be changed when we swap parameters}
  1509. case taicpu(hpfar1).opcode of
  1510. A_SUB: tempop:=A_RSB;
  1511. A_SBC: tempop:=A_RSC;
  1512. A_RSB: tempop:=A_SUB;
  1513. A_RSC: tempop:=A_SBC;
  1514. else tempop:=taicpu(hpfar1).opcode;
  1515. end;
  1516. if taicpu(hpfar1).ops = 3 then
  1517. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1518. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[2]^.reg,
  1519. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1520. else
  1521. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1522. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1523. taicpu(p).oper[2]^.shifterop^);
  1524. end
  1525. else
  1526. if taicpu(hpfar1).ops = 3 then
  1527. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1528. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[1]^.reg,
  1529. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1530. else
  1531. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1532. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1533. taicpu(p).oper[2]^.shifterop^);
  1534. if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
  1535. AllocRegBetween(taicpu(p).oper[2]^.shifterop^.rs,p,hpfar1,UsedRegs);
  1536. AllocRegBetween(taicpu(p).oper[1]^.reg,p,hpfar1,UsedRegs);
  1537. asml.insertbefore(hp2, hpfar1);
  1538. asml.remove(hpfar1);
  1539. hpfar1.free;
  1540. DebugMsg('Peephole Optimization: FoldShiftProcess done', hp2);
  1541. if not Assigned(hp1) then
  1542. GetNextInstruction(p, hp1)
  1543. else if hp1 = hpfar1 then
  1544. { If hp1 = hpfar1, then it's a dangling pointer }
  1545. hp1 := hp2;
  1546. RemoveCurrentP(p, hp1);
  1547. Result:=true;
  1548. Exit;
  1549. end;
  1550. end;
  1551. {
  1552. Fold
  1553. mov r1, r1, lsl #2
  1554. ldr/ldrb r0, [r0, r1]
  1555. to
  1556. ldr/ldrb r0, [r0, r1, lsl #2]
  1557. XXX: This still needs some work, as we quite often encounter something like
  1558. mov r1, r2, lsl #2
  1559. add r2, r3, #imm
  1560. ldr r0, [r2, r1]
  1561. which can't be folded because r2 is overwritten between the shift and the ldr.
  1562. We could try to shuffle the registers around and fold it into.
  1563. add r1, r3, #imm
  1564. ldr r0, [r1, r2, lsl #2]
  1565. }
  1566. if (not(GenerateThumbCode)) and
  1567. { thumb2 allows only lsl #0..#3 }
  1568. (not(GenerateThumb2Code) or
  1569. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1570. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1571. )
  1572. ) and
  1573. (taicpu(p).oper[1]^.typ = top_reg) and
  1574. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1575. { RRX is tough to handle, because it requires tracking the C-Flag,
  1576. it is also extremly unlikely to be emitted this way}
  1577. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1578. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1579. (taicpu(p).oppostfix = PF_NONE) and
  1580. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1581. (MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1582. (GenerateThumb2Code and
  1583. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1584. ) and
  1585. (
  1586. {If this is address by offset, one of the two registers can be used}
  1587. ((taicpu(hpfar1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1588. (
  1589. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1590. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1591. )
  1592. ) or
  1593. {For post and preindexed only the index register can be used}
  1594. ((taicpu(hpfar1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1595. (
  1596. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1597. (taicpu(hpfar1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1598. ) and
  1599. (not GenerateThumb2Code)
  1600. )
  1601. ) and
  1602. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1603. (taicpu(hpfar1).oper[1]^.ref^.index<>NR_NO) and
  1604. (taicpu(hpfar1).oper[1]^.ref^.base<>NR_NO) and
  1605. { Only fold if there isn't another shifterop already, and offset is zero. }
  1606. (taicpu(hpfar1).oper[1]^.ref^.offset = 0) and
  1607. (taicpu(hpfar1).oper[1]^.ref^.shiftmode = SM_None) and
  1608. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1609. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1610. begin
  1611. { If the register we want to do the shift for resides in base, we need to swap that}
  1612. if (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1613. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(hpfar1).oper[1]^.ref^.index;
  1614. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1615. taicpu(hpfar1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1616. taicpu(hpfar1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1617. DebugMsg('Peephole Optimization: FoldShiftLdrStr done', hpfar1);
  1618. RemoveCurrentP(p);
  1619. Result:=true;
  1620. Exit;
  1621. end;
  1622. end;
  1623. {
  1624. Often we see shifts and then a superfluous mov to another register
  1625. In the future this might be handled in RedundantMovProcess when it uses RegisterTracking
  1626. }
  1627. if RemoveSuperfluousMove(p, hpfar1, 'MovMov2Mov') then
  1628. Result:=true;
  1629. end;
  1630. function TCpuAsmOptimizer.OptPass1MVN(var p: tai): Boolean;
  1631. var
  1632. hp1: tai;
  1633. begin
  1634. {
  1635. change
  1636. mvn reg2,reg1
  1637. and reg3,reg4,reg2
  1638. dealloc reg2
  1639. to
  1640. bic reg3,reg4,reg1
  1641. }
  1642. Result := False;
  1643. if (taicpu(p).oper[1]^.typ = top_reg) and
  1644. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1645. MatchInstruction(hp1,A_AND,[],[]) and
  1646. (((taicpu(hp1).ops=3) and
  1647. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1648. (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
  1649. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
  1650. ((taicpu(hp1).ops=2) and
  1651. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1652. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1653. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
  1654. { reg1 might not be modified inbetween }
  1655. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1656. begin
  1657. DebugMsg('Peephole Optimization: MvnAnd2Bic done', p);
  1658. taicpu(hp1).opcode:=A_BIC;
  1659. if taicpu(hp1).ops=3 then
  1660. begin
  1661. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1662. taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
  1663. taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
  1664. end
  1665. else
  1666. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1667. RemoveCurrentp(p);
  1668. Result := True;
  1669. Exit;
  1670. end;
  1671. end;
  1672. function TCpuAsmOptimizer.OptPass1VMov(var p: tai): Boolean;
  1673. var
  1674. hp1: tai;
  1675. begin
  1676. {
  1677. change
  1678. vmov reg0,reg1,reg2
  1679. vmov reg1,reg2,reg0
  1680. into
  1681. vmov reg0,reg1,reg2
  1682. can be applied regardless if reg0 or reg2 is the vfp register
  1683. }
  1684. Result := False;
  1685. if (taicpu(p).ops = 3) then
  1686. while GetNextInstruction(p, hp1) and
  1687. MatchInstruction(hp1, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1688. (taicpu(hp1).ops = 3) and
  1689. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[2]^) and
  1690. MatchOperand(taicpu(p).oper[1]^, taicpu(hp1).oper[0]^) and
  1691. MatchOperand(taicpu(p).oper[2]^, taicpu(hp1).oper[1]^) do
  1692. begin
  1693. asml.Remove(hp1);
  1694. hp1.free;
  1695. DebugMsg('Peephole Optimization: VMovVMov2VMov done', p);
  1696. { Can we do it again? }
  1697. end;
  1698. end;
  1699. function TCpuAsmOptimizer.OptPass1VOp(var p: tai): Boolean;
  1700. var
  1701. hp1: tai;
  1702. begin
  1703. Result := GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1704. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp');
  1705. end;
  1706. function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  1707. begin
  1708. result := false;
  1709. if p.typ = ait_instruction then
  1710. begin
  1711. case taicpu(p).opcode of
  1712. A_CMP:
  1713. Result := OptPass1CMP(p);
  1714. A_STR:
  1715. Result := OptPass1STR(p);
  1716. A_LDR:
  1717. Result := OptPass1LDR(p);
  1718. A_MOV:
  1719. Result := OptPass1MOV(p);
  1720. A_AND:
  1721. Result := OptPass1And(p);
  1722. A_ADD,
  1723. A_SUB:
  1724. Result := OptPass1ADDSUB(p);
  1725. A_MUL:
  1726. REsult := OptPass1MUL(p);
  1727. A_ADC,
  1728. A_RSB,
  1729. A_RSC,
  1730. A_SBC,
  1731. A_BIC,
  1732. A_EOR,
  1733. A_ORR,
  1734. A_MLA,
  1735. A_MLS,
  1736. A_QADD,A_QADD16,A_QADD8,
  1737. A_QSUB,A_QSUB16,A_QSUB8,
  1738. A_QDADD,A_QDSUB,A_QASX,A_QSAX,
  1739. A_SHADD16,A_SHADD8,A_UHADD16,A_UHADD8,
  1740. A_SHSUB16,A_SHSUB8,A_UHSUB16,A_UHSUB8,
  1741. A_PKHTB,A_PKHBT,
  1742. A_SMUAD,A_SMUSD:
  1743. Result := OptPass1DataCheckMov(p);
  1744. {$ifdef dummy}
  1745. A_MVN:
  1746. Result := OPtPass1MVN(p);
  1747. {$endif dummy}
  1748. A_UXTB:
  1749. Result := OptPass1UXTB(p);
  1750. A_UXTH:
  1751. Result := OptPass1UXTH(p);
  1752. A_SXTB:
  1753. Result := OptPass1SXTB(p);
  1754. A_SXTH:
  1755. Result := OptPass1SXTH(p);
  1756. A_STM:
  1757. Result := OptPass1STM(p);
  1758. A_VMOV:
  1759. Result := OptPass1VMov(p);
  1760. A_VLDR,
  1761. A_VADD,
  1762. A_VMUL,
  1763. A_VDIV,
  1764. A_VSUB,
  1765. A_VSQRT,
  1766. A_VNEG,
  1767. A_VCVT,
  1768. A_VABS:
  1769. Result := OptPass1VOp(p);
  1770. else
  1771. ;
  1772. end;
  1773. end;
  1774. end;
  1775. { instructions modifying the CPSR can be only the last instruction }
  1776. function MustBeLast(p : tai) : boolean;
  1777. begin
  1778. Result:=(p.typ=ait_instruction) and
  1779. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  1780. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  1781. (taicpu(p).oppostfix=PF_S));
  1782. end;
{ Pass-2 peephole optimization: turns short conditionally-skipped blocks after
  a conditional branch into conditionally executed instructions (Bcc2Cond), and
  the if/else shaped variant with an unconditional branch in the middle into two
  conditionally executed runs (BccB2Cond).  Not applied for Thumb code. }
procedure TCpuAsmOptimizer.PeepHoleOptPass2;
  var
    p,hp1,hp2: tai;
    l : longint;
    condition : tasmcond;
    hp3: tai;
    WasLast: boolean;
    { UsedRegs, TmpUsedRegs: TRegSet; }
  begin
    p := BlockStart;
    { UsedRegs := []; }
    while (p <> BlockEnd) Do
      begin
        { UpdateUsedRegs(UsedRegs, tai(p.next)); }
        case p.Typ Of
          Ait_Instruction:
            begin
              case taicpu(p).opcode Of
                A_B:
                  if (taicpu(p).condition<>C_None) and
                    not(GenerateThumbCode) then
                    begin
                      { check for
                          Bxx   xxx
                          <several instructions>
                       xxx:
                      }
                      { count the instructions that would have to become conditional;
                        at most 4 are accepted }
                      l:=0;
                      WasLast:=False;
                      GetNextInstruction(p, hp1);
                      while assigned(hp1) and
                        (l<=4) and
                        CanBeCond(hp1) and
                        { stop on labels }
                        not(hp1.typ=ait_label) and
                        { avoid that we cannot recognize the case BccB2Cond }
                        not((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B)) do
                        begin
                          inc(l);
                          if MustBeLast(hp1) then
                            begin
                              WasLast:=True;
                              GetNextInstruction(hp1,hp1);
                              break;
                            end
                          else
                            GetNextInstruction(hp1,hp1);
                        end;
                      if assigned(hp1) then
                        begin
                          if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
                            begin
                              if (l<=4) and (l>0) then
                                begin
                                  { the branch skipped the block, so the block executes
                                    under the inverse condition }
                                  condition:=inverse_cond(taicpu(p).condition);
                                  hp2:=p;
                                  GetNextInstruction(p,hp1);
                                  p:=hp1;
                                  repeat
                                    if hp1.typ=ait_instruction then
                                      taicpu(hp1).condition:=condition;
                                    if MustBeLast(hp1) then
                                      begin
                                        GetNextInstruction(hp1,hp1);
                                        break;
                                      end
                                    else
                                      GetNextInstruction(hp1,hp1);
                                  until not(assigned(hp1)) or
                                    not(CanBeCond(hp1)) or
                                    (hp1.typ=ait_label);
                                  DebugMsg('Peephole Bcc2Cond done',hp2);
                                  { wait with removing else GetNextInstruction could
                                    ignore the label if it was the only usage in the
                                    jump moved away }
                                  tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                  asml.remove(hp2);
                                  hp2.free;
                                  continue;
                                end;
                            end
                          else
                            { do not perform further optimizations if there is an
                              instruction in block #1 which cannot be optimized }
                            if not WasLast then
                              begin
                                { check further for
                                       Bcc   xxx
                                       <several instructions 1>
                                       B   yyy
                                xxx:
                                       <several instructions 2>
                                yyy:
                                }
                                { hp2 points to jmp yyy }
                                hp2:=hp1;
                                { skip hp1 to xxx }
                                GetNextInstruction(hp1, hp1);
                                if assigned(hp2) and
                                  assigned(hp1) and
                                  (l<=3) and
                                  (hp2.typ=ait_instruction) and
                                  (taicpu(hp2).is_jmp) and
                                  (taicpu(hp2).condition=C_None) and
                                  { real label and jump, no further references to the
                                    label are allowed }
                                  (tasmlabel(taicpu(p).oper[0]^.ref^.symbol).getrefs=1) and
                                  FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
                                  begin
                                    l:=0;
                                    { skip hp1 to <several moves 2> }
                                    GetNextInstruction(hp1, hp1);
                                    while assigned(hp1) and
                                      CanBeCond(hp1) and
                                      (l<=3) do
                                      begin
                                        inc(l);
                                        if MustBeLast(hp1) then
                                          begin
                                            GetNextInstruction(hp1, hp1);
                                            break;
                                          end
                                        else
                                          GetNextInstruction(hp1, hp1);
                                      end;
                                    { hp1 points to yyy: }
                                    if assigned(hp1) and
                                      FindLabel(tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol),hp1) then
                                      begin
                                        { make block #1 execute under the inverse condition }
                                        condition:=inverse_cond(taicpu(p).condition);
                                        GetNextInstruction(p,hp1);
                                        hp3:=p;
                                        p:=hp1;
                                        repeat
                                          if hp1.typ=ait_instruction then
                                            taicpu(hp1).condition:=condition;
                                          if MustBeLast(hp1) then
                                            begin
                                              GetNextInstruction(hp1, hp1);
                                              break;
                                            end
                                          else
                                            GetNextInstruction(hp1, hp1);
                                        until not(assigned(hp1)) or
                                          not(CanBeCond(hp1)) or
                                          ((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B));
                                        { hp2 is still at jmp yyy }
                                        GetNextInstruction(hp2,hp1);
                                        { hp1 is now at xxx: }
                                        { block #2 executes under the original condition }
                                        condition:=inverse_cond(condition);
                                        GetNextInstruction(hp1,hp1);
                                        { hp1 is now at <several movs 2> }
                                        repeat
                                          if hp1.typ=ait_instruction then
                                            taicpu(hp1).condition:=condition;
                                          GetNextInstruction(hp1,hp1);
                                        until not(assigned(hp1)) or
                                          not(CanBeCond(hp1)) or
                                          (hp1.typ=ait_label);
                                        DebugMsg('Peephole BccB2Cond done',hp3);
                                        { remove Bcc }
                                        tasmlabel(taicpu(hp3).oper[0]^.ref^.symbol).decrefs;
                                        asml.remove(hp3);
                                        hp3.free;
                                        { remove B }
                                        tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                        asml.remove(hp2);
                                        hp2.free;
                                        continue;
                                      end;
                                  end;
                              end;
                        end;
                    end;
                else
                  ;
              end;
            end;
          else
            ;
        end;
        p := tai(p.next)
      end;
  end;
  1968. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  1969. begin
  1970. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  1971. Result:=true
  1972. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  1973. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  1974. Result:=true
  1975. else
  1976. Result:=inherited RegInInstruction(Reg, p1);
  1977. end;
const
  { set of opcodes which might or do write to memory; branches are included
    because the called/jumped-to code may store }
  { TODO : extend armins.dat to contain r/w info }
  opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
                            A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
  1983. { adjust the register live information when swapping the two instructions p and hp1,
  1984. they must follow one after the other }
  1985. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
  1986. procedure CheckLiveEnd(reg : tregister);
  1987. var
  1988. supreg : TSuperRegister;
  1989. regtype : TRegisterType;
  1990. begin
  1991. if reg=NR_NO then
  1992. exit;
  1993. regtype:=getregtype(reg);
  1994. supreg:=getsupreg(reg);
  1995. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_end[supreg]=hp1) and
  1996. RegInInstruction(reg,p) then
  1997. cg.rg[regtype].live_end[supreg]:=p;
  1998. end;
  1999. procedure CheckLiveStart(reg : TRegister);
  2000. var
  2001. supreg : TSuperRegister;
  2002. regtype : TRegisterType;
  2003. begin
  2004. if reg=NR_NO then
  2005. exit;
  2006. regtype:=getregtype(reg);
  2007. supreg:=getsupreg(reg);
  2008. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_start[supreg]=p) and
  2009. RegInInstruction(reg,hp1) then
  2010. cg.rg[regtype].live_start[supreg]:=hp1;
  2011. end;
  2012. var
  2013. i : longint;
  2014. r : TSuperRegister;
  2015. begin
  2016. { assumption: p is directly followed by hp1 }
  2017. { if live of any reg used by p starts at p and hp1 uses this register then
  2018. set live start to hp1 }
  2019. for i:=0 to p.ops-1 do
  2020. case p.oper[i]^.typ of
  2021. Top_Reg:
  2022. CheckLiveStart(p.oper[i]^.reg);
  2023. Top_Ref:
  2024. begin
  2025. CheckLiveStart(p.oper[i]^.ref^.base);
  2026. CheckLiveStart(p.oper[i]^.ref^.index);
  2027. end;
  2028. Top_Shifterop:
  2029. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2030. Top_RegSet:
  2031. for r:=RS_R0 to RS_R15 do
  2032. if r in p.oper[i]^.regset^ then
  2033. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2034. else
  2035. ;
  2036. end;
  2037. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2038. set live end to p }
  2039. for i:=0 to hp1.ops-1 do
  2040. case hp1.oper[i]^.typ of
  2041. Top_Reg:
  2042. CheckLiveEnd(hp1.oper[i]^.reg);
  2043. Top_Ref:
  2044. begin
  2045. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2046. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2047. end;
  2048. Top_Shifterop:
  2049. CheckLiveStart(hp1.oper[i]^.shifterop^.rs);
  2050. Top_RegSet:
  2051. for r:=RS_R0 to RS_R15 do
  2052. if r in hp1.oper[i]^.regset^ then
  2053. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2054. else
  2055. ;
  2056. end;
  2057. end;
{ Pre-register-allocation scheduler: if the instruction after p is a load whose
  result is needed by the instruction after that, and the load provably does not
  depend on p, swap p and the load so the load latency can be hidden.  Any
  register allocation/deallocation annotations and address labels attached to
  either instruction are moved along with it. }
function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
{ TODO : schedule also forward }
{ TODO : schedule distance > 1 }

  { returns true if p might be a load of a pc relative tls offset }
  function PossibleTLSLoad(const p: tai) : boolean;
    begin
      Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
        (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
        (taicpu(p).oper[1]^.ref^.index=NR_PC)));
    end;

  var
    hp1,hp2,hp3,hp4,hp5,insertpos : tai;
    list : TAsmList;
  begin
    result:=true;
    list:=TAsmList.create;
    p:=BlockStart;
    while p<>BlockEnd Do
      begin
        { hp1 must be a plain (non-writeback-postfix) load following p }
        if (p.typ=ait_instruction) and
          GetNextInstruction(p,hp1) and
          (hp1.typ=ait_instruction) and
          (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
          (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
          { for now we don't reschedule if the previous instruction changes potentially a memory location }
          ( (not(taicpu(p).opcode in opcode_could_mem_write) and
             not(RegModifiedByInstruction(NR_PC,p))
            ) or
            ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
             ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
              (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
              (taicpu(hp1).oper[1]^.ref^.offset=0)
              )
             ) or
             { try to prove that the memory accesses don't overlap }
             ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
              (taicpu(p).oper[1]^.typ = top_ref) and
              (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
              (taicpu(p).oppostfix=PF_None) and
              (taicpu(hp1).oppostfix=PF_None) and
              (taicpu(p).oper[1]^.ref^.index=NR_NO) and
              (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
              { get operand sizes and check if the offset distance is large enough to ensure no overlap }
              (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
             )
            )
          ) and
          GetNextInstruction(hp1,hp2) and
          (hp2.typ=ait_instruction) and
          { loaded register used by next instruction?
            if we ever support labels (they could be skipped in theory) here, the gnu2 tls general-dynamic code could get broken (the ldr before
            the bl may not be scheduled away from the bl) and it needs to be taken care of this case
          }
          (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
          { loaded register not used by previous instruction? }
          not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
          { same condition? }
          (taicpu(p).condition=taicpu(hp1).condition) and
          { first instruction might not change the register used as base }
          ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
           not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
          ) and
          { first instruction might not change the register used as index }
          ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
           not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
          ) and
          { if we modify the basereg AND the first instruction used that reg, we can not schedule }
          ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
           not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
          not(PossibleTLSLoad(p)) and
          not(PossibleTLSLoad(hp1)) then
          begin
            hp3:=tai(p.Previous);
            hp5:=tai(p.next);
            asml.Remove(p);
            { if there is a reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
              associated with p, move it together with p }

            { before the instruction? }
            { find reg allocs,deallocs and PIC labels }
            while assigned(hp3) and (hp3.typ<>ait_instruction) do
              begin
                if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
                  RegInInstruction(tai_regalloc(hp3).reg,p) )
                  or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
                then
                  begin
                    hp4:=hp3;
                    hp3:=tai(hp3.Previous);
                    asml.Remove(hp4);
                    list.Insert(hp4);
                  end
                else
                  hp3:=tai(hp3.Previous);
              end;
            list.Concat(p);
            SwapRegLive(taicpu(p),taicpu(hp1));
            { after the instruction? }
            { find reg deallocs and reg syncs }
            while assigned(hp5) and (hp5.typ<>ait_instruction) do
              begin
                if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
                  RegInInstruction(tai_regalloc(hp5).reg,p) then
                  begin
                    hp4:=hp5;
                    hp5:=tai(hp5.next);
                    asml.Remove(hp4);
                    list.Concat(hp4);
                  end
                else
                  hp5:=tai(hp5.Next);
              end;
            asml.Remove(hp1);
            { if there are address labels associated with hp2, those must
              stay with hp2 (e.g. for GOT-less PIC) }
            insertpos:=hp2;
            while assigned(hp2.previous) and
              (tai(hp2.previous).typ<>ait_instruction) do
              begin
                hp2:=tai(hp2.previous);
                if (hp2.typ=ait_label) and
                  (tai_label(hp2).labsym.typ=AT_ADDR) then
                  insertpos:=hp2;
              end;
{$ifdef DEBUG_PREREGSCHEDULER}
            asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
{$endif DEBUG_PREREGSCHEDULER}
            { the load goes first, then p (and its annotations) after it }
            asml.InsertBefore(hp1,insertpos);
            asml.InsertListBefore(insertpos,list);
            p:=tai(p.next);
          end
        else if p.typ=ait_instruction then
          p:=hp1
        else
          p:=tai(p.next);
      end;
    list.Free;
  end;
  2195. procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  2196. var
  2197. hp : tai;
  2198. l : longint;
  2199. begin
  2200. hp := tai(p.Previous);
  2201. l := 1;
  2202. while assigned(hp) and
  2203. (l <= 4) do
  2204. begin
  2205. if hp.typ=ait_instruction then
  2206. begin
  2207. if (taicpu(hp).opcode>=A_IT) and
  2208. (taicpu(hp).opcode <= A_ITTTT) then
  2209. begin
  2210. if (taicpu(hp).opcode = A_IT) and
  2211. (l=1) then
  2212. list.Remove(hp)
  2213. else
  2214. case taicpu(hp).opcode of
  2215. A_ITE:
  2216. if l=2 then taicpu(hp).opcode := A_IT;
  2217. A_ITT:
  2218. if l=2 then taicpu(hp).opcode := A_IT;
  2219. A_ITEE:
  2220. if l=3 then taicpu(hp).opcode := A_ITE;
  2221. A_ITTE:
  2222. if l=3 then taicpu(hp).opcode := A_ITT;
  2223. A_ITET:
  2224. if l=3 then taicpu(hp).opcode := A_ITE;
  2225. A_ITTT:
  2226. if l=3 then taicpu(hp).opcode := A_ITT;
  2227. A_ITEEE:
  2228. if l=4 then taicpu(hp).opcode := A_ITEE;
  2229. A_ITTEE:
  2230. if l=4 then taicpu(hp).opcode := A_ITTE;
  2231. A_ITETE:
  2232. if l=4 then taicpu(hp).opcode := A_ITET;
  2233. A_ITTTE:
  2234. if l=4 then taicpu(hp).opcode := A_ITTT;
  2235. A_ITEET:
  2236. if l=4 then taicpu(hp).opcode := A_ITEE;
  2237. A_ITTET:
  2238. if l=4 then taicpu(hp).opcode := A_ITTE;
  2239. A_ITETT:
  2240. if l=4 then taicpu(hp).opcode := A_ITET;
  2241. A_ITTTT:
  2242. begin
  2243. if l=4 then taicpu(hp).opcode := A_ITTT;
  2244. end
  2245. else
  2246. ;
  2247. end;
  2248. break;
  2249. end;
  2250. {else if (taicpu(hp).condition<>taicpu(p).condition) or
  2251. (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
  2252. break;}
  2253. inc(l);
  2254. end;
  2255. hp := tai(hp.Previous);
  2256. end;
  2257. end;
  2258. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2259. var
  2260. hp : taicpu;
  2261. //hp1,hp2 : tai;
  2262. begin
  2263. result:=false;
  2264. if inherited PeepHoleOptPass1Cpu(p) then
  2265. result:=true
  2266. else if (p.typ=ait_instruction) and
  2267. MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2268. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2269. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2270. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2271. begin
  2272. DebugMsg('Peephole Stm2Push done', p);
  2273. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2274. AsmL.InsertAfter(hp, p);
  2275. asml.Remove(p);
  2276. p:=hp;
  2277. result:=true;
  2278. end
  2279. {else if (p.typ=ait_instruction) and
  2280. MatchInstruction(p, A_STR, [C_None], [PF_None]) and
  2281. (taicpu(p).oper[1]^.ref^.addressmode=AM_PREINDEXED) and
  2282. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2283. (taicpu(p).oper[1]^.ref^.offset=-4) and
  2284. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,14]) then
  2285. begin
  2286. DebugMsg('Peephole Str2Push done', p);
  2287. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2288. asml.InsertAfter(hp, p);
  2289. asml.Remove(p);
  2290. p.Free;
  2291. p:=hp;
  2292. result:=true;
  2293. end}
  2294. else if (p.typ=ait_instruction) and
  2295. MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2296. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2297. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2298. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2299. begin
  2300. DebugMsg('Peephole Ldm2Pop done', p);
  2301. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2302. asml.InsertBefore(hp, p);
  2303. asml.Remove(p);
  2304. p.Free;
  2305. p:=hp;
  2306. result:=true;
  2307. end
  2308. {else if (p.typ=ait_instruction) and
  2309. MatchInstruction(p, A_LDR, [C_None], [PF_None]) and
  2310. (taicpu(p).oper[1]^.ref^.addressmode=AM_POSTINDEXED) and
  2311. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2312. (taicpu(p).oper[1]^.ref^.offset=4) and
  2313. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,15]) then
  2314. begin
  2315. DebugMsg('Peephole Ldr2Pop done', p);
  2316. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2317. asml.InsertBefore(hp, p);
  2318. asml.Remove(p);
  2319. p.Free;
  2320. p:=hp;
  2321. result:=true;
  2322. end}
  2323. else if (p.typ=ait_instruction) and
  2324. MatchInstruction(p, [A_AND], [], [PF_None]) and
  2325. (taicpu(p).ops = 2) and
  2326. (taicpu(p).oper[1]^.typ=top_const) and
  2327. ((taicpu(p).oper[1]^.val=255) or
  2328. (taicpu(p).oper[1]^.val=65535)) then
  2329. begin
  2330. DebugMsg('Peephole AndR2Uxt done', p);
  2331. if taicpu(p).oper[1]^.val=255 then
  2332. taicpu(p).opcode:=A_UXTB
  2333. else
  2334. taicpu(p).opcode:=A_UXTH;
  2335. taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
  2336. result := true;
  2337. end
  2338. else if (p.typ=ait_instruction) and
  2339. MatchInstruction(p, [A_AND], [], [PF_None]) and
  2340. (taicpu(p).ops = 3) and
  2341. (taicpu(p).oper[2]^.typ=top_const) and
  2342. ((taicpu(p).oper[2]^.val=255) or
  2343. (taicpu(p).oper[2]^.val=65535)) then
  2344. begin
  2345. DebugMsg('Peephole AndRR2Uxt done', p);
  2346. if taicpu(p).oper[2]^.val=255 then
  2347. taicpu(p).opcode:=A_UXTB
  2348. else
  2349. taicpu(p).opcode:=A_UXTH;
  2350. taicpu(p).ops:=2;
  2351. result := true;
  2352. end
  2353. {else if (p.typ=ait_instruction) and
  2354. MatchInstruction(p, [A_CMP], [C_None], [PF_None]) and
  2355. (taicpu(p).oper[1]^.typ=top_const) and
  2356. (taicpu(p).oper[1]^.val=0) and
  2357. GetNextInstruction(p,hp1) and
  2358. (taicpu(hp1).opcode=A_B) and
  2359. (taicpu(hp1).condition in [C_EQ,C_NE]) then
  2360. begin
  2361. if taicpu(hp1).condition = C_EQ then
  2362. hp2:=taicpu.op_reg_ref(A_CBZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^)
  2363. else
  2364. hp2:=taicpu.op_reg_ref(A_CBNZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^);
  2365. taicpu(hp2).is_jmp := true;
  2366. asml.InsertAfter(hp2, hp1);
  2367. asml.Remove(hp1);
  2368. hp1.Free;
  2369. asml.Remove(p);
  2370. p.Free;
  2371. p := hp2;
  2372. result := true;
  2373. end}
  2374. end;
procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
  { Second Thumb-2 peephole pass: collapses a conditional branch that jumps
    over a short run (1..4) of conditionalizable instructions into an
    IT/ITT/ITTT/ITTTT block, conditionalizing those instructions with the
    inverse condition and removing the branch. }
  var
    p,hp1,hp2: tai;
    l : longint;            { number of instructions between the Bxx and its label }
    condition : tasmcond;   { inverse of the branch condition, applied to the run }
    { UsedRegs, TmpUsedRegs: TRegSet; }
  begin
    p := BlockStart;
    { UsedRegs := []; }
    while (p <> BlockEnd) Do
      begin
        { UpdateUsedRegs(UsedRegs, tai(p.next)); }
        case p.Typ Of
          Ait_Instruction:
            begin
              case taicpu(p).opcode Of
                A_B:
                  if taicpu(p).condition<>C_None then
                    begin
                       { check for
                              Bxx   xxx
                              <several instructions>
                           xxx:
                       }
                       { first walk: count how many instructions can be
                         conditionalized before the branch target }
                       l:=0;
                       GetNextInstruction(p, hp1);
                       while assigned(hp1) and
                         (l<=4) and
                         CanBeCond(hp1) and
                         { stop on labels }
                         not(hp1.typ=ait_label) do
                         begin
                            inc(l);
                            if MustBeLast(hp1) then
                              begin
                                //hp1:=nil;
                                { this instruction must terminate the run, but
                                  still advance past it so hp1 can be checked
                                  against the label below }
                                GetNextInstruction(hp1,hp1);
                                break;
                              end
                            else
                              GetNextInstruction(hp1,hp1);
                         end;
                       if assigned(hp1) then
                         begin
                            { hp1 must now be exactly the branch target label }
                            if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
                              begin
                                { an IT block can cover at most 4 instructions }
                                if (l<=4) and (l>0) then
                                  begin
                                    condition:=inverse_cond(taicpu(p).condition);
                                    hp2:=p;
                                    GetNextInstruction(p,hp1);
                                    p:=hp1;
                                    { second walk: stamp the inverse condition
                                      on every instruction of the run }
                                    repeat
                                      if hp1.typ=ait_instruction then
                                        taicpu(hp1).condition:=condition;
                                      if MustBeLast(hp1) then
                                        begin
                                          GetNextInstruction(hp1,hp1);
                                          break;
                                        end
                                      else
                                        GetNextInstruction(hp1,hp1);
                                    until not(assigned(hp1)) or
                                      not(CanBeCond(hp1)) or
                                      (hp1.typ=ait_label);
                                    { wait with removing else GetNextInstruction could
                                      ignore the label if it was the only usage in the
                                      jump moved away }
                                    asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
                                    { NOTE(review): presumably shrinks an earlier IT
                                      block that covered the removed branch — confirm
                                      against DecrementPreceedingIT's definition }
                                    DecrementPreceedingIT(asml, hp2);
                                    { one 'T' per conditionalized instruction }
                                    case l of
                                      1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
                                      2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
                                      3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
                                      4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
                                    end;
                                    { drop the branch's reference to the label, then
                                      delete the branch itself }
                                    tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                    asml.remove(hp2);
                                    hp2.free;
                                    { p already advanced past the removed branch }
                                    continue;
                                  end;
                              end;
                         end;
                    end;
                else
                  ;
              end;
            end;
          else
            ;
        end;
        p := tai(p.next)
      end;
  end;
function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
  { Post-pass Thumb-2 peephole optimizations.  Most branches convert an
    instruction into its flag-setting (S) form when the flags register is
    currently unused — NOTE(review): presumably so the narrower 16-bit
    Thumb encodings, which set flags, can be selected; confirm against the
    assembler/encoder.  Each such branch wraps p in an alloc/dealloc pair
    for NR_DEFAULTFLAGS and marks the flags as used.
    Returns true when p was modified. }
  begin
    result:=false;
    if p.typ = ait_instruction then
      begin
        { mov rX,#imm8 (0..255) -> movs rX,#imm8 }
        if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
          (taicpu(p).oper[1]^.typ=top_const) and
          (taicpu(p).oper[1]^.val >= 0) and
          (taicpu(p).oper[1]^.val < 256) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole Mov2Movs done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            result:=true;
          end
        { mvn rX,rY -> mvns rX,rY }
        else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
          (taicpu(p).oper[1]^.typ=top_reg) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole Mvn2Mvns done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            result:=true;
          end
        { rsb rX,rY,#0 -> rsbs rX,rY,#0 }
        else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
          (taicpu(p).ops = 3) and
          (taicpu(p).oper[2]^.typ=top_const) and
          (taicpu(p).oper[2]^.val=0) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole Rsb2Rsbs done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            result:=true;
          end
        { add/sub rX,rX,#imm8 -> adds/subs rX,#imm8 (2-operand S-form);
          SP is excluded as destination }
        else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
          (taicpu(p).ops = 3) and
          MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
          (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
          (taicpu(p).oper[2]^.typ=top_const) and
          (taicpu(p).oper[2]^.val >= 0) and
          (taicpu(p).oper[2]^.val < 256) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole AddSub2*s done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            { copy the constant into slot 1 BEFORE shrinking ops }
            taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
            taicpu(p).oppostfix:=PF_S;
            taicpu(p).ops := 2;
            result:=true;
          end
        { 2-operand add/sub rX,rY -> adds/subs rX,rY; SP excluded on both sides }
        else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
          (taicpu(p).ops = 2) and
          (taicpu(p).oper[1]^.typ=top_reg) and
          (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
          (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole AddSub2*s done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            result:=true;
          end
        { add rX,rX,rY -> add rX,rY (no flag change needed) }
        else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
          (taicpu(p).ops = 3) and
          MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
          (taicpu(p).oper[2]^.typ=top_reg) then
          begin
            DebugMsg('Peephole AddRRR2AddRR done', p);
            taicpu(p).ops := 2;
            taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
            result:=true;
          end
        { op rX,rX,rY -> ops rX,rY for logic/shift ops }
        else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
          (taicpu(p).ops = 3) and
          MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
          (taicpu(p).oper[2]^.typ=top_reg) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole opXXY2opsXY done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).ops := 2;
            taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
            taicpu(p).oppostfix:=PF_S;
            result:=true;
          end
        { ops rX,rX,Y -> ops rX,Y when already flag-setting (just drop the
          redundant first source) }
        else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
          (taicpu(p).ops = 3) and
          MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
          (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
          begin
            DebugMsg('Peephole opXXY2opXY done', p);
            taicpu(p).ops := 2;
            if taicpu(p).oper[2]^.typ=top_reg then
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
            else
              taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
            result:=true;
          end
        { op rX,rY,rX -> ops rX,rY — valid because AND/ORR/EOR commute }
        else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
          (taicpu(p).ops = 3) and
          MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole opXYX2opsXY done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            taicpu(p).ops := 2;
            result:=true;
          end
        { mov rX,rY,<shift> -> lsls/lsrs/asrs/rors rX,rY,<amount/reg> }
        else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
          (taicpu(p).ops=3) and
          (taicpu(p).oper[2]^.typ=top_shifterop) and
          (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
          //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
          (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
          begin
            DebugMsg('Peephole Mov2Shift done', p);
            asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
            asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
            IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
            taicpu(p).oppostfix:=PF_S;
            case taicpu(p).oper[2]^.shifterop^.shiftmode of
              SM_LSL: taicpu(p).opcode:=A_LSL;
              SM_LSR: taicpu(p).opcode:=A_LSR;
              SM_ASR: taicpu(p).opcode:=A_ASR;
              SM_ROR: taicpu(p).opcode:=A_ROR;
              else
                { unreachable: shiftmode was checked above }
                internalerror(2019050912);
            end;
            { replace the shifter operand with either the shift-count
              register or the immediate shift amount }
            if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
              taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
            else
              taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
            result:=true;
          end
      end;
  end;
  2622. begin
  2623. casmoptimizer:=TCpuAsmOptimizer;
  2624. cpreregallocscheduler:=TCpuPreRegallocScheduler;
  2625. End.