aoptcpu.pas

  1. {
  2. Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
  3. Development Team
  4. This unit implements the ARM optimizer object
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 2 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program; if not, write to the Free Software
  15. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  16. ****************************************************************************
  17. }
  18. Unit aoptcpu;
  19. {$i fpcdefs.inc}
  20. { $define DEBUG_PREREGSCHEDULER}
  21. {$define DEBUG_AOPTCPU}
  22. Interface
  23. uses cgbase, cgutils, cpubase, aasmtai, aasmcpu,aopt, aoptobj;
  24. Type
  25. TCpuAsmOptimizer = class(TAsmOptimizer)
  26. { Can't be done in some cases due to the limited range of jumps }
  27. function CanDoJumpOpts: Boolean; override;
  28. { uses the same constructor as TAopObj }
  29. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  30. procedure PeepHoleOptPass2;override;
  31. Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
  32. function RemoveSuperfluousMove(const p: tai; movp: tai; const optimizer: string): boolean;
  33. function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;
  34. { gets the next tai object after Current that contains info relevant
  35. to the optimizer: one that uses the given register or changes
  36. the program flow.
  37. If there is none, it returns false and
  38. sets Next to nil }
  39. Function GetNextInstructionUsingReg(Current: tai; Out Next: tai; reg: TRegister): Boolean;
  40. Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  41. { outputs a debug message into the assembler file }
  42. procedure DebugMsg(const s: string; p: tai);
  43. function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
  44. function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
  45. protected
  46. function LookForPreindexedPattern(p: taicpu): boolean;
  47. function LookForPostindexedPattern(p: taicpu): boolean;
  48. End;
  49. TCpuPreRegallocScheduler = class(TAsmScheduler)
  50. function SchedulerPass1Cpu(var p: tai): boolean;override;
  51. procedure SwapRegLive(p, hp1: taicpu);
  52. end;
  53. TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
  54. { uses the same constructor as TAopObj }
  55. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  56. procedure PeepHoleOptPass2;override;
  57. function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  58. End;
  59. function MustBeLast(p : tai) : boolean;
  60. Implementation
  61. uses
  62. cutils,verbose,globtype,globals,
  63. systems,
  64. cpuinfo,
  65. cgobj,procinfo,
  66. aasmbase,aasmdata;
  67. { Range check must be disabled explicitly as conversions between signed and unsigned
  68. 32-bit values are done without explicit typecasts }
  69. {$R-}
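{ Returns true if the instruction p is currently unconditional and may safely be
  given a condition: ruled out are Thumb code, IT..ITTTT, CBZ/CBNZ, PLD and
  BL/BLX to an immediate target (BLX to a register is fine). }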
  70. function CanBeCond(p : tai) : boolean;
  71. begin
  72. result:=
  73. not(GenerateThumbCode) and
  74. (p.typ=ait_instruction) and
  75. (taicpu(p).condition=C_None) and
  76. ((taicpu(p).opcode<A_IT) or (taicpu(p).opcode>A_ITTTT)) and
  77. (taicpu(p).opcode<>A_CBZ) and
  78. (taicpu(p).opcode<>A_CBNZ) and
  79. (taicpu(p).opcode<>A_PLD) and
  80. (((taicpu(p).opcode<>A_BLX) and
  81. { BL may need to be converted into BLX by the linker -- could possibly
  82. be allowed in case it's to a local symbol of which we know that it
  83. uses the same instruction set as the current one }
  84. (taicpu(p).opcode<>A_BL)) or
  85. (taicpu(p).oper[0]^.typ=top_reg));
  86. end;
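{ True if r1 and r2 describe exactly the same memory reference and neither
  of them is marked as volatile. }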
  87. function RefsEqual(const r1, r2: treference): boolean;
  88. begin
  89. refsequal :=
  90. (r1.offset = r2.offset) and
  91. (r1.base = r2.base) and
  92. (r1.index = r2.index) and (r1.scalefactor = r2.scalefactor) and
  93. (r1.symbol=r2.symbol) and (r1.refaddr = r2.refaddr) and
  94. (r1.relsymbol = r2.relsymbol) and
  95. (r1.signindex = r2.signindex) and
  96. (r1.shiftimm = r2.shiftimm) and
  97. (r1.addressmode = r2.addressmode) and
  98. (r1.shiftmode = r2.shiftmode) and
  99. (r1.volatility=[]) and
  100. (r2.volatility=[]);
  101. end;
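{ The two MatchInstruction overloads check that instr is an instruction with the
  given opcode (or one of the given opcode set), condition and postfix; an empty
  set acts as a wildcard for the respective field. }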
  102. function MatchInstruction(const instr: tai; const op: TCommonAsmOps; const cond: TAsmConds; const postfix: TOpPostfixes): boolean;
  103. begin
  104. result :=
  105. (instr.typ = ait_instruction) and
  106. ((op = []) or ((ord(taicpu(instr).opcode)<256) and (taicpu(instr).opcode in op))) and
  107. ((cond = []) or (taicpu(instr).condition in cond)) and
  108. ((postfix = []) or (taicpu(instr).oppostfix in postfix));
  109. end;
  110. function MatchInstruction(const instr: tai; const op: TAsmOp; const cond: TAsmConds; const postfix: TOpPostfixes): boolean;
  111. begin
  112. result :=
  113. (instr.typ = ait_instruction) and
  114. (taicpu(instr).opcode = op) and
  115. ((cond = []) or (taicpu(instr).condition in cond)) and
  116. ((postfix = []) or (taicpu(instr).oppostfix in postfix));
  117. end;
  118. function MatchOperand(const oper1: TOper; const oper2: TOper): boolean; inline;
  119. begin
  120. result := oper1.typ = oper2.typ;
  121. if result then
  122. case oper1.typ of
  123. top_const:
  124. Result:=oper1.val = oper2.val;
  125. top_reg:
  126. Result:=oper1.reg = oper2.reg;
  127. top_conditioncode:
  128. Result:=oper1.cc = oper2.cc;
  129. top_ref:
  130. Result:=RefsEqual(oper1.ref^, oper2.ref^);
  131. else Result:=false;
  132. end
  133. end;
  134. function MatchOperand(const oper: TOper; const reg: TRegister): boolean; inline;
  135. begin
  136. result := (oper.typ = top_reg) and (oper.reg = reg);
  137. end;
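{ Removes the conditional move in sequences like
    cmp   reg,#const
    moveq reg,#const
  if the comparison succeeds, reg already holds #const, so the moveq is redundant. }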
  138. function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
  139. begin
  140. Result:=false;
  141. if (taicpu(movp).condition = C_EQ) and
  142. (taicpu(cmpp).oper[0]^.reg = taicpu(movp).oper[0]^.reg) and
  143. (taicpu(cmpp).oper[1]^.val = taicpu(movp).oper[1]^.val) then
  144. begin
  145. asml.insertafter(tai_comment.Create(strpnew('Peephole CmpMovMov - Removed redundant moveq')), movp);
  146. asml.remove(movp);
  147. movp.free;
  148. Result:=true;
  149. end;
  150. end;
  151. function AlignedToQWord(const ref : treference) : boolean;
  152. begin
  153. { (safe) heuristics to ensure alignment }
  154. result:=(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) and
  155. (((ref.offset>=0) and
  156. ((ref.offset mod 8)=0) and
  157. ((ref.base=NR_R13) or
  158. (ref.index=NR_R13))
  159. ) or
  160. ((ref.offset<=0) and
  161. { when NR_R11 is used as frame pointer, it always has a value of <qword align>+4 }
  162. ((abs(ref.offset+4) mod 8)=0) and
  163. (current_procinfo.framepointer=NR_R11) and
  164. ((ref.base=NR_R11) or
  165. (ref.index=NR_R11))
  166. )
  167. );
  168. end;
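{ True if aoffset can be encoded as an immediate offset of a load/store with
  postfix pf: Thumb-2 accepts -255..4095, ARM accepts up to +/-4095 for
  PF_None/PF_B and up to +/-255 for the other postfixes. }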
  169. function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
  170. begin
  171. if GenerateThumb2Code then
  172. result := (aoffset<4096) and (aoffset>-256)
  173. else
  174. result := ((pf in [PF_None,PF_B]) and
  175. (abs(aoffset)<4096)) or
  176. (abs(aoffset)<256);
  177. end;
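{ True if the instruction hp reads reg, either as a source operand, as part of a
  register set or shifter operand, or as base/index register of a reference. }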
  178. function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
  179. var
  180. p: taicpu;
  181. i: longint;
  182. begin
  183. instructionLoadsFromReg := false;
  184. if not (assigned(hp) and (hp.typ = ait_instruction)) then
  185. exit;
  186. p:=taicpu(hp);
  187. i:=1;
  188. {For these instructions we have to start on oper[0]}
  189. if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
  190. A_CMP, A_CMN, A_TST, A_TEQ,
  191. A_B, A_BL, A_BX, A_BLX,
  192. A_SMLAL, A_UMLAL]) then i:=0;
  193. while(i<p.ops) do
  194. begin
  195. case p.oper[I]^.typ of
  196. top_reg:
  197. instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
  198. { STRD }
  199. ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
  200. top_regset:
  201. instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
  202. top_shifterop:
  203. instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
  204. top_ref:
  205. instructionLoadsFromReg :=
  206. (p.oper[I]^.ref^.base = reg) or
  207. (p.oper[I]^.ref^.index = reg);
  208. else
  209. ;
  210. end;
  211. if instructionLoadsFromReg then exit; {Bail out if we found something}
  212. Inc(I);
  213. end;
  214. end;
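{ True if the instruction hp writes a new value into reg; instructions that
  only read reg return false. }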
  215. function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
  216. var
  217. p: taicpu;
  218. begin
  219. p := taicpu(hp);
  220. Result := false;
  221. if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
  222. exit;
  223. case p.opcode of
  224. { These opcodes do not write into a register at all }
  225. A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
  226. A_VCMP:
  227. exit;
  228. {Take care of pre-/postindexed stores and loads: they change their base register}
  229. A_STR, A_LDR:
  230. begin
  231. Result := false;
  232. { actually, this does not apply here because post-/preindexed does not mean that a register
  233. is loaded with a new value, it is only modified
  234. (taicpu(p).oper[1]^.typ=top_ref) and
  235. (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  236. (taicpu(p).oper[1]^.ref^.base = reg);
  237. }
  238. { STR does not load into its first register }
  239. if p.opcode = A_STR then
  240. exit;
  241. end;
  242. A_VSTR:
  243. begin
  244. Result := false;
  245. exit;
  246. end;
  247. { These four write into the first 2 registers; UMLAL and SMLAL also read from them }
  248. A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
  249. Result :=
  250. (p.oper[1]^.typ = top_reg) and
  251. (p.oper[1]^.reg = reg);
  252. {Loads to oper2 from coprocessor}
  253. {
  254. MCR/MRC is currently not supported in FPC
  255. A_MRC:
  256. Result :=
  257. (p.oper[2]^.typ = top_reg) and
  258. (p.oper[2]^.reg = reg);
  259. }
  260. {Loads to all registers in the register set}
  261. A_LDM, A_VLDM:
  262. Result := (getsupreg(reg) in p.oper[1]^.regset^);
  263. A_POP:
  264. Result := (getsupreg(reg) in p.oper[0]^.regset^) or
  265. (reg=NR_STACK_POINTER_REG);
  266. else
  267. ;
  268. end;
  269. if Result then
  270. exit;
  271. case p.oper[0]^.typ of
  272. {The common case: the register in oper[0] is the destination}
  273. top_reg:
  274. Result := (p.oper[0]^.reg = reg) or
  275. { LDRD }
  276. (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
  277. {LDM/STM might write a new value to their base register}
  278. top_ref:
  279. Result :=
  280. (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  281. (taicpu(p).oper[0]^.ref^.base = reg);
  282. else
  283. ;
  284. end;
  285. end;
  286. function TCpuAsmOptimizer.GetNextInstructionUsingReg(Current: tai;
  287. Out Next: tai; reg: TRegister): Boolean;
  288. begin
  289. Next:=Current;
  290. repeat
  291. Result:=GetNextInstruction(Next,Next);
  292. until not (Result) or
  293. not(cs_opt_level3 in current_settings.optimizerswitches) or
  294. (Next.typ<>ait_instruction) or
  295. RegInInstruction(reg,Next) or
  296. is_calljmp(taicpu(Next).opcode) or
  297. RegModifiedByInstruction(NR_PC,Next);
  298. end;
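{ Searches forward from Current for the next LDR or STR that accesses exactly the
  reference ref and returns it in Next; the search gives up at calls, jumps,
  writes to PC, any store when StopOnStore is set, and only looks past a single
  instruction when optimization level 3 is enabled. }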
  299. function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai;
  300. Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  301. begin
  302. Next:=Current;
  303. repeat
  304. Result:=GetNextInstruction(Next,Next);
  305. if Result and
  306. (Next.typ=ait_instruction) and
  307. (taicpu(Next).opcode in [A_LDR, A_STR]) and
  308. (
  309. ((taicpu(Next).ops = 2) and
  310. (taicpu(Next).oper[1]^.typ = top_ref) and
  311. RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
  312. ((taicpu(Next).ops = 3) and { LDRD/STRD }
  313. (taicpu(Next).oper[2]^.typ = top_ref) and
  314. RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
  315. ) then
  316. {We've found an instruction LDR or STR with the same reference}
  317. exit;
  318. until not(Result) or
  319. (Next.typ<>ait_instruction) or
  320. not(cs_opt_level3 in current_settings.optimizerswitches) or
  321. is_calljmp(taicpu(Next).opcode) or
  322. (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
  323. RegModifiedByInstruction(NR_PC,Next);
  324. Result:=false;
  325. end;
  326. {$ifdef DEBUG_AOPTCPU}
  327. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
  328. begin
  329. asml.insertbefore(tai_comment.Create(strpnew(s)), p);
  330. end;
  331. {$else DEBUG_AOPTCPU}
  332. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
  333. begin
  334. end;
  335. {$endif DEBUG_AOPTCPU}
  336. function TCpuAsmOptimizer.CanDoJumpOpts: Boolean;
  337. begin
  338. { These jump optimisations cannot be performed when generating 16-bit Thumb code (i.e. Thumb without Thumb-2) }
  339. Result := not (
  340. (current_settings.instructionset = is_thumb) and not (CPUARM_HAS_THUMB2 in cpu_capabilities[current_settings.cputype])
  341. );
  342. end;
  343. function TCpuAsmOptimizer.RemoveSuperfluousMove(const p: tai; movp: tai; const optimizer: string):boolean;
  344. var
  345. alloc,
  346. dealloc : tai_regalloc;
  347. hp1 : tai;
  348. begin
  349. Result:=false;
  350. if MatchInstruction(movp, A_MOV, [taicpu(p).condition], [PF_None]) and
  351. (taicpu(movp).ops=2) and {We can't optimize if there is a shiftop}
  352. MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
  353. { don't mess with moves to pc }
  354. (taicpu(movp).oper[0]^.reg<>NR_PC) and
  355. { don't mess with moves to lr }
  356. (taicpu(movp).oper[0]^.reg<>NR_R14) and
  357. { the destination register of the mov must not be used between p and movp }
  358. not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
  359. { cb[n]z are thumb instructions which require specific registers, with no wide forms }
  360. (taicpu(p).opcode<>A_CBZ) and
  361. (taicpu(p).opcode<>A_CBNZ) and
  362. {There is a special requirement for MUL and MLA, oper[0] and oper[1] are not allowed to be the same}
  363. not (
  364. (taicpu(p).opcode in [A_MLA, A_MUL]) and
  365. (taicpu(p).oper[1]^.reg = taicpu(movp).oper[0]^.reg) and
  366. (current_settings.cputype < cpu_armv6)
  367. ) and
  368. { Take care to only do this for instructions which REALLY load to the first register.
  369. Otherwise
  370. str reg0, [reg1]
  371. mov reg2, reg0
  372. will be optimized to
  373. str reg2, [reg1]
  374. }
  375. regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
  376. begin
  377. dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
  378. if assigned(dealloc) then
  379. begin
  380. DebugMsg('Peephole '+optimizer+' removed superfluous mov', movp);
  381. result:=true;
  382. { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
  383. and remove it if possible }
  384. asml.Remove(dealloc);
  385. alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
  386. if assigned(alloc) then
  387. begin
  388. asml.Remove(alloc);
  389. alloc.free;
  390. dealloc.free;
  391. end
  392. else
  393. asml.InsertAfter(dealloc,p);
  394. { try to move the allocation of the target register }
  395. GetLastInstruction(movp,hp1);
  396. alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
  397. if assigned(alloc) then
  398. begin
  399. asml.Remove(alloc);
  400. asml.InsertBefore(alloc,p);
  401. { adjust used regs }
  402. IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
  403. end;
  404. { finally get rid of the mov }
  405. taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
  406. { Remove preindexing and postindexing for LDR in some cases.
  407. For example:
  408. ldr reg2,[reg1, xxx]!
  409. mov reg1,reg2
  410. must be translated to:
  411. ldr reg1,[reg1, xxx]
  412. Preindexing must be removed there, since the same register is used as the base and as the target.
  413. Such a case is not allowed on ARM CPUs and produces a crash. }
  414. if (taicpu(p).opcode = A_LDR) and (taicpu(p).oper[1]^.typ = top_ref)
  415. and (taicpu(movp).oper[0]^.reg = taicpu(p).oper[1]^.ref^.base)
  416. then
  417. taicpu(p).oper[1]^.ref^.addressmode:=AM_OFFSET;
  418. asml.remove(movp);
  419. movp.free;
  420. end;
  421. end;
  422. end;
  423. function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
  424. var
  425. alloc,
  426. dealloc : tai_regalloc;
  427. hp1 : tai;
  428. begin
  429. Result:=false;
  430. if ((MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  431. ((getregtype(taicpu(movp).oper[0]^.reg)=R_MMREGISTER) or (taicpu(p).opcode=A_VLDR))
  432. ) or
  433. (((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFD)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
  434. (((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFS)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
  435. ) and
  436. (taicpu(movp).ops=2) and
  437. MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
  438. { the destination register of the mov must not be used between p and movp }
  439. not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
  440. { Take care to only do this for instructions which REALLY load to the first register.
  441. Otherwise
  442. vstr reg0, [reg1]
  443. vmov reg2, reg0
  444. will be optimized to
  445. vstr reg2, [reg1]
  446. }
  447. regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
  448. begin
  449. dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
  450. if assigned(dealloc) then
  451. begin
  452. DebugMsg('Peephole '+optimizer+' removed superfluous vmov', movp);
  453. result:=true;
  454. { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
  455. and remove it if possible }
  456. asml.Remove(dealloc);
  457. alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
  458. if assigned(alloc) then
  459. begin
  460. asml.Remove(alloc);
  461. alloc.free;
  462. dealloc.free;
  463. end
  464. else
  465. asml.InsertAfter(dealloc,p);
  466. { try to move the allocation of the target register }
  467. GetLastInstruction(movp,hp1);
  468. alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
  469. if assigned(alloc) then
  470. begin
  471. asml.Remove(alloc);
  472. asml.InsertBefore(alloc,p);
  473. { adjust used regs }
  474. IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
  475. end;
  476. { change
  477. vldr reg0,[reg1]
  478. vmov reg2,reg0
  479. into
  480. ldr reg2,[reg1]
  481. if reg2 is an int register
  482. }
  483. if (taicpu(p).opcode=A_VLDR) and (getregtype(taicpu(movp).oper[0]^.reg)=R_INTREGISTER) then
  484. taicpu(p).opcode:=A_LDR;
  485. { finally get rid of the mov }
  486. taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
  487. asml.remove(movp);
  488. movp.free;
  489. end;
  490. end;
  491. end;
  492. {
  493. optimize
  494. add/sub reg1,reg1,regY/const
  495. ...
  496. ldr/str regX,[reg1]
  497. into
  498. ldr/str regX,[reg1, regY/const]!
  499. }
  500. function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
  501. var
  502. hp1: tai;
  503. begin
  504. if GenerateARMCode and
  505. (p.ops=3) and
  506. MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
  507. GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
  508. (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
  509. MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
  510. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  511. (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
  512. (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
  513. (taicpu(hp1).oper[1]^.ref^.offset=0) and
  514. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  515. (((p.oper[2]^.typ=top_reg) and
  516. (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
  517. ((p.oper[2]^.typ=top_const) and
  518. ((abs(p.oper[2]^.val) < 256) or
  519. ((abs(p.oper[2]^.val) < 4096) and
  520. (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
  521. begin
  522. taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;
  523. if p.oper[2]^.typ=top_reg then
  524. begin
  525. taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
  526. if p.opcode=A_ADD then
  527. taicpu(hp1).oper[1]^.ref^.signindex:=1
  528. else
  529. taicpu(hp1).oper[1]^.ref^.signindex:=-1;
  530. end
  531. else
  532. begin
  533. if p.opcode=A_ADD then
  534. taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
  535. else
  536. taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
  537. end;
  538. result:=true;
  539. end
  540. else
  541. result:=false;
  542. end;
  543. {
  544. optimize
  545. ldr/str regX,[reg1]
  546. ...
  547. add/sub reg1,reg1,regY/const
  548. into
  549. ldr/str regX,[reg1], regY/const
  550. }
  551. function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
  552. var
  553. hp1 : tai;
  554. begin
  555. Result:=false;
  556. if (p.oper[1]^.typ = top_ref) and
  557. (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
  558. (p.oper[1]^.ref^.index=NR_NO) and
  559. (p.oper[1]^.ref^.offset=0) and
  560. GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
  561. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  562. MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
  563. (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
  564. (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
  565. (
  566. (taicpu(hp1).oper[2]^.typ=top_reg) or
  567. { valid offset? }
  568. ((taicpu(hp1).oper[2]^.typ=top_const) and
  569. ((abs(taicpu(hp1).oper[2]^.val)<256) or
  570. ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
  571. )
  572. )
  573. ) and
  574. { don't apply the optimization if the base register is loaded }
  575. (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
  576. not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
  577. { don't apply the optimization if the (new) index register is loaded }
  578. (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
  579. not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
  580. GenerateARMCode then
  581. begin
  582. DebugMsg('Peephole Str/LdrAdd/Sub2Str/Ldr Postindex done', p);
  583. p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
  584. if taicpu(hp1).oper[2]^.typ=top_const then
  585. begin
  586. if taicpu(hp1).opcode=A_ADD then
  587. p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
  588. else
  589. p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
  590. end
  591. else
  592. begin
  593. p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
  594. if taicpu(hp1).opcode=A_ADD then
  595. p.oper[1]^.ref^.signindex:=1
  596. else
  597. p.oper[1]^.ref^.signindex:=-1;
  598. end;
  599. asml.Remove(hp1);
  600. hp1.Free;
  601. Result:=true;
  602. end;
  603. end;
  604. function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  605. var
  606. hp1,hp2,hp3,hp4: tai;
  607. i, i2: longint;
  608. tempop: tasmop;
  609. oldreg: tregister;
  610. dealloc: tai_regalloc;
  611. function IsPowerOf2(const value: DWord): boolean; inline;
  612. begin
  613. Result:=(value and (value - 1)) = 0;
  614. end;
  615. begin
  616. result := false;
  617. case p.typ of
  618. ait_instruction:
  619. begin
  620. {
  621. change
  622. <op> reg,x,y
  623. cmp reg,#0
  624. into
  625. <op>s reg,x,y
  626. }
  627. { this optimization can be applied only to the currently enabled operations because
  628. the other operations do not update all flags and FPC does not track flag usage }
  629. if MatchInstruction(p, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,A_EOR,A_AND,
  630. A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
  631. GetNextInstruction(p, hp1) and
  632. { mlas is only allowed in arm mode }
  633. ((taicpu(p).opcode<>A_MLA) or
  634. (current_settings.instructionset<>is_thumb)) and
  635. MatchInstruction(hp1, A_CMP, [C_None], [PF_None]) and
  636. (taicpu(hp1).oper[1]^.typ = top_const) and
  637. (taicpu(p).oper[0]^.reg = taicpu(hp1).oper[0]^.reg) and
  638. (taicpu(hp1).oper[1]^.val = 0) and
  639. GetNextInstruction(hp1, hp2) and
  640. { be careful here, following instructions could use other flags;
  641. however, after a jump fpc never depends on the value of flags }
  642. { All above instructions set Z and N according to the following
  643. Z := result = 0;
  644. N := result[31];
  645. EQ = Z=1; NE = Z=0;
  646. MI = N=1; PL = N=0; }
  647. (MatchInstruction(hp2, A_B, [C_EQ,C_NE,C_MI,C_PL], []) or
  648. { mov is also possible, but only if there is no shifter operand, it could be an rxx,
  649. we are too lazy to check if it is rxx or something else }
  650. (MatchInstruction(hp2, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp2).ops=2))) and
  651. assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp2.Next))) then
  652. begin
  653. DebugMsg('Peephole OpCmp2OpS done', p);
  654. taicpu(p).oppostfix:=PF_S;
  655. { move flag allocation if possible }
  656. GetLastInstruction(hp1, hp2);
  657. hp2:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp2.Next));
  658. if assigned(hp2) then
  659. begin
  660. asml.Remove(hp2);
  661. asml.insertbefore(hp2, p);
  662. end;
  663. asml.remove(hp1);
  664. hp1.free;
  665. Result:=true;
  666. end
  667. else
  668. case taicpu(p).opcode of
  669. A_STR:
  670. begin
  671. { change
  672. str reg1,ref
  673. ldr reg2,ref
  674. into
  675. str reg1,ref
  676. mov reg2,reg1
  677. }
  678. if (taicpu(p).oper[1]^.typ = top_ref) and
  679. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  680. (taicpu(p).oppostfix=PF_None) and
  681. (taicpu(p).condition=C_None) and
  682. GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
  683. MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
  684. (taicpu(hp1).oper[1]^.typ=top_ref) and
  685. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  686. not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
  687. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
  688. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
  689. begin
  690. if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
  691. begin
  692. DebugMsg('Peephole StrLdr2StrMov 1 done', hp1);
  693. asml.remove(hp1);
  694. hp1.free;
  695. end
  696. else
  697. begin
  698. taicpu(hp1).opcode:=A_MOV;
  699. taicpu(hp1).oppostfix:=PF_None;
  700. taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
  701. DebugMsg('Peephole StrLdr2StrMov 2 done', hp1);
  702. end;
  703. result := true;
  704. end
  705. { change
  706. str reg1,ref
  707. str reg2,ref
  708. into
  709. strd reg1,reg2,ref
  710. }
  711. else if (GenerateARMCode or GenerateThumb2Code) and
  712. (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
  713. (taicpu(p).oppostfix=PF_None) and
  714. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  715. GetNextInstruction(p,hp1) and
  716. MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
  717. not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
  718. (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
  719. { str ensures that either base or index contains no register, else str wouldn't
  720. use an offset either
  721. }
  722. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  723. (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
  724. (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
  725. (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
  726. AlignedToQWord(taicpu(p).oper[1]^.ref^) then
  727. begin
  728. DebugMsg('Peephole StrStr2Strd done', p);
  729. taicpu(p).oppostfix:=PF_D;
  730. taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
  731. taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
  732. taicpu(p).ops:=3;
  733. asml.remove(hp1);
  734. hp1.free;
  735. result:=true;
  736. end;
  737. Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  738. end;
  739. A_LDR:
  740. begin
  741. { change
  742. ldr reg1,ref
  743. ldr reg2,ref
  744. into ...
  745. }
  746. if (taicpu(p).oper[1]^.typ = top_ref) and
  747. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  748. GetNextInstruction(p,hp1) and
  749. { ldrd is not allowed here }
  750. MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
  751. begin
  752. {
  753. ...
  754. ldr reg1,ref
  755. mov reg2,reg1
  756. }
  757. if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
  758. RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
  759. (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
  760. (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
  761. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
  762. begin
  763. if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
  764. begin
  765. DebugMsg('Peephole LdrLdr2Ldr done', hp1);
  766. asml.remove(hp1);
  767. hp1.free;
  768. end
  769. else
  770. begin
  771. DebugMsg('Peephole LdrLdr2LdrMov done', hp1);
  772. taicpu(hp1).opcode:=A_MOV;
  773. taicpu(hp1).oppostfix:=PF_None;
  774. taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
  775. end;
  776. result := true;
  777. end
  778. {
  779. ...
  780. ldrd reg1,reg1+1,ref
  781. }
  782. else if (GenerateARMCode or GenerateThumb2Code) and
  783. (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
  784. { ldrd does not allow any postfixes ... }
  785. (taicpu(p).oppostfix=PF_None) and
  786. not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
  787. (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
  788. { ldr ensures that either base or index contains no register, else ldr wouldn't
  789. use an offset either
  790. }
  791. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  792. (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
  793. (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
  794. (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
  795. AlignedToQWord(taicpu(p).oper[1]^.ref^) then
  796. begin
  797. DebugMsg('Peephole LdrLdr2Ldrd done', p);
  798. taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
  799. taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
  800. taicpu(p).ops:=3;
  801. taicpu(p).oppostfix:=PF_D;
  802. asml.remove(hp1);
  803. hp1.free;
  804. result:=true;
  805. end;
  806. end;
  807. {
  808. Change
  809. ldrb dst1, [REF]
  810. and dst2, dst1, #255
  811. into
  812. ldrb dst2, [ref]
  813. }
  814. if not(GenerateThumbCode) and
  815. (taicpu(p).oppostfix=PF_B) and
  816. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  817. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
  818. (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
  819. (taicpu(hp1).oper[2]^.typ = top_const) and
  820. (taicpu(hp1).oper[2]^.val = $FF) and
  821. not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
  822. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  823. begin
  824. DebugMsg('Peephole LdrbAnd2Ldrb done', p);
  825. taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
  826. asml.remove(hp1);
  827. hp1.free;
  828. result:=true;
  829. end;
  830. Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  831. { Remove superfluous mov after ldr
  832. changes
  833. ldr reg1, ref
  834. mov reg2, reg1
  835. to
  836. ldr reg2, ref
  837. conditions are:
  838. * no ldrd usage
  839. * reg1 must be released after mov
  840. * mov must not contain shifterops
  841. * ldr+mov have the same conditions
  842. * mov does not set flags
  843. }
  844. if (taicpu(p).oppostfix<>PF_D) and
  845. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  846. RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
  847. Result:=true;
  848. end;
  849. A_MOV:
  850. begin
  851. { fold
  852. mov reg1,reg0, shift imm1
  853. mov reg1,reg1, shift imm2
  854. }
  855. if (taicpu(p).ops=3) and
  856. (taicpu(p).oper[2]^.typ = top_shifterop) and
  857. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  858. getnextinstruction(p,hp1) and
  859. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  860. (taicpu(hp1).ops=3) and
  861. MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
  862. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  863. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  864. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
  865. begin
  866. { fold
  867. mov reg1,reg0, lsl 16
  868. mov reg1,reg1, lsr 16
  869. strh reg1, ...
  870. dealloc reg1
  871. to
  872. strh reg1, ...
  873. dealloc reg1
  874. }
  875. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  876. (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
  877. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
  878. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
  879. getnextinstruction(hp1,hp2) and
  880. MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
  881. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
  882. begin
  883. TransferUsedRegs(TmpUsedRegs);
  884. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  885. UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
  886. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
  887. begin
  888. DebugMsg('Peephole optimizer removed superfluous 16 Bit zero extension', hp1);
  889. taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
  890. asml.remove(p);
  891. asml.remove(hp1);
  892. p.free;
  893. hp1.free;
  894. p:=hp2;
  895. Result:=true;
  896. end;
  897. end
  898. { fold
  899. mov reg1,reg0, shift imm1
  900. mov reg1,reg1, shift imm2
  901. to
  902. mov reg1,reg0, shift imm1+imm2
  903. }
  904. else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
  905. { an asr after an lsr behaves like an lsr (the top bit is already zero), so the asr can be folded into the lsr }
  906. ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR) ) then
  907. begin
  908. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  909. { avoid overflows }
  910. if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
  911. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  912. SM_ROR:
  913. taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
  914. SM_ASR:
  915. taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
  916. SM_LSR,
  917. SM_LSL:
  918. begin
  919. hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
  920. InsertLLItem(p.previous, p.next, hp2);
  921. p.free;
  922. p:=hp2;
  923. end;
  924. else
  925. internalerror(2008072803);
  926. end;
  927. DebugMsg('Peephole ShiftShift2Shift 1 done', p);
  928. asml.remove(hp1);
  929. hp1.free;
  930. result := true;
  931. end
  932. { fold
  933. mov reg1,reg0, shift imm1
  934. mov reg1,reg1, shift imm2
  935. mov reg1,reg1, shift imm3 ...
  936. mov reg2,reg1, shift imm3 ...
  937. }
  938. else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
  939. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  940. (taicpu(hp2).ops=3) and
  941. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  942. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
  943. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  944. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
  945. begin
  946. { mov reg1,reg0, lsl imm1
  947. mov reg1,reg1, lsr/asr imm2
  948. mov reg2,reg1, lsl imm3 ...
  949. to
  950. mov reg1,reg0, lsl imm1
  951. mov reg2,reg1, lsr/asr imm2-imm3
  952. if
  953. imm1>=imm2
  954. }
  955. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  956. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  957. (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  958. begin
  959. if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  960. begin
  961. if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
  962. not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  963. begin
  964. DebugMsg('Peephole ShiftShiftShift2ShiftShift 1a done', p);
  965. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  966. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  967. asml.remove(hp1);
  968. asml.remove(hp2);
  969. hp1.free;
  970. hp2.free;
  971. if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
  972. begin
  973. taicpu(p).freeop(1);
  974. taicpu(p).freeop(2);
  975. taicpu(p).loadconst(1,0);
  976. end;
  977. result := true;
  978. end;
  979. end
  980. else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  981. begin
  982. DebugMsg('Peephole ShiftShiftShift2ShiftShift 1b done', p);
  983. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
  984. taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  985. asml.remove(hp2);
  986. hp2.free;
  987. result := true;
  988. end;
  989. end
  990. { mov reg1,reg0, lsr/asr imm1
  991. mov reg1,reg1, lsl imm2
  992. mov reg1,reg1, lsr/asr imm3 ...
  993. if imm3>=imm1 and imm2>=imm1
  994. to
  995. mov reg1,reg0, lsl imm2-imm1
  996. mov reg1,reg1, lsr/asr imm3 ...
  997. }
  998. else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  999. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1000. (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
  1001. (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1002. begin
  1003. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
  1004. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1005. DebugMsg('Peephole ShiftShiftShift2ShiftShift 2 done', p);
  1006. asml.remove(p);
  1007. p.free;
  1008. p:=hp2;
  1009. if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
  1010. begin
  1011. taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
  1012. asml.remove(hp1);
  1013. hp1.free;
  1014. p:=hp2;
  1015. end;
  1016. result := true;
  1017. end;
  1018. end;
  1019. end;
  1020. { Change the common
  1021. mov r0, r0, lsr #xxx
  1022. and r0, r0, #yyy/bic r0, r0, #xxx
  1023. and remove the superfluous and/bic if possible
  1024. This could be extended to handle more cases.
  1025. }
  1026. if (taicpu(p).ops=3) and
  1027. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1028. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1029. (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
  1030. GetNextInstructionUsingReg(p,hp1, taicpu(p).oper[0]^.reg) and
  1031. (hp1.typ=ait_instruction) and
  1032. (taicpu(hp1).ops>=1) and
  1033. (taicpu(hp1).oper[0]^.typ=top_reg) and
  1034. (not RegModifiedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
  1035. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1036. begin
  1037. if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24 ) and
  1038. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1039. (taicpu(hp1).ops=3) and
  1040. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^) and
  1041. (taicpu(hp1).oper[2]^.typ = top_const) and
  1042. { Check if the AND would only mask out bits that are already zero
  1043. because of the shift }
  1044. ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hp1).oper[2]^.val) =
  1045. ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
  1046. begin
  1047. DebugMsg('Peephole LsrAnd2Lsr done', hp1);
  1048. taicpu(p).oper[0]^.reg:=taicpu(hp1).oper[0]^.reg;
  1049. asml.remove(hp1);
  1050. hp1.free;
  1051. result:=true;
  1052. end
  1053. else if MatchInstruction(hp1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1054. (taicpu(hp1).ops=3) and
  1055. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^) and
  1056. (taicpu(hp1).oper[2]^.typ = top_const) and
  1057. { Check if the BIC would only mask out bits that are already zero because of the shift }
  1058. (taicpu(hp1).oper[2]^.val<>0) and
  1059. (BsfDWord(taicpu(hp1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1060. begin
  1061. DebugMsg('Peephole LsrBic2Lsr done', hp1);
  1062. taicpu(p).oper[0]^.reg:=taicpu(hp1).oper[0]^.reg;
  1063. asml.remove(hp1);
  1064. hp1.free;
  1065. result:=true;
  1066. end;
  1067. end;
  1068. { Change
  1069. mov rx, ry, lsr/ror #xxx
  1070. uxtb/uxth rz,rx/and rz,rx,0xFF
  1071. dealloc rx
  1072. to
  1073. uxtb/uxth rz,ry,ror #xxx
  1074. }
  1075. if (taicpu(p).ops=3) and
  1076. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1077. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1078. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
  1079. (GenerateThumb2Code) and
  1080. GetNextInstructionUsingReg(p,hp1, taicpu(p).oper[0]^.reg) and
  1081. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1082. begin
  1083. if MatchInstruction(hp1, A_UXTB, [C_None], [PF_None]) and
  1084. (taicpu(hp1).ops = 2) and
  1085. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1086. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1087. begin
  1088. taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1089. taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1090. taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1091. taicpu(hp1).ops := 3;
  1092. GetNextInstruction(p,hp1);
  1093. asml.Remove(p);
  1094. p.Free;
  1095. p:=hp1;
  1096. result:=true;
  1097. exit;
  1098. end
  1099. else if MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
  1100. (taicpu(hp1).ops=2) and
  1101. (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
  1102. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1103. begin
  1104. taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1105. taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1106. taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1107. taicpu(hp1).ops := 3;
  1108. GetNextInstruction(p,hp1);
  1109. asml.Remove(p);
  1110. p.Free;
  1111. p:=hp1;
  1112. result:=true;
  1113. exit;
  1114. end
  1115. else if MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
  1116. (taicpu(hp1).ops = 3) and
  1117. (taicpu(hp1).oper[2]^.typ = top_const) and
  1118. (taicpu(hp1).oper[2]^.val = $FF) and
  1119. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1120. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1121. begin
  1122. taicpu(hp1).ops := 3;
  1123. taicpu(hp1).opcode := A_UXTB;
  1124. taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1125. taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1126. taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1127. GetNextInstruction(p,hp1);
  1128. asml.Remove(p);
  1129. p.Free;
  1130. p:=hp1;
  1131. result:=true;
  1132. exit;
  1133. end;
  1134. end;
  1135. {
  1136. optimize
  1137. mov rX, yyyy
  1138. ....
  1139. }
  1140. if (taicpu(p).ops = 2) and
  1141. GetNextInstruction(p,hp1) and
  1142. (tai(hp1).typ = ait_instruction) then
  1143. begin
  1144. {
  1145. This changes the very common
  1146. mov r0, #0
  1147. str r0, [...]
  1148. mov r0, #0
  1149. str r0, [...]
  1150. and removes all superfluous mov instructions
  1151. }
  1152. if (taicpu(p).oper[1]^.typ = top_const) and
  1153. (taicpu(hp1).opcode=A_STR) then
  1154. while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
  1155. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^) and
  1156. GetNextInstruction(hp1, hp2) and
  1157. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1158. (taicpu(hp2).ops = 2) and
  1159. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
  1160. MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
  1161. begin
  1162. DebugMsg('Peephole MovStrMov done', hp2);
  1163. GetNextInstruction(hp2,hp1);
  1164. asml.remove(hp2);
  1165. hp2.free;
  1166. result:=true;
  1167. if not assigned(hp1) then break;
  1168. end
  1169. {
  1170. This removes the first mov from
  1171. mov rX,...
  1172. mov rX,...
  1173. }
  1174. else if taicpu(hp1).opcode=A_MOV then
  1175. while MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1176. (taicpu(hp1).ops = 2) and
  1177. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^) and
  1178. { don't remove the first mov if the second is a mov rX,rX }
  1179. not(MatchOperand(taicpu(hp1).oper[0]^, taicpu(hp1).oper[1]^)) do
  1180. begin
  1181. DebugMsg('Peephole MovMov done', p);
  1182. asml.remove(p);
  1183. p.free;
  1184. p:=hp1;
  1185. GetNextInstruction(hp1,hp1);
  1186. result:=true;
  1187. if not assigned(hp1) then
  1188. break;
  1189. end;
  1190. end;
  1191. {
  1192. change
  1193. mov r1, r0
  1194. add r1, r1, #1
  1195. to
  1196. add r1, r0, #1
  1197. Todo: Make it work for mov+cmp too
  1198. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1199. }
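{ A mov+cmp variant (not handled yet, sketch only):
    mov r1, r0
    cmp r1, #1
  could likewise become
    cmp r0, #1
  provided r1 is not used afterwards.
}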
  1200. if (taicpu(p).ops = 2) and
  1201. (taicpu(p).oper[1]^.typ = top_reg) and
  1202. (taicpu(p).oppostfix = PF_NONE) and
  1203. GetNextInstruction(p, hp1) and
  1204. MatchInstruction(hp1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1205. A_AND, A_BIC, A_EOR, A_ORR, A_MOV, A_MVN],
  1206. [taicpu(p).condition], []) and
  1207. {MOV and MVN might only have 2 ops}
  1208. (taicpu(hp1).ops >= 2) and
  1209. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^.reg) and
  1210. (taicpu(hp1).oper[1]^.typ = top_reg) and
  1211. (
  1212. (taicpu(hp1).ops = 2) or
  1213. (taicpu(hp1).oper[2]^.typ in [top_reg, top_const, top_shifterop])
  1214. ) then
  1215. begin
  1216. { When we get here we still don't know if the registers match}
  1217. for I:=1 to 2 do
  1218. {
  1219. If the first loop was successful p will be replaced with hp1.
  1220. The checks will still be ok, because all required information
  1221. will also be in hp1 then.
  1222. }
  1223. if (taicpu(hp1).ops > I) and
  1224. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[I]^.reg) and
  1225. { prevent certain combinations on thumb(2), this is only a safe approximation }
  1226. (not(GenerateThumbCode or GenerateThumb2Code) or
  1227. ((getsupreg(taicpu(p).oper[1]^.reg)<>RS_R13) and
  1228. (getsupreg(taicpu(p).oper[1]^.reg)<>RS_R15))
  1229. ) then
  1230. begin
  1231. DebugMsg('Peephole RedundantMovProcess done', hp1);
  1232. taicpu(hp1).oper[I]^.reg := taicpu(p).oper[1]^.reg;
  1233. if p<>hp1 then
  1234. begin
  1235. asml.remove(p);
  1236. p.free;
  1237. p:=hp1;
  1238. Result:=true;
  1239. end;
  1240. end;
  1241. end;
  1242. { Fold the very common sequence
  1243. mov regA, regB
  1244. ldr* regA, [regA]
  1245. to
  1246. ldr* regA, [regB]
  1247. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1248. }
  1249. if (taicpu(p).opcode = A_MOV) and
  1250. (taicpu(p).ops = 2) and
  1251. (taicpu(p).oper[1]^.typ = top_reg) and
  1252. (taicpu(p).oppostfix = PF_NONE) and
  1253. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1254. MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1255. (taicpu(hp1).oper[1]^.typ = top_ref) and
  1256. { We can change the base register only when the instruction uses AM_OFFSET }
  1257. ((taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1258. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1259. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1260. ) and
  1261. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
  1262. // Make sure that Thumb code doesn't propagate a high register into a reference
  1263. ((GenerateThumbCode and
  1264. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)) or
  1265. (not GenerateThumbCode)) and
  1266. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1267. begin
  1268. DebugMsg('Peephole MovLdr2Ldr done', hp1);
  1269. if (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1270. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1271. taicpu(hp1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1272. if taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1273. taicpu(hp1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1274. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1275. if Assigned(dealloc) then
  1276. begin
  1277. asml.remove(dealloc);
  1278. asml.InsertAfter(dealloc,hp1);
  1279. end;
  1280. GetNextInstruction(p, hp1);
  1281. asml.remove(p);
  1282. p.free;
  1283. p:=hp1;
  1284. result:=true;
  1285. end;
  1286. { This folds shifterops into following instructions
  1287. mov r0, r1, lsl #8
  1288. add r2, r3, r0
  1289. to
  1290. add r2, r3, r1, lsl #8
  1291. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1292. }
  1293. if (taicpu(p).opcode = A_MOV) and
  1294. (taicpu(p).ops = 3) and
  1295. (taicpu(p).oper[1]^.typ = top_reg) and
  1296. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1297. (taicpu(p).oppostfix = PF_NONE) and
  1298. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1299. MatchInstruction(hp1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1300. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1301. A_CMP, A_CMN],
  1302. [taicpu(p).condition], [PF_None]) and
  1303. (not ((GenerateThumb2Code) and
  1304. (taicpu(hp1).opcode in [A_SBC]) and
  1305. (((taicpu(hp1).ops=3) and
  1306. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^.reg)) or
  1307. ((taicpu(hp1).ops=2) and
  1308. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^.reg))))) and
  1309. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) and
  1310. (taicpu(hp1).ops >= 2) and
  1311. {Currently we can't fold into another shifterop}
  1312. (taicpu(hp1).oper[taicpu(hp1).ops-1]^.typ = top_reg) and
  1313. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1314. NR_DEFAULTFLAGS for modification}
  1315. (
  1316. {Everything is fine if we don't use RRX}
  1317. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1318. (
  1319. {If it is RRX, then check if we're just accessing the next instruction}
  1320. GetNextInstruction(p, hp2) and
  1321. (hp1 = hp2)
  1322. )
  1323. ) and
{ reg1 must not be modified in between }
  1325. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
{ The shifterop can contain a register, which must not be modified }
  1327. (
  1328. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1329. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hp1))
  1330. ) and
  1331. (
  1332. {Only ONE of the two src operands is allowed to match}
  1333. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[taicpu(hp1).ops-2]^) xor
  1334. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[taicpu(hp1).ops-1]^)
  1335. ) then
  1336. begin
  1337. if taicpu(hp1).opcode in [A_TST, A_TEQ, A_CMN] then
  1338. I2:=0
  1339. else
  1340. I2:=1;
  1341. for I:=I2 to taicpu(hp1).ops-1 do
  1342. if MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[I]^.reg) then
  1343. begin
{ If the parameter matched on the second op from the RIGHT
  we have to swap the parameters; this will not happen for CMP,
  where we only evaluate the rightmost parameter
}
  1348. if I <> taicpu(hp1).ops-1 then
  1349. begin
  1350. {The SUB operators need to be changed when we swap parameters}
  1351. case taicpu(hp1).opcode of
  1352. A_SUB: tempop:=A_RSB;
  1353. A_SBC: tempop:=A_RSC;
  1354. A_RSB: tempop:=A_SUB;
  1355. A_RSC: tempop:=A_SBC;
  1356. else tempop:=taicpu(hp1).opcode;
  1357. end;
  1358. if taicpu(hp1).ops = 3 then
  1359. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1360. taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[2]^.reg,
  1361. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1362. else
  1363. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1364. taicpu(hp1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1365. taicpu(p).oper[2]^.shifterop^);
  1366. end
  1367. else
  1368. if taicpu(hp1).ops = 3 then
  1369. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hp1).opcode,
  1370. taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[1]^.reg,
  1371. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1372. else
  1373. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hp1).opcode,
  1374. taicpu(hp1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1375. taicpu(p).oper[2]^.shifterop^);
  1376. asml.insertbefore(hp2, hp1);
  1377. GetNextInstruction(p, hp2);
  1378. asml.remove(p);
  1379. asml.remove(hp1);
  1380. p.free;
  1381. hp1.free;
  1382. p:=hp2;
  1383. DebugMsg('Peephole FoldShiftProcess done', p);
  1384. Result:=true;
  1385. break;
  1386. end;
  1387. end;
  1388. {
  1389. Fold
  1390. mov r1, r1, lsl #2
  1391. ldr/ldrb r0, [r0, r1]
  1392. to
  1393. ldr/ldrb r0, [r0, r1, lsl #2]
  1394. XXX: This still needs some work, as we quite often encounter something like
  1395. mov r1, r2, lsl #2
  1396. add r2, r3, #imm
  1397. ldr r0, [r2, r1]
  1398. which can't be folded because r2 is overwritten between the shift and the ldr.
We could try to shuffle the registers around and fold it into:
  1400. add r1, r3, #imm
  1401. ldr r0, [r1, r2, lsl #2]
  1402. }
  1403. if (not(GenerateThumbCode)) and
  1404. (taicpu(p).opcode = A_MOV) and
  1405. (taicpu(p).ops = 3) and
  1406. (taicpu(p).oper[1]^.typ = top_reg) and
  1407. (taicpu(p).oper[2]^.typ = top_shifterop) and
{ RRX is tough to handle because it requires tracking the C-Flag;
  it is also extremely unlikely to be emitted this way }
  1410. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1411. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1412. { thumb2 allows only lsl #0..#3 }
  1413. (not(GenerateThumb2Code) or
  1414. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1415. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1416. )
  1417. ) and
  1418. (taicpu(p).oppostfix = PF_NONE) and
  1419. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1420. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1421. (MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1422. (GenerateThumb2Code and
  1423. MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1424. ) and
  1425. (
  1426. {If this is address by offset, one of the two registers can be used}
  1427. ((taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1428. (
  1429. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1430. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1431. )
  1432. ) or
  1433. {For post and preindexed only the index register can be used}
  1434. ((taicpu(hp1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1435. (
  1436. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1437. (taicpu(hp1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1438. ) and
  1439. (not GenerateThumb2Code)
  1440. )
  1441. ) and
  1442. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1443. (taicpu(hp1).oper[1]^.ref^.index<>NR_NO) and
  1444. (taicpu(hp1).oper[1]^.ref^.base<>NR_NO) and
  1445. { Only fold if there isn't another shifterop already, and offset is zero. }
  1446. (taicpu(hp1).oper[1]^.ref^.offset = 0) and
  1447. (taicpu(hp1).oper[1]^.ref^.shiftmode = SM_None) and
  1448. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
  1449. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1450. begin
  1451. { If the register we want to do the shift for resides in base, we need to swap that}
  1452. if (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1453. taicpu(hp1).oper[1]^.ref^.base := taicpu(hp1).oper[1]^.ref^.index;
  1454. taicpu(hp1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1455. taicpu(hp1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1456. taicpu(hp1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1457. DebugMsg('Peephole FoldShiftLdrStr done', hp1);
  1458. GetNextInstruction(p, hp1);
  1459. asml.remove(p);
  1460. p.free;
  1461. p:=hp1;
  1462. Result:=true;
  1463. end;
{
  Often we see shifts and then a superfluous mov to another register.
  In the future this might be handled in RedundantMovProcess when it uses RegisterTracking.
}
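{ For illustration (sketch only, registers arbitrary):
    mov r1, r0, lsl #2
    mov r2, r1
  becomes, if r1 is not used afterwards,
    mov r2, r0, lsl #2
}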
  1468. if (taicpu(p).opcode = A_MOV) and
  1469. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1470. RemoveSuperfluousMove(p, hp1, 'MovMov2Mov') then
  1471. Result:=true;
  1472. end;
  1473. A_ADD,
  1474. A_ADC,
  1475. A_RSB,
  1476. A_RSC,
  1477. A_SUB,
  1478. A_SBC,
  1479. A_AND,
  1480. A_BIC,
  1481. A_EOR,
  1482. A_ORR,
  1483. A_MLA,
  1484. A_MLS,
  1485. A_MUL,
  1486. A_QADD,A_QADD16,A_QADD8,
  1487. A_QSUB,A_QSUB16,A_QSUB8,
  1488. A_QDADD,A_QDSUB,A_QASX,A_QSAX,
  1489. A_SHADD16,A_SHADD8,A_UHADD16,A_UHADD8,
  1490. A_SHSUB16,A_SHSUB8,A_UHSUB16,A_UHSUB8,
  1491. A_PKHTB,A_PKHBT,
  1492. A_SMUAD,A_SMUSD:
  1493. begin
  1494. {
  1495. optimize
  1496. and reg2,reg1,const1
  1497. ...
  1498. }
  1499. if (taicpu(p).opcode = A_AND) and
  1500. (taicpu(p).ops>2) and
  1501. (taicpu(p).oper[1]^.typ = top_reg) and
  1502. (taicpu(p).oper[2]^.typ = top_const) then
  1503. begin
  1504. {
  1505. change
  1506. and reg2,reg1,const1
  1507. ...
  1508. and reg3,reg2,const2
  1509. to
  1510. and reg3,reg1,(const1 and const2)
  1511. }
  1512. if GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1513. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_None]) and
  1514. RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
  1515. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1516. (taicpu(hp1).oper[2]^.typ = top_const) then
  1517. begin
  1518. if not(RegUsedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) then
  1519. begin
  1520. DebugMsg('Peephole AndAnd2And done', p);
  1521. taicpu(p).loadConst(2,taicpu(p).oper[2]^.val and taicpu(hp1).oper[2]^.val);
  1522. taicpu(p).oppostfix:=taicpu(hp1).oppostfix;
  1523. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1524. asml.remove(hp1);
  1525. hp1.free;
  1526. Result:=true;
  1527. end
  1528. else if not(RegUsedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1529. begin
  1530. DebugMsg('Peephole AndAnd2And done', hp1);
  1531. taicpu(hp1).loadConst(2,taicpu(p).oper[2]^.val and taicpu(hp1).oper[2]^.val);
  1532. taicpu(hp1).oppostfix:=taicpu(p).oppostfix;
  1533. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1534. GetNextInstruction(p, hp1);
  1535. asml.remove(p);
  1536. p.free;
  1537. p:=hp1;
  1538. Result:=true;
  1539. end;
  1540. end
  1541. {
  1542. change
  1543. and reg2,reg1,$xxxxxxFF
  1544. strb reg2,[...]
  1545. dealloc reg2
  1546. to
  1547. strb reg1,[...]
  1548. }
  1549. else if ((taicpu(p).oper[2]^.val and $FF) = $FF) and
  1550. MatchInstruction(p, A_AND, [C_None], [PF_None]) and
  1551. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1552. MatchInstruction(hp1, A_STR, [C_None], [PF_B]) and
  1553. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
{ the reference in the strb must not use reg2 }
  1555. not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
{ reg1 must not be modified in between }
  1557. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1558. begin
  1559. DebugMsg('Peephole AndStrb2Strb done', p);
  1560. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1561. GetNextInstruction(p, hp1);
  1562. asml.remove(p);
  1563. p.free;
  1564. p:=hp1;
  1565. result:=true;
  1566. end
  1567. {
  1568. change
  1569. and reg2,reg1,255
  1570. uxtb/uxth reg3,reg2
  1571. dealloc reg2
  1572. to
  1573. and reg3,reg1,x
  1574. }
  1575. else if (taicpu(p).oper[2]^.val = $FF) and
  1576. MatchInstruction(p, A_AND, [C_None], [PF_None]) and
  1577. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1578. MatchInstruction(hp1, [A_UXTB,A_UXTH], [C_None], [PF_None]) and
  1579. (taicpu(hp1).ops = 2) and
  1580. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
  1581. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
{ reg1 must not be modified in between }
  1583. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1584. begin
  1585. DebugMsg('Peephole AndUxt2And done', p);
  1586. taicpu(hp1).opcode:=A_AND;
  1587. taicpu(hp1).ops:=3;
  1588. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1589. taicpu(hp1).loadconst(2,255);
  1590. GetNextInstruction(p,hp1);
  1591. asml.remove(p);
  1592. p.Free;
  1593. p:=hp1;
  1594. result:=true;
  1595. end
  1596. {
  1597. from
  1598. and reg1,reg0,2^n-1
  1599. mov reg2,reg1, lsl imm1
  1600. (mov reg3,reg2, lsr/asr imm1)
  1601. remove either the and or the lsl/xsr sequence if possible
  1602. }
  1603. else if cutils.ispowerof2(taicpu(p).oper[2]^.val+1,i) and
  1604. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1605. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1606. (taicpu(hp1).ops=3) and
  1607. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1608. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1609. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) and
  1610. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1611. RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) then
  1612. begin
{
  and reg1,reg0,2^n-1
  mov reg2,reg1, lsl imm1
  mov reg3,reg2, lsr/asr imm1
  =>
  and reg3,reg0,2^n-1
  if (lsr and n+imm1<=32) or (asr and n+imm1<32)
}
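{ Worked example (illustrative): with n=8 (mask $FF) and imm1=24 the pair
    mov reg2,reg1, lsl #24
    mov reg3,reg2, lsr #24
  merely re-extracts the low 8 bits which the and already guarantees, so both
  movs can be dropped and the and writes its result to reg3 directly
  (n+imm1=32 is allowed for lsr only).
}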
  1621. if GetNextInstructionUsingReg(hp1,hp2,taicpu(p).oper[0]^.reg) and
  1622. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1623. (taicpu(hp2).ops=3) and
  1624. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1625. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1626. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) and
  1627. (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1628. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=taicpu(hp2).oper[2]^.shifterop^.shiftimm) and
  1629. RegEndOfLife(taicpu(hp1).oper[0]^.reg,taicpu(hp2)) and
  1630. ((i<32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) or
  1631. ((i=32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) and
  1632. (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSR))) then
  1633. begin
  1634. DebugMsg('Peephole AndLslXsr2And done', p);
  1635. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1636. asml.Remove(hp1);
  1637. asml.Remove(hp2);
  1638. hp1.free;
  1639. hp2.free;
  1640. result:=true;
  1641. end
{
  and reg1,reg0,2^n-1
  mov reg2,reg1, lsl imm1
  =>
  mov reg2,reg0, lsl imm1
  if n+imm1>32
}
  1649. else if (i>32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) and
  1650. not(RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) then
  1651. begin
  1652. DebugMsg('Peephole AndLsl2Lsl done', p);
  1653. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1654. GetNextInstruction(p, hp1);
  1655. asml.Remove(p);
  1656. p.free;
  1657. p:=hp1;
  1658. result:=true;
  1659. end
  1660. end;
  1661. end;
  1662. {
  1663. change
  1664. add/sub reg2,reg1,const1
  1665. str/ldr reg3,[reg2,const2]
  1666. dealloc reg2
  1667. to
  1668. str/ldr reg3,[reg1,const2+/-const1]
  1669. }
  1670. if (not GenerateThumbCode) and
  1671. (taicpu(p).opcode in [A_ADD,A_SUB]) and
  1672. (taicpu(p).ops>2) and
  1673. (taicpu(p).oper[1]^.typ = top_reg) and
  1674. (taicpu(p).oper[2]^.typ = top_const) then
  1675. begin
  1676. hp1:=p;
  1677. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
  1678. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  1679. MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
  1680. (taicpu(hp1).oper[1]^.typ = top_ref) and
  1681. (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
  1682. { don't optimize if the register is stored/overwritten }
  1683. (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
  1684. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  1685. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
{ the new offset must be valid: either in the 8 bit or in the 12 bit range, depending on the
  ldr/str postfix }
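{ On ARM, plain ldr/str and ldrb/strb take a 12 bit immediate offset, while the
  halfword and signed-byte forms only take 8 bits; isValidConstLoadStoreOffset
  presumably distinguishes these based on the postfix. }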
  1688. (((taicpu(p).opcode=A_ADD) and
  1689. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  1690. ) or
  1691. ((taicpu(p).opcode=A_SUB) and
  1692. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  1693. )
  1694. ) do
  1695. begin
{ neither reg1 nor reg2 may be changed in between }
  1697. if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
  1698. RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
  1699. break;
  1700. { reg2 must be either overwritten by the ldr or it is deallocated afterwards }
  1701. if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
  1702. assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
  1703. begin
  1704. { remember last instruction }
  1705. hp2:=hp1;
  1706. DebugMsg('Peephole Add/SubLdr2Ldr done', p);
  1707. hp1:=p;
  1708. { fix all ldr/str }
  1709. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
  1710. begin
  1711. taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
  1712. if taicpu(p).opcode=A_ADD then
  1713. inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
  1714. else
  1715. dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
  1716. if hp1=hp2 then
  1717. break;
  1718. end;
  1719. GetNextInstruction(p,hp1);
  1720. asml.remove(p);
  1721. p.free;
  1722. p:=hp1;
  1723. result:=true;
  1724. break;
  1725. end;
  1726. end;
  1727. end;
  1728. {
  1729. change
  1730. add reg1, ...
  1731. mov reg2, reg1
  1732. to
  1733. add reg2, ...
  1734. }
  1735. if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1736. (taicpu(p).ops>=3) and
  1737. RemoveSuperfluousMove(p, hp1, 'DataMov2Data') then
  1738. Result:=true;
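{ LookForPreindexedPattern presumably matches a sequence like (sketch only)
    add/sub r1, r1, #4
    ldr     r0, [r1]
  and lets it be replaced by a single pre-indexed access
    ldr     r0, [r1, #4]!   (respectively #-4 for sub)
}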
  1739. if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
  1740. LookForPreindexedPattern(taicpu(p)) then
  1741. begin
  1742. GetNextInstruction(p,hp1);
  1743. DebugMsg('Peephole Add/Sub to Preindexed done', p);
  1744. asml.remove(p);
  1745. p.free;
  1746. p:=hp1;
  1747. Result:=true;
  1748. end;
  1749. {
  1750. Turn
  1751. mul reg0, z,w
  1752. sub/add x, y, reg0
  1753. dealloc reg0
  1754. into
  1755. mls/mla x,z,w,y
  1756. }
  1757. if MatchInstruction(p, [A_MUL], [C_None], [PF_None]) and
  1758. (taicpu(p).ops=3) and
  1759. (taicpu(p).oper[0]^.typ = top_reg) and
  1760. (taicpu(p).oper[1]^.typ = top_reg) and
  1761. (taicpu(p).oper[2]^.typ = top_reg) and
  1762. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1763. MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
  1764. (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
  1765. (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
  1766. (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
  1767. ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
// On CPUs before ARMv6, using the same Rd and Rm for MLA is not recommended.
  1769. // TODO: A workaround would be to swap Rm and Rs
  1770. (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
  1771. (((taicpu(hp1).ops=3) and
  1772. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1773. ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
  1774. (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
  1775. ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1776. (taicpu(hp1).opcode=A_ADD) and
  1777. (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
  1778. ((taicpu(hp1).ops=2) and
  1779. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1780. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1781. (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
  1782. begin
  1783. if taicpu(hp1).opcode=A_ADD then
  1784. begin
  1785. taicpu(hp1).opcode:=A_MLA;
  1786. if taicpu(hp1).ops=3 then
  1787. begin
  1788. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
  1789. oldreg:=taicpu(hp1).oper[2]^.reg
  1790. else
  1791. oldreg:=taicpu(hp1).oper[1]^.reg;
  1792. end
  1793. else
  1794. oldreg:=taicpu(hp1).oper[0]^.reg;
  1795. taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
  1796. taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
  1797. taicpu(hp1).loadreg(3,oldreg);
  1798. DebugMsg('MulAdd2MLA done', p);
  1799. taicpu(hp1).ops:=4;
  1800. asml.remove(p);
  1801. p.free;
  1802. p:=hp1;
  1803. end
  1804. else
  1805. begin
  1806. taicpu(hp1).opcode:=A_MLS;
  1807. taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
  1808. if taicpu(hp1).ops=2 then
  1809. taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
  1810. else
  1811. taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
  1812. taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
  1813. DebugMsg('MulSub2MLS done', p);
  1814. taicpu(hp1).ops:=4;
  1815. asml.remove(p);
  1816. p.free;
  1817. p:=hp1;
  1818. end;
  1819. result:=true;
  1820. end
  1821. end;
  1822. {$ifdef dummy}
  1823. A_MVN:
  1824. begin
  1825. {
  1826. change
  1827. mvn reg2,reg1
  1828. and reg3,reg4,reg2
  1829. dealloc reg2
  1830. to
  1831. bic reg3,reg4,reg1
  1832. }
  1833. if (taicpu(p).oper[1]^.typ = top_reg) and
  1834. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1835. MatchInstruction(hp1,A_AND,[],[]) and
  1836. (((taicpu(hp1).ops=3) and
  1837. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1838. (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
  1839. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
  1840. ((taicpu(hp1).ops=2) and
  1841. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1842. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1843. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
{ reg1 must not be modified in between }
  1845. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1846. begin
  1847. DebugMsg('Peephole MvnAnd2Bic done', p);
  1848. taicpu(hp1).opcode:=A_BIC;
  1849. if taicpu(hp1).ops=3 then
  1850. begin
  1851. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1852. taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
  1853. taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
  1854. end
  1855. else
  1856. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1857. GetNextInstruction(p, hp1);
  1858. asml.remove(p);
  1859. p.free;
  1860. p:=hp1;
  1861. end;
  1862. end;
  1863. {$endif dummy}
  1864. A_UXTB:
  1865. begin
  1866. {
  1867. change
  1868. uxtb reg2,reg1
  1869. strb reg2,[...]
  1870. dealloc reg2
  1871. to
  1872. strb reg1,[...]
  1873. }
  1874. if MatchInstruction(p, taicpu(p).opcode, [C_None], [PF_None]) and
  1875. (taicpu(p).ops=2) and
  1876. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1877. MatchInstruction(hp1, A_STR, [C_None], [PF_B]) and
  1878. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
{ the reference in the strb must not use reg2 }
  1880. not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
{ reg1 must not be modified in between }
  1882. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1883. begin
  1884. DebugMsg('Peephole UxtbStrb2Strb done', p);
  1885. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1886. GetNextInstruction(p,hp2);
  1887. asml.remove(p);
  1888. p.free;
  1889. p:=hp2;
  1890. result:=true;
  1891. end
  1892. {
  1893. change
  1894. uxtb reg2,reg1
  1895. uxth reg3,reg2
  1896. dealloc reg2
  1897. to
  1898. uxtb reg3,reg1
  1899. }
  1900. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1901. (taicpu(p).ops=2) and
  1902. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1903. MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
  1904. (taicpu(hp1).ops = 2) and
  1905. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1906. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ reg1 must not be modified in between }
  1908. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1909. begin
  1910. DebugMsg('Peephole UxtbUxth2Uxtb done', p);
  1911. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1912. asml.remove(hp1);
  1913. hp1.free;
  1914. result:=true;
  1915. end
  1916. {
  1917. change
  1918. uxtb reg2,reg1
  1919. uxtb reg3,reg2
  1920. dealloc reg2
  1921. to
  1922. uxtb reg3,reg1
  1923. }
  1924. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1925. (taicpu(p).ops=2) and
  1926. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1927. MatchInstruction(hp1, A_UXTB, [C_None], [PF_None]) and
  1928. (taicpu(hp1).ops = 2) and
  1929. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1930. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ reg1 must not be modified in between }
  1932. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1933. begin
  1934. DebugMsg('Peephole UxtbUxtb2Uxtb done', p);
  1935. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1936. asml.remove(hp1);
  1937. hp1.free;
  1938. result:=true;
  1939. end
  1940. {
  1941. change
  1942. uxtb reg2,reg1
  1943. and reg3,reg2,#0x*FF
  1944. dealloc reg2
  1945. to
  1946. uxtb reg3,reg1
  1947. }
  1948. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1949. (taicpu(p).ops=2) and
  1950. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1951. MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
  1952. (taicpu(hp1).ops=3) and
  1953. (taicpu(hp1).oper[2]^.typ=top_const) and
  1954. ((taicpu(hp1).oper[2]^.val and $FF)=$FF) and
  1955. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1956. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ reg1 must not be modified in between }
  1958. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1959. begin
  1960. DebugMsg('Peephole UxtbAndImm2Uxtb done', p);
  1961. taicpu(hp1).opcode:=A_UXTB;
  1962. taicpu(hp1).ops:=2;
  1963. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1964. GetNextInstruction(p,hp2);
  1965. asml.remove(p);
  1966. p.free;
  1967. p:=hp2;
  1968. result:=true;
  1969. end
  1970. else if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1971. RemoveSuperfluousMove(p, hp1, 'UxtbMov2Data') then
  1972. Result:=true;
  1973. end;
  1974. A_UXTH:
  1975. begin
  1976. {
  1977. change
  1978. uxth reg2,reg1
  1979. strh reg2,[...]
  1980. dealloc reg2
  1981. to
  1982. strh reg1,[...]
  1983. }
  1984. if MatchInstruction(p, taicpu(p).opcode, [C_None], [PF_None]) and
  1985. (taicpu(p).ops=2) and
  1986. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1987. MatchInstruction(hp1, A_STR, [C_None], [PF_H]) and
  1988. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ the reference in the strh must not use reg2 }
  1990. not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
{ reg1 must not be modified in between }
  1992. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1993. begin
  1994. DebugMsg('Peephole UXTHStrh2Strh done', p);
  1995. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1996. GetNextInstruction(p, hp1);
  1997. asml.remove(p);
  1998. p.free;
  1999. p:=hp1;
  2000. result:=true;
  2001. end
  2002. {
  2003. change
  2004. uxth reg2,reg1
  2005. uxth reg3,reg2
  2006. dealloc reg2
  2007. to
  2008. uxth reg3,reg1
  2009. }
  2010. else if MatchInstruction(p, A_UXTH, [C_None], [PF_None]) and
  2011. (taicpu(p).ops=2) and
  2012. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  2013. MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
  2014. (taicpu(hp1).ops=2) and
  2015. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  2016. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ reg1 must not be modified in between }
  2018. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  2019. begin
  2020. DebugMsg('Peephole UxthUxth2Uxth done', p);
  2021. taicpu(hp1).opcode:=A_UXTH;
  2022. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  2023. GetNextInstruction(p, hp1);
  2024. asml.remove(p);
  2025. p.free;
  2026. p:=hp1;
  2027. result:=true;
  2028. end
  2029. {
  2030. change
  2031. uxth reg2,reg1
  2032. and reg3,reg2,#65535
  2033. dealloc reg2
  2034. to
  2035. uxth reg3,reg1
  2036. }
  2037. else if MatchInstruction(p, A_UXTH, [C_None], [PF_None]) and
  2038. (taicpu(p).ops=2) and
  2039. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  2040. MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
  2041. (taicpu(hp1).ops=3) and
  2042. (taicpu(hp1).oper[2]^.typ=top_const) and
  2043. ((taicpu(hp1).oper[2]^.val and $FFFF)=$FFFF) and
  2044. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  2045. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ reg1 must not be modified in between }
  2047. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  2048. begin
  2049. DebugMsg('Peephole UxthAndImm2Uxth done', p);
  2050. taicpu(hp1).opcode:=A_UXTH;
  2051. taicpu(hp1).ops:=2;
  2052. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  2053. GetNextInstruction(p, hp1);
  2054. asml.remove(p);
  2055. p.free;
  2056. p:=hp1;
  2057. result:=true;
  2058. end
  2059. else if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  2060. RemoveSuperfluousMove(p, hp1, 'UxthMov2Data') then
  2061. Result:=true;
  2062. end;
  2063. A_CMP:
  2064. begin
  2065. {
  2066. change
  2067. cmp reg,const1
  2068. moveq reg,const1
  2069. movne reg,const2
  2070. to
  2071. cmp reg,const1
  2072. movne reg,const2
  2073. }
  2074. if (taicpu(p).oper[1]^.typ = top_const) and
  2075. GetNextInstruction(p, hp1) and
  2076. MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  2077. (taicpu(hp1).oper[1]^.typ = top_const) and
  2078. GetNextInstruction(hp1, hp2) and
  2079. MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  2080. (taicpu(hp1).oper[1]^.typ = top_const) then
  2081. begin
  2082. Result:=RemoveRedundantMove(p, hp1, asml) or Result;
  2083. Result:=RemoveRedundantMove(p, hp2, asml) or Result;
  2084. end;
  2085. end;
  2086. A_STM:
  2087. begin
  2088. {
  2089. change
  2090. stmfd r13!,[r14]
  2091. sub r13,r13,#4
  2092. bl abc
  2093. add r13,r13,#4
  2094. ldmfd r13!,[r15]
  2095. into
  2096. b abc
  2097. }
  2098. if not(ts_thumb_interworking in current_settings.targetswitches) and
  2099. MatchInstruction(p, A_STM, [C_None], [PF_FD]) and
  2100. GetNextInstruction(p, hp1) and
  2101. GetNextInstruction(hp1, hp2) and
  2102. SkipEntryExitMarker(hp2, hp2) and
  2103. GetNextInstruction(hp2, hp3) and
  2104. SkipEntryExitMarker(hp3, hp3) and
  2105. GetNextInstruction(hp3, hp4) and
  2106. (taicpu(p).oper[0]^.typ = top_ref) and
  2107. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2108. (taicpu(p).oper[0]^.ref^.base=NR_NO) and
  2109. (taicpu(p).oper[0]^.ref^.offset=0) and
  2110. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2111. (taicpu(p).oper[1]^.typ = top_regset) and
  2112. (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
  2113. MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
  2114. (taicpu(hp1).oper[0]^.typ = top_reg) and
  2115. (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
  2116. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
  2117. (taicpu(hp1).oper[2]^.typ = top_const) and
  2118. MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
  2119. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
  2120. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
  2121. MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
  2122. MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
  2123. (taicpu(hp2).oper[0]^.typ = top_ref) and
  2124. MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
  2125. MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
  2126. (taicpu(hp4).oper[1]^.typ = top_regset) and
  2127. (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
  2128. begin
  2129. asml.Remove(p);
  2130. asml.Remove(hp1);
  2131. asml.Remove(hp3);
  2132. asml.Remove(hp4);
  2133. taicpu(hp2).opcode:=A_B;
  2134. p.free;
  2135. hp1.free;
  2136. hp3.free;
  2137. hp4.free;
  2138. p:=hp2;
  2139. DebugMsg('Peephole Bl2B done', p);
  2140. end;
  2141. end;
  2142. A_VMOV:
  2143. begin
  2144. {
  2145. change
  2146. vmov reg0,reg1,reg2
  2147. vmov reg1,reg2,reg0
  2148. into
  2149. vmov reg0,reg1,reg2
can be applied regardless of whether reg0 or reg2 is the vfp register
  2151. }
  2152. if (taicpu(p).ops = 3) and
  2153. GetNextInstruction(p, hp1) and
  2154. MatchInstruction(hp1, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  2155. (taicpu(hp1).ops = 3) and
  2156. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[2]^) and
  2157. MatchOperand(taicpu(p).oper[1]^, taicpu(hp1).oper[0]^) and
  2158. MatchOperand(taicpu(p).oper[2]^, taicpu(hp1).oper[1]^) then
  2159. begin
  2160. asml.Remove(hp1);
  2161. hp1.free;
  2162. DebugMsg('Peephole VMovVMov2VMov done', p);
  2163. end;
  2164. end;
  2165. A_VLDR,
  2166. A_VADD,
  2167. A_VMUL,
  2168. A_VDIV,
  2169. A_VSUB,
  2170. A_VSQRT,
  2171. A_VNEG,
  2172. A_VCVT,
  2173. A_VABS:
  2174. begin
  2175. if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  2176. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp') then
  2177. Result:=true;
  2178. end
  2179. else
  2180. ;
  2181. end;
  2182. end;
  2183. else
  2184. ;
  2185. end;
  2186. end;
{ an instruction modifying the CPSR can only be the last instruction of a block that is made conditional }
  2188. function MustBeLast(p : tai) : boolean;
  2189. begin
  2190. Result:=(p.typ=ait_instruction) and
  2191. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  2192. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  2193. (taicpu(p).oppostfix=PF_S));
  2194. end;
  2195. procedure TCpuAsmOptimizer.PeepHoleOptPass2;
  2196. var
  2197. p,hp1,hp2: tai;
  2198. l : longint;
  2199. condition : tasmcond;
  2200. hp3: tai;
  2201. WasLast: boolean;
  2202. { UsedRegs, TmpUsedRegs: TRegSet; }
  2203. begin
  2204. p := BlockStart;
  2205. { UsedRegs := []; }
  2206. while (p <> BlockEnd) Do
  2207. begin
  2208. { UpdateUsedRegs(UsedRegs, tai(p.next)); }
  2209. case p.Typ Of
  2210. Ait_Instruction:
  2211. begin
  2212. case taicpu(p).opcode Of
  2213. A_B:
  2214. if (taicpu(p).condition<>C_None) and
  2215. not(GenerateThumbCode) then
  2216. begin
  2217. { check for
  2218. Bxx xxx
  2219. <several instructions>
  2220. xxx:
  2221. }
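{ For example (illustration only):
    bne .Lskip
    add r1,r1,#1
  .Lskip:
  becomes
    addeq r1,r1,#1
  .Lskip:
  i.e. the branch is removed and the skipped instructions get the inverted condition.
}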
  2222. l:=0;
  2223. WasLast:=False;
  2224. GetNextInstruction(p, hp1);
  2225. while assigned(hp1) and
  2226. (l<=4) and
  2227. CanBeCond(hp1) and
  2228. { stop on labels }
  2229. not(hp1.typ=ait_label) and
{ stop at branches, otherwise we could not recognize the BccB2Cond case }
  2231. not((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B)) do
  2232. begin
  2233. inc(l);
  2234. if MustBeLast(hp1) then
  2235. begin
  2236. WasLast:=True;
  2237. GetNextInstruction(hp1,hp1);
  2238. break;
  2239. end
  2240. else
  2241. GetNextInstruction(hp1,hp1);
  2242. end;
  2243. if assigned(hp1) then
  2244. begin
  2245. if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2246. begin
  2247. if (l<=4) and (l>0) then
  2248. begin
  2249. condition:=inverse_cond(taicpu(p).condition);
  2250. hp2:=p;
  2251. GetNextInstruction(p,hp1);
  2252. p:=hp1;
  2253. repeat
  2254. if hp1.typ=ait_instruction then
  2255. taicpu(hp1).condition:=condition;
  2256. if MustBeLast(hp1) then
  2257. begin
  2258. GetNextInstruction(hp1,hp1);
  2259. break;
  2260. end
  2261. else
  2262. GetNextInstruction(hp1,hp1);
  2263. until not(assigned(hp1)) or
  2264. not(CanBeCond(hp1)) or
  2265. (hp1.typ=ait_label);
  2266. DebugMsg('Peephole Bcc2Cond done',hp2);
{ wait with the removal, else GetNextInstruction could ignore the label
  if its only usage was in the jump that was moved away }
  2270. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2271. asml.remove(hp2);
  2272. hp2.free;
  2273. continue;
  2274. end;
  2275. end
  2276. else
{ do not perform further optimizations if there is an instruction
  in block #1 which cannot be optimized.
}
  2280. if not WasLast then
  2281. begin
  2282. { check further for
  2283. Bcc xxx
  2284. <several instructions 1>
  2285. B yyy
  2286. xxx:
  2287. <several instructions 2>
  2288. yyy:
  2289. }
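{ Illustration (sketch only):
    beq .L1
    mov r0,#1
    b   .L2
  .L1:
    mov r0,#2
  .L2:
  becomes
    movne r0,#1
    moveq r0,#2
  with both the conditional and the unconditional branch removed.
}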
  2290. { hp2 points to jmp yyy }
  2291. hp2:=hp1;
  2292. { skip hp1 to xxx }
  2293. GetNextInstruction(hp1, hp1);
  2294. if assigned(hp2) and
  2295. assigned(hp1) and
  2296. (l<=3) and
  2297. (hp2.typ=ait_instruction) and
  2298. (taicpu(hp2).is_jmp) and
  2299. (taicpu(hp2).condition=C_None) and
  2300. { real label and jump, no further references to the
  2301. label are allowed }
  2302. (tasmlabel(taicpu(p).oper[0]^.ref^.symbol).getrefs=1) and
  2303. FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2304. begin
  2305. l:=0;
{ skip hp1 to <several instructions 2> }
  2307. GetNextInstruction(hp1, hp1);
  2308. while assigned(hp1) and
  2309. CanBeCond(hp1) and
  2310. (l<=3) do
  2311. begin
  2312. inc(l);
  2313. if MustBeLast(hp1) then
  2314. begin
  2315. GetNextInstruction(hp1, hp1);
  2316. break;
  2317. end
  2318. else
  2319. GetNextInstruction(hp1, hp1);
  2320. end;
  2321. { hp1 points to yyy: }
  2322. if assigned(hp1) and
  2323. FindLabel(tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol),hp1) then
  2324. begin
  2325. condition:=inverse_cond(taicpu(p).condition);
  2326. GetNextInstruction(p,hp1);
  2327. hp3:=p;
  2328. p:=hp1;
  2329. repeat
  2330. if hp1.typ=ait_instruction then
  2331. taicpu(hp1).condition:=condition;
  2332. if MustBeLast(hp1) then
  2333. begin
  2334. GetNextInstruction(hp1, hp1);
  2335. break;
  2336. end
  2337. else
  2338. GetNextInstruction(hp1, hp1);
  2339. until not(assigned(hp1)) or
  2340. not(CanBeCond(hp1)) or
  2341. ((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B));
  2342. { hp2 is still at jmp yyy }
  2343. GetNextInstruction(hp2,hp1);
  2344. { hp1 is now at xxx: }
  2345. condition:=inverse_cond(condition);
  2346. GetNextInstruction(hp1,hp1);
{ hp1 is now at <several instructions 2> }
  2348. repeat
  2349. if hp1.typ=ait_instruction then
  2350. taicpu(hp1).condition:=condition;
  2351. GetNextInstruction(hp1,hp1);
  2352. until not(assigned(hp1)) or
  2353. not(CanBeCond(hp1)) or
  2354. (hp1.typ=ait_label);
  2355. DebugMsg('Peephole BccB2Cond done',hp3);
  2356. { remove Bcc }
  2357. tasmlabel(taicpu(hp3).oper[0]^.ref^.symbol).decrefs;
  2358. asml.remove(hp3);
  2359. hp3.free;
  2360. { remove B }
  2361. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2362. asml.remove(hp2);
  2363. hp2.free;
  2364. continue;
  2365. end;
  2366. end;
  2367. end;
  2368. end;
  2369. end;
  2370. else
  2371. ;
  2372. end;
  2373. end;
  2374. else
  2375. ;
  2376. end;
  2377. p := tai(p.next)
  2378. end;
  2379. end;
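{ Consider any register as used by a BL (it is a call), and for ldr/str with the
  D postfix also the implicit second register of the pair (oper[0]+1). }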
  2380. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  2381. begin
  2382. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  2383. Result:=true
  2384. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  2385. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  2386. Result:=true
  2387. else
  2388. Result:=inherited RegInInstruction(Reg, p1);
  2389. end;
  2390. const
{ set of opcodes which might or do write to memory }
  2392. { TODO : extend armins.dat to contain r/w info }
  2393. opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
  2394. A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
{ adjust the register live information when swapping the two instructions p and hp1;
  they must directly follow each other }
  2397. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
  2398. procedure CheckLiveEnd(reg : tregister);
  2399. var
  2400. supreg : TSuperRegister;
  2401. regtype : TRegisterType;
  2402. begin
  2403. if reg=NR_NO then
  2404. exit;
  2405. regtype:=getregtype(reg);
  2406. supreg:=getsupreg(reg);
  2407. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_end[supreg]=hp1) and
  2408. RegInInstruction(reg,p) then
  2409. cg.rg[regtype].live_end[supreg]:=p;
  2410. end;
  2411. procedure CheckLiveStart(reg : TRegister);
  2412. var
  2413. supreg : TSuperRegister;
  2414. regtype : TRegisterType;
  2415. begin
  2416. if reg=NR_NO then
  2417. exit;
  2418. regtype:=getregtype(reg);
  2419. supreg:=getsupreg(reg);
  2420. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_start[supreg]=p) and
  2421. RegInInstruction(reg,hp1) then
  2422. cg.rg[regtype].live_start[supreg]:=hp1;
  2423. end;
  2424. var
  2425. i : longint;
  2426. r : TSuperRegister;
  2427. begin
  2428. { assumption: p is directly followed by hp1 }
  2429. { if live of any reg used by p starts at p and hp1 uses this register then
  2430. set live start to hp1 }
  2431. for i:=0 to p.ops-1 do
  2432. case p.oper[i]^.typ of
  2433. Top_Reg:
  2434. CheckLiveStart(p.oper[i]^.reg);
  2435. Top_Ref:
  2436. begin
  2437. CheckLiveStart(p.oper[i]^.ref^.base);
  2438. CheckLiveStart(p.oper[i]^.ref^.index);
  2439. end;
  2440. Top_Shifterop:
  2441. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2442. Top_RegSet:
  2443. for r:=RS_R0 to RS_R15 do
  2444. if r in p.oper[i]^.regset^ then
  2445. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2446. else
  2447. ;
  2448. end;
  2449. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2450. set live end to p }
  2451. for i:=0 to hp1.ops-1 do
  2452. case hp1.oper[i]^.typ of
  2453. Top_Reg:
  2454. CheckLiveEnd(hp1.oper[i]^.reg);
  2455. Top_Ref:
  2456. begin
  2457. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2458. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2459. end;
Top_Shifterop:
  CheckLiveEnd(hp1.oper[i]^.shifterop^.rs);
  2462. Top_RegSet:
  2463. for r:=RS_R0 to RS_R15 do
  2464. if r in hp1.oper[i]^.regset^ then
  2465. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2466. else
  2467. ;
  2468. end;
  2469. end;
  2470. function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
  2471. { TODO : schedule also forward }
  2472. { TODO : schedule distance > 1 }
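{ This pass moves a load up across the preceding independent instruction so
  that the consumer of the loaded value no longer immediately follows the load.
  Sketch (registers arbitrary):
    add r3,r3,#1
    ldr r1,[r2]
    add r0,r0,r1
  becomes
    ldr r1,[r2]
    add r3,r3,#1
    add r0,r0,r1
}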
  2473. { returns true if p might be a load of a pc relative tls offset }
  2474. function PossibleTLSLoad(const p: tai) : boolean;
  2475. begin
  2476. Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
  2477. (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
  2478. (taicpu(p).oper[1]^.ref^.index=NR_PC)));
  2479. end;
  2480. var
  2481. hp1,hp2,hp3,hp4,hp5,insertpos : tai;
  2482. list : TAsmList;
  2483. begin
  2484. result:=true;
  2485. list:=TAsmList.create;
  2486. p:=BlockStart;
  2487. while p<>BlockEnd Do
  2488. begin
  2489. if (p.typ=ait_instruction) and
  2490. GetNextInstruction(p,hp1) and
  2491. (hp1.typ=ait_instruction) and
  2492. (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
  2493. (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
{ for now we don't reschedule if the previous instruction potentially changes a memory location }
  2495. ( (not(taicpu(p).opcode in opcode_could_mem_write) and
  2496. not(RegModifiedByInstruction(NR_PC,p))
  2497. ) or
  2498. ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
  2499. ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
  2500. (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
  2501. (taicpu(hp1).oper[1]^.ref^.offset=0)
  2502. )
  2503. ) or
{ try to prove that the memory accesses don't overlap }
  2505. ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
  2506. (taicpu(p).oper[1]^.typ = top_ref) and
  2507. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  2508. (taicpu(p).oppostfix=PF_None) and
  2509. (taicpu(hp1).oppostfix=PF_None) and
  2510. (taicpu(p).oper[1]^.ref^.index=NR_NO) and
  2511. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
{ get the operand sizes and check if the offset distance is large enough to ensure no overlap }
  2513. (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
  2514. )
  2515. )
  2516. ) and
  2517. GetNextInstruction(hp1,hp2) and
  2518. (hp2.typ=ait_instruction) and
{ loaded register used by the next instruction?
  if we ever support labels here (they could be skipped in theory), the gnu2 tls
  general-dynamic code could break (the ldr before the bl must not be scheduled away
  from the bl), so this case needs to be taken care of }
  2523. (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
  2524. { loaded register not used by previous instruction? }
  2525. not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
  2526. { same condition? }
  2527. (taicpu(p).condition=taicpu(hp1).condition) and
  2528. { first instruction might not change the register used as base }
  2529. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
  2530. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
  2531. ) and
  2532. { first instruction might not change the register used as index }
  2533. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
  2534. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
  2535. ) and
{ if we modify the base reg AND the first instruction uses that reg, we cannot schedule }
  2537. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
  2538. not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
  2539. not(PossibleTLSLoad(p)) and
  2540. not(PossibleTLSLoad(hp1)) then
  2541. begin
  2542. hp3:=tai(p.Previous);
  2543. hp5:=tai(p.next);
  2544. asml.Remove(p);
{ if there are reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
  associated with p, move them together with p }
  2547. { before the instruction? }
  2548. { find reg allocs,deallocs and PIC labels }
  2549. while assigned(hp3) and (hp3.typ<>ait_instruction) do
  2550. begin
  2551. if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
  2552. RegInInstruction(tai_regalloc(hp3).reg,p) )
  2553. or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
  2554. then
  2555. begin
  2556. hp4:=hp3;
  2557. hp3:=tai(hp3.Previous);
  2558. asml.Remove(hp4);
  2559. list.Insert(hp4);
  2560. end
  2561. else
  2562. hp3:=tai(hp3.Previous);
  2563. end;
  2564. list.Concat(p);
  2565. SwapRegLive(taicpu(p),taicpu(hp1));
  2566. { after the instruction? }
  2567. { find reg deallocs and reg syncs }
  2568. while assigned(hp5) and (hp5.typ<>ait_instruction) do
  2569. begin
  2570. if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
  2571. RegInInstruction(tai_regalloc(hp5).reg,p) then
  2572. begin
  2573. hp4:=hp5;
  2574. hp5:=tai(hp5.next);
  2575. asml.Remove(hp4);
  2576. list.Concat(hp4);
  2577. end
  2578. else
  2579. hp5:=tai(hp5.Next);
  2580. end;
  2581. asml.Remove(hp1);
  2582. { if there are address labels associated with hp2, those must
  2583. stay with hp2 (e.g. for GOT-less PIC) }
  2584. insertpos:=hp2;
  2585. while assigned(hp2.previous) and
  2586. (tai(hp2.previous).typ<>ait_instruction) do
  2587. begin
  2588. hp2:=tai(hp2.previous);
  2589. if (hp2.typ=ait_label) and
  2590. (tai_label(hp2).labsym.typ=AT_ADDR) then
  2591. insertpos:=hp2;
  2592. end;
  2593. {$ifdef DEBUG_PREREGSCHEDULER}
  2594. asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
  2595. {$endif DEBUG_PREREGSCHEDULER}
  2596. asml.InsertBefore(hp1,insertpos);
  2597. asml.InsertListBefore(insertpos,list);
  2598. p:=tai(p.next);
  2599. end
  2600. else if p.typ=ait_instruction then
  2601. p:=hp1
  2602. else
  2603. p:=tai(p.next);
  2604. end;
  2605. list.Free;
  2606. end;
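{ Presumably called when an instruction covered by an IT block is removed: the
  preceding IT instruction is shortened by one slot, e.g. an ITTE whose third
  covered instruction is removed becomes an ITT, and a plain IT that only covered
  the removed instruction is removed altogether. }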
  2607. procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  2608. var
  2609. hp : tai;
  2610. l : longint;
  2611. begin
  2612. hp := tai(p.Previous);
  2613. l := 1;
  2614. while assigned(hp) and
  2615. (l <= 4) do
  2616. begin
  2617. if hp.typ=ait_instruction then
  2618. begin
  2619. if (taicpu(hp).opcode>=A_IT) and
  2620. (taicpu(hp).opcode <= A_ITTTT) then
  2621. begin
  2622. if (taicpu(hp).opcode = A_IT) and
  2623. (l=1) then
  2624. list.Remove(hp)
  2625. else
  2626. case taicpu(hp).opcode of
  2627. A_ITE:
  2628. if l=2 then taicpu(hp).opcode := A_IT;
  2629. A_ITT:
  2630. if l=2 then taicpu(hp).opcode := A_IT;
  2631. A_ITEE:
  2632. if l=3 then taicpu(hp).opcode := A_ITE;
  2633. A_ITTE:
  2634. if l=3 then taicpu(hp).opcode := A_ITT;
  2635. A_ITET:
  2636. if l=3 then taicpu(hp).opcode := A_ITE;
  2637. A_ITTT:
  2638. if l=3 then taicpu(hp).opcode := A_ITT;
  2639. A_ITEEE:
  2640. if l=4 then taicpu(hp).opcode := A_ITEE;
  2641. A_ITTEE:
  2642. if l=4 then taicpu(hp).opcode := A_ITTE;
  2643. A_ITETE:
  2644. if l=4 then taicpu(hp).opcode := A_ITET;
  2645. A_ITTTE:
  2646. if l=4 then taicpu(hp).opcode := A_ITTT;
  2647. A_ITEET:
  2648. if l=4 then taicpu(hp).opcode := A_ITEE;
  2649. A_ITTET:
  2650. if l=4 then taicpu(hp).opcode := A_ITTE;
  2651. A_ITETT:
  2652. if l=4 then taicpu(hp).opcode := A_ITET;
  2653. A_ITTTT:
  2654. begin
  2655. if l=4 then taicpu(hp).opcode := A_ITTT;
  2656. end
  2657. else
  2658. ;
  2659. end;
  2660. break;
  2661. end;
  2662. {else if (taicpu(hp).condition<>taicpu(p).condition) or
  2663. (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
  2664. break;}
  2665. inc(l);
  2666. end;
  2667. hp := tai(hp.Previous);
  2668. end;
  2669. end;
  2670. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2671. var
  2672. hp : taicpu;
  2673. //hp1,hp2 : tai;
  2674. begin
  2675. result:=false;
  2676. if inherited PeepHoleOptPass1Cpu(p) then
  2677. result:=true
  2678. else if (p.typ=ait_instruction) and
  2679. MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2680. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2681. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2682. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2683. begin
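// e.g. (illustrative): stmfd r13!,{r4,r5,r14}  ->  push {r4,r5,lr}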
  2684. DebugMsg('Peephole Stm2Push done', p);
  2685. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2686. AsmL.InsertAfter(hp, p);
  2687. asml.Remove(p);
  2688. p:=hp;
  2689. result:=true;
  2690. end
  2691. {else if (p.typ=ait_instruction) and
  2692. MatchInstruction(p, A_STR, [C_None], [PF_None]) and
  2693. (taicpu(p).oper[1]^.ref^.addressmode=AM_PREINDEXED) and
  2694. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2695. (taicpu(p).oper[1]^.ref^.offset=-4) and
  2696. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,14]) then
  2697. begin
  2698. DebugMsg('Peephole Str2Push done', p);
  2699. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2700. asml.InsertAfter(hp, p);
  2701. asml.Remove(p);
  2702. p.Free;
  2703. p:=hp;
  2704. result:=true;
  2705. end}
  2706. else if (p.typ=ait_instruction) and
  2707. MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2708. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2709. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2710. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2711. begin
          DebugMsg('Peephole Ldm2Pop done', p);
          hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
          asml.InsertBefore(hp, p);
          asml.Remove(p);
          p.Free;
          p:=hp;
          result:=true;
        end
      {else if (p.typ=ait_instruction) and
        MatchInstruction(p, A_LDR, [C_None], [PF_None]) and
        (taicpu(p).oper[1]^.ref^.addressmode=AM_POSTINDEXED) and
        (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
        (taicpu(p).oper[1]^.ref^.offset=4) and
        (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,15]) then
        begin
          DebugMsg('Peephole Ldr2Pop done', p);
          hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
          asml.InsertBefore(hp, p);
          asml.Remove(p);
          p.Free;
          p:=hp;
          result:=true;
        end}
      else if (p.typ=ait_instruction) and
        MatchInstruction(p, [A_AND], [], [PF_None]) and
        (taicpu(p).ops = 2) and
        (taicpu(p).oper[1]^.typ=top_const) and
        ((taicpu(p).oper[1]^.val=255) or
         (taicpu(p).oper[1]^.val=65535)) then
        begin
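          { and rX,#255 / and rX,#65535 zero-extends the low byte/halfword,
            so it can be replaced by uxtb/uxth rX,rX }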
          DebugMsg('Peephole AndR2Uxt done', p);
          if taicpu(p).oper[1]^.val=255 then
            taicpu(p).opcode:=A_UXTB
          else
            taicpu(p).opcode:=A_UXTH;
          taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
          result := true;
        end
      else if (p.typ=ait_instruction) and
        MatchInstruction(p, [A_AND], [], [PF_None]) and
        (taicpu(p).ops = 3) and
        (taicpu(p).oper[2]^.typ=top_const) and
        ((taicpu(p).oper[2]^.val=255) or
         (taicpu(p).oper[2]^.val=65535)) then
        begin
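          { likewise, and rD,rS,#255 / #65535 becomes uxtb/uxth rD,rS }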
          DebugMsg('Peephole AndRR2Uxt done', p);
          if taicpu(p).oper[2]^.val=255 then
            taicpu(p).opcode:=A_UXTB
          else
            taicpu(p).opcode:=A_UXTH;
          taicpu(p).ops:=2;
          result := true;
        end
      {else if (p.typ=ait_instruction) and
        MatchInstruction(p, [A_CMP], [C_None], [PF_None]) and
        (taicpu(p).oper[1]^.typ=top_const) and
        (taicpu(p).oper[1]^.val=0) and
        GetNextInstruction(p,hp1) and
        (taicpu(hp1).opcode=A_B) and
        (taicpu(hp1).condition in [C_EQ,C_NE]) then
        begin
          if taicpu(hp1).condition = C_EQ then
            hp2:=taicpu.op_reg_ref(A_CBZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^)
          else
            hp2:=taicpu.op_reg_ref(A_CBNZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^);
          taicpu(hp2).is_jmp := true;
          asml.InsertAfter(hp2, hp1);
          asml.Remove(hp1);
          hp1.Free;
          asml.Remove(p);
          p.Free;
          p := hp2;
          result := true;
        end}
    end;


  procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
    var
      p,hp1,hp2: tai;
      l : longint;
      condition : tasmcond;
      { UsedRegs, TmpUsedRegs: TRegSet; }
    begin
      p := BlockStart;
      { UsedRegs := []; }
      while (p <> BlockEnd) Do
        begin
          { UpdateUsedRegs(UsedRegs, tai(p.next)); }
          case p.Typ Of
            Ait_Instruction:
              begin
                case taicpu(p).opcode Of
                  A_B:
                    if taicpu(p).condition<>C_None then
                      begin
                        { check for
                            Bxx xxx
                            <several instructions>
                          xxx:
                        }
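                        { at most 4 instructions are collected, since an IT
                          block can cover no more than four instructions }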
                        l:=0;
                        GetNextInstruction(p, hp1);
                        while assigned(hp1) and
                          (l<=4) and
                          CanBeCond(hp1) and
                          { stop on labels }
                          not(hp1.typ=ait_label) do
                          begin
                            inc(l);
                            if MustBeLast(hp1) then
                              begin
                                //hp1:=nil;
                                GetNextInstruction(hp1,hp1);
                                break;
                              end
                            else
                              GetNextInstruction(hp1,hp1);
                          end;
                        if assigned(hp1) then
                          begin
                            if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
                              begin
                                if (l<=4) and (l>0) then
                                  begin
                                    condition:=inverse_cond(taicpu(p).condition);
                                    hp2:=p;
                                    GetNextInstruction(p,hp1);
                                    p:=hp1;
                                    repeat
                                      if hp1.typ=ait_instruction then
                                        taicpu(hp1).condition:=condition;
                                      if MustBeLast(hp1) then
                                        begin
                                          GetNextInstruction(hp1,hp1);
                                          break;
                                        end
                                      else
                                        GetNextInstruction(hp1,hp1);
                                    until not(assigned(hp1)) or
                                      not(CanBeCond(hp1)) or
                                      (hp1.typ=ait_label);
                                    { wait with removing else GetNextInstruction could
                                      ignore the label if it was the only usage in the
                                      jump moved away }
                                    asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
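                                    { the conditional branch may be the last
                                      instruction covered by a preceding IT
                                      block; as the branch is removed below,
                                      that block has to be shortened by one }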
                                    DecrementPreceedingIT(asml, hp2);
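                                    { make the l instructions up to the label
                                      conditional by inserting an IT block of
                                      matching length }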
                                    case l of
                                      1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
                                      2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
                                      3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
                                      4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
                                    end;
                                    tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                    asml.remove(hp2);
                                    hp2.free;
                                    continue;
                                  end;
                              end;
                          end;
                      end;
                  else
                    ;
                end;
              end;
            else
              ;
          end;
          p := tai(p.next)
        end;
    end;


  function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
    begin
      result:=false;
      if p.typ = ait_instruction then
        begin
          if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
            (taicpu(p).oper[1]^.typ=top_const) and
            (taicpu(p).oper[1]^.val >= 0) and
            (taicpu(p).oper[1]^.val < 256) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
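              { mov rD,#imm8: mark it flag-setting while the flags are not
                live, which allows the short Thumb encoding for low registers }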
              DebugMsg('Peephole Mov2Movs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
            (taicpu(p).oper[1]^.typ=top_reg) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
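              { the same for register mvn: the flag-setting form has a short
                encoding for low registers }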
              DebugMsg('Peephole Mvn2Mvns done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            (taicpu(p).oper[2]^.typ=top_const) and
            (taicpu(p).oper[2]^.val=0) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
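              { rsb rD,rS,#0 (negate) only has a short encoding in its
                flag-setting form }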
              DebugMsg('Peephole Rsb2Rsbs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
            (taicpu(p).oper[2]^.typ=top_const) and
            (taicpu(p).oper[2]^.val >= 0) and
            (taicpu(p).oper[2]^.val < 256) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
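              { add/sub rD,rD,#imm8 with unused flags can be turned into the
                2-operand flag-setting form, which has a short encoding for
                low registers (sp is excluded) }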
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
            (taicpu(p).ops = 2) and
            (taicpu(p).oper[1]^.typ=top_reg) and
            (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
            (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
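              { the 2-operand register add/sub is also marked flag-setting
                when the flags are free, again to allow the short encodings }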
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ=top_reg) then
            begin
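              { add rD,rD,rM can simply be written as the 2-operand form
                add rD,rM }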
              DebugMsg('Peephole AddRRR2AddRR done', p);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              result:=true;
            end
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ=top_reg) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
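              { op rD,rD,rM with unused flags is switched to the 2-operand
                flag-setting form ops rD,rM, which has a short encoding for
                low registers }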
              DebugMsg('Peephole opXXY2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
            begin
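              { an already flag-setting ops rD,rD,X just drops the duplicated
                first source operand: ops rD,X }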
              DebugMsg('Peephole opXXY2opXY done', p);
              taicpu(p).ops := 2;
              if taicpu(p).oper[2]^.typ=top_reg then
                taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
              else
                taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              result:=true;
            end
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
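              { and/orr/eor are commutative, so op rD,rN,rD computes the same
                value as op rD,rD,rN and can likewise be shortened to the
                flag-setting 2-operand form while the flags are free }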
              DebugMsg('Peephole opXYX2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
          else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
            (taicpu(p).ops=3) and
            (taicpu(p).oper[2]^.typ=top_shifterop) and
            (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
            //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
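              { mov rD,rM,<shift> is replaced by the corresponding dedicated
                shift instruction (lsl/lsr/asr/ror), made flag-setting since
                the flags are known to be free }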
              DebugMsg('Peephole Mov2Shift done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              case taicpu(p).oper[2]^.shifterop^.shiftmode of
                SM_LSL: taicpu(p).opcode:=A_LSL;
                SM_LSR: taicpu(p).opcode:=A_LSR;
                SM_ASR: taicpu(p).opcode:=A_ASR;
                SM_ROR: taicpu(p).opcode:=A_ROR;
                else
                  internalerror(2019050912);
              end;
              if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
                taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
              else
                taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
              result:=true;
            end
        end;
    end;


begin
  casmoptimizer:=TCpuAsmOptimizer;
  cpreregallocscheduler:=TCpuPreRegallocScheduler;
End.