{
    Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
    Development Team

    This unit implements the ARM optimizer object

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
Unit aoptcpu;

{$i fpcdefs.inc}

{ $define DEBUG_PREREGSCHEDULER}
{ $define DEBUG_AOPTCPU}

Interface

uses cgbase, cgutils, cpubase, aasmtai, aasmcpu, aopt, aoptobj;

Type
  TCpuAsmOptimizer = class(TAsmOptimizer)
    { uses the same constructor as TAopObj }
    function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
    procedure PeepHoleOptPass2;override;
    Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
    function RemoveSuperfluousMove(const p: tai; movp: tai; const optimizer: string): boolean;
    function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;

    { gets the next tai object after Current that contains information relevant
      to the optimizer and that uses the given register, or that changes program
      flow; if there is none, it returns false and sets Next to nil }
    Function GetNextInstructionUsingReg(Current: tai; Out Next: tai; reg: TRegister): Boolean;
    Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;

    { outputs a debug message into the assembler file }
    procedure DebugMsg(const s: string; p: tai);

    function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
    function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
  protected
    function LookForPreindexedPattern(p: taicpu): boolean;
    function LookForPostindexedPattern(p: taicpu): boolean;
  End;

  TCpuPreRegallocScheduler = class(TAsmScheduler)
    function SchedulerPass1Cpu(var p: tai): boolean;override;
    procedure SwapRegLive(p, hp1: taicpu);
  end;

  TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
    { uses the same constructor as TAopObj }
    function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
    procedure PeepHoleOptPass2;override;
    function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  End;

function MustBeLast(p : tai) : boolean;

Implementation

  uses
    cutils,verbose,globtype,globals,
    systems,
    cpuinfo,
    cgobj,procinfo,
    aasmbase,aasmdata;

  { Range checking must be disabled explicitly, as conversions between signed and unsigned
    32-bit values are done without explicit typecasts }
{$R-}
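
  { returns true if the instruction p can be turned into a conditionally executed
    instruction: it must be an unconditional instruction and not one of the opcodes
    that cannot take a condition (IT*, CBZ/CBNZ, PLD, or BL/BLX to a symbol) }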
  function CanBeCond(p : tai) : boolean;
    begin
      result:=
        not(GenerateThumbCode) and
        (p.typ=ait_instruction) and
        (taicpu(p).condition=C_None) and
        ((taicpu(p).opcode<A_IT) or (taicpu(p).opcode>A_ITTTT)) and
        (taicpu(p).opcode<>A_CBZ) and
        (taicpu(p).opcode<>A_CBNZ) and
        (taicpu(p).opcode<>A_PLD) and
        (((taicpu(p).opcode<>A_BLX) and
          { BL may need to be converted into BLX by the linker -- could possibly
            be allowed in case it's to a local symbol of which we know that it
            uses the same instruction set as the current one }
          (taicpu(p).opcode<>A_BL)) or
         (taicpu(p).oper[0]^.typ=top_reg));
    end;
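
  { returns true if r1 and r2 denote the same memory location and neither
    reference is marked volatile }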
  function RefsEqual(const r1, r2: treference): boolean;
    begin
      refsequal :=
        (r1.offset = r2.offset) and
        (r1.base = r2.base) and
        (r1.index = r2.index) and (r1.scalefactor = r2.scalefactor) and
        (r1.symbol=r2.symbol) and (r1.refaddr = r2.refaddr) and
        (r1.relsymbol = r2.relsymbol) and
        (r1.signindex = r2.signindex) and
        (r1.shiftimm = r2.shiftimm) and
        (r1.addressmode = r2.addressmode) and
        (r1.shiftmode = r2.shiftmode) and
        (r1.volatility=[]) and
        (r2.volatility=[]);
    end;
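
  { the MatchInstruction overloads check the opcode, condition and postfix of an
    instruction; an empty condition or postfix set acts as a wildcard }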
  function MatchInstruction(const instr: tai; const op: TCommonAsmOps; const cond: TAsmConds; const postfix: TOpPostfixes): boolean;
    begin
      result :=
        (instr.typ = ait_instruction) and
        ((op = []) or ((ord(taicpu(instr).opcode)<256) and (taicpu(instr).opcode in op))) and
        ((cond = []) or (taicpu(instr).condition in cond)) and
        ((postfix = []) or (taicpu(instr).oppostfix in postfix));
    end;

  function MatchInstruction(const instr: tai; const op: TAsmOp; const cond: TAsmConds; const postfix: TOpPostfixes): boolean;
    begin
      result :=
        (instr.typ = ait_instruction) and
        (taicpu(instr).opcode = op) and
        ((cond = []) or (taicpu(instr).condition in cond)) and
        ((postfix = []) or (taicpu(instr).oppostfix in postfix));
    end;

  function MatchOperand(const oper1: TOper; const oper2: TOper): boolean; inline;
    begin
      result := oper1.typ = oper2.typ;
      if result then
        case oper1.typ of
          top_const:
            Result:=oper1.val = oper2.val;
          top_reg:
            Result:=oper1.reg = oper2.reg;
          top_conditioncode:
            Result:=oper1.cc = oper2.cc;
          top_ref:
            Result:=RefsEqual(oper1.ref^, oper2.ref^);
          else
            Result:=false;
        end
    end;

  function MatchOperand(const oper: TOper; const reg: TRegister): boolean; inline;
    begin
      result := (oper.typ = top_reg) and (oper.reg = reg);
    end;
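
  { removes a conditional "moveq reg,#const" in movp that follows the compare in
    cmpp when it uses the same register and constant: if the comparison succeeded,
    the register already holds that value, so the moveq is redundant }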
  function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
    begin
      Result:=false;
      if (taicpu(movp).condition = C_EQ) and
         (taicpu(cmpp).oper[0]^.reg = taicpu(movp).oper[0]^.reg) and
         (taicpu(cmpp).oper[1]^.val = taicpu(movp).oper[1]^.val) then
        begin
          asml.insertafter(tai_comment.Create(strpnew('Peephole CmpMovMov - Removed redundant moveq')), movp);
          asml.remove(movp);
          movp.free;
          Result:=true;
        end;
    end;
  function AlignedToQWord(const ref : treference) : boolean;
    begin
      { (safe) heuristics to ensure alignment }
      result:=(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) and
        (((ref.offset>=0) and
          ((ref.offset mod 8)=0) and
          ((ref.base=NR_R13) or
           (ref.index=NR_R13))
         ) or
         ((ref.offset<=0) and
          { when using NR_R11, it always has a value of <qword align>+4 }
          ((abs(ref.offset+4) mod 8)=0) and
          (current_procinfo.framepointer=NR_R11) and
          ((ref.base=NR_R11) or
           (ref.index=NR_R11))
         )
        );
    end;
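
  { true if aoffset fits into the immediate offset field of a load/store with
    postfix pf: -255..4095 for Thumb-2, +/-4095 for word/byte accesses in ARM mode
    and +/-255 otherwise }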
  function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
    begin
      if GenerateThumb2Code then
        result := (aoffset<4096) and (aoffset>-256)
      else
        result := ((pf in [PF_None,PF_B]) and
                   (abs(aoffset)<4096)) or
                  (abs(aoffset)<256);
    end;
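
  { true if instruction hp reads the register reg, either as a plain operand,
    through a register set, a shifter operand, or as part of a memory reference }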
  function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
    var
      p: taicpu;
      i: longint;
    begin
      instructionLoadsFromReg := false;
      if not (assigned(hp) and (hp.typ = ait_instruction)) then
        exit;
      p:=taicpu(hp);
      i:=1;
      { For these instructions we have to start at oper[0] }
      if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
                       A_CMP, A_CMN, A_TST, A_TEQ,
                       A_B, A_BL, A_BX, A_BLX,
                       A_SMLAL, A_UMLAL]) then
        i:=0;
      while (i<p.ops) do
        begin
          case p.oper[I]^.typ of
            top_reg:
              instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
                { STRD }
                ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
            top_regset:
              instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
            top_shifterop:
              instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
            top_ref:
              instructionLoadsFromReg :=
                (p.oper[I]^.ref^.base = reg) or
                (p.oper[I]^.ref^.index = reg);
          end;
          { bail out as soon as a use of reg has been found }
          if instructionLoadsFromReg then
            exit;
          Inc(I);
        end;
    end;
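
  { true if instruction hp overwrites reg with a new value, as opposed to merely
    reading it or modifying it through pre-/post-indexed addressing }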
  function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
    var
      p: taicpu;
    begin
      p := taicpu(hp);
      Result := false;
      if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
        exit;

      case p.opcode of
        { These opcodes do not write to a register at all }
        A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
        A_VCMP:
          exit;

        { Take care of post-/pre-indexed stores and loads; they change their base register }
        A_STR, A_LDR:
          begin
            Result := false;
            { actually, this does not apply here because post-/preindexed does not mean that a register
              is loaded with a new value, it is only modified
              (taicpu(p).oper[1]^.typ=top_ref) and
              (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
              (taicpu(p).oper[1]^.ref^.base = reg);
            }
            { STR does not load into its first register }
            if p.opcode = A_STR then
              exit;
          end;
        A_VSTR:
          begin
            Result := false;
            exit;
          end;
        { These four write to the first two registers; UMLAL and SMLAL also read from them }
        A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
          Result :=
            (p.oper[1]^.typ = top_reg) and
            (p.oper[1]^.reg = reg);
        { Loads to oper2 from coprocessor }
        {
          MCR/MRC is currently not supported in FPC
          A_MRC:
            Result :=
              (p.oper[2]^.typ = top_reg) and
              (p.oper[2]^.reg = reg);
        }
        { Loads to all registers in the register set }
        A_LDM, A_VLDM:
          Result := (getsupreg(reg) in p.oper[1]^.regset^);
        A_POP:
          Result := (getsupreg(reg) in p.oper[0]^.regset^) or
                    (reg=NR_STACK_POINTER_REG);
      end;

      if Result then
        exit;

      case p.oper[0]^.typ of
        { in the general case the first operand is the destination register }
        top_reg:
          Result := (p.oper[0]^.reg = reg) or
            { LDRD }
            (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
        { LDM/STM might write a new value to their index register }
        top_ref:
          Result :=
            (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
            (taicpu(p).oper[0]^.ref^.base = reg);
      end;
    end;
  function TCpuAsmOptimizer.GetNextInstructionUsingReg(Current: tai;
    Out Next: tai; reg: TRegister): Boolean;
    begin
      Next:=Current;
      repeat
        Result:=GetNextInstruction(Next,Next);
      until not (Result) or
            not(cs_opt_level3 in current_settings.optimizerswitches) or
            (Next.typ<>ait_instruction) or
            RegInInstruction(reg,Next) or
            is_calljmp(taicpu(Next).opcode) or
            RegModifiedByInstruction(NR_PC,Next);
    end;
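
  { searches forward from Current for the next LDR/STR that accesses the same
    reference ref; the search stops at control-flow instructions, PC modifications
    and, if StopOnStore is set, at stores }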
  function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai;
    Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
    begin
      Next:=Current;
      repeat
        Result:=GetNextInstruction(Next,Next);
        if Result and
           (Next.typ=ait_instruction) and
           (taicpu(Next).opcode in [A_LDR, A_STR]) and
           (
            ((taicpu(Next).ops = 2) and
             (taicpu(Next).oper[1]^.typ = top_ref) and
             RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
            ((taicpu(Next).ops = 3) and { LDRD/STRD }
             (taicpu(Next).oper[2]^.typ = top_ref) and
             RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
           ) then
          { We've found an LDR or STR instruction with the same reference }
          exit;
      until not(Result) or
            (Next.typ<>ait_instruction) or
            not(cs_opt_level3 in current_settings.optimizerswitches) or
            is_calljmp(taicpu(Next).opcode) or
            (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
            RegModifiedByInstruction(NR_PC,Next);
      Result:=false;
    end;
{$ifdef DEBUG_AOPTCPU}
  procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
    begin
      asml.insertbefore(tai_comment.Create(strpnew(s)), p);
    end;
{$else DEBUG_AOPTCPU}
  procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
    begin
    end;
{$endif DEBUG_AOPTCPU}
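
  { if the value written by p is immediately copied away by the following mov in
    movp and the copy is the only use, rewrite p to write the mov's destination
    register directly and delete the mov }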
  function TCpuAsmOptimizer.RemoveSuperfluousMove(const p: tai; movp: tai; const optimizer: string):boolean;
    var
      alloc,
      dealloc : tai_regalloc;
      hp1 : tai;
    begin
      Result:=false;
      if MatchInstruction(movp, A_MOV, [taicpu(p).condition], [PF_None]) and
         { we can't optimize if there is a shifter operand }
         (taicpu(movp).ops=2) and
         MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
         { don't mess with moves to pc }
         (taicpu(movp).oper[0]^.reg<>NR_PC) and
         { don't mess with moves to lr }
         (taicpu(movp).oper[0]^.reg<>NR_R14) and
         { the destination register of the mov must not be used between p and movp }
         not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
         { cb[n]z are thumb instructions which require specific registers, with no wide forms }
         (taicpu(p).opcode<>A_CBZ) and
         (taicpu(p).opcode<>A_CBNZ) and
         { there is a special requirement for MUL and MLA: oper[0] and oper[1] are not allowed to be the same }
         not (
           (taicpu(p).opcode in [A_MLA, A_MUL]) and
           (taicpu(p).oper[1]^.reg = taicpu(movp).oper[0]^.reg) and
           (current_settings.cputype < cpu_armv6)
         ) and
         { Take care to only do this for instructions which REALLY load to the first register.
           Otherwise
             str reg0, [reg1]
             mov reg2, reg0
           will be optimized to
             str reg2, [reg1]
         }
         regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
        begin
          dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
          if assigned(dealloc) then
            begin
              DebugMsg('Peephole '+optimizer+' removed superfluous mov', movp);
              result:=true;

              { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
                and remove it if possible }
              asml.Remove(dealloc);
              alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
              if assigned(alloc) then
                begin
                  asml.Remove(alloc);
                  alloc.free;
                  dealloc.free;
                end
              else
                asml.InsertAfter(dealloc,p);

              { try to move the allocation of the target register }
              GetLastInstruction(movp,hp1);
              alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
              if assigned(alloc) then
                begin
                  asml.Remove(alloc);
                  asml.InsertBefore(alloc,p);
                  { adjust used regs }
                  IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
                end;

              { finally get rid of the mov }
              taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
              asml.remove(movp);
              movp.free;
            end;
        end;
    end;
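
  { VFP variant of RemoveSuperfluousMove: folds a following vmov back into the
    instruction p that produced its source register, taking the single/double
    precision and conversion postfixes into account }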
  function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
    var
      alloc,
      dealloc : tai_regalloc;
      hp1 : tai;
    begin
      Result:=false;
      if (MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) or
          ((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
          ((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
         ) and
         (taicpu(movp).ops=2) and
         MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
         { the destination register of the mov must not be used between p and movp }
         not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
         { Take care to only do this for instructions which REALLY load to the first register.
           Otherwise
             vstr reg0, [reg1]
             vmov reg2, reg0
           will be optimized to
             vstr reg2, [reg1]
         }
         regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
        begin
          dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
          if assigned(dealloc) then
            begin
              DebugMsg('Peephole '+optimizer+' removed superfluous vmov', movp);
              result:=true;

              { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
                and remove it if possible }
              asml.Remove(dealloc);
              alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
              if assigned(alloc) then
                begin
                  asml.Remove(alloc);
                  alloc.free;
                  dealloc.free;
                end
              else
                asml.InsertAfter(dealloc,p);

              { try to move the allocation of the target register }
              GetLastInstruction(movp,hp1);
              alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
              if assigned(alloc) then
                begin
                  asml.Remove(alloc);
                  asml.InsertBefore(alloc,p);
                  { adjust used regs }
                  IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
                end;

              { finally get rid of the mov }
              taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
              asml.remove(movp);
              movp.free;
            end;
        end;
    end;
  {
    optimize
      add/sub reg1,reg1,regY/const
      ...
      ldr/str regX,[reg1]
    into
      ldr/str regX,[reg1, regY/const]!
  }
  function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
    var
      hp1: tai;
    begin
      if GenerateARMCode and
         (p.ops=3) and
         MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
         GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
         (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
         MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
         (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
         (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
         (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
         (taicpu(hp1).oper[1]^.ref^.offset=0) and
         (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
         (((p.oper[2]^.typ=top_reg) and
           (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
          ((p.oper[2]^.typ=top_const) and
           ((abs(p.oper[2]^.val) < 256) or
            ((abs(p.oper[2]^.val) < 4096) and
             (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
        begin
          taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;

          if p.oper[2]^.typ=top_reg then
            begin
              taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
              if p.opcode=A_ADD then
                taicpu(hp1).oper[1]^.ref^.signindex:=1
              else
                taicpu(hp1).oper[1]^.ref^.signindex:=-1;
            end
          else
            begin
              if p.opcode=A_ADD then
                taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
              else
                taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
            end;

          result:=true;
        end
      else
        result:=false;
    end;
  {
    optimize
      ldr/str regX,[reg1]
      ...
      add/sub reg1,reg1,regY/const
    into
      ldr/str regX,[reg1], regY/const
  }
  function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
    var
      hp1 : tai;
    begin
      Result:=false;
      if (p.oper[1]^.typ = top_ref) and
         (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
         (p.oper[1]^.ref^.index=NR_NO) and
         (p.oper[1]^.ref^.offset=0) and
         GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
         { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
         MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
         (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
         (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
         (
          (taicpu(hp1).oper[2]^.typ=top_reg) or
          { valid offset? }
          ((taicpu(hp1).oper[2]^.typ=top_const) and
           ((abs(taicpu(hp1).oper[2]^.val)<256) or
            ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
           )
          )
         ) and
         { don't apply the optimization if the base register is loaded }
         (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
         not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
         { don't apply the optimization if the (new) index register is loaded }
         (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
         not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
         GenerateARMCode then
        begin
          DebugMsg('Peephole Str/LdrAdd/Sub2Str/Ldr Postindex done', p);

          p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
          if taicpu(hp1).oper[2]^.typ=top_const then
            begin
              if taicpu(hp1).opcode=A_ADD then
                p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
              else
                p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
            end
          else
            begin
              p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
              if taicpu(hp1).opcode=A_ADD then
                p.oper[1]^.ref^.signindex:=1
              else
                p.oper[1]^.ref^.signindex:=-1;
            end;

          asml.Remove(hp1);
          hp1.Free;
          Result:=true;
        end;
    end;
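
  { first CPU-specific peephole optimization pass; Result becomes true whenever
    one of the transformations below has been applied to p }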
  function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
    var
      hp1,hp2,hp3,hp4: tai;
      i, i2: longint;
      TmpUsedRegs: TAllUsedRegs;
      tempop: tasmop;
      oldreg: tregister;
      dealloc: tai_regalloc;

    function IsPowerOf2(const value: DWord): boolean; inline;
      begin
        Result:=(value and (value - 1)) = 0;
      end;
    begin
      result := false;
      case p.typ of
        ait_instruction:
          begin
            {
              change
                <op> reg,x,y
                cmp reg,#0
              into
                <op>s reg,x,y
            }
            { this optimization can be applied only to the currently enabled operations because
              the other operations do not update all flags and FPC does not track flag usage }
            if MatchInstruction(p, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,A_EOR,A_AND,
                                    A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
               GetNextInstruction(p, hp1) and
               { mlas is only allowed in arm mode }
               ((taicpu(p).opcode<>A_MLA) or
                (current_settings.instructionset<>is_thumb)) and
               MatchInstruction(hp1, A_CMP, [C_None], [PF_None]) and
               (taicpu(hp1).oper[1]^.typ = top_const) and
               (taicpu(p).oper[0]^.reg = taicpu(hp1).oper[0]^.reg) and
               (taicpu(hp1).oper[1]^.val = 0) and
               GetNextInstruction(hp1, hp2) and
               { be careful here, following instructions could use other flags,
                 however after a jump fpc never depends on the value of flags }
               { All above instructions set Z and N according to the following
                 Z := result = 0;
                 N := result[31];
                 EQ = Z=1; NE = Z=0;
                 MI = N=1; PL = N=0; }
               (MatchInstruction(hp2, A_B, [C_EQ,C_NE,C_MI,C_PL], []) or
                { mov is also possible, but only if there is no shifter operand; it could be an rxx,
                  we are too lazy to check if it is rxx or something else }
                (MatchInstruction(hp2, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp2).ops=2))) and
               assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp2.Next))) then
              begin
                DebugMsg('Peephole OpCmp2OpS done', p);

                taicpu(p).oppostfix:=PF_S;

                { move flag allocation if possible }
                GetLastInstruction(hp1, hp2);
                hp2:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp2.Next));
                if assigned(hp2) then
                  begin
                    asml.Remove(hp2);
                    asml.insertbefore(hp2, p);
                  end;

                asml.remove(hp1);
                hp1.free;
                Result:=true;
              end
            else
              case taicpu(p).opcode of
                A_STR:
                  begin
                    { change
                        str reg1,ref
                        ldr reg2,ref
                      into
                        str reg1,ref
                        mov reg2,reg1
                    }
                    if (taicpu(p).oper[1]^.typ = top_ref) and
                       (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
                       (taicpu(p).oppostfix=PF_None) and
                       (taicpu(p).condition=C_None) and
                       GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
                       MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
                       (taicpu(hp1).oper[1]^.typ=top_ref) and
                       (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
                       not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
                       ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
                       ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
                      begin
                        if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
                          begin
                            DebugMsg('Peephole StrLdr2StrMov 1 done', hp1);
                            asml.remove(hp1);
                            hp1.free;
                          end
                        else
                          begin
                            taicpu(hp1).opcode:=A_MOV;
                            taicpu(hp1).oppostfix:=PF_None;
                            taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
                            DebugMsg('Peephole StrLdr2StrMov 2 done', hp1);
                          end;
                        result := true;
                      end
                    { change
                        str reg1,ref
                        str reg2,ref
                      into
                        strd reg1,reg2,ref
                    }
                    else if (GenerateARMCode or GenerateThumb2Code) and
                       (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
                       (taicpu(p).oppostfix=PF_None) and
                       (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
                       GetNextInstruction(p,hp1) and
                       MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
                       not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
                       (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
                       { str ensures that either base or index contains no register, else ldr wouldn't
                         use an offset either }
                       (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
                       (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
                       (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
                       (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
                       AlignedToQWord(taicpu(p).oper[1]^.ref^) then
                      begin
                        DebugMsg('Peephole StrStr2Strd done', p);
                        taicpu(p).oppostfix:=PF_D;
                        taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
                        taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
                        taicpu(p).ops:=3;
                        asml.remove(hp1);
                        hp1.free;
                        result:=true;
                      end;
                    Result:=LookForPostindexedPattern(taicpu(p)) or Result;
                  end;
                A_LDR:
                  begin
                    { change
                        ldr reg1,ref
                        ldr reg2,ref
                      into ...
                    }
                    if (taicpu(p).oper[1]^.typ = top_ref) and
                       (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
                       GetNextInstruction(p,hp1) and
                       { ldrd is not allowed here }
                       MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
                      begin
                        {
                          ...
                          ldr reg1,ref
                          mov reg2,reg1
                        }
                        if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
                           RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
                           (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
                           (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
                           (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
                          begin
                            if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
                              begin
                                DebugMsg('Peephole LdrLdr2Ldr done', hp1);
                                asml.remove(hp1);
                                hp1.free;
                              end
                            else
                              begin
                                DebugMsg('Peephole LdrLdr2LdrMov done', hp1);
                                taicpu(hp1).opcode:=A_MOV;
                                taicpu(hp1).oppostfix:=PF_None;
                                taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
                              end;
                            result := true;
                          end
                        {
                          ...
                          ldrd reg1,reg1+1,ref
                        }
                        else if (GenerateARMCode or GenerateThumb2Code) and
                           (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
                           { ldrd does not allow any postfixes ... }
                           (taicpu(p).oppostfix=PF_None) and
                           not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
                           (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
                           { ldr ensures that either base or index contains no register, else ldr wouldn't
                             use an offset either }
                           (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
                           (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
                           (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
                           (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
                           AlignedToQWord(taicpu(p).oper[1]^.ref^) then
                          begin
                            DebugMsg('Peephole LdrLdr2Ldrd done', p);
                            taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
                            taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
                            taicpu(p).ops:=3;
                            taicpu(p).oppostfix:=PF_D;
                            asml.remove(hp1);
                            hp1.free;
                            result:=true;
                          end;
                      end;
                    {
                      Change
                        ldrb dst1, [REF]
                        and  dst2, dst1, #255
                      into
                        ldrb dst2, [ref]
                    }
                    if not(GenerateThumbCode) and
                       (taicpu(p).oppostfix=PF_B) and
                       GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
                       MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
                       (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
                       (taicpu(hp1).oper[2]^.typ = top_const) and
                       (taicpu(hp1).oper[2]^.val = $FF) and
                       not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
                       RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
                      begin
                        DebugMsg('Peephole LdrbAnd2Ldrb done', p);
                        taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
                        asml.remove(hp1);
                        hp1.free;
                        result:=true;
                      end;
                    Result:=LookForPostindexedPattern(taicpu(p)) or Result;
                    { Remove superfluous mov after ldr
                      changes
                        ldr reg1, ref
                        mov reg2, reg1
                      to
                        ldr reg2, ref
                      conditions are:
                        * no ldrd usage
                        * reg1 must be released after mov
                        * mov can not contain shifterops
                        * ldr+mov have the same conditions
                        * mov does not set flags
                    }
                    if (taicpu(p).oppostfix<>PF_D) and
                       GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
                       RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
                      Result:=true;
                  end;
                A_MOV:
                  begin
                    { fold
                        mov reg1,reg0, shift imm1
                        mov reg1,reg1, shift imm2
                    }
                    if (taicpu(p).ops=3) and
                       (taicpu(p).oper[2]^.typ = top_shifterop) and
                       (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
                       getnextinstruction(p,hp1) and
                       MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
                       (taicpu(hp1).ops=3) and
                       MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
                       MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
                       (taicpu(hp1).oper[2]^.typ = top_shifterop) and
                       (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
                      begin
                        { fold
                            mov reg1,reg0, lsl 16
                            mov reg1,reg1, lsr 16
                            strh reg1, ...
                            dealloc reg1
                          to
                            strh reg1, ...
                            dealloc reg1
                        }
                        if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
                           (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
                           (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
                           (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
                           getnextinstruction(hp1,hp2) and
                           MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
                           MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
                          begin
                            CopyUsedRegs(TmpUsedRegs);
                            UpdateUsedRegs(TmpUsedRegs, tai(p.next));
                            UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
                            if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
                              begin
                                DebugMsg('Peephole optimizer removed superfluous 16 Bit zero extension', hp1);
                                taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
                                asml.remove(p);
                                asml.remove(hp1);
                                p.free;
                                hp1.free;
                                p:=hp2;
                                Result:=true;
                              end;
                            ReleaseUsedRegs(TmpUsedRegs);
                          end
                        { fold
                            mov reg1,reg0, shift imm1
                            mov reg1,reg1, shift imm2
                          to
                            mov reg1,reg0, shift imm1+imm2
                        }
                        else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
                           { after an lsr the sign bit is zero, so a following asr acts like an lsr
                             and can be folded into it }
                           ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR)) then
                          begin
                            inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
                            { avoid overflows }
                            if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
                              case taicpu(p).oper[2]^.shifterop^.shiftmode of
                                SM_ROR:
                                  taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
                                SM_ASR:
                                  taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
                                SM_LSR,
                                SM_LSL:
                                  begin
                                    hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
                                    InsertLLItem(p.previous, p.next, hp2);
                                    p.free;
                                    p:=hp2;
                                  end;
                                else
                                  internalerror(2008072803);
                              end;
                            DebugMsg('Peephole ShiftShift2Shift 1 done', p);
                            asml.remove(hp1);
                            hp1.free;
                            result := true;
                          end
                        { fold
                            mov reg1,reg0, shift imm1
                            mov reg1,reg1, shift imm2
                            mov reg1,reg1, shift imm3 ...
                            mov reg2,reg1, shift imm3 ...
                        }
                        else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
                           MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
                           (taicpu(hp2).ops=3) and
                           MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
                           RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
                           (taicpu(hp2).oper[2]^.typ = top_shifterop) and
                           (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
                          begin
                            { mov reg1,reg0, lsl imm1
                              mov reg1,reg1, lsr/asr imm2
                              mov reg2,reg1, lsl imm3 ...
                              to
                              mov reg1,reg0, lsl imm1
                              mov reg2,reg1, lsr/asr imm2-imm3
                              if imm1>=imm2
                            }
                            if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
                               (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
                               (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
                              begin
                                if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
                                  begin
                                    if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
                                       not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
                                      begin
                                        DebugMsg('Peephole ShiftShiftShift2ShiftShift 1a done', p);
                                        inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
                                        taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
                                        asml.remove(hp1);
                                        asml.remove(hp2);
                                        hp1.free;
                                        hp2.free;

                                        if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
                                          begin
                                            taicpu(p).freeop(1);
                                            taicpu(p).freeop(2);
                                            taicpu(p).loadconst(1,0);
                                          end;
                                        result := true;
                                      end;
                                  end
                                else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
                                  begin
                                    DebugMsg('Peephole ShiftShiftShift2ShiftShift 1b done', p);
                                    dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
                                    taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
                                    asml.remove(hp2);
                                    hp2.free;
                                    result := true;
                                  end;
                              end
                            { mov reg1,reg0, lsr/asr imm1
                              mov reg1,reg1, lsl imm2
                              mov reg1,reg1, lsr/asr imm3 ...
                              if imm3>=imm1 and imm2>=imm1
                              to
                              mov reg1,reg0, lsl imm2-imm1
                              mov reg1,reg1, lsr/asr imm3 ...
                            }
                            else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
                               (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
                               (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
                               (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
                              begin
                                dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
                                taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
                                DebugMsg('Peephole ShiftShiftShift2ShiftShift 2 done', p);
                                asml.remove(p);
                                p.free;
                                p:=hp2;

                                if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
                                  begin
                                    taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
                                    asml.remove(hp1);
                                    hp1.free;
                                    p:=hp2;
                                  end;
                                result := true;
                              end;
                          end;
                      end;
                    { Change the common
                        mov r0, r0, lsr #xxx
                        and r0, r0, #yyy    (or  bic r0, r0, #xxx)
                      and remove the superfluous and/bic if possible
                      This could be extended to handle more cases.
                    }
                    if (taicpu(p).ops=3) and
                       (taicpu(p).oper[2]^.typ = top_shifterop) and
                       (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
                       (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
                       GetNextInstructionUsingReg(p,hp1, taicpu(p).oper[0]^.reg) and
                       (hp1.typ=ait_instruction) and
                       (taicpu(hp1).ops>=1) and
                       (taicpu(hp1).oper[0]^.typ=top_reg) and
                       (not RegModifiedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
                       RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
                      begin
                        if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24) and
                           MatchInstruction(hp1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
                           (taicpu(hp1).ops=3) and
                           MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^) and
                           (taicpu(hp1).oper[2]^.typ = top_const) and
                           { check whether the AND would only mask out bits that are already zero because of the shift }
                           ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hp1).oper[2]^.val) =
                            ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
                          begin
                            DebugMsg('Peephole LsrAnd2Lsr done', hp1);
                            taicpu(p).oper[0]^.reg:=taicpu(hp1).oper[0]^.reg;
                            asml.remove(hp1);
                            hp1.free;
                            result:=true;
                          end
                        else if MatchInstruction(hp1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
                           (taicpu(hp1).ops=3) and
                           MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^) and
                           (taicpu(hp1).oper[2]^.typ = top_const) and
                           { check whether the BIC would only mask out bits that are already zero because of the shift }
                           (taicpu(hp1).oper[2]^.val<>0) and
                           (BsfDWord(taicpu(hp1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
                          begin
                            DebugMsg('Peephole LsrBic2Lsr done', hp1);
                            taicpu(p).oper[0]^.reg:=taicpu(hp1).oper[0]^.reg;
                            asml.remove(hp1);
                            hp1.free;
                            result:=true;
                          end;
                      end;
                    { Change
                        mov rx, ry, lsr/ror #xxx
                        uxtb/uxth rz,rx       (or  and rz,rx,0xFF)
                        dealloc rx
                      to
                        uxtb/uxth rz,ry,ror #xxx
                    }
                    if (taicpu(p).ops=3) and
                       (taicpu(p).oper[2]^.typ = top_shifterop) and
                       (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
                       (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
                       (GenerateThumb2Code) and
                       GetNextInstructionUsingReg(p,hp1, taicpu(p).oper[0]^.reg) and
                       RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
                      begin
                        if MatchInstruction(hp1, A_UXTB, [C_None], [PF_None]) and
                           (taicpu(hp1).ops = 2) and
                           (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
                           MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
                          begin
                            taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
                            taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
                            taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
                            taicpu(hp1).ops := 3;
                            GetNextInstruction(p,hp1);
                            asml.Remove(p);
                            p.Free;
                            p:=hp1;
                            result:=true;
                            exit;
                          end
                        else if MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
                           (taicpu(hp1).ops=2) and
                           (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
                           MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
                          begin
                            taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
                            taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
                            taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
                            taicpu(hp1).ops := 3;
                            GetNextInstruction(p,hp1);
                            asml.Remove(p);
                            p.Free;
                            p:=hp1;
                            result:=true;
                            exit;
                          end
                        else if MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
                           (taicpu(hp1).ops = 3) and
                           (taicpu(hp1).oper[2]^.typ = top_const) and
                           (taicpu(hp1).oper[2]^.val = $FF) and
                           (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
                           MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
                          begin
                            taicpu(hp1).ops := 3;
                            taicpu(hp1).opcode := A_UXTB;
                            taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
                            taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
                            taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
                            GetNextInstruction(p,hp1);
                            asml.Remove(p);
                            p.Free;
                            p:=hp1;
                            result:=true;
                            exit;
                          end;
                      end;
                    {
                      optimize
                        mov rX, yyyy
                        ....
                    }
                    if (taicpu(p).ops = 2) and
                       GetNextInstruction(p,hp1) and
                       (tai(hp1).typ = ait_instruction) then
                      begin
                        {
                          This changes the very common
                            mov r0, #0
                            str r0, [...]
                            mov r0, #0
                            str r0, [...]
                          and removes all superfluous mov instructions
                        }
                        if (taicpu(p).oper[1]^.typ = top_const) and
                           (taicpu(hp1).opcode=A_STR) then
                          while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
                                MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^) and
                                GetNextInstruction(hp1, hp2) and
                                MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
                                (taicpu(hp2).ops = 2) and
                                MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
                                MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
                            begin
                              DebugMsg('Peephole MovStrMov done', hp2);
                              GetNextInstruction(hp2,hp1);
                              asml.remove(hp2);
                              hp2.free;
                              result:=true;
                              if not assigned(hp1) then
                                break;
                            end
                        {
                          This removes the first mov from
                            mov rX,...
                            mov rX,...
                        }
                        else if taicpu(hp1).opcode=A_MOV then
                          while MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
                                (taicpu(hp1).ops = 2) and
                                MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^) and
                                { don't remove the first mov if the second is a mov rX,rX }
                                not(MatchOperand(taicpu(hp1).oper[0]^, taicpu(hp1).oper[1]^)) do
                            begin
                              DebugMsg('Peephole MovMov done', p);
                              asml.remove(p);
                              p.free;
                              p:=hp1;
                              GetNextInstruction(hp1,hp1);
                              result:=true;
                              if not assigned(hp1) then
                                break;
                            end;
                      end;
                    {
                      change
                        mov r1, r0
                        add r1, r1, #1
                      to
                        add r1, r0, #1

                      Todo: Make it work for mov+cmp too

                      CAUTION! If this one is successful p might not be a mov instruction anymore!
                    }
                    if (taicpu(p).ops = 2) and
                       (taicpu(p).oper[1]^.typ = top_reg) and
                       (taicpu(p).oppostfix = PF_NONE) and
                       GetNextInstruction(p, hp1) and
                       MatchInstruction(hp1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
                                              A_AND, A_BIC, A_EOR, A_ORR, A_MOV, A_MVN],
                                        [taicpu(p).condition], []) and
                       { MOV and MVN might only have 2 ops }
                       (taicpu(hp1).ops >= 2) and
                       MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^.reg) and
                       (taicpu(hp1).oper[1]^.typ = top_reg) and
                       (
                        (taicpu(hp1).ops = 2) or
                        (taicpu(hp1).oper[2]^.typ in [top_reg, top_const, top_shifterop])
                       ) then
                      begin
                        { When we get here we still don't know if the registers match }
                        for I:=1 to 2 do
                          {
                            If the first loop was successful p will be replaced with hp1.
                            The checks will still be ok, because all required information
                            will also be in hp1 then.
                          }
                          if (taicpu(hp1).ops > I) and
                             MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[I]^.reg) and
                             { prevent certain combinations on thumb(2), this is only a safe approximation }
                             (not(GenerateThumbCode or GenerateThumb2Code) or
                              ((getsupreg(taicpu(p).oper[1]^.reg)<>RS_R13) and
                               (getsupreg(taicpu(p).oper[1]^.reg)<>RS_R15))
                             ) then
                            begin
                              DebugMsg('Peephole RedundantMovProcess done', hp1);
                              taicpu(hp1).oper[I]^.reg := taicpu(p).oper[1]^.reg;
                              if p<>hp1 then
                                begin
                                  asml.remove(p);
                                  p.free;
                                  p:=hp1;
                                  Result:=true;
                                end;
                            end;
                      end;
  1206. { Fold the very common sequence
  1207. mov regA, regB
  1208. ldr* regA, [regA]
  1209. to
  1210. ldr* regA, [regB]
  1211. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1212. }
  1213. if (taicpu(p).opcode = A_MOV) and
  1214. (taicpu(p).ops = 2) and
  1215. (taicpu(p).oper[1]^.typ = top_reg) and
  1216. (taicpu(p).oppostfix = PF_NONE) and
  1217. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1218. MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1219. (taicpu(hp1).oper[1]^.typ = top_ref) and
  1220. { We can change the base register only when the instruction uses AM_OFFSET }
  1221. ((taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1222. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1223. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1224. ) and
  1225. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
  1226. // Make sure that Thumb code doesn't propagate a high register into a reference
  1227. ((GenerateThumbCode and
  1228. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)) or
  1229. (not GenerateThumbCode)) and
  1230. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1231. begin
  1232. DebugMsg('Peephole MovLdr2Ldr done', hp1);
  1233. if (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1234. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1235. taicpu(hp1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1236. if taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1237. taicpu(hp1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
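{ the mov's source register now lives until hp1, so if its deallocation
marker was placed right after p, move it behind hp1 to keep the register
allocation information consistent }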
  1238. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1239. if Assigned(dealloc) then
  1240. begin
  1241. asml.remove(dealloc);
  1242. asml.InsertAfter(dealloc,hp1);
  1243. end;
  1244. GetNextInstruction(p, hp1);
  1245. asml.remove(p);
  1246. p.free;
  1247. p:=hp1;
  1248. result:=true;
  1249. end;
  1250. { This folds shifterops into following instructions
  1251. mov r0, r1, lsl #8
  1252. add r2, r3, r0
  1253. to
  1254. add r2, r3, r1, lsl #8
  1255. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1256. }
  1257. if (taicpu(p).opcode = A_MOV) and
  1258. (taicpu(p).ops = 3) and
  1259. (taicpu(p).oper[1]^.typ = top_reg) and
  1260. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1261. (taicpu(p).oppostfix = PF_NONE) and
  1262. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1263. MatchInstruction(hp1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1264. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1265. A_CMP, A_CMN],
  1266. [taicpu(p).condition], [PF_None]) and
  1267. (not ((GenerateThumb2Code) and
  1268. (taicpu(hp1).opcode in [A_SBC]) and
  1269. (((taicpu(hp1).ops=3) and
  1270. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^.reg)) or
  1271. ((taicpu(hp1).ops=2) and
  1272. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^.reg))))) and
  1273. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) and
  1274. (taicpu(hp1).ops >= 2) and
  1275. {Currently we can't fold into another shifterop}
  1276. (taicpu(hp1).oper[taicpu(hp1).ops-1]^.typ = top_reg) and
  1277. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1278. NR_DEFAULTFLAGS for modification}
  1279. (
  1280. {Everything is fine if we don't use RRX}
  1281. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1282. (
  1283. {If it is RRX, then check if we're just accessing the next instruction}
  1284. GetNextInstruction(p, hp2) and
  1285. (hp1 = hp2)
  1286. )
  1287. ) and
{ reg1 must not be modified in between }
not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
{ The shifterop can contain a register, which must not be modified }
  1291. (
  1292. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1293. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hp1))
  1294. ) and
  1295. (
  1296. {Only ONE of the two src operands is allowed to match}
  1297. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[taicpu(hp1).ops-2]^) xor
  1298. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[taicpu(hp1).ops-1]^)
  1299. ) then
  1300. begin
  1301. if taicpu(hp1).opcode in [A_TST, A_TEQ, A_CMN] then
  1302. I2:=0
  1303. else
  1304. I2:=1;
  1305. for I:=I2 to taicpu(hp1).ops-1 do
  1306. if MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[I]^.reg) then
  1307. begin
{ If the parameter matched on the second op from the RIGHT
we have to swap the parameters; this will not happen for CMP,
where we're only evaluating the rightmost parameter
}
  1312. if I <> taicpu(hp1).ops-1 then
  1313. begin
  1314. {The SUB operators need to be changed when we swap parameters}
  1315. case taicpu(hp1).opcode of
  1316. A_SUB: tempop:=A_RSB;
  1317. A_SBC: tempop:=A_RSC;
  1318. A_RSB: tempop:=A_SUB;
  1319. A_RSC: tempop:=A_SBC;
  1320. else tempop:=taicpu(hp1).opcode;
  1321. end;
  1322. if taicpu(hp1).ops = 3 then
  1323. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1324. taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[2]^.reg,
  1325. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1326. else
  1327. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1328. taicpu(hp1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1329. taicpu(p).oper[2]^.shifterop^);
  1330. end
  1331. else
  1332. if taicpu(hp1).ops = 3 then
  1333. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hp1).opcode,
  1334. taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[1]^.reg,
  1335. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1336. else
  1337. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hp1).opcode,
  1338. taicpu(hp1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1339. taicpu(p).oper[2]^.shifterop^);
  1340. asml.insertbefore(hp2, hp1);
  1341. GetNextInstruction(p, hp2);
  1342. asml.remove(p);
  1343. asml.remove(hp1);
  1344. p.free;
  1345. hp1.free;
  1346. p:=hp2;
  1347. DebugMsg('Peephole FoldShiftProcess done', p);
  1348. Result:=true;
  1349. break;
  1350. end;
  1351. end;
  1352. {
  1353. Fold
  1354. mov r1, r1, lsl #2
  1355. ldr/ldrb r0, [r0, r1]
  1356. to
  1357. ldr/ldrb r0, [r0, r1, lsl #2]
  1358. XXX: This still needs some work, as we quite often encounter something like
  1359. mov r1, r2, lsl #2
  1360. add r2, r3, #imm
  1361. ldr r0, [r2, r1]
  1362. which can't be folded because r2 is overwritten between the shift and the ldr.
We could try to shuffle the registers around and fold it into:
  1364. add r1, r3, #imm
  1365. ldr r0, [r1, r2, lsl #2]
  1366. }
  1367. if (not(GenerateThumbCode)) and
  1368. (taicpu(p).opcode = A_MOV) and
  1369. (taicpu(p).ops = 3) and
  1370. (taicpu(p).oper[1]^.typ = top_reg) and
  1371. (taicpu(p).oper[2]^.typ = top_shifterop) and
{ RRX is tough to handle, because it requires tracking the C-Flag;
it is also extremely unlikely to be emitted this way }
  1374. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1375. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1376. { thumb2 allows only lsl #0..#3 }
  1377. (not(GenerateThumb2Code) or
  1378. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1379. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1380. )
  1381. ) and
  1382. (taicpu(p).oppostfix = PF_NONE) and
  1383. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1384. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1385. (MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1386. (GenerateThumb2Code and
  1387. MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1388. ) and
  1389. (
{If this uses offset addressing, either one of the two registers can be used}
  1391. ((taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1392. (
  1393. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1394. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1395. )
  1396. ) or
  1397. {For post and preindexed only the index register can be used}
  1398. ((taicpu(hp1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1399. (
  1400. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1401. (taicpu(hp1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1402. ) and
  1403. (not GenerateThumb2Code)
  1404. )
  1405. ) and
  1406. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1407. (taicpu(hp1).oper[1]^.ref^.index<>NR_NO) and
  1408. (taicpu(hp1).oper[1]^.ref^.base<>NR_NO) and
  1409. { Only fold if there isn't another shifterop already, and offset is zero. }
  1410. (taicpu(hp1).oper[1]^.ref^.offset = 0) and
  1411. (taicpu(hp1).oper[1]^.ref^.shiftmode = SM_None) and
  1412. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
  1413. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1414. begin
  1415. { If the register we want to do the shift for resides in base, we need to swap that}
  1416. if (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1417. taicpu(hp1).oper[1]^.ref^.base := taicpu(hp1).oper[1]^.ref^.index;
  1418. taicpu(hp1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1419. taicpu(hp1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1420. taicpu(hp1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1421. DebugMsg('Peephole FoldShiftLdrStr done', hp1);
  1422. GetNextInstruction(p, hp1);
  1423. asml.remove(p);
  1424. p.free;
  1425. p:=hp1;
  1426. Result:=true;
  1427. end;
  1428. {
  1429. Often we see shifts and then a superfluous mov to another register
  1430. In the future this might be handled in RedundantMovProcess when it uses RegisterTracking
  1431. }
  1432. if (taicpu(p).opcode = A_MOV) and
  1433. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1434. RemoveSuperfluousMove(p, hp1, 'MovMov2Mov') then
  1435. Result:=true;
  1436. end;
  1437. A_ADD,
  1438. A_ADC,
  1439. A_RSB,
  1440. A_RSC,
  1441. A_SUB,
  1442. A_SBC,
  1443. A_AND,
  1444. A_BIC,
  1445. A_EOR,
  1446. A_ORR,
  1447. A_MLA,
  1448. A_MLS,
  1449. A_MUL:
  1450. begin
  1451. {
  1452. optimize
  1453. and reg2,reg1,const1
  1454. ...
  1455. }
  1456. if (taicpu(p).opcode = A_AND) and
  1457. (taicpu(p).ops>2) and
  1458. (taicpu(p).oper[1]^.typ = top_reg) and
  1459. (taicpu(p).oper[2]^.typ = top_const) then
  1460. begin
  1461. {
  1462. change
  1463. and reg2,reg1,const1
  1464. ...
  1465. and reg3,reg2,const2
  1466. to
  1467. and reg3,reg1,(const1 and const2)
  1468. }
  1469. if GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1470. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_None]) and
  1471. RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
  1472. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1473. (taicpu(hp1).oper[2]^.typ = top_const) then
  1474. begin
  1475. if not(RegUsedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) then
  1476. begin
  1477. DebugMsg('Peephole AndAnd2And done', p);
  1478. taicpu(p).loadConst(2,taicpu(p).oper[2]^.val and taicpu(hp1).oper[2]^.val);
  1479. taicpu(p).oppostfix:=taicpu(hp1).oppostfix;
  1480. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1481. asml.remove(hp1);
  1482. hp1.free;
  1483. Result:=true;
  1484. end
  1485. else if not(RegUsedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1486. begin
  1487. DebugMsg('Peephole AndAnd2And done', hp1);
  1488. taicpu(hp1).loadConst(2,taicpu(p).oper[2]^.val and taicpu(hp1).oper[2]^.val);
  1489. taicpu(hp1).oppostfix:=taicpu(p).oppostfix;
  1490. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1491. GetNextInstruction(p, hp1);
  1492. asml.remove(p);
  1493. p.free;
  1494. p:=hp1;
  1495. Result:=true;
  1496. end;
  1497. end
  1498. {
  1499. change
  1500. and reg2,reg1,$xxxxxxFF
  1501. strb reg2,[...]
  1502. dealloc reg2
  1503. to
  1504. strb reg1,[...]
  1505. }
  1506. else if ((taicpu(p).oper[2]^.val and $FF) = $FF) and
  1507. MatchInstruction(p, A_AND, [C_None], [PF_None]) and
  1508. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1509. MatchInstruction(hp1, A_STR, [C_None], [PF_B]) and
  1510. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
{ the reference in strb must not use reg2 }
not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
{ reg1 must not be modified in between }
  1514. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1515. begin
  1516. DebugMsg('Peephole AndStrb2Strb done', p);
  1517. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1518. GetNextInstruction(p, hp1);
  1519. asml.remove(p);
  1520. p.free;
  1521. p:=hp1;
  1522. result:=true;
  1523. end
  1524. {
  1525. change
  1526. and reg2,reg1,255
  1527. uxtb/uxth reg3,reg2
  1528. dealloc reg2
  1529. to
and reg3,reg1,255
  1531. }
  1532. else if (taicpu(p).oper[2]^.val = $FF) and
  1533. MatchInstruction(p, A_AND, [C_None], [PF_None]) and
  1534. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1535. MatchInstruction(hp1, [A_UXTB,A_UXTH], [C_None], [PF_None]) and
  1536. (taicpu(hp1).ops = 2) and
  1537. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
  1538. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
{ reg1 must not be modified in between }
  1540. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1541. begin
  1542. DebugMsg('Peephole AndUxt2And done', p);
  1543. taicpu(hp1).opcode:=A_AND;
  1544. taicpu(hp1).ops:=3;
  1545. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1546. taicpu(hp1).loadconst(2,255);
  1547. GetNextInstruction(p,hp1);
  1548. asml.remove(p);
  1549. p.Free;
  1550. p:=hp1;
  1551. result:=true;
  1552. end
  1553. {
  1554. from
  1555. and reg1,reg0,2^n-1
  1556. mov reg2,reg1, lsl imm1
  1557. (mov reg3,reg2, lsr/asr imm1)
  1558. remove either the and or the lsl/xsr sequence if possible
  1559. }
  1560. else if cutils.ispowerof2(taicpu(p).oper[2]^.val+1,i) and
  1561. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1562. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1563. (taicpu(hp1).ops=3) and
  1564. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1565. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1566. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) and
  1567. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1568. RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) then
  1569. begin
  1570. {
  1571. and reg1,reg0,2^n-1
  1572. mov reg2,reg1, lsl imm1
  1573. mov reg3,reg2, lsr/asr imm1
  1574. =>
and reg3,reg0,2^n-1
if (lsr and n<=32-imm1) or (asr and n<32-imm1)
  1577. }
  1578. if GetNextInstructionUsingReg(hp1,hp2,taicpu(p).oper[0]^.reg) and
  1579. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1580. (taicpu(hp2).ops=3) and
  1581. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1582. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1583. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) and
  1584. (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1585. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=taicpu(hp2).oper[2]^.shifterop^.shiftimm) and
  1586. RegEndOfLife(taicpu(hp1).oper[0]^.reg,taicpu(hp2)) and
  1587. ((i<32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) or
  1588. ((i=32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) and
  1589. (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSR))) then
  1590. begin
  1591. DebugMsg('Peephole AndLslXsr2And done', p);
  1592. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1593. asml.Remove(hp1);
  1594. asml.Remove(hp2);
  1595. hp1.free;
  1596. hp2.free;
  1597. result:=true;
  1598. end
  1599. {
  1600. and reg1,reg0,2^n-1
  1601. mov reg2,reg1, lsl imm1
  1602. =>
  1603. mov reg2,reg0, lsl imm1
if imm1>32-n
  1605. }
  1606. else if (i>32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) and
  1607. not(RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) then
  1608. begin
  1609. DebugMsg('Peephole AndLsl2Lsl done', p);
  1610. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1611. GetNextInstruction(p, hp1);
  1612. asml.Remove(p);
  1613. p.free;
  1614. p:=hp1;
  1615. result:=true;
  1616. end
  1617. end;
  1618. end;
  1619. {
  1620. change
  1621. add/sub reg2,reg1,const1
  1622. str/ldr reg3,[reg2,const2]
  1623. dealloc reg2
  1624. to
  1625. str/ldr reg3,[reg1,const2+/-const1]
  1626. }
  1627. if (not GenerateThumbCode) and
  1628. (taicpu(p).opcode in [A_ADD,A_SUB]) and
  1629. (taicpu(p).ops>2) and
  1630. (taicpu(p).oper[1]^.typ = top_reg) and
  1631. (taicpu(p).oper[2]^.typ = top_const) then
  1632. begin
  1633. hp1:=p;
  1634. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
  1635. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  1636. MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
  1637. (taicpu(hp1).oper[1]^.typ = top_ref) and
  1638. (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
  1639. { don't optimize if the register is stored/overwritten }
  1640. (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
  1641. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  1642. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
{ the new offset must be valid: either in the 8 or 12 bit range, depending on the
ldr postfix }
  1645. (((taicpu(p).opcode=A_ADD) and
  1646. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  1647. ) or
  1648. ((taicpu(p).opcode=A_SUB) and
  1649. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  1650. )
  1651. ) do
  1652. begin
{ neither reg1 nor reg2 may be changed in between }
  1654. if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
  1655. RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
  1656. break;
{ reg2 must either be overwritten by the ldr or be deallocated afterwards }
  1658. if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
  1659. assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
  1660. begin
  1661. { remember last instruction }
  1662. hp2:=hp1;
  1663. DebugMsg('Peephole Add/SubLdr2Ldr done', p);
  1664. hp1:=p;
  1665. { fix all ldr/str }
  1666. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
  1667. begin
  1668. taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
  1669. if taicpu(p).opcode=A_ADD then
  1670. inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
  1671. else
  1672. dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
  1673. if hp1=hp2 then
  1674. break;
  1675. end;
  1676. GetNextInstruction(p,hp1);
  1677. asml.remove(p);
  1678. p.free;
  1679. p:=hp1;
  1680. result:=true;
  1681. break;
  1682. end;
  1683. end;
  1684. end;
  1685. {
  1686. change
  1687. add reg1, ...
  1688. mov reg2, reg1
  1689. to
  1690. add reg2, ...
  1691. }
  1692. if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1693. (taicpu(p).ops>=3) and
  1694. RemoveSuperfluousMove(p, hp1, 'DataMov2Data') then
  1695. Result:=true;
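{ try to turn an add/sub of a constant into pre-indexed addressing of a
neighbouring ldr/str, e.g. add r1,r1,#4 followed by ldr r0,[r1] becoming
ldr r0,[r1,#4]!; LookForPreindexedPattern presumably rewrites the memory
access itself, so only the add/sub has to be removed here }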
  1696. if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
  1697. LookForPreindexedPattern(taicpu(p)) then
  1698. begin
  1699. GetNextInstruction(p,hp1);
  1700. DebugMsg('Peephole Add/Sub to Preindexed done', p);
  1701. asml.remove(p);
  1702. p.free;
  1703. p:=hp1;
  1704. Result:=true;
  1705. end;
  1706. {
  1707. Turn
  1708. mul reg0, z,w
  1709. sub/add x, y, reg0
  1710. dealloc reg0
  1711. into
  1712. mls/mla x,z,w,y
  1713. }
  1714. if MatchInstruction(p, [A_MUL], [C_None], [PF_None]) and
  1715. (taicpu(p).ops=3) and
  1716. (taicpu(p).oper[0]^.typ = top_reg) and
  1717. (taicpu(p).oper[1]^.typ = top_reg) and
  1718. (taicpu(p).oper[2]^.typ = top_reg) and
  1719. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1720. MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
  1721. (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
  1722. (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
  1723. (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
  1724. ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
// On CPUs before ARMv6, having the same Rd and Rm for MLA is not recommended.
  1726. // TODO: A workaround would be to swap Rm and Rs
  1727. (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
  1728. (((taicpu(hp1).ops=3) and
  1729. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1730. ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
  1731. (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
  1732. ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1733. (taicpu(hp1).opcode=A_ADD) and
  1734. (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
  1735. ((taicpu(hp1).ops=2) and
  1736. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1737. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1738. (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
  1739. begin
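{ MLA/MLS have the form op Rd,Rm,Rs,Rn with Rd := Rn +/- Rm*Rs, so below the
mul's two factors are loaded into operands 1 and 2 and the remaining
addend/minuend into operand 3 }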
  1740. if taicpu(hp1).opcode=A_ADD then
  1741. begin
  1742. taicpu(hp1).opcode:=A_MLA;
  1743. if taicpu(hp1).ops=3 then
  1744. begin
  1745. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
  1746. oldreg:=taicpu(hp1).oper[2]^.reg
  1747. else
  1748. oldreg:=taicpu(hp1).oper[1]^.reg;
  1749. end
  1750. else
  1751. oldreg:=taicpu(hp1).oper[0]^.reg;
  1752. taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
  1753. taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
  1754. taicpu(hp1).loadreg(3,oldreg);
  1755. DebugMsg('MulAdd2MLA done', p);
  1756. taicpu(hp1).ops:=4;
  1757. asml.remove(p);
  1758. p.free;
  1759. p:=hp1;
  1760. end
  1761. else
  1762. begin
  1763. taicpu(hp1).opcode:=A_MLS;
  1764. taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
  1765. if taicpu(hp1).ops=2 then
  1766. taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
  1767. else
  1768. taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
  1769. taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
  1770. DebugMsg('MulSub2MLS done', p);
  1771. taicpu(hp1).ops:=4;
  1772. asml.remove(p);
  1773. p.free;
  1774. p:=hp1;
  1775. end;
  1776. result:=true;
  1777. end
  1778. end;
  1779. {$ifdef dummy}
  1780. A_MVN:
  1781. begin
  1782. {
  1783. change
  1784. mvn reg2,reg1
  1785. and reg3,reg4,reg2
  1786. dealloc reg2
  1787. to
  1788. bic reg3,reg4,reg1
  1789. }
  1790. if (taicpu(p).oper[1]^.typ = top_reg) and
  1791. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1792. MatchInstruction(hp1,A_AND,[],[]) and
  1793. (((taicpu(hp1).ops=3) and
  1794. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1795. (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
  1796. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
  1797. ((taicpu(hp1).ops=2) and
  1798. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1799. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1800. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
{ reg1 must not be modified in between }
  1802. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1803. begin
  1804. DebugMsg('Peephole MvnAnd2Bic done', p);
  1805. taicpu(hp1).opcode:=A_BIC;
  1806. if taicpu(hp1).ops=3 then
  1807. begin
  1808. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1809. taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
  1810. taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
  1811. end
  1812. else
  1813. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1814. GetNextInstruction(p, hp1);
  1815. asml.remove(p);
  1816. p.free;
  1817. p:=hp1;
  1818. end;
  1819. end;
  1820. {$endif dummy}
  1821. A_UXTB:
  1822. begin
  1823. {
  1824. change
  1825. uxtb reg2,reg1
  1826. strb reg2,[...]
  1827. dealloc reg2
  1828. to
  1829. strb reg1,[...]
  1830. }
  1831. if MatchInstruction(p, taicpu(p).opcode, [C_None], [PF_None]) and
  1832. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1833. MatchInstruction(hp1, A_STR, [C_None], [PF_B]) and
  1834. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
{ the reference in strb must not use reg2 }
not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
{ reg1 must not be modified in between }
  1838. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1839. begin
  1840. DebugMsg('Peephole UxtbStrb2Strb done', p);
  1841. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1842. GetNextInstruction(p,hp2);
  1843. asml.remove(p);
  1844. p.free;
  1845. p:=hp2;
  1846. result:=true;
  1847. end
  1848. {
  1849. change
  1850. uxtb reg2,reg1
  1851. uxth reg3,reg2
  1852. dealloc reg2
  1853. to
  1854. uxtb reg3,reg1
  1855. }
  1856. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1857. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1858. MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
  1859. (taicpu(hp1).ops = 2) and
  1860. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1861. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ reg1 must not be modified in between }
  1863. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1864. begin
  1865. DebugMsg('Peephole UxtbUxth2Uxtb done', p);
  1866. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1867. asml.remove(hp1);
  1868. hp1.free;
  1869. result:=true;
  1870. end
  1871. {
  1872. change
  1873. uxtb reg2,reg1
  1874. uxtb reg3,reg2
  1875. dealloc reg2
  1876. to
  1877. uxtb reg3,reg1
  1878. }
  1879. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1880. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1881. MatchInstruction(hp1, A_UXTB, [C_None], [PF_None]) and
  1882. (taicpu(hp1).ops = 2) and
  1883. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1884. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ reg1 must not be modified in between }
  1886. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1887. begin
  1888. DebugMsg('Peephole UxtbUxtb2Uxtb done', p);
  1889. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1890. asml.remove(hp1);
  1891. hp1.free;
  1892. result:=true;
  1893. end
  1894. {
  1895. change
  1896. uxtb reg2,reg1
  1897. and reg3,reg2,#0x*FF
  1898. dealloc reg2
  1899. to
  1900. uxtb reg3,reg1
  1901. }
  1902. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1903. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1904. (taicpu(p).ops=2) and
  1905. MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
  1906. (taicpu(hp1).ops=3) and
  1907. (taicpu(hp1).oper[2]^.typ=top_const) and
  1908. ((taicpu(hp1).oper[2]^.val and $FF)=$FF) and
  1909. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1910. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ reg1 must not be modified in between }
  1912. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1913. begin
  1914. DebugMsg('Peephole UxtbAndImm2Uxtb done', p);
  1915. taicpu(hp1).opcode:=A_UXTB;
  1916. taicpu(hp1).ops:=2;
  1917. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1918. GetNextInstruction(p,hp2);
  1919. asml.remove(p);
  1920. p.free;
  1921. p:=hp2;
  1922. result:=true;
  1923. end
  1924. else if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1925. RemoveSuperfluousMove(p, hp1, 'UxtbMov2Data') then
  1926. Result:=true;
  1927. end;
  1928. A_UXTH:
  1929. begin
  1930. {
  1931. change
  1932. uxth reg2,reg1
  1933. strh reg2,[...]
  1934. dealloc reg2
  1935. to
  1936. strh reg1,[...]
  1937. }
  1938. if MatchInstruction(p, taicpu(p).opcode, [C_None], [PF_None]) and
  1939. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1940. MatchInstruction(hp1, A_STR, [C_None], [PF_H]) and
  1941. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ the reference in strh must not use reg2 }
not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
{ reg1 must not be modified in between }
  1945. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1946. begin
  1947. DebugMsg('Peephole UXTHStrh2Strh done', p);
  1948. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1949. GetNextInstruction(p, hp1);
  1950. asml.remove(p);
  1951. p.free;
  1952. p:=hp1;
  1953. result:=true;
  1954. end
  1955. {
  1956. change
  1957. uxth reg2,reg1
  1958. uxth reg3,reg2
  1959. dealloc reg2
  1960. to
  1961. uxth reg3,reg1
  1962. }
  1963. else if MatchInstruction(p, A_UXTH, [C_None], [PF_None]) and
  1964. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1965. MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
  1966. (taicpu(hp1).ops=2) and
  1967. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1968. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ reg1 must not be modified in between }
  1970. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1971. begin
  1972. DebugMsg('Peephole UxthUxth2Uxth done', p);
  1973. taicpu(hp1).opcode:=A_UXTH;
  1974. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1975. GetNextInstruction(p, hp1);
  1976. asml.remove(p);
  1977. p.free;
  1978. p:=hp1;
  1979. result:=true;
  1980. end
  1981. {
  1982. change
  1983. uxth reg2,reg1
  1984. and reg3,reg2,#65535
  1985. dealloc reg2
  1986. to
  1987. uxth reg3,reg1
  1988. }
  1989. else if MatchInstruction(p, A_UXTH, [C_None], [PF_None]) and
  1990. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1991. MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
  1992. (taicpu(hp1).ops=3) and
  1993. (taicpu(hp1).oper[2]^.typ=top_const) and
  1994. ((taicpu(hp1).oper[2]^.val and $FFFF)=$FFFF) and
  1995. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1996. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
{ reg1 must not be modified in between }
  1998. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1999. begin
  2000. DebugMsg('Peephole UxthAndImm2Uxth done', p);
  2001. taicpu(hp1).opcode:=A_UXTH;
  2002. taicpu(hp1).ops:=2;
  2003. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  2004. GetNextInstruction(p, hp1);
  2005. asml.remove(p);
  2006. p.free;
  2007. p:=hp1;
  2008. result:=true;
  2009. end
  2010. else if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  2011. RemoveSuperfluousMove(p, hp1, 'UxthMov2Data') then
  2012. Result:=true;
  2013. end;
  2014. A_CMP:
  2015. begin
  2016. {
  2017. change
  2018. cmp reg,const1
  2019. moveq reg,const1
  2020. movne reg,const2
  2021. to
  2022. cmp reg,const1
  2023. movne reg,const2
  2024. }
  2025. if (taicpu(p).oper[1]^.typ = top_const) and
  2026. GetNextInstruction(p, hp1) and
  2027. MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  2028. (taicpu(hp1).oper[1]^.typ = top_const) and
  2029. GetNextInstruction(hp1, hp2) and
  2030. MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
(taicpu(hp2).oper[1]^.typ = top_const) then
  2032. begin
  2033. Result:=RemoveRedundantMove(p, hp1, asml) or Result;
  2034. Result:=RemoveRedundantMove(p, hp2, asml) or Result;
  2035. end;
  2036. end;
  2037. A_STM:
  2038. begin
  2039. {
  2040. change
  2041. stmfd r13!,[r14]
  2042. sub r13,r13,#4
  2043. bl abc
  2044. add r13,r13,#4
  2045. ldmfd r13!,[r15]
  2046. into
  2047. b abc
  2048. }
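{ i.e. a call immediately followed by the function return: the saved lr is
popped straight into pc, so branching to abc directly (a tail call) has the
same effect and the stack handling around the call can be dropped }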
  2049. if not(ts_thumb_interworking in current_settings.targetswitches) and
  2050. MatchInstruction(p, A_STM, [C_None], [PF_FD]) and
  2051. GetNextInstruction(p, hp1) and
  2052. GetNextInstruction(hp1, hp2) and
  2053. SkipEntryExitMarker(hp2, hp2) and
  2054. GetNextInstruction(hp2, hp3) and
  2055. SkipEntryExitMarker(hp3, hp3) and
  2056. GetNextInstruction(hp3, hp4) and
  2057. (taicpu(p).oper[0]^.typ = top_ref) and
  2058. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2059. (taicpu(p).oper[0]^.ref^.base=NR_NO) and
  2060. (taicpu(p).oper[0]^.ref^.offset=0) and
  2061. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2062. (taicpu(p).oper[1]^.typ = top_regset) and
  2063. (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
  2064. MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
  2065. (taicpu(hp1).oper[0]^.typ = top_reg) and
  2066. (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
  2067. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
  2068. (taicpu(hp1).oper[2]^.typ = top_const) and
  2069. MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
  2070. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
  2071. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
  2072. MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
  2073. MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
  2074. (taicpu(hp2).oper[0]^.typ = top_ref) and
  2075. MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
  2076. MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
  2077. (taicpu(hp4).oper[1]^.typ = top_regset) and
  2078. (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
  2079. begin
  2080. asml.Remove(p);
  2081. asml.Remove(hp1);
  2082. asml.Remove(hp3);
  2083. asml.Remove(hp4);
  2084. taicpu(hp2).opcode:=A_B;
  2085. p.free;
  2086. hp1.free;
  2087. hp3.free;
  2088. hp4.free;
  2089. p:=hp2;
  2090. DebugMsg('Peephole Bl2B done', p);
  2091. end;
  2092. end;
  2093. A_VADD,
  2094. A_VMUL,
  2095. A_VDIV,
  2096. A_VSUB,
  2097. A_VSQRT,
  2098. A_VNEG,
  2099. A_VCVT,
  2100. A_VABS:
  2101. begin
  2102. if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  2103. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp') then
  2104. Result:=true;
  2105. end
  2106. end;
  2107. end;
  2108. end;
  2109. end;
{ instructions modifying the CPSR can only be the last instruction }
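{ this covers the compare/test instructions and PF_S forms (they write the
flags the conditionalised block depends on) as well as anything writing to
the PC; none of these may be followed by further conditionalised instructions }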
  2111. function MustBeLast(p : tai) : boolean;
  2112. begin
  2113. Result:=(p.typ=ait_instruction) and
  2114. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  2115. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  2116. (taicpu(p).oppostfix=PF_S));
  2117. end;
  2118. procedure TCpuAsmOptimizer.PeepHoleOptPass2;
  2119. var
  2120. p,hp1,hp2: tai;
  2121. l : longint;
  2122. condition : tasmcond;
  2123. hp3: tai;
  2124. WasLast: boolean;
  2125. { UsedRegs, TmpUsedRegs: TRegSet; }
  2126. begin
  2127. p := BlockStart;
  2128. { UsedRegs := []; }
  2129. while (p <> BlockEnd) Do
  2130. begin
  2131. { UpdateUsedRegs(UsedRegs, tai(p.next)); }
  2132. case p.Typ Of
  2133. Ait_Instruction:
  2134. begin
  2135. case taicpu(p).opcode Of
  2136. A_B:
  2137. if (taicpu(p).condition<>C_None) and
  2138. not(GenerateThumbCode) then
  2139. begin
  2140. { check for
  2141. Bxx xxx
  2142. <several instructions>
  2143. xxx:
  2144. }
  2145. l:=0;
  2146. WasLast:=False;
  2147. GetNextInstruction(p, hp1);
  2148. while assigned(hp1) and
  2149. (l<=4) and
  2150. CanBeCond(hp1) and
  2151. { stop on labels }
  2152. not(hp1.typ=ait_label) do
  2153. begin
  2154. inc(l);
  2155. if MustBeLast(hp1) then
  2156. begin
  2157. WasLast:=True;
  2158. GetNextInstruction(hp1,hp1);
  2159. break;
  2160. end
  2161. else
  2162. GetNextInstruction(hp1,hp1);
  2163. end;
  2164. if assigned(hp1) then
  2165. begin
  2166. if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2167. begin
  2168. if (l<=4) and (l>0) then
  2169. begin
  2170. condition:=inverse_cond(taicpu(p).condition);
  2171. hp2:=p;
  2172. GetNextInstruction(p,hp1);
  2173. p:=hp1;
  2174. repeat
  2175. if hp1.typ=ait_instruction then
  2176. taicpu(hp1).condition:=condition;
  2177. if MustBeLast(hp1) then
  2178. begin
  2179. GetNextInstruction(hp1,hp1);
  2180. break;
  2181. end
  2182. else
  2183. GetNextInstruction(hp1,hp1);
  2184. until not(assigned(hp1)) or
  2185. not(CanBeCond(hp1)) or
  2186. (hp1.typ=ait_label);
{ wait with the removal, else GetNextInstruction could
ignore the label if its only usage was in the
jump we moved away }
  2190. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2191. asml.remove(hp2);
  2192. hp2.free;
  2193. continue;
  2194. end;
  2195. end
  2196. else
{ do not perform further optimizations if there is an instruction
in block #1 which cannot be optimized.
}
  2200. if not WasLast then
  2201. begin
  2202. { check further for
  2203. Bcc xxx
  2204. <several instructions 1>
  2205. B yyy
  2206. xxx:
  2207. <several instructions 2>
  2208. yyy:
  2209. }
  2210. { hp2 points to jmp yyy }
  2211. hp2:=hp1;
  2212. { skip hp1 to xxx }
  2213. GetNextInstruction(hp1, hp1);
  2214. if assigned(hp2) and
  2215. assigned(hp1) and
  2216. (l<=3) and
  2217. (hp2.typ=ait_instruction) and
  2218. (taicpu(hp2).is_jmp) and
  2219. (taicpu(hp2).condition=C_None) and
  2220. { real label and jump, no further references to the
  2221. label are allowed }
  2222. (tasmlabel(taicpu(p).oper[0]^.ref^.symbol).getrefs=2) and
  2223. FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2224. begin
  2225. l:=0;
  2226. { skip hp1 to <several moves 2> }
  2227. GetNextInstruction(hp1, hp1);
  2228. while assigned(hp1) and
  2229. CanBeCond(hp1) do
  2230. begin
  2231. inc(l);
  2232. GetNextInstruction(hp1, hp1);
  2233. end;
  2234. { hp1 points to yyy: }
  2235. if assigned(hp1) and
  2236. FindLabel(tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol),hp1) then
  2237. begin
  2238. condition:=inverse_cond(taicpu(p).condition);
  2239. GetNextInstruction(p,hp1);
  2240. hp3:=p;
  2241. p:=hp1;
  2242. repeat
  2243. if hp1.typ=ait_instruction then
  2244. taicpu(hp1).condition:=condition;
  2245. GetNextInstruction(hp1,hp1);
  2246. until not(assigned(hp1)) or
  2247. not(CanBeCond(hp1));
  2248. { hp2 is still at jmp yyy }
  2249. GetNextInstruction(hp2,hp1);
  2250. { hp2 is now at xxx: }
  2251. condition:=inverse_cond(condition);
  2252. GetNextInstruction(hp1,hp1);
  2253. { hp1 is now at <several movs 2> }
  2254. repeat
  2255. taicpu(hp1).condition:=condition;
  2256. GetNextInstruction(hp1,hp1);
  2257. until not(assigned(hp1)) or
  2258. not(CanBeCond(hp1)) or
  2259. (hp1.typ=ait_label);
  2260. {
  2261. asml.remove(hp1.next)
  2262. hp1.next.free;
  2263. asml.remove(hp1);
  2264. hp1.free;
  2265. }
  2266. { remove Bcc }
  2267. tasmlabel(taicpu(hp3).oper[0]^.ref^.symbol).decrefs;
  2268. asml.remove(hp3);
  2269. hp3.free;
  2270. { remove jmp }
  2271. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2272. asml.remove(hp2);
  2273. hp2.free;
  2274. continue;
  2275. end;
  2276. end;
  2277. end;
  2278. end;
  2279. end;
  2280. end;
  2281. end;
  2282. end;
  2283. p := tai(p.next)
  2284. end;
  2285. end;
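{ BL is treated as using any register (a call may presumably use or clobber
it), and for LDR/STR with the D postfix the implicit second register of the
pair, i.e. oper[0]+1, is reported as part of the instruction as well }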
  2286. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  2287. begin
  2288. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  2289. Result:=true
  2290. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  2291. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  2292. Result:=true
  2293. else
  2294. Result:=inherited RegInInstruction(Reg, p1);
  2295. end;
  2296. const
{ set of opcodes which might or do write to memory }
  2298. { TODO : extend armins.dat to contain r/w info }
  2299. opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
  2300. A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
  2301. { adjust the register live information when swapping the two instructions p and hp1,
  2302. they must follow one after the other }
  2303. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
  2304. procedure CheckLiveEnd(reg : tregister);
  2305. var
  2306. supreg : TSuperRegister;
  2307. regtype : TRegisterType;
  2308. begin
  2309. if reg=NR_NO then
  2310. exit;
  2311. regtype:=getregtype(reg);
  2312. supreg:=getsupreg(reg);
  2313. if (cg.rg[regtype].live_end[supreg]=hp1) and
  2314. RegInInstruction(reg,p) then
  2315. cg.rg[regtype].live_end[supreg]:=p;
  2316. end;
  2317. procedure CheckLiveStart(reg : TRegister);
  2318. var
  2319. supreg : TSuperRegister;
  2320. regtype : TRegisterType;
  2321. begin
  2322. if reg=NR_NO then
  2323. exit;
  2324. regtype:=getregtype(reg);
  2325. supreg:=getsupreg(reg);
  2326. if (cg.rg[regtype].live_start[supreg]=p) and
  2327. RegInInstruction(reg,hp1) then
  2328. cg.rg[regtype].live_start[supreg]:=hp1;
  2329. end;
  2330. var
  2331. i : longint;
  2332. r : TSuperRegister;
  2333. begin
  2334. { assumption: p is directly followed by hp1 }
  2335. { if live of any reg used by p starts at p and hp1 uses this register then
  2336. set live start to hp1 }
  2337. for i:=0 to p.ops-1 do
  2338. case p.oper[i]^.typ of
  2339. Top_Reg:
  2340. CheckLiveStart(p.oper[i]^.reg);
  2341. Top_Ref:
  2342. begin
  2343. CheckLiveStart(p.oper[i]^.ref^.base);
  2344. CheckLiveStart(p.oper[i]^.ref^.index);
  2345. end;
  2346. Top_Shifterop:
  2347. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2348. Top_RegSet:
  2349. for r:=RS_R0 to RS_R15 do
  2350. if r in p.oper[i]^.regset^ then
  2351. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2352. end;
  2353. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2354. set live end to p }
  2355. for i:=0 to hp1.ops-1 do
  2356. case hp1.oper[i]^.typ of
  2357. Top_Reg:
  2358. CheckLiveEnd(hp1.oper[i]^.reg);
  2359. Top_Ref:
  2360. begin
  2361. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2362. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2363. end;
  2364. Top_Shifterop:
CheckLiveEnd(hp1.oper[i]^.shifterop^.rs);
  2366. Top_RegSet:
  2367. for r:=RS_R0 to RS_R15 do
  2368. if r in hp1.oper[i]^.regset^ then
  2369. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2370. end;
  2371. end;
  2372. function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
  2373. { TODO : schedule also forward }
  2374. { TODO : schedule distance > 1 }
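{ this pass tries to swap an instruction with a directly following load when
the loaded value is needed by the instruction right after the load; moving
the load one slot earlier should help to hide the load-use latency on simple
in-order cores }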
  2375. { returns true if p might be a load of a pc relative tls offset }
  2376. function PossibleTLSLoad(const p: tai) : boolean;
  2377. begin
  2378. Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
  2379. (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
  2380. (taicpu(p).oper[1]^.ref^.index=NR_PC)));
  2381. end;
  2382. var
  2383. hp1,hp2,hp3,hp4,hp5,insertpos : tai;
  2384. list : TAsmList;
  2385. begin
  2386. result:=true;
  2387. list:=TAsmList.create;
  2388. p:=BlockStart;
  2389. while p<>BlockEnd Do
  2390. begin
  2391. if (p.typ=ait_instruction) and
  2392. GetNextInstruction(p,hp1) and
  2393. (hp1.typ=ait_instruction) and
  2394. (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
  2395. (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
{ for now we don't reschedule if the previous instruction potentially changes a memory location }
  2397. ( (not(taicpu(p).opcode in opcode_could_mem_write) and
  2398. not(RegModifiedByInstruction(NR_PC,p))
  2399. ) or
  2400. ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
  2401. ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
  2402. (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
  2403. (taicpu(hp1).oper[1]^.ref^.offset=0)
  2404. )
  2405. ) or
{ try to prove that the memory accesses don't overlap }
  2407. ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
  2408. (taicpu(p).oper[1]^.typ = top_ref) and
  2409. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  2410. (taicpu(p).oppostfix=PF_None) and
  2411. (taicpu(hp1).oppostfix=PF_None) and
  2412. (taicpu(p).oper[1]^.ref^.index=NR_NO) and
  2413. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
{ get operand sizes and check if the offset distance is large enough to ensure no overlap }
  2415. (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
  2416. )
  2417. )
  2418. ) and
  2419. GetNextInstruction(hp1,hp2) and
  2420. (hp2.typ=ait_instruction) and
  2421. { loaded register used by next instruction? }
  2422. (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
  2423. { loaded register not used by previous instruction? }
  2424. not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
  2425. { same condition? }
  2426. (taicpu(p).condition=taicpu(hp1).condition) and
{ the first instruction must not change the register used as base }
  2428. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
  2429. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
  2430. ) and
{ the first instruction must not change the register used as index }
  2432. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
  2433. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
  2434. ) and
{ if we modify the basereg AND the first instruction uses that reg, we cannot schedule }
  2436. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
  2437. not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
  2438. not(PossibleTLSLoad(p)) and
  2439. not(PossibleTLSLoad(hp1)) then
  2440. begin
  2441. hp3:=tai(p.Previous);
  2442. hp5:=tai(p.next);
  2443. asml.Remove(p);
{ if there are reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
associated with p, move them together with p }
  2446. { before the instruction? }
  2447. { find reg allocs,deallocs and PIC labels }
  2448. while assigned(hp3) and (hp3.typ<>ait_instruction) do
  2449. begin
  2450. if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
  2451. RegInInstruction(tai_regalloc(hp3).reg,p) )
  2452. or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
  2453. then
  2454. begin
  2455. hp4:=hp3;
  2456. hp3:=tai(hp3.Previous);
  2457. asml.Remove(hp4);
  2458. list.Insert(hp4);
  2459. end
  2460. else
  2461. hp3:=tai(hp3.Previous);
  2462. end;
  2463. list.Concat(p);
  2464. SwapRegLive(taicpu(p),taicpu(hp1));
  2465. { after the instruction? }
  2466. { find reg deallocs and reg syncs }
  2467. while assigned(hp5) and (hp5.typ<>ait_instruction) do
  2468. begin
  2469. if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
  2470. RegInInstruction(tai_regalloc(hp5).reg,p) then
  2471. begin
  2472. hp4:=hp5;
  2473. hp5:=tai(hp5.next);
  2474. asml.Remove(hp4);
  2475. list.Concat(hp4);
  2476. end
  2477. else
  2478. hp5:=tai(hp5.Next);
  2479. end;
  2480. asml.Remove(hp1);
  2481. { if there are address labels associated with hp2, those must
  2482. stay with hp2 (e.g. for GOT-less PIC) }
  2483. insertpos:=hp2;
  2484. while assigned(hp2.previous) and
  2485. (tai(hp2.previous).typ<>ait_instruction) do
  2486. begin
  2487. hp2:=tai(hp2.previous);
  2488. if (hp2.typ=ait_label) and
  2489. (tai_label(hp2).labsym.typ=AT_ADDR) then
  2490. insertpos:=hp2;
  2491. end;
  2492. {$ifdef DEBUG_PREREGSCHEDULER}
  2493. asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
  2494. {$endif DEBUG_PREREGSCHEDULER}
  2495. asml.InsertBefore(hp1,insertpos);
  2496. asml.InsertListBefore(insertpos,list);
  2497. p:=tai(p.next);
  2498. end
  2499. else if p.typ=ait_instruction then
  2500. p:=hp1
  2501. else
  2502. p:=tai(p.next);
  2503. end;
  2504. list.Free;
  2505. end;
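{ walk backwards (at most 4 instructions) from p and shorten a preceding
Thumb-2 IT block by one conditional slot, e.g. ITT becomes IT; an IT that
covered only a single instruction is removed entirely }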
  2506. procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  2507. var
  2508. hp : tai;
  2509. l : longint;
  2510. begin
  2511. hp := tai(p.Previous);
  2512. l := 1;
  2513. while assigned(hp) and
  2514. (l <= 4) do
  2515. begin
  2516. if hp.typ=ait_instruction then
  2517. begin
  2518. if (taicpu(hp).opcode>=A_IT) and
  2519. (taicpu(hp).opcode <= A_ITTTT) then
  2520. begin
  2521. if (taicpu(hp).opcode = A_IT) and
  2522. (l=1) then
  2523. list.Remove(hp)
  2524. else
  2525. case taicpu(hp).opcode of
  2526. A_ITE:
  2527. if l=2 then taicpu(hp).opcode := A_IT;
  2528. A_ITT:
  2529. if l=2 then taicpu(hp).opcode := A_IT;
  2530. A_ITEE:
  2531. if l=3 then taicpu(hp).opcode := A_ITE;
  2532. A_ITTE:
  2533. if l=3 then taicpu(hp).opcode := A_ITT;
  2534. A_ITET:
  2535. if l=3 then taicpu(hp).opcode := A_ITE;
  2536. A_ITTT:
  2537. if l=3 then taicpu(hp).opcode := A_ITT;
  2538. A_ITEEE:
  2539. if l=4 then taicpu(hp).opcode := A_ITEE;
  2540. A_ITTEE:
  2541. if l=4 then taicpu(hp).opcode := A_ITTE;
  2542. A_ITETE:
  2543. if l=4 then taicpu(hp).opcode := A_ITET;
  2544. A_ITTTE:
  2545. if l=4 then taicpu(hp).opcode := A_ITTT;
  2546. A_ITEET:
  2547. if l=4 then taicpu(hp).opcode := A_ITEE;
  2548. A_ITTET:
  2549. if l=4 then taicpu(hp).opcode := A_ITTE;
  2550. A_ITETT:
  2551. if l=4 then taicpu(hp).opcode := A_ITET;
  2552. A_ITTTT:
  2553. if l=4 then taicpu(hp).opcode := A_ITTT;
  2554. end;
  2555. break;
  2556. end;
  2557. {else if (taicpu(hp).condition<>taicpu(p).condition) or
  2558. (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
  2559. break;}
  2560. inc(l);
  2561. end;
  2562. hp := tai(hp.Previous);
  2563. end;
  2564. end;
  2565. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2566. var
  2567. hp : taicpu;
  2568. //hp1,hp2 : tai;
  2569. begin
  2570. result:=false;
  2571. if inherited PeepHoleOptPass1Cpu(p) then
  2572. result:=true
  2573. else if (p.typ=ait_instruction) and
  2574. MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2575. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2576. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2577. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2578. begin
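{ stmfd/stmdb sp!,{...} with only low registers (and optionally lr) is
equivalent to PUSH, which presumably allows the shorter 16-bit Thumb
encoding; the LDM to POP rewrite below follows the same reasoning }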
  2579. DebugMsg('Peephole Stm2Push done', p);
  2580. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2581. AsmL.InsertAfter(hp, p);
  2582. asml.Remove(p);
  2583. p:=hp;
  2584. result:=true;
  2585. end
  2586. {else if (p.typ=ait_instruction) and
  2587. MatchInstruction(p, A_STR, [C_None], [PF_None]) and
  2588. (taicpu(p).oper[1]^.ref^.addressmode=AM_PREINDEXED) and
  2589. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2590. (taicpu(p).oper[1]^.ref^.offset=-4) and
  2591. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,14]) then
  2592. begin
  2593. DebugMsg('Peephole Str2Push done', p);
  2594. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2595. asml.InsertAfter(hp, p);
  2596. asml.Remove(p);
  2597. p.Free;
  2598. p:=hp;
  2599. result:=true;
  2600. end}
  2601. else if (p.typ=ait_instruction) and
  2602. MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2603. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2604. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2605. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2606. begin
  2607. DebugMsg('Peephole Ldm2Pop done', p);
  2608. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2609. asml.InsertBefore(hp, p);
  2610. asml.Remove(p);
  2611. p.Free;
  2612. p:=hp;
  2613. result:=true;
  2614. end
  2615. {else if (p.typ=ait_instruction) and
  2616. MatchInstruction(p, A_LDR, [C_None], [PF_None]) and
  2617. (taicpu(p).oper[1]^.ref^.addressmode=AM_POSTINDEXED) and
  2618. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2619. (taicpu(p).oper[1]^.ref^.offset=4) and
  2620. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,15]) then
  2621. begin
  2622. DebugMsg('Peephole Ldr2Pop done', p);
  2623. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2624. asml.InsertBefore(hp, p);
  2625. asml.Remove(p);
  2626. p.Free;
  2627. p:=hp;
  2628. result:=true;
  2629. end}
  2630. else if (p.typ=ait_instruction) and
  2631. MatchInstruction(p, [A_AND], [], [PF_None]) and
  2632. (taicpu(p).ops = 2) and
  2633. (taicpu(p).oper[1]^.typ=top_const) and
  2634. ((taicpu(p).oper[1]^.val=255) or
  2635. (taicpu(p).oper[1]^.val=65535)) then
  2636. begin
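{ anding with 255/65535 is a plain zero-extension; UXTB/UXTH express this
directly and, for low registers, have 16-bit Thumb encodings, which is
presumably the motivation here }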
  2637. DebugMsg('Peephole AndR2Uxt done', p);
  2638. if taicpu(p).oper[1]^.val=255 then
  2639. taicpu(p).opcode:=A_UXTB
  2640. else
  2641. taicpu(p).opcode:=A_UXTH;
  2642. taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
  2643. result := true;
  2644. end
  2645. else if (p.typ=ait_instruction) and
  2646. MatchInstruction(p, [A_AND], [], [PF_None]) and
  2647. (taicpu(p).ops = 3) and
  2648. (taicpu(p).oper[2]^.typ=top_const) and
  2649. ((taicpu(p).oper[2]^.val=255) or
  2650. (taicpu(p).oper[2]^.val=65535)) then
  2651. begin
  2652. DebugMsg('Peephole AndRR2Uxt done', p);
  2653. if taicpu(p).oper[2]^.val=255 then
  2654. taicpu(p).opcode:=A_UXTB
  2655. else
  2656. taicpu(p).opcode:=A_UXTH;
  2657. taicpu(p).ops:=2;
  2658. result := true;
  2659. end
  2660. {else if (p.typ=ait_instruction) and
  2661. MatchInstruction(p, [A_CMP], [C_None], [PF_None]) and
  2662. (taicpu(p).oper[1]^.typ=top_const) and
  2663. (taicpu(p).oper[1]^.val=0) and
  2664. GetNextInstruction(p,hp1) and
  2665. (taicpu(hp1).opcode=A_B) and
  2666. (taicpu(hp1).condition in [C_EQ,C_NE]) then
  2667. begin
  2668. if taicpu(hp1).condition = C_EQ then
  2669. hp2:=taicpu.op_reg_ref(A_CBZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^)
  2670. else
  2671. hp2:=taicpu.op_reg_ref(A_CBNZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^);
  2672. taicpu(hp2).is_jmp := true;
  2673. asml.InsertAfter(hp2, hp1);
  2674. asml.Remove(hp1);
  2675. hp1.Free;
  2676. asml.Remove(p);
  2677. p.Free;
  2678. p := hp2;
  2679. result := true;
  2680. end}
  2681. end;
  2682. procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
  2683. var
  2684. p,hp1,hp2: tai;
  2685. l : longint;
  2686. condition : tasmcond;
  2687. { UsedRegs, TmpUsedRegs: TRegSet; }
  2688. begin
  2689. p := BlockStart;
  2690. { UsedRegs := []; }
  2691. while (p <> BlockEnd) Do
  2692. begin
  2693. { UpdateUsedRegs(UsedRegs, tai(p.next)); }
  2694. case p.Typ Of
  2695. Ait_Instruction:
  2696. begin
  2697. case taicpu(p).opcode Of
  2698. A_B:
  2699. if taicpu(p).condition<>C_None then
  2700. begin
  2701. { check for
  2702. Bxx xxx
  2703. <several instructions>
  2704. xxx:
  2705. }
  2706. l:=0;
  2707. GetNextInstruction(p, hp1);
  2708. while assigned(hp1) and
  2709. (l<=4) and
  2710. CanBeCond(hp1) and
  2711. { stop on labels }
  2712. not(hp1.typ=ait_label) do
  2713. begin
  2714. inc(l);
  2715. if MustBeLast(hp1) then
  2716. begin
  2717. //hp1:=nil;
  2718. GetNextInstruction(hp1,hp1);
  2719. break;
  2720. end
  2721. else
  2722. GetNextInstruction(hp1,hp1);
  2723. end;
  2724. if assigned(hp1) then
  2725. begin
  2726. if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2727. begin
  2728. if (l<=4) and (l>0) then
  2729. begin
  2730. condition:=inverse_cond(taicpu(p).condition);
  2731. hp2:=p;
  2732. GetNextInstruction(p,hp1);
  2733. p:=hp1;
  2734. repeat
  2735. if hp1.typ=ait_instruction then
  2736. taicpu(hp1).condition:=condition;
  2737. if MustBeLast(hp1) then
  2738. begin
  2739. GetNextInstruction(hp1,hp1);
  2740. break;
  2741. end
  2742. else
  2743. GetNextInstruction(hp1,hp1);
  2744. until not(assigned(hp1)) or
  2745. not(CanBeCond(hp1)) or
  2746. (hp1.typ=ait_label);
{ wait with the removal, else GetNextInstruction could
ignore the label if its only usage was in the
jump we moved away }
  2750. asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
  2751. DecrementPreceedingIT(asml, hp2);
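{ insert an IT/ITT/ITTT/ITTTT block covering the l instructions that were
just given a condition }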
                                    case l of
                                      1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
                                      2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
                                      3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
                                      4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
                                    end;
                                    tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                    asml.remove(hp2);
                                    hp2.free;
                                    continue;
                                  end;
                              end;
                          end;
                      end;
                end;
              end;
          end;
          p := tai(p.next)
        end;
    end;


  function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
    begin
      result:=false;
      if p.typ = ait_instruction then
        begin
          if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
            (taicpu(p).oper[1]^.typ=top_const) and
            (taicpu(p).oper[1]^.val >= 0) and
            (taicpu(p).oper[1]^.val < 256) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
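              { flags are dead here, so the flag-setting form is safe; on Thumb-2 the
                S-forms of these simple operations usually allow 16-bit encodings.
                Illustrative: mov r0,#42  =>  movs r0,#42 }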
              DebugMsg('Peephole Mov2Movs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
            (taicpu(p).oper[1]^.typ=top_reg) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
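              { same transformation for a register MVN; illustrative:
                mvn r0,r1  =>  mvns r0,r1 }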
              DebugMsg('Peephole Mvn2Mvns done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            (taicpu(p).oper[2]^.typ=top_const) and
            (taicpu(p).oper[2]^.val=0) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
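              { illustrative: rsb r0,r1,#0  =>  rsbs r0,r1,#0 (a flag-setting negate) }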
              DebugMsg('Peephole Rsb2Rsbs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
            (taicpu(p).oper[2]^.typ=top_const) and
            (taicpu(p).oper[2]^.val >= 0) and
            (taicpu(p).oper[2]^.val < 256) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
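              { destination equals first source and the immediate is small; illustrative:
                add r0,r0,#1  =>  adds r0,#1 }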
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
            (taicpu(p).ops = 2) and
            (taicpu(p).oper[1]^.typ=top_reg) and
            (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
            (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
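              { two-operand register form, neither operand being SP; illustrative:
                add r0,r1  =>  adds r0,r1 }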
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ=top_reg) then
            begin
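              { drop the redundant source operand; illustrative:
                add r0,r0,r1  =>  add r0,r1 }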
              DebugMsg('Peephole AddRRR2AddRR done', p);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              result:=true;
            end
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ=top_reg) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
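              { destination equals first source and flags are dead; illustrative:
                and r0,r0,r1  =>  ands r0,r1 }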
              DebugMsg('Peephole opXXY2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
            begin
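              { already flag-setting, only shorten to the two-operand form; illustrative:
                lsls r0,r0,#2  =>  lsls r0,#2 }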
              DebugMsg('Peephole opXXY2opXY done', p);
              taicpu(p).ops := 2;
              if taicpu(p).oper[2]^.typ=top_reg then
                taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
              else
                taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              result:=true;
            end
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
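              { destination equals the second source; valid because AND/ORR/EOR are
                commutative. Illustrative: orr r0,r1,r0  =>  orrs r0,r1 }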
              DebugMsg('Peephole opXYX2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
          else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
            (taicpu(p).ops=3) and
            (taicpu(p).oper[2]^.typ=top_shifterop) and
            (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
            //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
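              { replace a shifted MOV by the dedicated shift instruction; illustrative:
                mov r0,r1,lsl #2  =>  lsls r0,r1,#2 }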
              DebugMsg('Peephole Mov2Shift done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              case taicpu(p).oper[2]^.shifterop^.shiftmode of
                SM_LSL: taicpu(p).opcode:=A_LSL;
                SM_LSR: taicpu(p).opcode:=A_LSR;
                SM_ASR: taicpu(p).opcode:=A_ASR;
                SM_ROR: taicpu(p).opcode:=A_ROR;
              end;
              if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
                taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
              else
                taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
              result:=true;
            end
        end;
    end;


begin
  casmoptimizer:=TCpuAsmOptimizer;
  cpreregallocscheduler:=TCpuPreRegallocScheduler;
End.