  1. {
  2. Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
  3. Development Team
  4. This unit implements the ARM optimizer object
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 2 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program; if not, write to the Free Software
  15. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  16. ****************************************************************************
  17. }
  18. Unit aoptcpu;
  19. {$i fpcdefs.inc}
  20. { $define DEBUG_PREREGSCHEDULER}
  21. {$define DEBUG_AOPTCPU}
  22. Interface
  23. uses cgbase, cgutils, cpubase, aasmtai, aasmcpu,aopt, aoptobj;
  24. Type
  25. TCpuAsmOptimizer = class(TAsmOptimizer)
  26. { Can't be done in some cases due to the limited range of jumps }
  27. function CanDoJumpOpts: Boolean; override;
  28. { uses the same constructor as TAopObj }
  29. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  30. procedure PeepHoleOptPass2;override;
  31. Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
  32. function RemoveSuperfluousMove(const p: tai; movp: tai; const optimizer: string): boolean;
  33. function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;
  34. { gets the next tai object after Current that contains info relevant
  35. to the optimizer: one that uses the given register or causes a
  36. change in program flow.
  37. If there is none, it returns false and
  38. sets Next to nil }
  39. Function GetNextInstructionUsingReg(Current: tai; Out Next: tai; reg: TRegister): Boolean;
  40. Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  41. { outputs a debug message into the assembler file }
  42. procedure DebugMsg(const s: string; p: tai);
  43. function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
  44. function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
  45. protected
  46. function LookForPreindexedPattern(p: taicpu): boolean;
  47. function LookForPostindexedPattern(p: taicpu): boolean;
  48. End;
  49. TCpuPreRegallocScheduler = class(TAsmScheduler)
  50. function SchedulerPass1Cpu(var p: tai): boolean;override;
  51. procedure SwapRegLive(p, hp1: taicpu);
  52. end;
  53. TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
  54. { uses the same constructor as TAopObj }
  55. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  56. procedure PeepHoleOptPass2;override;
  57. function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  58. End;
  59. function MustBeLast(p : tai) : boolean;
  60. Implementation
  61. uses
  62. cutils,verbose,globtype,globals,
  63. systems,
  64. cpuinfo,
  65. cgobj,procinfo,
  66. aasmbase,aasmdata;
  67. { Range check must be disabled explicitly as conversions between signed and unsigned
  68. 32-bit values are done without explicit typecasts }
  69. {$R-}
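  { CanBeCond returns true if instruction p can be turned into a conditionally
    executed instruction: only in ARM (non-Thumb) mode, only if it is not yet
    conditional, and only if its opcode allows a condition (IT*, CBZ/CBNZ and
    PLD never do; BL/BLX only in their register form). }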
  70. function CanBeCond(p : tai) : boolean;
  71. begin
  72. result:=
  73. not(GenerateThumbCode) and
  74. (p.typ=ait_instruction) and
  75. (taicpu(p).condition=C_None) and
  76. ((taicpu(p).opcode<A_IT) or (taicpu(p).opcode>A_ITTTT)) and
  77. (taicpu(p).opcode<>A_CBZ) and
  78. (taicpu(p).opcode<>A_CBNZ) and
  79. (taicpu(p).opcode<>A_PLD) and
  80. (((taicpu(p).opcode<>A_BLX) and
  81. { BL may need to be converted into BLX by the linker -- it could possibly
  82. be allowed if the target is a local symbol that is known to use
  83. the same instruction set as the current code }
  84. (taicpu(p).opcode<>A_BL)) or
  85. (taicpu(p).oper[0]^.typ=top_reg));
  86. end;
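  { RefsEqual returns true if r1 and r2 address exactly the same memory location,
    i.e. all addressing components (offset, base, index, scale, symbols, sign,
    shift and addressing mode) match and neither reference is volatile. }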
  87. function RefsEqual(const r1, r2: treference): boolean;
  88. begin
  89. refsequal :=
  90. (r1.offset = r2.offset) and
  91. (r1.base = r2.base) and
  92. (r1.index = r2.index) and (r1.scalefactor = r2.scalefactor) and
  93. (r1.symbol=r2.symbol) and (r1.refaddr = r2.refaddr) and
  94. (r1.relsymbol = r2.relsymbol) and
  95. (r1.signindex = r2.signindex) and
  96. (r1.shiftimm = r2.shiftimm) and
  97. (r1.addressmode = r2.addressmode) and
  98. (r1.shiftmode = r2.shiftmode) and
  99. (r1.volatility=[]) and
  100. (r2.volatility=[]);
  101. end;
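  { The two MatchInstruction overloads check whether instr is an instruction with
    a matching opcode (any member of the set op in the first variant, the single
    opcode in the second), condition and postfix; an empty set acts as a wildcard.
    The set variant only matches opcodes with ordinal values below 256, as Pascal
    sets cannot hold larger elements. }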
  102. function MatchInstruction(const instr: tai; const op: TCommonAsmOps; const cond: TAsmConds; const postfix: TOpPostfixes): boolean;
  103. begin
  104. result :=
  105. (instr.typ = ait_instruction) and
  106. ((op = []) or ((ord(taicpu(instr).opcode)<256) and (taicpu(instr).opcode in op))) and
  107. ((cond = []) or (taicpu(instr).condition in cond)) and
  108. ((postfix = []) or (taicpu(instr).oppostfix in postfix));
  109. end;
  110. function MatchInstruction(const instr: tai; const op: TAsmOp; const cond: TAsmConds; const postfix: TOpPostfixes): boolean;
  111. begin
  112. result :=
  113. (instr.typ = ait_instruction) and
  114. (taicpu(instr).opcode = op) and
  115. ((cond = []) or (taicpu(instr).condition in cond)) and
  116. ((postfix = []) or (taicpu(instr).oppostfix in postfix));
  117. end;
  118. function MatchOperand(const oper1: TOper; const oper2: TOper): boolean; inline;
  119. begin
  120. result := oper1.typ = oper2.typ;
  121. if result then
  122. case oper1.typ of
  123. top_const:
  124. Result:=oper1.val = oper2.val;
  125. top_reg:
  126. Result:=oper1.reg = oper2.reg;
  127. top_conditioncode:
  128. Result:=oper1.cc = oper2.cc;
  129. top_ref:
  130. Result:=RefsEqual(oper1.ref^, oper2.ref^);
  131. else Result:=false;
  132. end
  133. end;
  134. function MatchOperand(const oper: TOper; const reg: TRegister): boolean; inline;
  135. begin
  136. result := (oper.typ = top_reg) and (oper.reg = reg);
  137. end;
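  { RemoveRedundantMove deletes a conditional move that would only reload the
    constant which has just been compared against, e.g.
      cmp    reg,#x
      moveq  reg,#x
    if the moveq is executed at all, reg already contains #x, so the moveq can
    be removed. }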
  138. function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
  139. begin
  140. Result:=false;
  141. if (taicpu(movp).condition = C_EQ) and
  142. (taicpu(cmpp).oper[0]^.reg = taicpu(movp).oper[0]^.reg) and
  143. (taicpu(cmpp).oper[1]^.val = taicpu(movp).oper[1]^.val) then
  144. begin
  145. asml.insertafter(tai_comment.Create(strpnew('Peephole CmpMovMov - Removed redundant moveq')), movp);
  146. asml.remove(movp);
  147. movp.free;
  148. Result:=true;
  149. end;
  150. end;
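  { AlignedToQWord tries to prove that ref is 8-byte aligned; the peepholes below
    (StrStr2Strd, LdrLdr2Ldrd) require this before merging two consecutive 32 bit
    stores/loads into a single strd/ldrd. }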
  151. function AlignedToQWord(const ref : treference) : boolean;
  152. begin
  153. { (safe) heuristics to ensure alignment }
  154. result:=(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) and
  155. (((ref.offset>=0) and
  156. ((ref.offset mod 8)=0) and
  157. ((ref.base=NR_R13) or
  158. (ref.index=NR_R13))
  159. ) or
  160. ((ref.offset<=0) and
  161. { when NR_R11 is used as frame pointer, it always has a value of <qword align>+4 }
  162. ((abs(ref.offset+4) mod 8)=0) and
  163. (current_procinfo.framepointer=NR_R11) and
  164. ((ref.base=NR_R11) or
  165. (ref.index=NR_R11))
  166. )
  167. );
  168. end;
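  { isValidConstLoadStoreOffset checks whether aoffset fits into the immediate
    offset field of a load/store with postfix pf: Thumb-2 allows -255..4095,
    ARM allows +/-4095 for word/byte accesses (PF_None/PF_B) and only +/-255
    for the other forms. }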
  169. function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
  170. begin
  171. if GenerateThumb2Code then
  172. result := (aoffset<4096) and (aoffset>-256)
  173. else
  174. result := ((pf in [PF_None,PF_B]) and
  175. (abs(aoffset)<4096)) or
  176. (abs(aoffset)<256);
  177. end;
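  { InstructionLoadsFromReg returns true if instruction hp reads reg through any
    of its operands: as a plain register, as part of a register set, as the
    register of a shifter operand, or as base/index of a memory reference. }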
  178. function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
  179. var
  180. p: taicpu;
  181. i: longint;
  182. begin
  183. instructionLoadsFromReg := false;
  184. if not (assigned(hp) and (hp.typ = ait_instruction)) then
  185. exit;
  186. p:=taicpu(hp);
  187. i:=1;
  188. {For these instructions we have to start on oper[0]}
  189. if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
  190. A_CMP, A_CMN, A_TST, A_TEQ,
  191. A_B, A_BL, A_BX, A_BLX,
  192. A_SMLAL, A_UMLAL]) then i:=0;
  193. while(i<p.ops) do
  194. begin
  195. case p.oper[I]^.typ of
  196. top_reg:
  197. instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
  198. { STRD }
  199. ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
  200. top_regset:
  201. instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
  202. top_shifterop:
  203. instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
  204. top_ref:
  205. instructionLoadsFromReg :=
  206. (p.oper[I]^.ref^.base = reg) or
  207. (p.oper[I]^.ref^.index = reg);
  208. else
  209. ;
  210. end;
  211. if instructionLoadsFromReg then exit; {Bail out if we found something}
  212. Inc(I);
  213. end;
  214. end;
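  { RegLoadedWithNewValue returns true if instruction hp overwrites reg with a
    new value, as opposed to only reading it or storing it to memory. }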
  215. function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
  216. var
  217. p: taicpu;
  218. begin
  219. p := taicpu(hp);
  220. Result := false;
  221. if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
  222. exit;
  223. case p.opcode of
  224. { These instructions do not write into a register at all }
  225. A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
  226. A_VCMP:
  227. exit;
  228. {Take care of post-/preindexed stores and loads, they will change their base register}
  229. A_STR, A_LDR:
  230. begin
  231. Result := false;
  232. { actually, this does not apply here because post-/preindexed does not mean that a register
  233. is loaded with a new value, it is only modified
  234. (taicpu(p).oper[1]^.typ=top_ref) and
  235. (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  236. (taicpu(p).oper[1]^.ref^.base = reg);
  237. }
  238. { STR does not load into its first register }
  239. if p.opcode = A_STR then
  240. exit;
  241. end;
  242. A_VSTR:
  243. begin
  244. Result := false;
  245. exit;
  246. end;
  247. { These four write into the first two registers; UMLAL and SMLAL also read from them }
  248. A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
  249. Result :=
  250. (p.oper[1]^.typ = top_reg) and
  251. (p.oper[1]^.reg = reg);
  252. {Loads to oper2 from coprocessor}
  253. {
  254. MCR/MRC is currently not supported in FPC
  255. A_MRC:
  256. Result :=
  257. (p.oper[2]^.typ = top_reg) and
  258. (p.oper[2]^.reg = reg);
  259. }
  260. {Loads to all registers in the register set}
  261. A_LDM, A_VLDM:
  262. Result := (getsupreg(reg) in p.oper[1]^.regset^);
  263. A_POP:
  264. Result := (getsupreg(reg) in p.oper[0]^.regset^) or
  265. (reg=NR_STACK_POINTER_REG);
  266. else
  267. ;
  268. end;
  269. if Result then
  270. exit;
  271. case p.oper[0]^.typ of
  272. {The common case: the destination is the first register operand}
  273. top_reg:
  274. Result := (p.oper[0]^.reg = reg) or
  275. { LDRD }
  276. (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
  277. {LDM/STM might write a new value to their index register}
  278. top_ref:
  279. Result :=
  280. (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  281. (taicpu(p).oper[0]^.ref^.base = reg);
  282. else
  283. ;
  284. end;
  285. end;
  286. function TCpuAsmOptimizer.GetNextInstructionUsingReg(Current: tai;
  287. Out Next: tai; reg: TRegister): Boolean;
  288. begin
  289. Next:=Current;
  290. repeat
  291. Result:=GetNextInstruction(Next,Next);
  292. until not (Result) or
  293. not(cs_opt_level3 in current_settings.optimizerswitches) or
  294. (Next.typ<>ait_instruction) or
  295. RegInInstruction(reg,Next) or
  296. is_calljmp(taicpu(Next).opcode) or
  297. RegModifiedByInstruction(NR_PC,Next);
  298. end;
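  { GetNextInstructionUsingRef searches forward for the next ldr/str instruction
    that accesses exactly the same reference as ref. The search stops at
    non-instructions, calls/jumps, instructions modifying PC and, if StopOnStore
    is true, at any str/stm; it only crosses more than one instruction when
    optimisation level 3 is enabled. }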
  299. function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai;
  300. Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  301. begin
  302. Next:=Current;
  303. repeat
  304. Result:=GetNextInstruction(Next,Next);
  305. if Result and
  306. (Next.typ=ait_instruction) and
  307. (taicpu(Next).opcode in [A_LDR, A_STR]) and
  308. (
  309. ((taicpu(Next).ops = 2) and
  310. (taicpu(Next).oper[1]^.typ = top_ref) and
  311. RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
  312. ((taicpu(Next).ops = 3) and { LDRD/STRD }
  313. (taicpu(Next).oper[2]^.typ = top_ref) and
  314. RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
  315. ) then
  316. {We've found an instruction LDR or STR with the same reference}
  317. exit;
  318. until not(Result) or
  319. (Next.typ<>ait_instruction) or
  320. not(cs_opt_level3 in current_settings.optimizerswitches) or
  321. is_calljmp(taicpu(Next).opcode) or
  322. (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
  323. RegModifiedByInstruction(NR_PC,Next);
  324. Result:=false;
  325. end;
  326. {$ifdef DEBUG_AOPTCPU}
  327. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
  328. begin
  329. asml.insertbefore(tai_comment.Create(strpnew(s)), p);
  330. end;
  331. {$else DEBUG_AOPTCPU}
  332. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
  333. begin
  334. end;
  335. {$endif DEBUG_AOPTCPU}
  336. function TCpuAsmOptimizer.CanDoJumpOpts: Boolean;
  337. begin
  338. { These jump optimisations cannot be performed when generating 16-bit Thumb (non-Thumb-2) code because of the limited branch range }
  339. Result := not (
  340. (current_settings.instructionset = is_thumb) and not (CPUARM_HAS_THUMB2 in cpu_capabilities[current_settings.cputype])
  341. );
  342. end;
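  { RemoveSuperfluousMove folds
      <op> reg1,...
      mov  reg2,reg1
    into
      <op> reg2,...
    provided that p really writes reg1, reg2 is neither PC nor LR, reg2 is not
    used between p and the mov, and reg1 dies at the mov (a dealloc is found);
    the register allocation information is moved along with the change. }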
  343. function TCpuAsmOptimizer.RemoveSuperfluousMove(const p: tai; movp: tai; const optimizer: string):boolean;
  344. var
  345. alloc,
  346. dealloc : tai_regalloc;
  347. hp1 : tai;
  348. begin
  349. Result:=false;
  350. if MatchInstruction(movp, A_MOV, [taicpu(p).condition], [PF_None]) and
  351. (taicpu(movp).ops=2) and {We can't optimize if there is a shiftop}
  352. MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
  353. { don't mess with moves to pc }
  354. (taicpu(movp).oper[0]^.reg<>NR_PC) and
  355. { don't mess with moves to lr }
  356. (taicpu(movp).oper[0]^.reg<>NR_R14) and
  357. { the destination register of the mov must not be used between p and movp }
  358. not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
  359. { cb[n]z are thumb instructions which require specific registers, with no wide forms }
  360. (taicpu(p).opcode<>A_CBZ) and
  361. (taicpu(p).opcode<>A_CBNZ) and
  362. {There is a special requirement for MUL and MLA, oper[0] and oper[1] are not allowed to be the same}
  363. not (
  364. (taicpu(p).opcode in [A_MLA, A_MUL]) and
  365. (taicpu(p).oper[1]^.reg = taicpu(movp).oper[0]^.reg) and
  366. (current_settings.cputype < cpu_armv6)
  367. ) and
  368. { Take care to only do this for instructions which REALLY load to the first register.
  369. Otherwise
  370. str reg0, [reg1]
  371. mov reg2, reg0
  372. will be optimized to
  373. str reg2, [reg1]
  374. }
  375. regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
  376. begin
  377. dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
  378. if assigned(dealloc) then
  379. begin
  380. DebugMsg('Peephole '+optimizer+' removed superfluous mov', movp);
  381. result:=true;
  382. { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
  383. and remove it if possible }
  384. asml.Remove(dealloc);
  385. alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
  386. if assigned(alloc) then
  387. begin
  388. asml.Remove(alloc);
  389. alloc.free;
  390. dealloc.free;
  391. end
  392. else
  393. asml.InsertAfter(dealloc,p);
  394. { try to move the allocation of the target register }
  395. GetLastInstruction(movp,hp1);
  396. alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
  397. if assigned(alloc) then
  398. begin
  399. asml.Remove(alloc);
  400. asml.InsertBefore(alloc,p);
  401. { adjust used regs }
  402. IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
  403. end;
  404. { finally get rid of the mov }
  405. taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
  406. { Remove preindexing and postindexing for LDR in some cases.
  407. For example:
  408. ldr reg2,[reg1, xxx]!
  409. mov reg1,reg2
  410. must be translated to:
  411. ldr reg1,[reg1, xxx]
  412. Preindexing must be removed here, since the same register would be used both as the base (with writeback) and as the destination.
  413. Such a combination is not allowed on ARM and its result is unpredictable. }
  414. if (taicpu(p).opcode = A_LDR) and (taicpu(p).oper[1]^.typ = top_ref)
  415. and (taicpu(movp).oper[0]^.reg = taicpu(p).oper[1]^.ref^.base)
  416. then
  417. taicpu(p).oper[1]^.ref^.addressmode:=AM_OFFSET;
  418. asml.remove(movp);
  419. movp.free;
  420. end;
  421. end;
  422. end;
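  { RemoveSuperfluousVMov is the VFP counterpart of RemoveSuperfluousMove: it
    folds a following vmov reg2,reg1 into the instruction that produced reg1 and,
    when the final destination is an integer register, turns vldr into a plain
    ldr. }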
  423. function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
  424. var
  425. alloc,
  426. dealloc : tai_regalloc;
  427. hp1 : tai;
  428. begin
  429. Result:=false;
  430. if ((MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  431. ((getregtype(taicpu(movp).oper[0]^.reg)=R_MMREGISTER) or (taicpu(p).opcode=A_VLDR))
  432. ) or
  433. (((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFD)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
  434. (((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFS)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
  435. ) and
  436. (taicpu(movp).ops=2) and
  437. MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
  438. { the destination register of the mov must not be used between p and movp }
  439. not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
  440. { Take care to only do this for instructions which REALLY load to the first register.
  441. Otherwise
  442. vstr reg0, [reg1]
  443. vmov reg2, reg0
  444. will be optimized to
  445. vstr reg2, [reg1]
  446. }
  447. regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
  448. begin
  449. dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
  450. if assigned(dealloc) then
  451. begin
  452. DebugMsg('Peephole '+optimizer+' removed superfluous vmov', movp);
  453. result:=true;
  454. { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
  455. and remove it if possible }
  456. asml.Remove(dealloc);
  457. alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
  458. if assigned(alloc) then
  459. begin
  460. asml.Remove(alloc);
  461. alloc.free;
  462. dealloc.free;
  463. end
  464. else
  465. asml.InsertAfter(dealloc,p);
  466. { try to move the allocation of the target register }
  467. GetLastInstruction(movp,hp1);
  468. alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
  469. if assigned(alloc) then
  470. begin
  471. asml.Remove(alloc);
  472. asml.InsertBefore(alloc,p);
  473. { adjust used regs }
  474. IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
  475. end;
  476. { change
  477. vldr reg0,[reg1]
  478. vmov reg2,reg0
  479. into
  480. ldr reg2,[reg1]
  481. if reg2 is an int register
  482. }
  483. if (taicpu(p).opcode=A_VLDR) and (getregtype(taicpu(movp).oper[0]^.reg)=R_INTREGISTER) then
  484. taicpu(p).opcode:=A_LDR;
  485. { finally get rid of the mov }
  486. taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
  487. asml.remove(movp);
  488. movp.free;
  489. end;
  490. end;
  491. end;
  492. {
  493. optimize
  494. add/sub reg1,reg1,regY/const
  495. ...
  496. ldr/str regX,[reg1]
  497. into
  498. ldr/str regX,[reg1, regY/const]!
  499. }
  500. function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
  501. var
  502. hp1: tai;
  503. begin
  504. if GenerateARMCode and
  505. (p.ops=3) and
  506. MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
  507. GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
  508. (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
  509. MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
  510. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  511. (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
  512. (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
  513. (taicpu(hp1).oper[1]^.ref^.offset=0) and
  514. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  515. (((p.oper[2]^.typ=top_reg) and
  516. (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
  517. ((p.oper[2]^.typ=top_const) and
  518. ((abs(p.oper[2]^.val) < 256) or
  519. ((abs(p.oper[2]^.val) < 4096) and
  520. (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
  521. begin
  522. taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;
  523. if p.oper[2]^.typ=top_reg then
  524. begin
  525. taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
  526. if p.opcode=A_ADD then
  527. taicpu(hp1).oper[1]^.ref^.signindex:=1
  528. else
  529. taicpu(hp1).oper[1]^.ref^.signindex:=-1;
  530. end
  531. else
  532. begin
  533. if p.opcode=A_ADD then
  534. taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
  535. else
  536. taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
  537. end;
  538. result:=true;
  539. end
  540. else
  541. result:=false;
  542. end;
  543. {
  544. optimize
  545. ldr/str regX,[reg1]
  546. ...
  547. add/sub reg1,reg1,regY/const
  548. into
  549. ldr/str regX,[reg1], regY/const
  550. }
  551. function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
  552. var
  553. hp1 : tai;
  554. begin
  555. Result:=false;
  556. if (p.oper[1]^.typ = top_ref) and
  557. (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
  558. (p.oper[1]^.ref^.index=NR_NO) and
  559. (p.oper[1]^.ref^.offset=0) and
  560. GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
  561. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  562. MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
  563. (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
  564. (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
  565. (
  566. (taicpu(hp1).oper[2]^.typ=top_reg) or
  567. { valid offset? }
  568. ((taicpu(hp1).oper[2]^.typ=top_const) and
  569. ((abs(taicpu(hp1).oper[2]^.val)<256) or
  570. ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
  571. )
  572. )
  573. ) and
  574. { don't apply the optimization if the base register is loaded }
  575. (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
  576. not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
  577. { don't apply the optimization if the (new) index register is loaded }
  578. (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
  579. not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
  580. GenerateARMCode then
  581. begin
  582. DebugMsg('Peephole Str/LdrAdd/Sub2Str/Ldr Postindex done', p);
  583. p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
  584. if taicpu(hp1).oper[2]^.typ=top_const then
  585. begin
  586. if taicpu(hp1).opcode=A_ADD then
  587. p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
  588. else
  589. p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
  590. end
  591. else
  592. begin
  593. p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
  594. if taicpu(hp1).opcode=A_ADD then
  595. p.oper[1]^.ref^.signindex:=1
  596. else
  597. p.oper[1]^.ref^.signindex:=-1;
  598. end;
  599. asml.Remove(hp1);
  600. hp1.Free;
  601. Result:=true;
  602. end;
  603. end;
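  { PeepHoleOptPass1Cpu is the first peephole pass proper: it dispatches on the
    opcode of p and applies the pattern replacements documented in the comments
    below, returning true whenever the instruction stream has been changed. }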
  604. function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  605. var
  606. hp1,hp2,hp3,hp4: tai;
  607. i, i2: longint;
  608. tempop: tasmop;
  609. oldreg: tregister;
  610. dealloc: tai_regalloc;
  611. function IsPowerOf2(const value: DWord): boolean; inline;
  612. begin
  613. Result:=(value and (value - 1)) = 0;
  614. end;
  615. begin
  616. result := false;
  617. case p.typ of
  618. ait_instruction:
  619. begin
  620. {
  621. change
  622. <op> reg,x,y
  623. cmp reg,#0
  624. into
  625. <op>s reg,x,y
  626. }
  627. { this optimization can be applied only to the currently enabled operations because
  628. the other operations do not update all flags and FPC does not track flag usage }
  629. if MatchInstruction(p, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,A_EOR,A_AND,
  630. A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
  631. GetNextInstruction(p, hp1) and
  632. { mlas is only allowed in arm mode }
  633. ((taicpu(p).opcode<>A_MLA) or
  634. (current_settings.instructionset<>is_thumb)) and
  635. MatchInstruction(hp1, A_CMP, [C_None], [PF_None]) and
  636. (taicpu(hp1).oper[1]^.typ = top_const) and
  637. (taicpu(p).oper[0]^.reg = taicpu(hp1).oper[0]^.reg) and
  638. (taicpu(hp1).oper[1]^.val = 0) and
  639. GetNextInstruction(hp1, hp2) and
  640. { be careful here: following instructions could use other flags,
  641. however after a jump FPC never depends on the value of the flags }
  642. { All above instructions set Z and N according to the following
  643. Z := result = 0;
  644. N := result[31];
  645. EQ = Z=1; NE = Z=0;
  646. MI = N=1; PL = N=0; }
  647. (MatchInstruction(hp2, A_B, [C_EQ,C_NE,C_MI,C_PL], []) or
  648. { mov is also possible, but only if there is no shifter operand: it could be an rxx,
  649. and we do not check whether it is rxx or something else }
  650. (MatchInstruction(hp2, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp2).ops=2))) and
  651. assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp2.Next))) then
  652. begin
  653. DebugMsg('Peephole OpCmp2OpS done', p);
  654. taicpu(p).oppostfix:=PF_S;
  655. { move flag allocation if possible }
  656. GetLastInstruction(hp1, hp2);
  657. hp2:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp2.Next));
  658. if assigned(hp2) then
  659. begin
  660. asml.Remove(hp2);
  661. asml.insertbefore(hp2, p);
  662. end;
  663. asml.remove(hp1);
  664. hp1.free;
  665. Result:=true;
  666. end
  667. else
  668. case taicpu(p).opcode of
  669. A_STR:
  670. begin
  671. { change
  672. str reg1,ref
  673. ldr reg2,ref
  674. into
  675. str reg1,ref
  676. mov reg2,reg1
  677. }
  678. if (taicpu(p).oper[1]^.typ = top_ref) and
  679. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  680. (taicpu(p).oppostfix=PF_None) and
  681. (taicpu(p).condition=C_None) and
  682. GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
  683. MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
  684. (taicpu(hp1).oper[1]^.typ=top_ref) and
  685. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  686. not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
  687. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
  688. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
  689. begin
  690. if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
  691. begin
  692. DebugMsg('Peephole StrLdr2StrMov 1 done', hp1);
  693. asml.remove(hp1);
  694. hp1.free;
  695. end
  696. else
  697. begin
  698. taicpu(hp1).opcode:=A_MOV;
  699. taicpu(hp1).oppostfix:=PF_None;
  700. taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
  701. DebugMsg('Peephole StrLdr2StrMov 2 done', hp1);
  702. end;
  703. result := true;
  704. end
  705. { change
  706. str reg1,ref
  707. str reg2,ref
  708. into
  709. strd reg1,reg2,ref
  710. }
  711. else if (GenerateARMCode or GenerateThumb2Code) and
  712. (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
  713. (taicpu(p).oppostfix=PF_None) and
  714. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  715. GetNextInstruction(p,hp1) and
  716. MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
  717. not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
  718. (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
  719. { str ensures that either base or index contains no register, else str wouldn't
  720. use an offset either
  721. }
  722. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  723. (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
  724. (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
  725. (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
  726. AlignedToQWord(taicpu(p).oper[1]^.ref^) then
  727. begin
  728. DebugMsg('Peephole StrStr2Strd done', p);
  729. taicpu(p).oppostfix:=PF_D;
  730. taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
  731. taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
  732. taicpu(p).ops:=3;
  733. asml.remove(hp1);
  734. hp1.free;
  735. result:=true;
  736. end;
  737. Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  738. end;
  739. A_LDR:
  740. begin
  741. { change
  742. ldr reg1,ref
  743. ldr reg2,ref
  744. into ...
  745. }
  746. if (taicpu(p).oper[1]^.typ = top_ref) and
  747. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  748. GetNextInstruction(p,hp1) and
  749. { ldrd is not allowed here }
  750. MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
  751. begin
  752. {
  753. ...
  754. ldr reg1,ref
  755. mov reg2,reg1
  756. }
  757. if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
  758. RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
  759. (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
  760. (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
  761. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
  762. begin
  763. if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
  764. begin
  765. DebugMsg('Peephole LdrLdr2Ldr done', hp1);
  766. asml.remove(hp1);
  767. hp1.free;
  768. end
  769. else
  770. begin
  771. DebugMsg('Peephole LdrLdr2LdrMov done', hp1);
  772. taicpu(hp1).opcode:=A_MOV;
  773. taicpu(hp1).oppostfix:=PF_None;
  774. taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
  775. end;
  776. result := true;
  777. end
  778. {
  779. ...
  780. ldrd reg1,reg1+1,ref
  781. }
  782. else if (GenerateARMCode or GenerateThumb2Code) and
  783. (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
  784. { ldrd does not allow any postfixes ... }
  785. (taicpu(p).oppostfix=PF_None) and
  786. not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
  787. (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
  788. { ldr ensures that either base or index contains no register, else ldr wouldn't
  789. use an offset either
  790. }
  791. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  792. (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
  793. (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
  794. (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
  795. AlignedToQWord(taicpu(p).oper[1]^.ref^) then
  796. begin
  797. DebugMsg('Peephole LdrLdr2Ldrd done', p);
  798. taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
  799. taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
  800. taicpu(p).ops:=3;
  801. taicpu(p).oppostfix:=PF_D;
  802. asml.remove(hp1);
  803. hp1.free;
  804. result:=true;
  805. end;
  806. end;
  807. {
  808. Change
  809. ldrb dst1, [REF]
  810. and dst2, dst1, #255
  811. into
  812. ldrb dst2, [ref]
  813. }
  814. if not(GenerateThumbCode) and
  815. (taicpu(p).oppostfix=PF_B) and
  816. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  817. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
  818. (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
  819. (taicpu(hp1).oper[2]^.typ = top_const) and
  820. (taicpu(hp1).oper[2]^.val = $FF) and
  821. not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
  822. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  823. begin
  824. DebugMsg('Peephole LdrbAnd2Ldrb done', p);
  825. taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
  826. asml.remove(hp1);
  827. hp1.free;
  828. result:=true;
  829. end;
  830. Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  831. { Remove superfluous mov after ldr
  832. changes
  833. ldr reg1, ref
  834. mov reg2, reg1
  835. to
  836. ldr reg2, ref
  837. conditions are:
  838. * no ldrd usage
  839. * reg1 must be released after mov
  840. * mov must not contain shifter operands
  841. * ldr+mov have the same conditions
  842. * mov does not set flags
  843. }
  844. if (taicpu(p).oppostfix<>PF_D) and
  845. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  846. RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
  847. Result:=true;
  848. end;
  849. A_MOV:
  850. begin
  851. { fold
  852. mov reg1,reg0, shift imm1
  853. mov reg1,reg1, shift imm2
  854. }
  855. if (taicpu(p).ops=3) and
  856. (taicpu(p).oper[2]^.typ = top_shifterop) and
  857. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  858. getnextinstruction(p,hp1) and
  859. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  860. (taicpu(hp1).ops=3) and
  861. MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
  862. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  863. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  864. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
  865. begin
  866. { fold
  867. mov reg1,reg0, lsl 16
  868. mov reg1,reg1, lsr 16
  869. strh reg1, ...
  870. dealloc reg1
  871. to
  872. strh reg1, ...
  873. dealloc reg1
  874. }
  875. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  876. (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
  877. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
  878. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
  879. getnextinstruction(hp1,hp2) and
  880. MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
  881. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
  882. begin
  883. TransferUsedRegs(TmpUsedRegs);
  884. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  885. UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
  886. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
  887. begin
  888. DebugMsg('Peephole optimizer removed superfluous 16 Bit zero extension', hp1);
  889. taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
  890. asml.remove(p);
  891. asml.remove(hp1);
  892. p.free;
  893. hp1.free;
  894. p:=hp2;
  895. Result:=true;
  896. end;
  897. end
  898. { fold
  899. mov reg1,reg0, shift imm1
  900. mov reg1,reg1, shift imm2
  901. to
  902. mov reg1,reg0, shift imm1+imm2
  903. }
  904. else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
  905. { after an lsr the sign bit is clear, so a following asr behaves like an lsr and can be folded into it }
  906. ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR) ) then
  907. begin
  908. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  909. { avoid overflows }
  910. if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
  911. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  912. SM_ROR:
  913. taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
  914. SM_ASR:
  915. taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
  916. SM_LSR,
  917. SM_LSL:
  918. begin
  919. hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
  920. InsertLLItem(p.previous, p.next, hp2);
  921. p.free;
  922. p:=hp2;
  923. end;
  924. else
  925. internalerror(2008072803);
  926. end;
  927. DebugMsg('Peephole ShiftShift2Shift 1 done', p);
  928. asml.remove(hp1);
  929. hp1.free;
  930. result := true;
  931. end
  932. { fold
  933. mov reg1,reg0, shift imm1
  934. mov reg1,reg1, shift imm2
  935. mov reg1,reg1, shift imm3 ...
  936. mov reg2,reg1, shift imm3 ...
  937. }
  938. else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
  939. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  940. (taicpu(hp2).ops=3) and
  941. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  942. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
  943. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  944. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
  945. begin
  946. { mov reg1,reg0, lsl imm1
  947. mov reg1,reg1, lsr/asr imm2
  948. mov reg2,reg1, lsl imm3 ...
  949. to
  950. mov reg1,reg0, lsl imm1
  951. mov reg2,reg1, lsr/asr imm2-imm3
  952. if
  953. imm1>=imm2
  954. }
  955. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  956. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  957. (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  958. begin
  959. if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  960. begin
  961. if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
  962. not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  963. begin
  964. DebugMsg('Peephole ShiftShiftShift2ShiftShift 1a done', p);
  965. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  966. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  967. asml.remove(hp1);
  968. asml.remove(hp2);
  969. hp1.free;
  970. hp2.free;
  971. if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
  972. begin
  973. taicpu(p).freeop(1);
  974. taicpu(p).freeop(2);
  975. taicpu(p).loadconst(1,0);
  976. end;
  977. result := true;
  978. end;
  979. end
  980. else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  981. begin
  982. DebugMsg('Peephole ShiftShiftShift2ShiftShift 1b done', p);
  983. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
  984. taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  985. asml.remove(hp2);
  986. hp2.free;
  987. result := true;
  988. end;
  989. end
  990. { mov reg1,reg0, lsr/asr imm1
  991. mov reg1,reg1, lsl imm2
  992. mov reg1,reg1, lsr/asr imm3 ...
  993. if imm3>=imm1 and imm2>=imm1
  994. to
  995. mov reg1,reg0, lsl imm2-imm1
  996. mov reg1,reg1, lsr/asr imm3 ...
  997. }
  998. else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  999. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1000. (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
  1001. (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1002. begin
  1003. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
  1004. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1005. DebugMsg('Peephole ShiftShiftShift2ShiftShift 2 done', p);
  1006. asml.remove(p);
  1007. p.free;
  1008. p:=hp2;
  1009. if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
  1010. begin
  1011. taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
  1012. asml.remove(hp1);
  1013. hp1.free;
  1014. p:=hp2;
  1015. end;
  1016. result := true;
  1017. end;
  1018. end;
  1019. end;
  1020. { Change the common
  1021. mov r0, r0, lsr #xxx
  1022. and r0, r0, #yyy/bic r0, r0, #xxx
  1023. and remove the superfluous and/bic if possible
  1024. This could be extended to handle more cases.
  1025. }
  1026. if (taicpu(p).ops=3) and
  1027. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1028. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1029. (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
  1030. GetNextInstructionUsingReg(p,hp1, taicpu(p).oper[0]^.reg) and
  1031. (hp1.typ=ait_instruction) and
  1032. (taicpu(hp1).ops>=1) and
  1033. (taicpu(hp1).oper[0]^.typ=top_reg) and
  1034. (not RegModifiedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
  1035. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1036. begin
  1037. if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24 ) and
  1038. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1039. (taicpu(hp1).ops=3) and
  1040. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^) and
  1041. (taicpu(hp1).oper[2]^.typ = top_const) and
  1042. { Check whether the AND would only mask out bits that are already zero because of the shift
  1043. }
  1044. ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hp1).oper[2]^.val) =
  1045. ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
  1046. begin
  1047. DebugMsg('Peephole LsrAnd2Lsr done', hp1);
  1048. taicpu(p).oper[0]^.reg:=taicpu(hp1).oper[0]^.reg;
  1049. asml.remove(hp1);
  1050. hp1.free;
  1051. result:=true;
  1052. end
  1053. else if MatchInstruction(hp1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1054. (taicpu(hp1).ops=3) and
  1055. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^) and
  1056. (taicpu(hp1).oper[2]^.typ = top_const) and
  1057. { Check whether the BIC would only mask out bits that are already zero because of the shift }
  1058. (taicpu(hp1).oper[2]^.val<>0) and
  1059. (BsfDWord(taicpu(hp1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1060. begin
  1061. DebugMsg('Peephole LsrBic2Lsr done', hp1);
  1062. taicpu(p).oper[0]^.reg:=taicpu(hp1).oper[0]^.reg;
  1063. asml.remove(hp1);
  1064. hp1.free;
  1065. result:=true;
  1066. end;
  1067. end;
  1068. { Change
  1069. mov rx, ry, lsr/ror #xxx
  1070. uxtb/uxth rz,rx/and rz,rx,0xFF
  1071. dealloc rx
  1072. to
  1073. uxtb/uxth rz,ry,ror #xxx
  1074. }
  1075. if (taicpu(p).ops=3) and
  1076. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1077. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1078. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
  1079. (GenerateThumb2Code) and
  1080. GetNextInstructionUsingReg(p,hp1, taicpu(p).oper[0]^.reg) and
  1081. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1082. begin
  1083. if MatchInstruction(hp1, A_UXTB, [C_None], [PF_None]) and
  1084. (taicpu(hp1).ops = 2) and
  1085. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1086. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1087. begin
  1088. taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1089. taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1090. taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1091. taicpu(hp1).ops := 3;
  1092. GetNextInstruction(p,hp1);
  1093. asml.Remove(p);
  1094. p.Free;
  1095. p:=hp1;
  1096. result:=true;
  1097. exit;
  1098. end
  1099. else if MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
  1100. (taicpu(hp1).ops=2) and
  1101. (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
  1102. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1103. begin
  1104. taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1105. taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1106. taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1107. taicpu(hp1).ops := 3;
  1108. GetNextInstruction(p,hp1);
  1109. asml.Remove(p);
  1110. p.Free;
  1111. p:=hp1;
  1112. result:=true;
  1113. exit;
  1114. end
  1115. else if MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
  1116. (taicpu(hp1).ops = 3) and
  1117. (taicpu(hp1).oper[2]^.typ = top_const) and
  1118. (taicpu(hp1).oper[2]^.val = $FF) and
  1119. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1120. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1121. begin
  1122. taicpu(hp1).ops := 3;
  1123. taicpu(hp1).opcode := A_UXTB;
  1124. taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1125. taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1126. taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1127. GetNextInstruction(p,hp1);
  1128. asml.Remove(p);
  1129. p.Free;
  1130. p:=hp1;
  1131. result:=true;
  1132. exit;
  1133. end;
  1134. end;
  1135. {
  1136. optimize sequences that start with
  1137. mov rX, yyyy
  1138. followed by further instructions (a str or another mov, handled below)
  1139. }
  1140. if (taicpu(p).ops = 2) and
  1141. GetNextInstruction(p,hp1) and
  1142. (tai(hp1).typ = ait_instruction) then
  1143. begin
  1144. {
  1145. This changes the very common
  1146. mov r0, #0
  1147. str r0, [...]
  1148. mov r0, #0
  1149. str r0, [...]
  1150. and removes all superfluous mov instructions
  1151. }
  1152. if (taicpu(p).oper[1]^.typ = top_const) and
  1153. (taicpu(hp1).opcode=A_STR) then
  1154. while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
  1155. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^) and
  1156. GetNextInstruction(hp1, hp2) and
  1157. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1158. (taicpu(hp2).ops = 2) and
  1159. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
  1160. MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
  1161. begin
  1162. DebugMsg('Peephole MovStrMov done', hp2);
  1163. GetNextInstruction(hp2,hp1);
  1164. asml.remove(hp2);
  1165. hp2.free;
  1166. result:=true;
  1167. if not assigned(hp1) then break;
  1168. end
  1169. {
  1170. This removes the first mov from
  1171. mov rX,...
  1172. mov rX,...
  1173. }
  1174. else if taicpu(hp1).opcode=A_MOV then
  1175. while MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1176. (taicpu(hp1).ops = 2) and
  1177. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^) and
  1178. { don't remove the first mov if the second is a mov rX,rX }
  1179. not(MatchOperand(taicpu(hp1).oper[0]^, taicpu(hp1).oper[1]^)) do
  1180. begin
  1181. DebugMsg('Peephole MovMov done', p);
  1182. asml.remove(p);
  1183. p.free;
  1184. p:=hp1;
  1185. GetNextInstruction(hp1,hp1);
  1186. result:=true;
  1187. if not assigned(hp1) then
  1188. break;
  1189. end;
  1190. end;
  1191. {
  1192. change
  1193. mov r1, r0
  1194. add r1, r1, #1
  1195. to
  1196. add r1, r0, #1
  1197. Todo: Make it work for mov+cmp too
  1198. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1199. }
  1200. if (taicpu(p).ops = 2) and
  1201. (taicpu(p).oper[1]^.typ = top_reg) and
  1202. (taicpu(p).oppostfix = PF_NONE) and
  1203. GetNextInstruction(p, hp1) and
  1204. MatchInstruction(hp1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1205. A_AND, A_BIC, A_EOR, A_ORR, A_MOV, A_MVN],
  1206. [taicpu(p).condition], []) and
  1207. {MOV and MVN might only have 2 ops}
  1208. (taicpu(hp1).ops >= 2) and
  1209. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^.reg) and
  1210. (taicpu(hp1).oper[1]^.typ = top_reg) and
  1211. (
  1212. (taicpu(hp1).ops = 2) or
  1213. (taicpu(hp1).oper[2]^.typ in [top_reg, top_const, top_shifterop])
  1214. ) then
  1215. begin
  1216. { When we get here we still don't know if the registers match}
  1217. for I:=1 to 2 do
  1218. {
1219. If the first iteration was successful, p will have been replaced with hp1.
  1220. The checks will still be ok, because all required information
  1221. will also be in hp1 then.
  1222. }
  1223. if (taicpu(hp1).ops > I) and
  1224. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[I]^.reg) and
  1225. { prevent certain combinations on thumb(2), this is only a safe approximation }
  1226. (not(GenerateThumbCode or GenerateThumb2Code) or
  1227. ((getsupreg(taicpu(p).oper[1]^.reg)<>RS_R13) and
  1228. (getsupreg(taicpu(p).oper[1]^.reg)<>RS_R15))
  1229. ) then
  1230. begin
  1231. DebugMsg('Peephole RedundantMovProcess done', hp1);
  1232. taicpu(hp1).oper[I]^.reg := taicpu(p).oper[1]^.reg;
  1233. if p<>hp1 then
  1234. begin
  1235. asml.remove(p);
  1236. p.free;
  1237. p:=hp1;
  1238. Result:=true;
  1239. end;
  1240. end;
  1241. end;
  1242. { Fold the very common sequence
  1243. mov regA, regB
  1244. ldr* regA, [regA]
  1245. to
  1246. ldr* regA, [regB]
  1247. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1248. }
  1249. if (taicpu(p).opcode = A_MOV) and
  1250. (taicpu(p).ops = 2) and
  1251. (taicpu(p).oper[1]^.typ = top_reg) and
  1252. (taicpu(p).oppostfix = PF_NONE) and
  1253. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1254. MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1255. (taicpu(hp1).oper[1]^.typ = top_ref) and
  1256. { We can change the base register only when the instruction uses AM_OFFSET }
  1257. ((taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1258. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1259. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1260. ) and
  1261. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
  1262. // Make sure that Thumb code doesn't propagate a high register into a reference
  1263. ((GenerateThumbCode and
  1264. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)) or
  1265. (not GenerateThumbCode)) and
  1266. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1267. begin
  1268. DebugMsg('Peephole MovLdr2Ldr done', hp1);
  1269. if (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1270. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1271. taicpu(hp1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1272. if taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1273. taicpu(hp1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1274. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1275. if Assigned(dealloc) then
  1276. begin
  1277. asml.remove(dealloc);
  1278. asml.InsertAfter(dealloc,hp1);
  1279. end;
  1280. GetNextInstruction(p, hp1);
  1281. asml.remove(p);
  1282. p.free;
  1283. p:=hp1;
  1284. result:=true;
  1285. end;
  1286. { This folds shifterops into following instructions
  1287. mov r0, r1, lsl #8
  1288. add r2, r3, r0
  1289. to
  1290. add r2, r3, r1, lsl #8
  1291. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1292. }
  1293. if (taicpu(p).opcode = A_MOV) and
  1294. (taicpu(p).ops = 3) and
  1295. (taicpu(p).oper[1]^.typ = top_reg) and
  1296. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1297. (taicpu(p).oppostfix = PF_NONE) and
  1298. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1299. MatchInstruction(hp1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1300. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1301. A_CMP, A_CMN],
  1302. [taicpu(p).condition], [PF_None]) and
  1303. (not ((GenerateThumb2Code) and
  1304. (taicpu(hp1).opcode in [A_SBC]) and
  1305. (((taicpu(hp1).ops=3) and
  1306. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^.reg)) or
  1307. ((taicpu(hp1).ops=2) and
  1308. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^.reg))))) and
  1309. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) and
  1310. (taicpu(hp1).ops >= 2) and
  1311. {Currently we can't fold into another shifterop}
  1312. (taicpu(hp1).oper[taicpu(hp1).ops-1]^.typ = top_reg) and
  1313. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1314. NR_DEFAULTFLAGS for modification}
  1315. (
  1316. {Everything is fine if we don't use RRX}
  1317. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1318. (
  1319. {If it is RRX, then check if we're just accessing the next instruction}
  1320. GetNextInstruction(p, hp2) and
  1321. (hp1 = hp2)
  1322. )
  1323. ) and
1324. { reg1 must not be modified in between }
  1325. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
1326. { The shifterop can contain a register, which must not be modified }
  1327. (
  1328. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1329. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hp1))
  1330. ) and
  1331. (
  1332. {Only ONE of the two src operands is allowed to match}
  1333. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[taicpu(hp1).ops-2]^) xor
  1334. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[taicpu(hp1).ops-1]^)
  1335. ) then
  1336. begin
  1337. if taicpu(hp1).opcode in [A_TST, A_TEQ, A_CMN] then
  1338. I2:=0
  1339. else
  1340. I2:=1;
  1341. for I:=I2 to taicpu(hp1).ops-1 do
  1342. if MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[I]^.reg) then
  1343. begin
1344. { If the operand matched the second op from the RIGHT,
1345. we have to swap the operands; this will not happen for CMP,
1346. where we're only evaluating the rightmost operand
  1347. }
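{ e.g. folding mov r0,r1,lsl #2 into sub r2,r0,r3 (with r0 dead afterwards) yields
  rsb r2,r3,r1,lsl #2, because the shifter operand has to be the last operand }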
  1348. if I <> taicpu(hp1).ops-1 then
  1349. begin
1350. {The SUB-type opcodes need to be changed when we swap operands}
  1351. case taicpu(hp1).opcode of
  1352. A_SUB: tempop:=A_RSB;
  1353. A_SBC: tempop:=A_RSC;
  1354. A_RSB: tempop:=A_SUB;
  1355. A_RSC: tempop:=A_SBC;
  1356. else tempop:=taicpu(hp1).opcode;
  1357. end;
  1358. if taicpu(hp1).ops = 3 then
  1359. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1360. taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[2]^.reg,
  1361. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1362. else
  1363. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1364. taicpu(hp1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1365. taicpu(p).oper[2]^.shifterop^);
  1366. end
  1367. else
  1368. if taicpu(hp1).ops = 3 then
  1369. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hp1).opcode,
  1370. taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[1]^.reg,
  1371. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1372. else
  1373. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hp1).opcode,
  1374. taicpu(hp1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1375. taicpu(p).oper[2]^.shifterop^);
  1376. asml.insertbefore(hp2, hp1);
  1377. GetNextInstruction(p, hp2);
  1378. asml.remove(p);
  1379. asml.remove(hp1);
  1380. p.free;
  1381. hp1.free;
  1382. p:=hp2;
  1383. DebugMsg('Peephole FoldShiftProcess done', p);
  1384. Result:=true;
  1385. break;
  1386. end;
  1387. end;
  1388. {
  1389. Fold
  1390. mov r1, r1, lsl #2
  1391. ldr/ldrb r0, [r0, r1]
  1392. to
  1393. ldr/ldrb r0, [r0, r1, lsl #2]
  1394. XXX: This still needs some work, as we quite often encounter something like
  1395. mov r1, r2, lsl #2
  1396. add r2, r3, #imm
  1397. ldr r0, [r2, r1]
  1398. which can't be folded because r2 is overwritten between the shift and the ldr.
1399. We could try to shuffle the registers around and fold it into:
  1400. add r1, r3, #imm
  1401. ldr r0, [r1, r2, lsl #2]
  1402. }
  1403. if (not(GenerateThumbCode)) and
  1404. (taicpu(p).opcode = A_MOV) and
  1405. (taicpu(p).ops = 3) and
  1406. (taicpu(p).oper[1]^.typ = top_reg) and
  1407. (taicpu(p).oper[2]^.typ = top_shifterop) and
1408. { RRX is tough to handle, because it requires tracking the C-Flag,
1409. and it is also extremely unlikely to be emitted this way }
  1410. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1411. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1412. { thumb2 allows only lsl #0..#3 }
  1413. (not(GenerateThumb2Code) or
  1414. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1415. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1416. )
  1417. ) and
  1418. (taicpu(p).oppostfix = PF_NONE) and
  1419. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1420. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1421. (MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1422. (GenerateThumb2Code and
  1423. MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1424. ) and
  1425. (
  1426. {If this is address by offset, one of the two registers can be used}
  1427. ((taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1428. (
  1429. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1430. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1431. )
  1432. ) or
  1433. {For post and preindexed only the index register can be used}
  1434. ((taicpu(hp1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1435. (
  1436. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1437. (taicpu(hp1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1438. ) and
  1439. (not GenerateThumb2Code)
  1440. )
  1441. ) and
  1442. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1443. (taicpu(hp1).oper[1]^.ref^.index<>NR_NO) and
  1444. (taicpu(hp1).oper[1]^.ref^.base<>NR_NO) and
  1445. { Only fold if there isn't another shifterop already, and offset is zero. }
  1446. (taicpu(hp1).oper[1]^.ref^.offset = 0) and
  1447. (taicpu(hp1).oper[1]^.ref^.shiftmode = SM_None) and
  1448. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
  1449. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1450. begin
1451. { If the register we want to shift resides in base, we need to swap base and index }
  1452. if (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1453. taicpu(hp1).oper[1]^.ref^.base := taicpu(hp1).oper[1]^.ref^.index;
  1454. taicpu(hp1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1455. taicpu(hp1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1456. taicpu(hp1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1457. DebugMsg('Peephole FoldShiftLdrStr done', hp1);
  1458. GetNextInstruction(p, hp1);
  1459. asml.remove(p);
  1460. p.free;
  1461. p:=hp1;
  1462. Result:=true;
  1463. end;
  1464. {
1465. Often we see shifts and then a superfluous mov to another register.
1466. In the future this might be handled in RedundantMovProcess when it uses RegisterTracking.
  1467. }
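{ e.g. mov r1,r2,lsl #3 ; ... ; mov r0,r1 (with r1 dead afterwards) can presumably be
  rewritten by RemoveSuperfluousMove into mov r0,r2,lsl #3 }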
  1468. if (taicpu(p).opcode = A_MOV) and
  1469. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1470. RemoveSuperfluousMove(p, hp1, 'MovMov2Mov') then
  1471. Result:=true;
  1472. end;
  1473. A_ADD,
  1474. A_ADC,
  1475. A_RSB,
  1476. A_RSC,
  1477. A_SUB,
  1478. A_SBC,
  1479. A_AND,
  1480. A_BIC,
  1481. A_EOR,
  1482. A_ORR,
  1483. A_MLA,
  1484. A_MLS,
  1485. A_MUL:
  1486. begin
  1487. {
  1488. optimize
  1489. and reg2,reg1,const1
  1490. ...
  1491. }
  1492. if (taicpu(p).opcode = A_AND) and
  1493. (taicpu(p).ops>2) and
  1494. (taicpu(p).oper[1]^.typ = top_reg) and
  1495. (taicpu(p).oper[2]^.typ = top_const) then
  1496. begin
  1497. {
  1498. change
  1499. and reg2,reg1,const1
  1500. ...
  1501. and reg3,reg2,const2
  1502. to
  1503. and reg3,reg1,(const1 and const2)
  1504. }
  1505. if GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1506. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_None]) and
  1507. RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
  1508. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1509. (taicpu(hp1).oper[2]^.typ = top_const) then
  1510. begin
  1511. if not(RegUsedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) then
  1512. begin
  1513. DebugMsg('Peephole AndAnd2And done', p);
  1514. taicpu(p).loadConst(2,taicpu(p).oper[2]^.val and taicpu(hp1).oper[2]^.val);
  1515. taicpu(p).oppostfix:=taicpu(hp1).oppostfix;
  1516. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1517. asml.remove(hp1);
  1518. hp1.free;
  1519. Result:=true;
  1520. end
  1521. else if not(RegUsedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1522. begin
  1523. DebugMsg('Peephole AndAnd2And done', hp1);
  1524. taicpu(hp1).loadConst(2,taicpu(p).oper[2]^.val and taicpu(hp1).oper[2]^.val);
  1525. taicpu(hp1).oppostfix:=taicpu(p).oppostfix;
  1526. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1527. GetNextInstruction(p, hp1);
  1528. asml.remove(p);
  1529. p.free;
  1530. p:=hp1;
  1531. Result:=true;
  1532. end;
  1533. end
  1534. {
  1535. change
  1536. and reg2,reg1,$xxxxxxFF
  1537. strb reg2,[...]
  1538. dealloc reg2
  1539. to
  1540. strb reg1,[...]
  1541. }
  1542. else if ((taicpu(p).oper[2]^.val and $FF) = $FF) and
  1543. MatchInstruction(p, A_AND, [C_None], [PF_None]) and
  1544. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1545. MatchInstruction(hp1, A_STR, [C_None], [PF_B]) and
  1546. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
1547. { the reference in strb must not use reg2 }
  1548. not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
1549. { reg1 must not be modified in between }
  1550. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1551. begin
  1552. DebugMsg('Peephole AndStrb2Strb done', p);
  1553. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1554. GetNextInstruction(p, hp1);
  1555. asml.remove(p);
  1556. p.free;
  1557. p:=hp1;
  1558. result:=true;
  1559. end
  1560. {
  1561. change
  1562. and reg2,reg1,255
  1563. uxtb/uxth reg3,reg2
  1564. dealloc reg2
  1565. to
  1566. and reg3,reg1,x
  1567. }
  1568. else if (taicpu(p).oper[2]^.val = $FF) and
  1569. MatchInstruction(p, A_AND, [C_None], [PF_None]) and
  1570. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1571. MatchInstruction(hp1, [A_UXTB,A_UXTH], [C_None], [PF_None]) and
  1572. (taicpu(hp1).ops = 2) and
  1573. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
  1574. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
1575. { reg1 must not be modified in between }
  1576. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1577. begin
  1578. DebugMsg('Peephole AndUxt2And done', p);
  1579. taicpu(hp1).opcode:=A_AND;
  1580. taicpu(hp1).ops:=3;
  1581. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1582. taicpu(hp1).loadconst(2,255);
  1583. GetNextInstruction(p,hp1);
  1584. asml.remove(p);
  1585. p.Free;
  1586. p:=hp1;
  1587. result:=true;
  1588. end
  1589. {
  1590. from
  1591. and reg1,reg0,2^n-1
  1592. mov reg2,reg1, lsl imm1
  1593. (mov reg3,reg2, lsr/asr imm1)
  1594. remove either the and or the lsl/xsr sequence if possible
  1595. }
  1596. else if cutils.ispowerof2(taicpu(p).oper[2]^.val+1,i) and
  1597. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1598. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1599. (taicpu(hp1).ops=3) and
  1600. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1601. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1602. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) and
  1603. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1604. RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) then
  1605. begin
  1606. {
  1607. and reg1,reg0,2^n-1
  1608. mov reg2,reg1, lsl imm1
  1609. mov reg3,reg2, lsr/asr imm1
  1610. =>
  1611. and reg1,reg0,2^n-1
1612. if (lsr and n+imm1<=32) or (asr and n+imm1<32), i.e. the shifts drop none of the masked bits
  1613. }
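{ worked example: and r1,r0,#255 ; mov r2,r1,lsl #16 ; mov r3,r2,lsr #16:
  the shifts cannot drop any of the 8 masked bits (8+16<=32), so both movs are
  removed and the and writes reg3 directly }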
  1614. if GetNextInstructionUsingReg(hp1,hp2,taicpu(p).oper[0]^.reg) and
  1615. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1616. (taicpu(hp2).ops=3) and
  1617. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1618. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1619. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) and
  1620. (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1621. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=taicpu(hp2).oper[2]^.shifterop^.shiftimm) and
  1622. RegEndOfLife(taicpu(hp1).oper[0]^.reg,taicpu(hp2)) and
  1623. ((i<32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) or
  1624. ((i=32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) and
  1625. (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSR))) then
  1626. begin
  1627. DebugMsg('Peephole AndLslXsr2And done', p);
  1628. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1629. asml.Remove(hp1);
  1630. asml.Remove(hp2);
  1631. hp1.free;
  1632. hp2.free;
  1633. result:=true;
  1634. end
  1635. {
  1636. and reg1,reg0,2^n-1
  1637. mov reg2,reg1, lsl imm1
  1638. =>
  1639. mov reg2,reg0, lsl imm1
1640. if imm1>32-n, i.e. the lsl discards every bit that the and would clear
  1641. }
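{ worked example: and r1,r0,#255 ; mov r2,r1,lsl #28: the lsl keeps only the low
  4 bits of r1, which the and leaves untouched anyway, so mov r2,r0,lsl #28 suffices }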
  1642. else if (i>32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) and
  1643. not(RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) then
  1644. begin
  1645. DebugMsg('Peephole AndLsl2Lsl done', p);
  1646. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1647. GetNextInstruction(p, hp1);
  1648. asml.Remove(p);
  1649. p.free;
  1650. p:=hp1;
  1651. result:=true;
  1652. end
  1653. end;
  1654. end;
  1655. {
  1656. change
  1657. add/sub reg2,reg1,const1
  1658. str/ldr reg3,[reg2,const2]
  1659. dealloc reg2
  1660. to
  1661. str/ldr reg3,[reg1,const2+/-const1]
  1662. }
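{ e.g. add r1,r2,#4 ; ldr r0,[r1,#8] (with r1 dead afterwards) becomes ldr r0,[r2,#12],
  provided the combined offset is still encodable for the load/store in question }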
  1663. if (not GenerateThumbCode) and
  1664. (taicpu(p).opcode in [A_ADD,A_SUB]) and
  1665. (taicpu(p).ops>2) and
  1666. (taicpu(p).oper[1]^.typ = top_reg) and
  1667. (taicpu(p).oper[2]^.typ = top_const) then
  1668. begin
  1669. hp1:=p;
  1670. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
  1671. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  1672. MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
  1673. (taicpu(hp1).oper[1]^.typ = top_ref) and
  1674. (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
  1675. { don't optimize if the register is stored/overwritten }
  1676. (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
  1677. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  1678. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
1679. { new offset must be valid: either in the 8 or 12 bit range, depending on the
  1680. ldr postfix }
  1681. (((taicpu(p).opcode=A_ADD) and
  1682. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  1683. ) or
  1684. ((taicpu(p).opcode=A_SUB) and
  1685. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  1686. )
  1687. ) do
  1688. begin
1689. { neither reg1 nor reg2 may be changed in between }
  1690. if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
  1691. RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
  1692. break;
1693. { reg2 must be either overwritten by the ldr or deallocated afterwards }
  1694. if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
  1695. assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
  1696. begin
  1697. { remember last instruction }
  1698. hp2:=hp1;
  1699. DebugMsg('Peephole Add/SubLdr2Ldr done', p);
  1700. hp1:=p;
  1701. { fix all ldr/str }
  1702. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
  1703. begin
  1704. taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
  1705. if taicpu(p).opcode=A_ADD then
  1706. inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
  1707. else
  1708. dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
  1709. if hp1=hp2 then
  1710. break;
  1711. end;
  1712. GetNextInstruction(p,hp1);
  1713. asml.remove(p);
  1714. p.free;
  1715. p:=hp1;
  1716. result:=true;
  1717. break;
  1718. end;
  1719. end;
  1720. end;
  1721. {
  1722. change
  1723. add reg1, ...
  1724. mov reg2, reg1
  1725. to
  1726. add reg2, ...
  1727. }
  1728. if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1729. (taicpu(p).ops>=3) and
  1730. RemoveSuperfluousMove(p, hp1, 'DataMov2Data') then
  1731. Result:=true;
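{ LookForPreindexedPattern presumably merges an add/sub of a base register into a
  following load/store, roughly add r1,r1,#4 ; ldr r0,[r1] -> ldr r0,[r1,#4]! }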
  1732. if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
  1733. LookForPreindexedPattern(taicpu(p)) then
  1734. begin
  1735. GetNextInstruction(p,hp1);
  1736. DebugMsg('Peephole Add/Sub to Preindexed done', p);
  1737. asml.remove(p);
  1738. p.free;
  1739. p:=hp1;
  1740. Result:=true;
  1741. end;
  1742. {
  1743. Turn
  1744. mul reg0, z,w
  1745. sub/add x, y, reg0
  1746. dealloc reg0
  1747. into
  1748. mls/mla x,z,w,y
  1749. }
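{ e.g. mul r3,r1,r2 ; add r0,r4,r3 (with r3 dead afterwards) becomes mla r0,r1,r2,r4 }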
  1750. if MatchInstruction(p, [A_MUL], [C_None], [PF_None]) and
  1751. (taicpu(p).ops=3) and
  1752. (taicpu(p).oper[0]^.typ = top_reg) and
  1753. (taicpu(p).oper[1]^.typ = top_reg) and
  1754. (taicpu(p).oper[2]^.typ = top_reg) and
  1755. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1756. MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
  1757. (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
  1758. (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
  1759. (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
  1760. ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
1761. // CPUs before ARMv6 advise against using the same register for Rd and Rm in MLA.
  1762. // TODO: A workaround would be to swap Rm and Rs
  1763. (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
  1764. (((taicpu(hp1).ops=3) and
  1765. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1766. ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
  1767. (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
  1768. ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1769. (taicpu(hp1).opcode=A_ADD) and
  1770. (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
  1771. ((taicpu(hp1).ops=2) and
  1772. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1773. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1774. (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
  1775. begin
  1776. if taicpu(hp1).opcode=A_ADD then
  1777. begin
  1778. taicpu(hp1).opcode:=A_MLA;
  1779. if taicpu(hp1).ops=3 then
  1780. begin
  1781. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
  1782. oldreg:=taicpu(hp1).oper[2]^.reg
  1783. else
  1784. oldreg:=taicpu(hp1).oper[1]^.reg;
  1785. end
  1786. else
  1787. oldreg:=taicpu(hp1).oper[0]^.reg;
  1788. taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
  1789. taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
  1790. taicpu(hp1).loadreg(3,oldreg);
  1791. DebugMsg('MulAdd2MLA done', p);
  1792. taicpu(hp1).ops:=4;
  1793. asml.remove(p);
  1794. p.free;
  1795. p:=hp1;
  1796. end
  1797. else
  1798. begin
  1799. taicpu(hp1).opcode:=A_MLS;
  1800. taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
  1801. if taicpu(hp1).ops=2 then
  1802. taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
  1803. else
  1804. taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
  1805. taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
  1806. DebugMsg('MulSub2MLS done', p);
  1807. taicpu(hp1).ops:=4;
  1808. asml.remove(p);
  1809. p.free;
  1810. p:=hp1;
  1811. end;
  1812. result:=true;
  1813. end
  1814. end;
  1815. {$ifdef dummy}
  1816. A_MVN:
  1817. begin
  1818. {
  1819. change
  1820. mvn reg2,reg1
  1821. and reg3,reg4,reg2
  1822. dealloc reg2
  1823. to
  1824. bic reg3,reg4,reg1
  1825. }
  1826. if (taicpu(p).oper[1]^.typ = top_reg) and
  1827. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1828. MatchInstruction(hp1,A_AND,[],[]) and
  1829. (((taicpu(hp1).ops=3) and
  1830. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1831. (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
  1832. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
  1833. ((taicpu(hp1).ops=2) and
  1834. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1835. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1836. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
  1837. { reg1 might not be modified inbetween }
  1838. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1839. begin
  1840. DebugMsg('Peephole MvnAnd2Bic done', p);
  1841. taicpu(hp1).opcode:=A_BIC;
  1842. if taicpu(hp1).ops=3 then
  1843. begin
  1844. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1845. taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
  1846. taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
  1847. end
  1848. else
  1849. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1850. GetNextInstruction(p, hp1);
  1851. asml.remove(p);
  1852. p.free;
  1853. p:=hp1;
  1854. end;
  1855. end;
  1856. {$endif dummy}
  1857. A_UXTB:
  1858. begin
  1859. {
  1860. change
  1861. uxtb reg2,reg1
  1862. strb reg2,[...]
  1863. dealloc reg2
  1864. to
  1865. strb reg1,[...]
  1866. }
  1867. if MatchInstruction(p, taicpu(p).opcode, [C_None], [PF_None]) and
  1868. (taicpu(p).ops=2) and
  1869. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1870. MatchInstruction(hp1, A_STR, [C_None], [PF_B]) and
  1871. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
1872. { the reference in strb must not use reg2 }
  1873. not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
1874. { reg1 must not be modified in between }
  1875. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1876. begin
  1877. DebugMsg('Peephole UxtbStrb2Strb done', p);
  1878. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1879. GetNextInstruction(p,hp2);
  1880. asml.remove(p);
  1881. p.free;
  1882. p:=hp2;
  1883. result:=true;
  1884. end
  1885. {
  1886. change
  1887. uxtb reg2,reg1
  1888. uxth reg3,reg2
  1889. dealloc reg2
  1890. to
  1891. uxtb reg3,reg1
  1892. }
  1893. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1894. (taicpu(p).ops=2) and
  1895. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1896. MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
  1897. (taicpu(hp1).ops = 2) and
  1898. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1899. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
1900. { reg1 must not be modified in between }
  1901. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1902. begin
  1903. DebugMsg('Peephole UxtbUxth2Uxtb done', p);
  1904. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1905. asml.remove(hp1);
  1906. hp1.free;
  1907. result:=true;
  1908. end
  1909. {
  1910. change
  1911. uxtb reg2,reg1
  1912. uxtb reg3,reg2
  1913. dealloc reg2
  1914. to
  1915. uxtb reg3,reg1
  1916. }
  1917. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1918. (taicpu(p).ops=2) and
  1919. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1920. MatchInstruction(hp1, A_UXTB, [C_None], [PF_None]) and
  1921. (taicpu(hp1).ops = 2) and
  1922. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1923. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
1924. { reg1 must not be modified in between }
  1925. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1926. begin
  1927. DebugMsg('Peephole UxtbUxtb2Uxtb done', p);
  1928. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1929. asml.remove(hp1);
  1930. hp1.free;
  1931. result:=true;
  1932. end
  1933. {
  1934. change
  1935. uxtb reg2,reg1
  1936. and reg3,reg2,#0x*FF
  1937. dealloc reg2
  1938. to
  1939. uxtb reg3,reg1
  1940. }
  1941. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1942. (taicpu(p).ops=2) and
  1943. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1944. MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
  1945. (taicpu(hp1).ops=3) and
  1946. (taicpu(hp1).oper[2]^.typ=top_const) and
  1947. ((taicpu(hp1).oper[2]^.val and $FF)=$FF) and
  1948. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1949. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
1950. { reg1 must not be modified in between }
  1951. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1952. begin
  1953. DebugMsg('Peephole UxtbAndImm2Uxtb done', p);
  1954. taicpu(hp1).opcode:=A_UXTB;
  1955. taicpu(hp1).ops:=2;
  1956. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1957. GetNextInstruction(p,hp2);
  1958. asml.remove(p);
  1959. p.free;
  1960. p:=hp2;
  1961. result:=true;
  1962. end
  1963. else if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1964. RemoveSuperfluousMove(p, hp1, 'UxtbMov2Data') then
  1965. Result:=true;
  1966. end;
  1967. A_UXTH:
  1968. begin
  1969. {
  1970. change
  1971. uxth reg2,reg1
  1972. strh reg2,[...]
  1973. dealloc reg2
  1974. to
  1975. strh reg1,[...]
  1976. }
  1977. if MatchInstruction(p, taicpu(p).opcode, [C_None], [PF_None]) and
  1978. (taicpu(p).ops=2) and
  1979. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1980. MatchInstruction(hp1, A_STR, [C_None], [PF_H]) and
  1981. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
1982. { the reference in strh must not use reg2 }
  1983. not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
1984. { reg1 must not be modified in between }
  1985. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1986. begin
  1987. DebugMsg('Peephole UXTHStrh2Strh done', p);
  1988. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1989. GetNextInstruction(p, hp1);
  1990. asml.remove(p);
  1991. p.free;
  1992. p:=hp1;
  1993. result:=true;
  1994. end
  1995. {
  1996. change
  1997. uxth reg2,reg1
  1998. uxth reg3,reg2
  1999. dealloc reg2
  2000. to
  2001. uxth reg3,reg1
  2002. }
  2003. else if MatchInstruction(p, A_UXTH, [C_None], [PF_None]) and
  2004. (taicpu(p).ops=2) and
  2005. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  2006. MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
  2007. (taicpu(hp1).ops=2) and
  2008. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  2009. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
2010. { reg1 must not be modified in between }
  2011. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  2012. begin
  2013. DebugMsg('Peephole UxthUxth2Uxth done', p);
  2014. taicpu(hp1).opcode:=A_UXTH;
  2015. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  2016. GetNextInstruction(p, hp1);
  2017. asml.remove(p);
  2018. p.free;
  2019. p:=hp1;
  2020. result:=true;
  2021. end
  2022. {
  2023. change
  2024. uxth reg2,reg1
  2025. and reg3,reg2,#65535
  2026. dealloc reg2
  2027. to
  2028. uxth reg3,reg1
  2029. }
  2030. else if MatchInstruction(p, A_UXTH, [C_None], [PF_None]) and
  2031. (taicpu(p).ops=2) and
  2032. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  2033. MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
  2034. (taicpu(hp1).ops=3) and
  2035. (taicpu(hp1).oper[2]^.typ=top_const) and
  2036. ((taicpu(hp1).oper[2]^.val and $FFFF)=$FFFF) and
  2037. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  2038. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
2039. { reg1 must not be modified in between }
  2040. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  2041. begin
  2042. DebugMsg('Peephole UxthAndImm2Uxth done', p);
  2043. taicpu(hp1).opcode:=A_UXTH;
  2044. taicpu(hp1).ops:=2;
  2045. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  2046. GetNextInstruction(p, hp1);
  2047. asml.remove(p);
  2048. p.free;
  2049. p:=hp1;
  2050. result:=true;
  2051. end
  2052. else if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  2053. RemoveSuperfluousMove(p, hp1, 'UxthMov2Data') then
  2054. Result:=true;
  2055. end;
  2056. A_CMP:
  2057. begin
  2058. {
  2059. change
  2060. cmp reg,const1
  2061. moveq reg,const1
  2062. movne reg,const2
  2063. to
  2064. cmp reg,const1
  2065. movne reg,const2
  2066. }
  2067. if (taicpu(p).oper[1]^.typ = top_const) and
  2068. GetNextInstruction(p, hp1) and
  2069. MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  2070. (taicpu(hp1).oper[1]^.typ = top_const) and
  2071. GetNextInstruction(hp1, hp2) and
  2072. MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
2073. (taicpu(hp2).oper[1]^.typ = top_const) then
  2074. begin
  2075. Result:=RemoveRedundantMove(p, hp1, asml) or Result;
  2076. Result:=RemoveRedundantMove(p, hp2, asml) or Result;
  2077. end;
  2078. end;
  2079. A_STM:
  2080. begin
  2081. {
  2082. change
  2083. stmfd r13!,[r14]
  2084. sub r13,r13,#4
  2085. bl abc
  2086. add r13,r13,#4
  2087. ldmfd r13!,[r15]
  2088. into
  2089. b abc
  2090. }
  2091. if not(ts_thumb_interworking in current_settings.targetswitches) and
  2092. MatchInstruction(p, A_STM, [C_None], [PF_FD]) and
  2093. GetNextInstruction(p, hp1) and
  2094. GetNextInstruction(hp1, hp2) and
  2095. SkipEntryExitMarker(hp2, hp2) and
  2096. GetNextInstruction(hp2, hp3) and
  2097. SkipEntryExitMarker(hp3, hp3) and
  2098. GetNextInstruction(hp3, hp4) and
  2099. (taicpu(p).oper[0]^.typ = top_ref) and
  2100. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2101. (taicpu(p).oper[0]^.ref^.base=NR_NO) and
  2102. (taicpu(p).oper[0]^.ref^.offset=0) and
  2103. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2104. (taicpu(p).oper[1]^.typ = top_regset) and
  2105. (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
  2106. MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
  2107. (taicpu(hp1).oper[0]^.typ = top_reg) and
  2108. (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
  2109. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
  2110. (taicpu(hp1).oper[2]^.typ = top_const) and
  2111. MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
  2112. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
  2113. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
  2114. MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
  2115. MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
  2116. (taicpu(hp2).oper[0]^.typ = top_ref) and
  2117. MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
  2118. MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
  2119. (taicpu(hp4).oper[1]^.typ = top_regset) and
  2120. (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
  2121. begin
  2122. asml.Remove(p);
  2123. asml.Remove(hp1);
  2124. asml.Remove(hp3);
  2125. asml.Remove(hp4);
  2126. taicpu(hp2).opcode:=A_B;
  2127. p.free;
  2128. hp1.free;
  2129. hp3.free;
  2130. hp4.free;
  2131. p:=hp2;
  2132. DebugMsg('Peephole Bl2B done', p);
  2133. end;
  2134. end;
  2135. A_VMOV:
  2136. begin
  2137. {
  2138. change
  2139. vmov reg0,reg1,reg2
  2140. vmov reg1,reg2,reg0
  2141. into
  2142. vmov reg0,reg1,reg2
  2143. can be applied regardless if reg0 or reg2 is the vfp register
  2144. }
  2145. if (taicpu(p).ops = 3) and
  2146. GetNextInstruction(p, hp1) and
  2147. MatchInstruction(hp1, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  2148. (taicpu(hp1).ops = 3) and
  2149. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[2]^) and
  2150. MatchOperand(taicpu(p).oper[1]^, taicpu(hp1).oper[0]^) and
  2151. MatchOperand(taicpu(p).oper[2]^, taicpu(hp1).oper[1]^) then
  2152. begin
  2153. asml.Remove(hp1);
  2154. hp1.free;
  2155. DebugMsg('Peephole VMovVMov2VMov done', p);
  2156. end;
  2157. end;
  2158. A_VLDR,
  2159. A_VADD,
  2160. A_VMUL,
  2161. A_VDIV,
  2162. A_VSUB,
  2163. A_VSQRT,
  2164. A_VNEG,
  2165. A_VCVT,
  2166. A_VABS:
  2167. begin
  2168. if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  2169. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp') then
  2170. Result:=true;
  2171. end
  2172. else
  2173. ;
  2174. end;
  2175. end;
  2176. else
  2177. ;
  2178. end;
  2179. end;
2180. { instructions modifying the CPSR may only be the last instruction of a conditionalised block }
  2181. function MustBeLast(p : tai) : boolean;
  2182. begin
  2183. Result:=(p.typ=ait_instruction) and
  2184. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  2185. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  2186. (taicpu(p).oppostfix=PF_S));
  2187. end;
  2188. procedure TCpuAsmOptimizer.PeepHoleOptPass2;
  2189. var
  2190. p,hp1,hp2: tai;
  2191. l : longint;
  2192. condition : tasmcond;
  2193. hp3: tai;
  2194. WasLast: boolean;
  2195. { UsedRegs, TmpUsedRegs: TRegSet; }
  2196. begin
  2197. p := BlockStart;
  2198. { UsedRegs := []; }
  2199. while (p <> BlockEnd) Do
  2200. begin
  2201. { UpdateUsedRegs(UsedRegs, tai(p.next)); }
  2202. case p.Typ Of
  2203. Ait_Instruction:
  2204. begin
  2205. case taicpu(p).opcode Of
  2206. A_B:
  2207. if (taicpu(p).condition<>C_None) and
  2208. not(GenerateThumbCode) then
  2209. begin
  2210. { check for
  2211. Bxx xxx
  2212. <several instructions>
  2213. xxx:
  2214. }
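{ if every instruction up to the label can be conditionalised, the branch is dropped,
  e.g. beq .L1 ; add r0,r0,#1 ; .L1: becomes addne r0,r0,#1 (Bcc2Cond below) }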
  2215. l:=0;
  2216. WasLast:=False;
  2217. GetNextInstruction(p, hp1);
  2218. while assigned(hp1) and
  2219. (l<=4) and
  2220. CanBeCond(hp1) and
  2221. { stop on labels }
  2222. not(hp1.typ=ait_label) and
  2223. { avoid that we cannot recognize the case BccB2Cond }
  2224. not((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B)) do
  2225. begin
  2226. inc(l);
  2227. if MustBeLast(hp1) then
  2228. begin
  2229. WasLast:=True;
  2230. GetNextInstruction(hp1,hp1);
  2231. break;
  2232. end
  2233. else
  2234. GetNextInstruction(hp1,hp1);
  2235. end;
  2236. if assigned(hp1) then
  2237. begin
  2238. if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2239. begin
  2240. if (l<=4) and (l>0) then
  2241. begin
  2242. condition:=inverse_cond(taicpu(p).condition);
  2243. hp2:=p;
  2244. GetNextInstruction(p,hp1);
  2245. p:=hp1;
  2246. repeat
  2247. if hp1.typ=ait_instruction then
  2248. taicpu(hp1).condition:=condition;
  2249. if MustBeLast(hp1) then
  2250. begin
  2251. GetNextInstruction(hp1,hp1);
  2252. break;
  2253. end
  2254. else
  2255. GetNextInstruction(hp1,hp1);
  2256. until not(assigned(hp1)) or
  2257. not(CanBeCond(hp1)) or
  2258. (hp1.typ=ait_label);
  2259. DebugMsg('Peephole Bcc2Cond done',hp2);
2260. { delay removing the branch, otherwise GetNextInstruction could
2261. skip the label if the moved-away jump held its only
2262. remaining reference }
  2263. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2264. asml.remove(hp2);
  2265. hp2.free;
  2266. continue;
  2267. end;
  2268. end
  2269. else
2270. { do not perform further optimizations if there is an instruction
2271. in block #1 which cannot be optimized.
  2272. }
  2273. if not WasLast then
  2274. begin
  2275. { check further for
  2276. Bcc xxx
  2277. <several instructions 1>
  2278. B yyy
  2279. xxx:
  2280. <several instructions 2>
  2281. yyy:
  2282. }
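{ e.g. beq .L1 ; mov r0,#1 ; b .L2 ; .L1: mov r0,#0 ; .L2:
  becomes movne r0,#1 ; moveq r0,#0 with both branches removed (BccB2Cond below) }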
  2283. { hp2 points to jmp yyy }
  2284. hp2:=hp1;
  2285. { skip hp1 to xxx }
  2286. GetNextInstruction(hp1, hp1);
  2287. if assigned(hp2) and
  2288. assigned(hp1) and
  2289. (l<=3) and
  2290. (hp2.typ=ait_instruction) and
  2291. (taicpu(hp2).is_jmp) and
  2292. (taicpu(hp2).condition=C_None) and
  2293. { real label and jump, no further references to the
  2294. label are allowed }
  2295. (tasmlabel(taicpu(p).oper[0]^.ref^.symbol).getrefs=1) and
  2296. FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2297. begin
  2298. l:=0;
  2299. { skip hp1 to <several moves 2> }
  2300. GetNextInstruction(hp1, hp1);
  2301. while assigned(hp1) and
  2302. CanBeCond(hp1) and
  2303. (l<=3) do
  2304. begin
  2305. inc(l);
  2306. if MustBeLast(hp1) then
  2307. begin
  2308. GetNextInstruction(hp1, hp1);
  2309. break;
  2310. end
  2311. else
  2312. GetNextInstruction(hp1, hp1);
  2313. end;
  2314. { hp1 points to yyy: }
  2315. if assigned(hp1) and
  2316. FindLabel(tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol),hp1) then
  2317. begin
  2318. condition:=inverse_cond(taicpu(p).condition);
  2319. GetNextInstruction(p,hp1);
  2320. hp3:=p;
  2321. p:=hp1;
  2322. repeat
  2323. if hp1.typ=ait_instruction then
  2324. taicpu(hp1).condition:=condition;
  2325. if MustBeLast(hp1) then
  2326. begin
  2327. GetNextInstruction(hp1, hp1);
  2328. break;
  2329. end
  2330. else
  2331. GetNextInstruction(hp1, hp1);
  2332. until not(assigned(hp1)) or
  2333. not(CanBeCond(hp1)) or
  2334. ((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B));
  2335. { hp2 is still at jmp yyy }
  2336. GetNextInstruction(hp2,hp1);
  2337. { hp1 is now at xxx: }
  2338. condition:=inverse_cond(condition);
  2339. GetNextInstruction(hp1,hp1);
  2340. { hp1 is now at <several movs 2> }
  2341. repeat
  2342. if hp1.typ=ait_instruction then
  2343. taicpu(hp1).condition:=condition;
  2344. GetNextInstruction(hp1,hp1);
  2345. until not(assigned(hp1)) or
  2346. not(CanBeCond(hp1)) or
  2347. (hp1.typ=ait_label);
  2348. DebugMsg('Peephole BccB2Cond done',hp3);
  2349. { remove Bcc }
  2350. tasmlabel(taicpu(hp3).oper[0]^.ref^.symbol).decrefs;
  2351. asml.remove(hp3);
  2352. hp3.free;
  2353. { remove B }
  2354. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2355. asml.remove(hp2);
  2356. hp2.free;
  2357. continue;
  2358. end;
  2359. end;
  2360. end;
  2361. end;
  2362. end;
  2363. else
  2364. ;
  2365. end;
  2366. end;
  2367. else
  2368. ;
  2369. end;
  2370. p := tai(p.next)
  2371. end;
  2372. end;
  2373. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  2374. begin
  2375. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  2376. Result:=true
  2377. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  2378. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  2379. Result:=true
  2380. else
  2381. Result:=inherited RegInInstruction(Reg, p1);
  2382. end;
  2383. const
2384. { set of opcodes which might or do write to memory }
  2385. { TODO : extend armins.dat to contain r/w info }
  2386. opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
  2387. A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
2388. { adjust the register live information when swapping the two instructions p and hp1;
2389. they must directly follow one another }
  2390. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
  2391. procedure CheckLiveEnd(reg : tregister);
  2392. var
  2393. supreg : TSuperRegister;
  2394. regtype : TRegisterType;
  2395. begin
  2396. if reg=NR_NO then
  2397. exit;
  2398. regtype:=getregtype(reg);
  2399. supreg:=getsupreg(reg);
  2400. if (cg.rg[regtype].live_end[supreg]=hp1) and
  2401. RegInInstruction(reg,p) then
  2402. cg.rg[regtype].live_end[supreg]:=p;
  2403. end;
  2404. procedure CheckLiveStart(reg : TRegister);
  2405. var
  2406. supreg : TSuperRegister;
  2407. regtype : TRegisterType;
  2408. begin
  2409. if reg=NR_NO then
  2410. exit;
  2411. regtype:=getregtype(reg);
  2412. supreg:=getsupreg(reg);
  2413. if (cg.rg[regtype].live_start[supreg]=p) and
  2414. RegInInstruction(reg,hp1) then
  2415. cg.rg[regtype].live_start[supreg]:=hp1;
  2416. end;
  2417. var
  2418. i : longint;
  2419. r : TSuperRegister;
  2420. begin
  2421. { assumption: p is directly followed by hp1 }
  2422. { if live of any reg used by p starts at p and hp1 uses this register then
  2423. set live start to hp1 }
  2424. for i:=0 to p.ops-1 do
  2425. case p.oper[i]^.typ of
  2426. Top_Reg:
  2427. CheckLiveStart(p.oper[i]^.reg);
  2428. Top_Ref:
  2429. begin
  2430. CheckLiveStart(p.oper[i]^.ref^.base);
  2431. CheckLiveStart(p.oper[i]^.ref^.index);
  2432. end;
  2433. Top_Shifterop:
  2434. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2435. Top_RegSet:
  2436. for r:=RS_R0 to RS_R15 do
  2437. if r in p.oper[i]^.regset^ then
  2438. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2439. else
  2440. ;
  2441. end;
  2442. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2443. set live end to p }
  2444. for i:=0 to hp1.ops-1 do
  2445. case hp1.oper[i]^.typ of
  2446. Top_Reg:
  2447. CheckLiveEnd(hp1.oper[i]^.reg);
  2448. Top_Ref:
  2449. begin
  2450. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2451. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2452. end;
  2453. Top_Shifterop:
2454. CheckLiveEnd(hp1.oper[i]^.shifterop^.rs);
  2455. Top_RegSet:
  2456. for r:=RS_R0 to RS_R15 do
  2457. if r in hp1.oper[i]^.regset^ then
  2458. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2459. else
  2460. ;
  2461. end;
  2462. end;
  2463. function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
  2464. { TODO : schedule also forward }
  2465. { TODO : schedule distance > 1 }
  2466. { returns true if p might be a load of a pc relative tls offset }
  2467. function PossibleTLSLoad(const p: tai) : boolean;
  2468. begin
  2469. Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
  2470. (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
  2471. (taicpu(p).oper[1]^.ref^.index=NR_PC)));
  2472. end;
  2473. var
  2474. hp1,hp2,hp3,hp4,hp5,insertpos : tai;
  2475. list : TAsmList;
  2476. begin
  2477. result:=true;
  2478. list:=TAsmList.create;
  2479. p:=BlockStart;
  2480. while p<>BlockEnd Do
  2481. begin
  2482. if (p.typ=ait_instruction) and
  2483. GetNextInstruction(p,hp1) and
  2484. (hp1.typ=ait_instruction) and
  2485. (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
  2486. (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
2487. { for now we don't reschedule if the previous instruction potentially changes a memory location }
  2488. ( (not(taicpu(p).opcode in opcode_could_mem_write) and
  2489. not(RegModifiedByInstruction(NR_PC,p))
  2490. ) or
  2491. ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
  2492. ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
  2493. (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
  2494. (taicpu(hp1).oper[1]^.ref^.offset=0)
  2495. )
  2496. ) or
2497. { try to prove that the memory accesses don't overlap }
  2498. ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
  2499. (taicpu(p).oper[1]^.typ = top_ref) and
  2500. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  2501. (taicpu(p).oppostfix=PF_None) and
  2502. (taicpu(hp1).oppostfix=PF_None) and
  2503. (taicpu(p).oper[1]^.ref^.index=NR_NO) and
  2504. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
2505. { get operand sizes and check if the offset distance is large enough to ensure no overlap }
  2506. (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
  2507. )
  2508. )
  2509. ) and
  2510. GetNextInstruction(hp1,hp2) and
  2511. (hp2.typ=ait_instruction) and
  2512. { loaded register used by next instruction?
  2513. if we ever support labels (they could be skipped in theory) here, the gnu2 tls general-dynamic code could get broken (the ldr before
2514. the bl may not be scheduled away from the bl), so this case would need to be taken care of
  2515. }
  2516. (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
  2517. { loaded register not used by previous instruction? }
  2518. not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
  2519. { same condition? }
  2520. (taicpu(p).condition=taicpu(hp1).condition) and
2521. { first instruction must not change the register used as base }
  2522. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
  2523. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
  2524. ) and
2525. { first instruction must not change the register used as index }
  2526. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
  2527. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
  2528. ) and
2529. { if we modify the basereg AND the first instruction uses that reg, we cannot schedule }
  2530. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
  2531. not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
  2532. not(PossibleTLSLoad(p)) and
  2533. not(PossibleTLSLoad(hp1)) then
  2534. begin
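{ swap p and the load hp1: the loaded register is needed by the very next
  instruction hp2, so moving the load up puts one instruction between the
  load and its first use }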
  2535. hp3:=tai(p.Previous);
  2536. hp5:=tai(p.next);
  2537. asml.Remove(p);
2538. { if there are reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
2539. associated with p, move them together with p }
  2540. { before the instruction? }
  2541. { find reg allocs,deallocs and PIC labels }
  2542. while assigned(hp3) and (hp3.typ<>ait_instruction) do
  2543. begin
  2544. if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
  2545. RegInInstruction(tai_regalloc(hp3).reg,p) )
  2546. or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
  2547. then
  2548. begin
  2549. hp4:=hp3;
  2550. hp3:=tai(hp3.Previous);
  2551. asml.Remove(hp4);
  2552. list.Insert(hp4);
  2553. end
  2554. else
  2555. hp3:=tai(hp3.Previous);
  2556. end;
  2557. list.Concat(p);
  2558. SwapRegLive(taicpu(p),taicpu(hp1));
  2559. { after the instruction? }
  2560. { find reg deallocs and reg syncs }
  2561. while assigned(hp5) and (hp5.typ<>ait_instruction) do
  2562. begin
  2563. if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
  2564. RegInInstruction(tai_regalloc(hp5).reg,p) then
  2565. begin
  2566. hp4:=hp5;
  2567. hp5:=tai(hp5.next);
  2568. asml.Remove(hp4);
  2569. list.Concat(hp4);
  2570. end
  2571. else
  2572. hp5:=tai(hp5.Next);
  2573. end;
  2574. asml.Remove(hp1);
  2575. { if there are address labels associated with hp2, those must
  2576. stay with hp2 (e.g. for GOT-less PIC) }
  2577. insertpos:=hp2;
  2578. while assigned(hp2.previous) and
  2579. (tai(hp2.previous).typ<>ait_instruction) do
  2580. begin
  2581. hp2:=tai(hp2.previous);
  2582. if (hp2.typ=ait_label) and
  2583. (tai_label(hp2).labsym.typ=AT_ADDR) then
  2584. insertpos:=hp2;
  2585. end;
  2586. {$ifdef DEBUG_PREREGSCHEDULER}
  2587. asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
  2588. {$endif DEBUG_PREREGSCHEDULER}
  2589. asml.InsertBefore(hp1,insertpos);
  2590. asml.InsertListBefore(insertpos,list);
  2591. p:=tai(p.next);
  2592. end
  2593. else if p.typ=ait_instruction then
  2594. p:=hp1
  2595. else
  2596. p:=tai(p.next);
  2597. end;
  2598. list.Free;
  2599. end;
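{ shorten the IT block covering the instruction at p by one condition slot,
  e.g. ITE -> IT, ITTEE -> ITTE, and drop a plain IT completely; apparently
  meant to be called when an instruction covered by the IT block is removed }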
  2600. procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  2601. var
  2602. hp : tai;
  2603. l : longint;
  2604. begin
  2605. hp := tai(p.Previous);
  2606. l := 1;
  2607. while assigned(hp) and
  2608. (l <= 4) do
  2609. begin
  2610. if hp.typ=ait_instruction then
  2611. begin
  2612. if (taicpu(hp).opcode>=A_IT) and
  2613. (taicpu(hp).opcode <= A_ITTTT) then
  2614. begin
  2615. if (taicpu(hp).opcode = A_IT) and
  2616. (l=1) then
  2617. list.Remove(hp)
  2618. else
  2619. case taicpu(hp).opcode of
  2620. A_ITE:
  2621. if l=2 then taicpu(hp).opcode := A_IT;
  2622. A_ITT:
  2623. if l=2 then taicpu(hp).opcode := A_IT;
  2624. A_ITEE:
  2625. if l=3 then taicpu(hp).opcode := A_ITE;
  2626. A_ITTE:
  2627. if l=3 then taicpu(hp).opcode := A_ITT;
  2628. A_ITET:
  2629. if l=3 then taicpu(hp).opcode := A_ITE;
  2630. A_ITTT:
  2631. if l=3 then taicpu(hp).opcode := A_ITT;
  2632. A_ITEEE:
  2633. if l=4 then taicpu(hp).opcode := A_ITEE;
  2634. A_ITTEE:
  2635. if l=4 then taicpu(hp).opcode := A_ITTE;
  2636. A_ITETE:
  2637. if l=4 then taicpu(hp).opcode := A_ITET;
  2638. A_ITTTE:
  2639. if l=4 then taicpu(hp).opcode := A_ITTT;
  2640. A_ITEET:
  2641. if l=4 then taicpu(hp).opcode := A_ITEE;
  2642. A_ITTET:
  2643. if l=4 then taicpu(hp).opcode := A_ITTE;
  2644. A_ITETT:
  2645. if l=4 then taicpu(hp).opcode := A_ITET;
  2646. A_ITTTT:
  2647. begin
  2648. if l=4 then taicpu(hp).opcode := A_ITTT;
  2649. end
  2650. else
  2651. ;
  2652. end;
  2653. break;
  2654. end;
  2655. {else if (taicpu(hp).condition<>taicpu(p).condition) or
  2656. (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
  2657. break;}
  2658. inc(l);
  2659. end;
  2660. hp := tai(hp.Previous);
  2661. end;
  2662. end;
  2663. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2664. var
  2665. hp : taicpu;
  2666. //hp1,hp2 : tai;
  2667. begin
  2668. result:=false;
  2669. if inherited PeepHoleOptPass1Cpu(p) then
  2670. result:=true
  2671. else if (p.typ=ait_instruction) and
  2672. MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2673. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2674. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2675. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2676. begin
  2677. DebugMsg('Peephole Stm2Push done', p);
  2678. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2679. AsmL.InsertAfter(hp, p);
  2680. asml.Remove(p);
  2681. p:=hp;
  2682. result:=true;
  2683. end
  2684. {else if (p.typ=ait_instruction) and
  2685. MatchInstruction(p, A_STR, [C_None], [PF_None]) and
  2686. (taicpu(p).oper[1]^.ref^.addressmode=AM_PREINDEXED) and
  2687. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2688. (taicpu(p).oper[1]^.ref^.offset=-4) and
  2689. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,14]) then
  2690. begin
  2691. DebugMsg('Peephole Str2Push done', p);
  2692. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2693. asml.InsertAfter(hp, p);
  2694. asml.Remove(p);
  2695. p.Free;
  2696. p:=hp;
  2697. result:=true;
  2698. end}
  2699. else if (p.typ=ait_instruction) and
  2700. MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2701. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2702. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2703. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2704. begin
  2705. DebugMsg('Peephole Ldm2Pop done', p);
  2706. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2707. asml.InsertBefore(hp, p);
  2708. asml.Remove(p);
  2709. p.Free;
  2710. p:=hp;
  2711. result:=true;
  2712. end
      {else if (p.typ=ait_instruction) and
        MatchInstruction(p, A_LDR, [C_None], [PF_None]) and
        (taicpu(p).oper[1]^.ref^.addressmode=AM_POSTINDEXED) and
        (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
        (taicpu(p).oper[1]^.ref^.offset=4) and
        (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,15]) then
        begin
          DebugMsg('Peephole Ldr2Pop done', p);
          hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
          asml.InsertBefore(hp, p);
          asml.Remove(p);
          p.Free;
          p:=hp;
          result:=true;
        end}
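      { AndR2Uxt/AndRR2Uxt: an AND with the constant $FF or $FFFF is a plain
        zero extension and can therefore be encoded as UXTB/UXTH.
        Illustrative example (not from the original source):
            and r0,r1,#255   ->   uxtb r0,r1 }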
      else if (p.typ=ait_instruction) and
        MatchInstruction(p, [A_AND], [], [PF_None]) and
        (taicpu(p).ops = 2) and
        (taicpu(p).oper[1]^.typ=top_const) and
        ((taicpu(p).oper[1]^.val=255) or
         (taicpu(p).oper[1]^.val=65535)) then
        begin
          DebugMsg('Peephole AndR2Uxt done', p);
          if taicpu(p).oper[1]^.val=255 then
            taicpu(p).opcode:=A_UXTB
          else
            taicpu(p).opcode:=A_UXTH;
          taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
          result := true;
        end
      else if (p.typ=ait_instruction) and
        MatchInstruction(p, [A_AND], [], [PF_None]) and
        (taicpu(p).ops = 3) and
        (taicpu(p).oper[2]^.typ=top_const) and
        ((taicpu(p).oper[2]^.val=255) or
         (taicpu(p).oper[2]^.val=65535)) then
        begin
          DebugMsg('Peephole AndRR2Uxt done', p);
          if taicpu(p).oper[2]^.val=255 then
            taicpu(p).opcode:=A_UXTB
          else
            taicpu(p).opcode:=A_UXTH;
          taicpu(p).ops:=2;
          result := true;
        end
      {else if (p.typ=ait_instruction) and
        MatchInstruction(p, [A_CMP], [C_None], [PF_None]) and
        (taicpu(p).oper[1]^.typ=top_const) and
        (taicpu(p).oper[1]^.val=0) and
        GetNextInstruction(p,hp1) and
        (taicpu(hp1).opcode=A_B) and
        (taicpu(hp1).condition in [C_EQ,C_NE]) then
        begin
          if taicpu(hp1).condition = C_EQ then
            hp2:=taicpu.op_reg_ref(A_CBZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^)
          else
            hp2:=taicpu.op_reg_ref(A_CBNZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^);
          taicpu(hp2).is_jmp := true;
          asml.InsertAfter(hp2, hp1);
          asml.Remove(hp1);
          hp1.Free;
          asml.Remove(p);
          p.Free;
          p := hp2;
          result := true;
        end}
    end;
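
  { PeepHoleOptPass2: a conditional branch that only jumps over a short run
    (at most four) of conditionalisable instructions is folded into a Thumb-2
    IT block, the covered instructions getting the inverted condition.
    Illustrative example (not from the original source):
        bne .Lskip ; add r0,r0,#1 ; .Lskip:
    becomes
        it eq ; addeq r0,r0,#1 ; .Lskip: }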
  procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
    var
      p,hp1,hp2: tai;
      l : longint;
      condition : tasmcond;
      { UsedRegs, TmpUsedRegs: TRegSet; }
    begin
      p := BlockStart;
      { UsedRegs := []; }
      while (p <> BlockEnd) Do
        begin
          { UpdateUsedRegs(UsedRegs, tai(p.next)); }
          case p.Typ Of
            Ait_Instruction:
              begin
                case taicpu(p).opcode Of
                  A_B:
                    if taicpu(p).condition<>C_None then
                      begin
                        { check for
                            Bxx   xxx
                            <several instructions>
                          xxx:
                        }
                        l:=0;
                        GetNextInstruction(p, hp1);
                        while assigned(hp1) and
                          (l<=4) and
                          CanBeCond(hp1) and
                          { stop on labels }
                          not(hp1.typ=ait_label) do
                          begin
                            inc(l);
                            if MustBeLast(hp1) then
                              begin
                                //hp1:=nil;
                                GetNextInstruction(hp1,hp1);
                                break;
                              end
                            else
                              GetNextInstruction(hp1,hp1);
                          end;
                        if assigned(hp1) then
                          begin
                            if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
                              begin
                                if (l<=4) and (l>0) then
                                  begin
                                    condition:=inverse_cond(taicpu(p).condition);
                                    hp2:=p;
                                    GetNextInstruction(p,hp1);
                                    p:=hp1;
                                    repeat
                                      if hp1.typ=ait_instruction then
                                        taicpu(hp1).condition:=condition;
                                      if MustBeLast(hp1) then
                                        begin
                                          GetNextInstruction(hp1,hp1);
                                          break;
                                        end
                                      else
                                        GetNextInstruction(hp1,hp1);
                                    until not(assigned(hp1)) or
                                      not(CanBeCond(hp1)) or
                                      (hp1.typ=ait_label);
                                    { wait with removing the branch, else GetNextInstruction
                                      could ignore the label if this jump was its only
                                      reference }
                                    asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
                                    DecrementPreceedingIT(asml, hp2);
                                    case l of
                                      1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
                                      2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
                                      3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
                                      4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
                                    end;
                                    tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                    asml.remove(hp2);
                                    hp2.free;
                                    continue;
                                  end;
                              end;
                          end;
                      end;
                  else
                    ;
                end;
              end;
            else
              ;
          end;
          p := tai(p.next)
        end;
    end;
  function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
    begin
      result:=false;
      if p.typ = ait_instruction then
        begin
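          { Mov2Movs: when the flags are not live, an 8-bit immediate MOV can
            be turned into the flag-setting MOVS, which for low registers has
            a 16-bit Thumb encoding; the patterns below apply the same "make
            it flag-setting while the flags are dead" idea to other
            instructions. Illustrative example (not from the original source):
                mov r0,#1   ->   movs r0,#1 }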
          if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
            (taicpu(p).oper[1]^.typ=top_const) and
            (taicpu(p).oper[1]^.val >= 0) and
            (taicpu(p).oper[1]^.val < 256) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mov2Movs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
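          { Mvn2Mvns: the same transformation for a register MVN.
            Illustrative example (not from the original source):
                mvn r0,r1   ->   mvns r0,r1 }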
          else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
            (taicpu(p).oper[1]^.typ=top_reg) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mvn2Mvns done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
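          { Rsb2Rsbs: RSB Rd,Rn,#0 (a plain negation) also gets the S postfix
            while the flags are free. Illustrative example (not from the
            original source):
                rsb r0,r1,#0   ->   rsbs r0,r1,#0 }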
          else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            (taicpu(p).oper[2]^.typ=top_const) and
            (taicpu(p).oper[2]^.val=0) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Rsb2Rsbs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
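          { AddSub2*s: ADD/SUB where the destination equals the first source
            and the immediate fits in 8 bits (or, in the following pattern,
            with a plain register source) drops to the two-operand
            flag-setting form. Illustrative example (not from the original
            source):
                add r0,r0,#4   ->   adds r0,#4 }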
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
            (taicpu(p).oper[2]^.typ=top_const) and
            (taicpu(p).oper[2]^.val >= 0) and
            (taicpu(p).oper[2]^.val < 256) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
            (taicpu(p).ops = 2) and
            (taicpu(p).oper[1]^.typ=top_reg) and
            (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
            (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
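          { AddRRR2AddRR: ADD Rd,Rd,Rm only needs the redundant first source
            removed. Illustrative example (not from the original source):
                add r0,r0,r1   ->   add r0,r1 }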
          else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ=top_reg) then
            begin
              DebugMsg('Peephole AddRRR2AddRR done', p);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              result:=true;
            end
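          { opXXY2opsXY: for AND/ORR/EOR/BIC and the shifts, op Rd,Rd,Rm is
            turned into the two-operand flag-setting form when the flags are
            dead. Illustrative example (not from the original source):
                and r0,r0,r1   ->   ands r0,r1 }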
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ=top_reg) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole opXXY2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
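          { opXXY2opXY: the already flag-setting variant of the same pattern
            only needs its operands folded. Illustrative example (not from
            the original source):
                lsls r0,r0,#2   ->   lsls r0,#2 }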
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
            begin
              DebugMsg('Peephole opXXY2opXY done', p);
              taicpu(p).ops := 2;
              if taicpu(p).oper[2]^.typ=top_reg then
                taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
              else
                taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              result:=true;
            end
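          { opXYX2opsXY: for the commutative operations AND/ORR/EOR the
            destination may equal the last source instead. Illustrative
            example (not from the original source):
                and r0,r1,r0   ->   ands r0,r1 }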
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
            (taicpu(p).ops = 3) and
            MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole opXYX2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
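          { Mov2Shift: a MOV with a shifter operand is really a shift
            instruction, so rewrite it as flag-setting LSL/LSR/ASR/ROR.
            Illustrative example (not from the original source):
                mov r0,r1,lsl #2   ->   lsls r0,r1,#2 }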
          else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
            (taicpu(p).ops=3) and
            (taicpu(p).oper[2]^.typ=top_shifterop) and
            (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
            //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
            (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mov2Shift done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              case taicpu(p).oper[2]^.shifterop^.shiftmode of
                SM_LSL: taicpu(p).opcode:=A_LSL;
                SM_LSR: taicpu(p).opcode:=A_LSR;
                SM_ASR: taicpu(p).opcode:=A_ASR;
                SM_ROR: taicpu(p).opcode:=A_ROR;
                else
                  internalerror(2019050912);
              end;
              if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
                taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
              else
                taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
              result:=true;
            end
        end;
    end;
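
{ register the CPU-specific optimizer and pre-regalloc scheduler classes with
  the generic assembler optimizer framework }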
begin
  casmoptimizer:=TCpuAsmOptimizer;
  cpreregallocscheduler:=TCpuPreRegallocScheduler;
End.