{
    Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
    Development Team

    This unit implements the ARM optimizer object

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
Unit aoptcpu;

{$i fpcdefs.inc}

{ $define DEBUG_PREREGSCHEDULER}
{ $define DEBUG_AOPTCPU}

Interface

uses cgbase, cgutils, cpubase, aasmtai, aasmcpu, aopt, aoptobj;

Type
  TCpuAsmOptimizer = class(TAsmOptimizer)
    { uses the same constructor as TAopObj }
    function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
    procedure PeepHoleOptPass2;override;
    Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
    function RemoveSuperfluousMove(const p: tai; movp: tai; const optimizer: string): boolean;
    function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;

    { Gets the next tai object after Current that contains information relevant
      to the optimizer, i.e. one that uses the given register or changes the
      program flow. If there is none, it returns false and sets Next to nil. }
    Function GetNextInstructionUsingReg(Current: tai; Out Next: tai; reg: TRegister): Boolean;
    Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;

    { outputs a debug message into the assembler file }
    procedure DebugMsg(const s: string; p: tai);

    function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
    function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
  protected
    function LookForPreindexedPattern(p: taicpu): boolean;
    function LookForPostindexedPattern(p: taicpu): boolean;
  End;

  TCpuPreRegallocScheduler = class(TAsmScheduler)
    function SchedulerPass1Cpu(var p: tai): boolean;override;
    procedure SwapRegLive(p, hp1: taicpu);
  end;

  TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
    { uses the same constructor as TAopObj }
    function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
    procedure PeepHoleOptPass2;override;
    function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  End;

  function MustBeLast(p : tai) : boolean;

Implementation

  uses
    cutils,verbose,globtype,globals,
    systems,
    cpuinfo,
    cgobj,procinfo,
    aasmbase,aasmdata;

  { Range checking must be disabled explicitly, as conversions between signed
    and unsigned 32-bit values are done without explicit typecasts }
  {$R-}
  function CanBeCond(p : tai) : boolean;
    begin
      result:=
        not(GenerateThumbCode) and
        (p.typ=ait_instruction) and
        (taicpu(p).condition=C_None) and
        ((taicpu(p).opcode<A_IT) or (taicpu(p).opcode>A_ITTTT)) and
        (taicpu(p).opcode<>A_CBZ) and
        (taicpu(p).opcode<>A_CBNZ) and
        (taicpu(p).opcode<>A_PLD) and
        (((taicpu(p).opcode<>A_BLX) and
          { BL may need to be converted into BLX by the linker -- it could possibly
            be allowed if it targets a local symbol that is known to use the same
            instruction set as the current code }
          (taicpu(p).opcode<>A_BL)) or
         (taicpu(p).oper[0]^.typ=top_reg));
    end;


  function RefsEqual(const r1, r2: treference): boolean;
    begin
      refsequal :=
        (r1.offset = r2.offset) and
        (r1.base = r2.base) and
        (r1.index = r2.index) and (r1.scalefactor = r2.scalefactor) and
        (r1.symbol=r2.symbol) and (r1.refaddr = r2.refaddr) and
        (r1.relsymbol = r2.relsymbol) and
        (r1.signindex = r2.signindex) and
        (r1.shiftimm = r2.shiftimm) and
        (r1.addressmode = r2.addressmode) and
        (r1.shiftmode = r2.shiftmode) and
        (r1.volatility=[]) and
        (r2.volatility=[]);
    end;
  function MatchInstruction(const instr: tai; const op: TCommonAsmOps; const cond: TAsmConds; const postfix: TOpPostfixes): boolean;
    begin
      result :=
        (instr.typ = ait_instruction) and
        ((op = []) or ((ord(taicpu(instr).opcode)<256) and (taicpu(instr).opcode in op))) and
        ((cond = []) or (taicpu(instr).condition in cond)) and
        ((postfix = []) or (taicpu(instr).oppostfix in postfix));
    end;


  function MatchInstruction(const instr: tai; const op: TAsmOp; const cond: TAsmConds; const postfix: TOpPostfixes): boolean;
    begin
      result :=
        (instr.typ = ait_instruction) and
        (taicpu(instr).opcode = op) and
        ((cond = []) or (taicpu(instr).condition in cond)) and
        ((postfix = []) or (taicpu(instr).oppostfix in postfix));
    end;


  function MatchOperand(const oper1: TOper; const oper2: TOper): boolean; inline;
    begin
      result := oper1.typ = oper2.typ;
      if result then
        case oper1.typ of
          top_const:
            Result:=oper1.val = oper2.val;
          top_reg:
            Result:=oper1.reg = oper2.reg;
          top_conditioncode:
            Result:=oper1.cc = oper2.cc;
          top_ref:
            Result:=RefsEqual(oper1.ref^, oper2.ref^);
          else Result:=false;
        end
    end;


  function MatchOperand(const oper: TOper; const reg: TRegister): boolean; inline;
    begin
      result := (oper.typ = top_reg) and (oper.reg = reg);
    end;


  function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
    begin
      Result:=false;
      if (taicpu(movp).condition = C_EQ) and
         (taicpu(cmpp).oper[0]^.reg = taicpu(movp).oper[0]^.reg) and
         (taicpu(cmpp).oper[1]^.val = taicpu(movp).oper[1]^.val) then
        begin
          asml.insertafter(tai_comment.Create(strpnew('Peephole CmpMovMov - Removed redundant moveq')), movp);
          asml.remove(movp);
          movp.free;
          Result:=true;
        end;
    end;
  function AlignedToQWord(const ref : treference) : boolean;
    begin
      { (safe) heuristics to ensure alignment }
      result:=(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) and
        (((ref.offset>=0) and
          ((ref.offset mod 8)=0) and
          ((ref.base=NR_R13) or
           (ref.index=NR_R13))
         ) or
         ((ref.offset<=0) and
          { when using NR_R11, it always has a value of <qword align>+4 }
          ((abs(ref.offset+4) mod 8)=0) and
          (current_procinfo.framepointer=NR_R11) and
          ((ref.base=NR_R11) or
           (ref.index=NR_R11))
         )
        );
    end;


  function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
    begin
      if GenerateThumb2Code then
        result := (aoffset<4096) and (aoffset>-256)
      else
        result := ((pf in [PF_None,PF_B]) and
                   (abs(aoffset)<4096)) or
                  (abs(aoffset)<256);
    end;
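
  { Background: in ARM mode, plain LDR/STR and the byte forms encode a 12-bit
    immediate offset (|offset| < 4096), while the halfword/signed-byte/dual
    forms only have an 8-bit immediate (|offset| < 256). Thumb-2 encodes
    positive offsets with 12 bits and negative offsets with 8 bits, which is
    what the asymmetric range above reflects. }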
  function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
    var
      p: taicpu;
      i: longint;
    begin
      instructionLoadsFromReg := false;
      if not (assigned(hp) and (hp.typ = ait_instruction)) then
        exit;
      p:=taicpu(hp);
      i:=1;
      { For these instructions we have to start at oper[0] }
      if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
                       A_CMP, A_CMN, A_TST, A_TEQ,
                       A_B, A_BL, A_BX, A_BLX,
                       A_SMLAL, A_UMLAL]) then i:=0;
      while(i<p.ops) do
        begin
          case p.oper[I]^.typ of
            top_reg:
              instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
                { STRD }
                ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
            top_regset:
              instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
            top_shifterop:
              instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
            top_ref:
              instructionLoadsFromReg :=
                (p.oper[I]^.ref^.base = reg) or
                (p.oper[I]^.ref^.index = reg);
          end;
          if instructionLoadsFromReg then exit; { bail out if we found something }
          Inc(I);
        end;
    end;
  function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
    var
      p: taicpu;
    begin
      p := taicpu(hp);
      Result := false;
      if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
        exit;
      case p.opcode of
        { These instructions do not write to a register at all }
        A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
        A_VCMP:
          exit;
        { Take care of post-/preindexed stores and loads: they modify their base register }
        A_STR, A_LDR:
          begin
            Result := false;
            { actually, this does not apply here because post-/preindexing does not mean that a register
              is loaded with a new value, it is only modified
              (taicpu(p).oper[1]^.typ=top_ref) and
              (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
              (taicpu(p).oper[1]^.ref^.base = reg);
            }
            { STR does not load into its first register }
            if p.opcode = A_STR then
              exit;
          end;
        A_VSTR:
          begin
            Result := false;
            exit;
          end;
        { These four write to the first two registers; UMLAL and SMLAL also read from them }
        A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
          Result :=
            (p.oper[1]^.typ = top_reg) and
            (p.oper[1]^.reg = reg);
        { Loads to oper2 from a coprocessor }
        {
          MCR/MRC is currently not supported in FPC
          A_MRC:
            Result :=
              (p.oper[2]^.typ = top_reg) and
              (p.oper[2]^.reg = reg);
        }
        { Loads to all registers in the register set }
        A_LDM, A_VLDM:
          Result := (getsupreg(reg) in p.oper[1]^.regset^);
        A_POP:
          Result := (getsupreg(reg) in p.oper[0]^.regset^) or
                    (reg=NR_STACK_POINTER_REG);
      end;
      if Result then
        exit;
      case p.oper[0]^.typ of
        { This is the common case: the destination is the first operand }
        top_reg:
          Result := (p.oper[0]^.reg = reg) or
            { LDRD }
            (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
        { LDM/STM might write a new value to their index register }
        top_ref:
          Result :=
            (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
            (taicpu(p).oper[0]^.ref^.base = reg);
      end;
    end;
  function TCpuAsmOptimizer.GetNextInstructionUsingReg(Current: tai;
    Out Next: tai; reg: TRegister): Boolean;
    begin
      Next:=Current;
      repeat
        Result:=GetNextInstruction(Next,Next);
      until not (Result) or
            not(cs_opt_level3 in current_settings.optimizerswitches) or
            (Next.typ<>ait_instruction) or
            RegInInstruction(reg,Next) or
            is_calljmp(taicpu(Next).opcode) or
            RegModifiedByInstruction(NR_PC,Next);
    end;


  function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai;
    Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
    begin
      Next:=Current;
      repeat
        Result:=GetNextInstruction(Next,Next);
        if Result and
           (Next.typ=ait_instruction) and
           (taicpu(Next).opcode in [A_LDR, A_STR]) and
           (
            ((taicpu(Next).ops = 2) and
             (taicpu(Next).oper[1]^.typ = top_ref) and
             RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
            ((taicpu(Next).ops = 3) and { LDRD/STRD }
             (taicpu(Next).oper[2]^.typ = top_ref) and
             RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
           ) then
          { We've found an instruction LDR or STR with the same reference }
          exit;
      until not(Result) or
            (Next.typ<>ait_instruction) or
            not(cs_opt_level3 in current_settings.optimizerswitches) or
            is_calljmp(taicpu(Next).opcode) or
            (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
            RegModifiedByInstruction(NR_PC,Next);
      Result:=false;
    end;


{$ifdef DEBUG_AOPTCPU}
  procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
    begin
      asml.insertbefore(tai_comment.Create(strpnew(s)), p);
    end;
{$else DEBUG_AOPTCPU}
  procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
    begin
    end;
{$endif DEBUG_AOPTCPU}
  function TCpuAsmOptimizer.RemoveSuperfluousMove(const p: tai; movp: tai; const optimizer: string):boolean;
    var
      alloc,
      dealloc : tai_regalloc;
      hp1 : tai;
    begin
      Result:=false;
      if MatchInstruction(movp, A_MOV, [taicpu(p).condition], [PF_None]) and
         (taicpu(movp).ops=2) and { we can't optimize if there is a shiftop }
         MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
         { don't mess with moves to pc }
         (taicpu(movp).oper[0]^.reg<>NR_PC) and
         { don't mess with moves to lr }
         (taicpu(movp).oper[0]^.reg<>NR_R14) and
         { the destination register of the mov must not be used between p and movp }
         not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
         { cb[n]z are thumb instructions which require specific registers, with no wide forms }
         (taicpu(p).opcode<>A_CBZ) and
         (taicpu(p).opcode<>A_CBNZ) and
         { there is a special requirement for MUL and MLA: oper[0] and oper[1] are not allowed to be the same }
         not (
           (taicpu(p).opcode in [A_MLA, A_MUL]) and
           (taicpu(p).oper[1]^.reg = taicpu(movp).oper[0]^.reg) and
           (current_settings.cputype < cpu_armv6)
         ) and
         { Take care to only do this for instructions which REALLY load to the first register.
           Otherwise
             str reg0, [reg1]
             mov reg2, reg0
           will be optimized to
             str reg2, [reg1]
         }
         regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
        begin
          dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
          if assigned(dealloc) then
            begin
              DebugMsg('Peephole '+optimizer+' removed superfluous mov', movp);
              result:=true;
              { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
                and remove it if possible }
              asml.Remove(dealloc);
              alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
              if assigned(alloc) then
                begin
                  asml.Remove(alloc);
                  alloc.free;
                  dealloc.free;
                end
              else
                asml.InsertAfter(dealloc,p);

              { try to move the allocation of the target register }
              GetLastInstruction(movp,hp1);
              alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
              if assigned(alloc) then
                begin
                  asml.Remove(alloc);
                  asml.InsertBefore(alloc,p);
                  { adjust used regs }
                  IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
                end;

              { finally get rid of the mov }
              taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
              { Remove preindexing and postindexing for LDR in some cases.
                For example:
                  ldr reg2,[reg1, xxx]!
                  mov reg1,reg2
                must be translated to:
                  ldr reg1,[reg1, xxx]
                Preindexing must be removed here, since the same register is used as the base and as the target.
                Such a case is not allowed on the ARM CPU and produces a crash. }
              if (taicpu(p).opcode = A_LDR) and (taicpu(p).oper[1]^.typ = top_ref)
                and (taicpu(movp).oper[0]^.reg = taicpu(p).oper[1]^.ref^.base)
              then
                taicpu(p).oper[1]^.ref^.addressmode:=AM_OFFSET;
              asml.remove(movp);
              movp.free;
            end;
        end;
    end;
  function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
    var
      alloc,
      dealloc : tai_regalloc;
      hp1 : tai;
    begin
      Result:=false;
      if (MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) or
          ((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
          ((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
         ) and
         (taicpu(movp).ops=2) and
         MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
         { the destination register of the mov must not be used between p and movp }
         not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
         { Take care to only do this for instructions which REALLY load to the first register.
           Otherwise
             vstr reg0, [reg1]
             vmov reg2, reg0
           will be optimized to
             vstr reg2, [reg1]
         }
         regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
        begin
          dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
          if assigned(dealloc) then
            begin
              DebugMsg('Peephole '+optimizer+' removed superfluous vmov', movp);
              result:=true;
              { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
                and remove it if possible }
              asml.Remove(dealloc);
              alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
              if assigned(alloc) then
                begin
                  asml.Remove(alloc);
                  alloc.free;
                  dealloc.free;
                end
              else
                asml.InsertAfter(dealloc,p);

              { try to move the allocation of the target register }
              GetLastInstruction(movp,hp1);
              alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
              if assigned(alloc) then
                begin
                  asml.Remove(alloc);
                  asml.InsertBefore(alloc,p);
                  { adjust used regs }
                  IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
                end;

              { finally get rid of the mov }
              taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
              asml.remove(movp);
              movp.free;
            end;
        end;
    end;
  {
    optimize
      add/sub reg1,reg1,regY/const
      ...
      ldr/str regX,[reg1]
    into
      ldr/str regX,[reg1, regY/const]!
  }
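  { Illustrative example (registers chosen arbitrarily):
      add r1,r1,#4
      ...
      ldr r0,[r1]
    becomes
      ldr r0,[r1, #4]!
    i.e. the add is folded into the load as a pre-indexed access with write-back. }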
  function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
    var
      hp1: tai;
    begin
      if GenerateARMCode and
         (p.ops=3) and
         MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
         GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
         (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
         MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
         (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
         (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
         (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
         (taicpu(hp1).oper[1]^.ref^.offset=0) and
         (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
         (((p.oper[2]^.typ=top_reg) and
           (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
          ((p.oper[2]^.typ=top_const) and
           ((abs(p.oper[2]^.val) < 256) or
            ((abs(p.oper[2]^.val) < 4096) and
             (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
        begin
          taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;
          if p.oper[2]^.typ=top_reg then
            begin
              taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
              if p.opcode=A_ADD then
                taicpu(hp1).oper[1]^.ref^.signindex:=1
              else
                taicpu(hp1).oper[1]^.ref^.signindex:=-1;
            end
          else
            begin
              if p.opcode=A_ADD then
                taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
              else
                taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
            end;
          result:=true;
        end
      else
        result:=false;
    end;
  {
    optimize
      ldr/str regX,[reg1]
      ...
      add/sub reg1,reg1,regY/const
    into
      ldr/str regX,[reg1], regY/const
  }
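  { Illustrative example (registers chosen arbitrarily):
      ldr r0,[r1]
      ...
      add r1,r1,#4
    becomes
      ldr r0,[r1], #4
    i.e. the base register is updated after the access (post-indexed write-back). }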
  function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
    var
      hp1 : tai;
    begin
      Result:=false;
      if (p.oper[1]^.typ = top_ref) and
         (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
         (p.oper[1]^.ref^.index=NR_NO) and
         (p.oper[1]^.ref^.offset=0) and
         GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
         { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
         MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
         (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
         (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
         (
          (taicpu(hp1).oper[2]^.typ=top_reg) or
          { valid offset? }
          ((taicpu(hp1).oper[2]^.typ=top_const) and
           ((abs(taicpu(hp1).oper[2]^.val)<256) or
            ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
           )
          )
         ) and
         { don't apply the optimization if the base register is loaded }
         (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
         not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
         { don't apply the optimization if the (new) index register is loaded }
         (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
         not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
         GenerateARMCode then
        begin
          DebugMsg('Peephole Str/LdrAdd/Sub2Str/Ldr Postindex done', p);
          p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
          if taicpu(hp1).oper[2]^.typ=top_const then
            begin
              if taicpu(hp1).opcode=A_ADD then
                p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
              else
                p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
            end
          else
            begin
              p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
              if taicpu(hp1).opcode=A_ADD then
                p.oper[1]^.ref^.signindex:=1
              else
                p.oper[1]^.ref^.signindex:=-1;
            end;
          asml.Remove(hp1);
          hp1.Free;
          Result:=true;
        end;
    end;
  function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
    var
      hp1,hp2,hp3,hp4: tai;
      i, i2: longint;
      tempop: tasmop;
      oldreg: tregister;
      dealloc: tai_regalloc;

    function IsPowerOf2(const value: DWord): boolean; inline;
      begin
        Result:=(value and (value - 1)) = 0;
      end;

    begin
      result := false;
      case p.typ of
        ait_instruction:
          begin
            {
              change
                <op> reg,x,y
                cmp reg,#0
              into
                <op>s reg,x,y
            }
            { this optimization can be applied only to the operations listed here, because
              the other operations do not update all flags and FPC does not track flag usage }
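            { Example of the transformation (illustrative):
                add r0,r1,r2
                cmp r0,#0
                beq .Lsomewhere
              becomes
                adds r0,r1,r2
                beq .Lsomewhere
              This is valid because the S form sets Z and N exactly as a compare
              against 0 would, and only EQ/NE/MI/PL consumers are accepted below. }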
            if MatchInstruction(p, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,A_EOR,A_AND,
                                    A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
               GetNextInstruction(p, hp1) and
               { mlas is only allowed in arm mode }
               ((taicpu(p).opcode<>A_MLA) or
                (current_settings.instructionset<>is_thumb)) and
               MatchInstruction(hp1, A_CMP, [C_None], [PF_None]) and
               (taicpu(hp1).oper[1]^.typ = top_const) and
               (taicpu(p).oper[0]^.reg = taicpu(hp1).oper[0]^.reg) and
               (taicpu(hp1).oper[1]^.val = 0) and
               GetNextInstruction(hp1, hp2) and
               { be careful here: following instructions could use other flags,
                 however after a jump fpc never depends on the value of flags }
               { All instructions above set Z and N according to the following
                 Z := result = 0;
                 N := result[31];
                 EQ = Z=1; NE = Z=0;
                 MI = N=1; PL = N=0; }
               (MatchInstruction(hp2, A_B, [C_EQ,C_NE,C_MI,C_PL], []) or
                { mov is also possible, but only if there is no shifter operand; it could be an rxx,
                  we are too lazy to check if it is rxx or something else }
                (MatchInstruction(hp2, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp2).ops=2))) and
               assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp2.Next))) then
              begin
                DebugMsg('Peephole OpCmp2OpS done', p);
                taicpu(p).oppostfix:=PF_S;
                { move flag allocation if possible }
                GetLastInstruction(hp1, hp2);
                hp2:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp2.Next));
                if assigned(hp2) then
                  begin
                    asml.Remove(hp2);
                    asml.insertbefore(hp2, p);
                  end;
                asml.remove(hp1);
                hp1.free;
                Result:=true;
              end
            else
              case taicpu(p).opcode of
                A_STR:
                  begin
                    { change
                        str reg1,ref
                        ldr reg2,ref
                      into
                        str reg1,ref
                        mov reg2,reg1
                    }
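                    { The reload from memory is redundant: the value just stored is still
                      available in reg1, so a plain register move (or nothing at all when
                      reg1=reg2) avoids the memory access. }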
                    if (taicpu(p).oper[1]^.typ = top_ref) and
                       (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
                       (taicpu(p).oppostfix=PF_None) and
                       (taicpu(p).condition=C_None) and
                       GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
                       MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
                       (taicpu(hp1).oper[1]^.typ=top_ref) and
                       (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
                       not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
                       ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
                       ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
                      begin
                        if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
                          begin
                            DebugMsg('Peephole StrLdr2StrMov 1 done', hp1);
                            asml.remove(hp1);
                            hp1.free;
                          end
                        else
                          begin
                            taicpu(hp1).opcode:=A_MOV;
                            taicpu(hp1).oppostfix:=PF_None;
                            taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
                            DebugMsg('Peephole StrLdr2StrMov 2 done', hp1);
                          end;
                        result := true;
                      end
                    { change
                        str reg1,ref
                        str reg2,ref
                      into
                        strd reg1,reg2,ref
                    }
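                    { STRD stores an even/odd register pair to two consecutive words in one
                      instruction, hence the checks below for an even first register, the
                      directly following register, an offset exactly 4 bytes higher and a
                      qword-aligned reference. }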
                    else if (GenerateARMCode or GenerateThumb2Code) and
                       (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
                       (taicpu(p).oppostfix=PF_None) and
                       (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
                       GetNextInstruction(p,hp1) and
                       MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
                       not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
                       (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
                       { str ensures that either base or index contains no register, else str wouldn't
                         use an offset either }
                       (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
                       (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
                       (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
                       (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
                       AlignedToQWord(taicpu(p).oper[1]^.ref^) then
                      begin
                        DebugMsg('Peephole StrStr2Strd done', p);
                        taicpu(p).oppostfix:=PF_D;
                        taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
                        taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
                        taicpu(p).ops:=3;
                        asml.remove(hp1);
                        hp1.free;
                        result:=true;
                      end;
                    Result:=LookForPostindexedPattern(taicpu(p)) or Result;
                  end;
                A_LDR:
                  begin
                    { change
                        ldr reg1,ref
                        ldr reg2,ref
                      into ...
                    }
                    if (taicpu(p).oper[1]^.typ = top_ref) and
                       (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
                       GetNextInstruction(p,hp1) and
                       { ldrd is not allowed here }
                       MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
                      begin
                        {
                          ...
                          ldr reg1,ref
                          mov reg2,reg1
                        }
                        if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
                           RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
                           (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
                           (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
                           (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
                          begin
                            if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
                              begin
                                DebugMsg('Peephole LdrLdr2Ldr done', hp1);
                                asml.remove(hp1);
                                hp1.free;
                              end
                            else
                              begin
                                DebugMsg('Peephole LdrLdr2LdrMov done', hp1);
                                taicpu(hp1).opcode:=A_MOV;
                                taicpu(hp1).oppostfix:=PF_None;
                                taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
                              end;
                            result := true;
                          end
                        {
                          ...
                          ldrd reg1,reg1+1,ref
                        }
                        else if (GenerateARMCode or GenerateThumb2Code) and
                           (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
                           { ldrd does not allow any postfixes ... }
                           (taicpu(p).oppostfix=PF_None) and
                           not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
                           (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
                           { ldr ensures that either base or index contains no register, else ldr wouldn't
                             use an offset either }
                           (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
                           (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
                           (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
                           (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
                           AlignedToQWord(taicpu(p).oper[1]^.ref^) then
                          begin
                            DebugMsg('Peephole LdrLdr2Ldrd done', p);
                            taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
                            taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
                            taicpu(p).ops:=3;
                            taicpu(p).oppostfix:=PF_D;
                            asml.remove(hp1);
                            hp1.free;
                            result:=true;
                          end;
                      end;
                    {
                      Change
                        ldrb dst1, [REF]
                        and dst2, dst1, #255
                      into
                        ldrb dst2, [ref]
                    }
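                    { LDRB already zero-extends the loaded byte to 32 bits, so an AND with
                      #255 on the loaded value is redundant; only the destination register
                      has to be carried over to the load. }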
                    if not(GenerateThumbCode) and
                       (taicpu(p).oppostfix=PF_B) and
                       GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
                       MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
                       (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
                       (taicpu(hp1).oper[2]^.typ = top_const) and
                       (taicpu(hp1).oper[2]^.val = $FF) and
                       not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
                       RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
                      begin
                        DebugMsg('Peephole LdrbAnd2Ldrb done', p);
                        taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
                        asml.remove(hp1);
                        hp1.free;
                        result:=true;
                      end;
                    Result:=LookForPostindexedPattern(taicpu(p)) or Result;
                    { Remove the superfluous mov after an ldr:
                      changes
                        ldr reg1, ref
                        mov reg2, reg1
                      to
                        ldr reg2, ref
                      conditions are:
                        * no ldrd usage
                        * reg1 must be released after the mov
                        * the mov cannot contain shifterops
                        * ldr+mov have the same conditions
                        * the mov does not set flags
                    }
                    if (taicpu(p).oppostfix<>PF_D) and
                       GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
                       RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
                      Result:=true;
                  end;
                A_MOV:
                  begin
                    { fold
                        mov reg1,reg0, shift imm1
                        mov reg1,reg1, shift imm2
                    }
                    if (taicpu(p).ops=3) and
                       (taicpu(p).oper[2]^.typ = top_shifterop) and
                       (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
                       getnextinstruction(p,hp1) and
                       MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
                       (taicpu(hp1).ops=3) and
                       MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
                       MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
                       (taicpu(hp1).oper[2]^.typ = top_shifterop) and
                       (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
                      begin
                        { fold
                            mov reg1,reg0, lsl 16
                            mov reg1,reg1, lsr 16
                            strh reg1, ...
                            dealloc reg1
                          to
                            strh reg0, ...
                            dealloc reg1
                        }
                        if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
                           (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
                           (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
                           (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
                           getnextinstruction(hp1,hp2) and
                           MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
                           MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
                          begin
                            TransferUsedRegs(TmpUsedRegs);
                            UpdateUsedRegs(TmpUsedRegs, tai(p.next));
                            UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
                            if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
                              begin
                                DebugMsg('Peephole optimizer removed superfluous 16 Bit zero extension', hp1);
                                taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
                                asml.remove(p);
                                asml.remove(hp1);
                                p.free;
                                hp1.free;
                                p:=hp2;
                                Result:=true;
                              end;
                          end
                        { fold
                            mov reg1,reg0, shift imm1
                            mov reg1,reg1, shift imm2
                          to
                            mov reg1,reg0, shift imm1+imm2
                        }
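                        { Example (illustrative): mov r0,r1,lsr #8 followed by mov r0,r0,lsr #4
                          becomes mov r0,r1,lsr #12. Sums larger than 31 are handled below,
                          e.g. two lsl #20 shifts simply produce 0. }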
                        else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
                           { an asr has no effect after an lsr, so the asr can be folded into the lsr }
                           ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR) ) then
                          begin
                            inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
                            { avoid overflows }
                            if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
                              case taicpu(p).oper[2]^.shifterop^.shiftmode of
                                SM_ROR:
                                  taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
                                SM_ASR:
                                  taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
                                SM_LSR,
                                SM_LSL:
                                  begin
                                    hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
                                    InsertLLItem(p.previous, p.next, hp2);
                                    p.free;
                                    p:=hp2;
                                  end;
                                else
                                  internalerror(2008072803);
                              end;
                            DebugMsg('Peephole ShiftShift2Shift 1 done', p);
                            asml.remove(hp1);
                            hp1.free;
                            result := true;
                          end
                        { fold
                            mov reg1,reg0, shift imm1
                            mov reg1,reg1, shift imm2
                            mov reg1,reg1, shift imm3 ...
                            mov reg2,reg1, shift imm3 ...
                        }
                        else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
                           MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
                           (taicpu(hp2).ops=3) and
                           MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
                           RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
                           (taicpu(hp2).oper[2]^.typ = top_shifterop) and
                           (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
                          begin
                            { mov reg1,reg0, lsl imm1
                              mov reg1,reg1, lsr/asr imm2
                              mov reg2,reg1, lsl imm3 ...
                              to
                              mov reg1,reg0, lsl imm1
                              mov reg2,reg1, lsr/asr imm2-imm3
                              if
                              imm1>=imm2
                            }
                            if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
                               (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
                               (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
                              begin
                                if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
                                  begin
                                    if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
                                       not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
                                      begin
                                        DebugMsg('Peephole ShiftShiftShift2ShiftShift 1a done', p);
                                        inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
                                        taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
                                        asml.remove(hp1);
                                        asml.remove(hp2);
                                        hp1.free;
                                        hp2.free;
                                        if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
                                          begin
                                            taicpu(p).freeop(1);
                                            taicpu(p).freeop(2);
                                            taicpu(p).loadconst(1,0);
                                          end;
                                        result := true;
                                      end;
                                  end
                                else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
                                  begin
                                    DebugMsg('Peephole ShiftShiftShift2ShiftShift 1b done', p);
                                    dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
                                    taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
                                    asml.remove(hp2);
                                    hp2.free;
                                    result := true;
                                  end;
                              end
                            { mov reg1,reg0, lsr/asr imm1
                              mov reg1,reg1, lsl imm2
                              mov reg1,reg1, lsr/asr imm3 ...
                              if imm3>=imm1 and imm2>=imm1
                              to
                              mov reg1,reg0, lsl imm2-imm1
                              mov reg1,reg1, lsr/asr imm3 ...
                            }
                            else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
                               (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
                               (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
                               (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
                              begin
                                dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
                                taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
                                DebugMsg('Peephole ShiftShiftShift2ShiftShift 2 done', p);
                                asml.remove(p);
                                p.free;
                                p:=hp2;
                                if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
                                  begin
                                    taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
                                    asml.remove(hp1);
                                    hp1.free;
                                    p:=hp2;
                                  end;
                                result := true;
                              end;
                          end;
                      end;
                    { Change the common
                        mov r0, r0, lsr #xxx
                        and r0, r0, #yyy / bic r0, r0, #xxx
                      and remove the superfluous and/bic if possible.
                      This could be extended to handle more cases.
                    }
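                    { Example (illustrative): after mov r0,r0,lsr #24 only the low 8 bits
                      can be non-zero, so a following and r0,r0,#255 changes nothing and
                      can be dropped. }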
                    if (taicpu(p).ops=3) and
                       (taicpu(p).oper[2]^.typ = top_shifterop) and
                       (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
                       (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
                       GetNextInstructionUsingReg(p,hp1, taicpu(p).oper[0]^.reg) and
                       (hp1.typ=ait_instruction) and
                       (taicpu(hp1).ops>=1) and
                       (taicpu(hp1).oper[0]^.typ=top_reg) and
                       (not RegModifiedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
                       RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
                      begin
                        if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24 ) and
                           MatchInstruction(hp1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
                           (taicpu(hp1).ops=3) and
                           MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^) and
                           (taicpu(hp1).oper[2]^.typ = top_const) and
                           { check whether the AND would only mask out bits that are already zero because of the shift }
                           ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hp1).oper[2]^.val) =
                            ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
                          begin
                            DebugMsg('Peephole LsrAnd2Lsr done', hp1);
                            taicpu(p).oper[0]^.reg:=taicpu(hp1).oper[0]^.reg;
                            asml.remove(hp1);
                            hp1.free;
                            result:=true;
                          end
                        else if MatchInstruction(hp1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
                           (taicpu(hp1).ops=3) and
                           MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^) and
                           (taicpu(hp1).oper[2]^.typ = top_const) and
                           { check whether the BIC would only mask out bits that are already zero because of the shift }
                           (taicpu(hp1).oper[2]^.val<>0) and
                           (BsfDWord(taicpu(hp1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
                          begin
                            DebugMsg('Peephole LsrBic2Lsr done', hp1);
                            taicpu(p).oper[0]^.reg:=taicpu(hp1).oper[0]^.reg;
                            asml.remove(hp1);
                            hp1.free;
                            result:=true;
                          end;
                      end;
                    { Change
                        mov rx, ry, lsr/ror #xxx
                        uxtb/uxth rz,rx / and rz,rx,0xFF
                        dealloc rx
                      to
                        uxtb/uxth rz,ry,ror #xxx
                    }
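                    { Example (illustrative, Thumb-2): mov r1,r2,lsr #8 followed by uxtb r0,r1
                      becomes uxtb r0,r2,ror #8, since UXTB with a rotation of 8, 16 or 24
                      extracts exactly that byte. }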
                    if (taicpu(p).ops=3) and
                       (taicpu(p).oper[2]^.typ = top_shifterop) and
                       (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
                       (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
                       (GenerateThumb2Code) and
                       GetNextInstructionUsingReg(p,hp1, taicpu(p).oper[0]^.reg) and
                       RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
                      begin
                        if MatchInstruction(hp1, A_UXTB, [C_None], [PF_None]) and
                           (taicpu(hp1).ops = 2) and
                           (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
                           MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
                          begin
                            taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
                            taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
                            taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
                            taicpu(hp1).ops := 3;
                            GetNextInstruction(p,hp1);
                            asml.Remove(p);
                            p.Free;
                            p:=hp1;
                            result:=true;
                            exit;
                          end
                        else if MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
                           (taicpu(hp1).ops=2) and
                           (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
                           MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
                          begin
                            taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
                            taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
                            taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
                            taicpu(hp1).ops := 3;
                            GetNextInstruction(p,hp1);
                            asml.Remove(p);
                            p.Free;
                            p:=hp1;
                            result:=true;
                            exit;
                          end
                        else if MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
                           (taicpu(hp1).ops = 3) and
                           (taicpu(hp1).oper[2]^.typ = top_const) and
                           (taicpu(hp1).oper[2]^.val = $FF) and
                           (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
                           MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
                          begin
                            taicpu(hp1).ops := 3;
                            taicpu(hp1).opcode := A_UXTB;
                            taicpu(hp1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
                            taicpu(hp1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
                            taicpu(hp1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
                            GetNextInstruction(p,hp1);
                            asml.Remove(p);
                            p.Free;
                            p:=hp1;
                            result:=true;
                            exit;
                          end;
                      end;
                    {
                      optimize
                        mov rX, yyyy
                        ....
                    }
                    if (taicpu(p).ops = 2) and
                       GetNextInstruction(p,hp1) and
                       (tai(hp1).typ = ait_instruction) then
                      begin
                        {
                          This changes the very common
                            mov r0, #0
                            str r0, [...]
                            mov r0, #0
                            str r0, [...]
                          and removes all superfluous mov instructions
                        }
                        if (taicpu(p).oper[1]^.typ = top_const) and
                           (taicpu(hp1).opcode=A_STR) then
                          while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
                                MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^) and
                                GetNextInstruction(hp1, hp2) and
                                MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
                                (taicpu(hp2).ops = 2) and
                                MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
                                MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
                            begin
                              DebugMsg('Peephole MovStrMov done', hp2);
                              GetNextInstruction(hp2,hp1);
                              asml.remove(hp2);
                              hp2.free;
                              result:=true;
                              if not assigned(hp1) then break;
                            end
                        {
                          This removes the first mov from
                            mov rX,...
                            mov rX,...
                        }
                        else if taicpu(hp1).opcode=A_MOV then
                          while MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
                                (taicpu(hp1).ops = 2) and
                                MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^) and
                                { don't remove the first mov if the second is a mov rX,rX }
                                not(MatchOperand(taicpu(hp1).oper[0]^, taicpu(hp1).oper[1]^)) do
                            begin
                              DebugMsg('Peephole MovMov done', p);
                              asml.remove(p);
                              p.free;
                              p:=hp1;
                              GetNextInstruction(hp1,hp1);
                              result:=true;
                              if not assigned(hp1) then
                                break;
                            end;
                      end;
                    {
                      change
                        mov r1, r0
                        add r1, r1, #1
                      to
                        add r1, r0, #1
                      Todo: Make it work for mov+cmp too
                      CAUTION! If this one is successful p might not be a mov instruction anymore!
                    }
                    if (taicpu(p).ops = 2) and
                       (taicpu(p).oper[1]^.typ = top_reg) and
                       (taicpu(p).oppostfix = PF_NONE) and
                       GetNextInstruction(p, hp1) and
                       MatchInstruction(hp1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
                                              A_AND, A_BIC, A_EOR, A_ORR, A_MOV, A_MVN],
                                        [taicpu(p).condition], []) and
                       { MOV and MVN might have only 2 ops }
                       (taicpu(hp1).ops >= 2) and
                       MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^.reg) and
                       (taicpu(hp1).oper[1]^.typ = top_reg) and
                       (
                        (taicpu(hp1).ops = 2) or
                        (taicpu(hp1).oper[2]^.typ in [top_reg, top_const, top_shifterop])
                       ) then
                      begin
                        { When we get here we still don't know if the registers match }
                        for I:=1 to 2 do
                          {
                            If the first iteration was successful p will have been replaced with hp1.
                            The checks will still be ok, because all required information
                            will also be in hp1 then.
                          }
                          if (taicpu(hp1).ops > I) and
                             MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[I]^.reg) and
                             { prevent certain combinations on thumb(2), this is only a safe approximation }
                             (not(GenerateThumbCode or GenerateThumb2Code) or
                              ((getsupreg(taicpu(p).oper[1]^.reg)<>RS_R13) and
                               (getsupreg(taicpu(p).oper[1]^.reg)<>RS_R15))
                             ) then
                            begin
                              DebugMsg('Peephole RedundantMovProcess done', hp1);
                              taicpu(hp1).oper[I]^.reg := taicpu(p).oper[1]^.reg;
                              if p<>hp1 then
                                begin
                                  asml.remove(p);
                                  p.free;
                                  p:=hp1;
                                  Result:=true;
                                end;
                            end;
                      end;
  1216. { Fold the very common sequence
  1217. mov regA, regB
  1218. ldr* regA, [regA]
  1219. to
  1220. ldr* regA, [regB]
  1221. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1222. }
  1223. if (taicpu(p).opcode = A_MOV) and
  1224. (taicpu(p).ops = 2) and
  1225. (taicpu(p).oper[1]^.typ = top_reg) and
  1226. (taicpu(p).oppostfix = PF_NONE) and
  1227. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1228. MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1229. (taicpu(hp1).oper[1]^.typ = top_ref) and
  1230. { We can change the base register only when the instruction uses AM_OFFSET }
  1231. ((taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1232. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1233. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1234. ) and
  1235. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
  1236. // Make sure that Thumb code doesn't propagate a high register into a reference
  1237. ((GenerateThumbCode and
  1238. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)) or
  1239. (not GenerateThumbCode)) and
  1240. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1241. begin
  1242. DebugMsg('Peephole MovLdr2Ldr done', hp1);
  1243. if (taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1244. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1245. taicpu(hp1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1246. if taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1247. taicpu(hp1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1248. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1249. if Assigned(dealloc) then
  1250. begin
  1251. asml.remove(dealloc);
  1252. asml.InsertAfter(dealloc,hp1);
  1253. end;
  1254. GetNextInstruction(p, hp1);
  1255. asml.remove(p);
  1256. p.free;
  1257. p:=hp1;
  1258. result:=true;
  1259. end;
  1260. { This folds shifterops into following instructions
  1261. mov r0, r1, lsl #8
  1262. add r2, r3, r0
  1263. to
  1264. add r2, r3, r1, lsl #8
  1265. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1266. }
  1267. if (taicpu(p).opcode = A_MOV) and
  1268. (taicpu(p).ops = 3) and
  1269. (taicpu(p).oper[1]^.typ = top_reg) and
  1270. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1271. (taicpu(p).oppostfix = PF_NONE) and
  1272. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1273. MatchInstruction(hp1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1274. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1275. A_CMP, A_CMN],
  1276. [taicpu(p).condition], [PF_None]) and
  1277. (not ((GenerateThumb2Code) and
  1278. (taicpu(hp1).opcode in [A_SBC]) and
  1279. (((taicpu(hp1).ops=3) and
  1280. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[1]^.reg)) or
  1281. ((taicpu(hp1).ops=2) and
  1282. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[0]^.reg))))) and
  1283. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) and
  1284. (taicpu(hp1).ops >= 2) and
  1285. {Currently we can't fold into another shifterop}
  1286. (taicpu(hp1).oper[taicpu(hp1).ops-1]^.typ = top_reg) and
  1287. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1288. NR_DEFAULTFLAGS for modification}
  1289. (
  1290. {Everything is fine if we don't use RRX}
  1291. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1292. (
  1293. {If it is RRX, then check if we're just accessing the next instruction}
  1294. GetNextInstruction(p, hp2) and
  1295. (hp1 = hp2)
  1296. )
  1297. ) and
1298. { reg1 must not be modified in between }
  1299. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
1300. { The shifterop can contain a register, which must not be modified }
  1301. (
  1302. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1303. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hp1))
  1304. ) and
  1305. (
  1306. {Only ONE of the two src operands is allowed to match}
  1307. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[taicpu(hp1).ops-2]^) xor
  1308. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[taicpu(hp1).ops-1]^)
  1309. ) then
  1310. begin
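{ TST/TEQ/CMN have no destination register, so scanning starts at operand 0;
for the other opcodes operand 0 is the destination, and for CMP only the
rightmost source is considered (see the comment below) }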
  1311. if taicpu(hp1).opcode in [A_TST, A_TEQ, A_CMN] then
  1312. I2:=0
  1313. else
  1314. I2:=1;
  1315. for I:=I2 to taicpu(hp1).ops-1 do
  1316. if MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[I]^.reg) then
  1317. begin
1318. { If the parameter matched on the second operand from the RIGHT,
1319. we have to swap the parameters; this will not happen for CMP,
1320. where we only evaluate the rightmost parameter
1321. }
  1322. if I <> taicpu(hp1).ops-1 then
  1323. begin
  1324. {The SUB operators need to be changed when we swap parameters}
  1325. case taicpu(hp1).opcode of
  1326. A_SUB: tempop:=A_RSB;
  1327. A_SBC: tempop:=A_RSC;
  1328. A_RSB: tempop:=A_SUB;
  1329. A_RSC: tempop:=A_SBC;
  1330. else tempop:=taicpu(hp1).opcode;
  1331. end;
  1332. if taicpu(hp1).ops = 3 then
  1333. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1334. taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[2]^.reg,
  1335. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1336. else
  1337. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1338. taicpu(hp1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1339. taicpu(p).oper[2]^.shifterop^);
  1340. end
  1341. else
  1342. if taicpu(hp1).ops = 3 then
  1343. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hp1).opcode,
  1344. taicpu(hp1).oper[0]^.reg, taicpu(hp1).oper[1]^.reg,
  1345. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1346. else
  1347. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hp1).opcode,
  1348. taicpu(hp1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1349. taicpu(p).oper[2]^.shifterop^);
  1350. asml.insertbefore(hp2, hp1);
  1351. GetNextInstruction(p, hp2);
  1352. asml.remove(p);
  1353. asml.remove(hp1);
  1354. p.free;
  1355. hp1.free;
  1356. p:=hp2;
  1357. DebugMsg('Peephole FoldShiftProcess done', p);
  1358. Result:=true;
  1359. break;
  1360. end;
  1361. end;
  1362. {
  1363. Fold
  1364. mov r1, r1, lsl #2
  1365. ldr/ldrb r0, [r0, r1]
  1366. to
  1367. ldr/ldrb r0, [r0, r1, lsl #2]
  1368. XXX: This still needs some work, as we quite often encounter something like
  1369. mov r1, r2, lsl #2
  1370. add r2, r3, #imm
  1371. ldr r0, [r2, r1]
  1372. which can't be folded because r2 is overwritten between the shift and the ldr.
1373. We could try to shuffle the registers around and fold it into:
  1374. add r1, r3, #imm
  1375. ldr r0, [r1, r2, lsl #2]
  1376. }
  1377. if (not(GenerateThumbCode)) and
  1378. (taicpu(p).opcode = A_MOV) and
  1379. (taicpu(p).ops = 3) and
  1380. (taicpu(p).oper[1]^.typ = top_reg) and
  1381. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1382. { RRX is tough to handle, because it requires tracking the C-Flag,
1383. it is also extremely unlikely to be emitted this way}
  1384. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1385. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1386. { thumb2 allows only lsl #0..#3 }
  1387. (not(GenerateThumb2Code) or
  1388. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1389. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1390. )
  1391. ) and
  1392. (taicpu(p).oppostfix = PF_NONE) and
  1393. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1394. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1395. (MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1396. (GenerateThumb2Code and
  1397. MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1398. ) and
  1399. (
1400. {If this is offset addressing, either of the two registers can be used}
  1401. ((taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1402. (
  1403. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1404. (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1405. )
  1406. ) or
  1407. {For post and preindexed only the index register can be used}
  1408. ((taicpu(hp1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1409. (
  1410. (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1411. (taicpu(hp1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1412. ) and
  1413. (not GenerateThumb2Code)
  1414. )
  1415. ) and
  1416. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1417. (taicpu(hp1).oper[1]^.ref^.index<>NR_NO) and
  1418. (taicpu(hp1).oper[1]^.ref^.base<>NR_NO) and
  1419. { Only fold if there isn't another shifterop already, and offset is zero. }
  1420. (taicpu(hp1).oper[1]^.ref^.offset = 0) and
  1421. (taicpu(hp1).oper[1]^.ref^.shiftmode = SM_None) and
  1422. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
  1423. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  1424. begin
  1425. { If the register we want to do the shift for resides in base, we need to swap that}
  1426. if (taicpu(hp1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1427. taicpu(hp1).oper[1]^.ref^.base := taicpu(hp1).oper[1]^.ref^.index;
  1428. taicpu(hp1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1429. taicpu(hp1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1430. taicpu(hp1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1431. DebugMsg('Peephole FoldShiftLdrStr done', hp1);
  1432. GetNextInstruction(p, hp1);
  1433. asml.remove(p);
  1434. p.free;
  1435. p:=hp1;
  1436. Result:=true;
  1437. end;
  1438. {
1439. Often we see shifts and then a superfluous mov to another register.
1440. In the future this might be handled in RedundantMovProcess when it uses RegisterTracking.
  1441. }
  1442. if (taicpu(p).opcode = A_MOV) and
  1443. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1444. RemoveSuperfluousMove(p, hp1, 'MovMov2Mov') then
  1445. Result:=true;
  1446. end;
  1447. A_ADD,
  1448. A_ADC,
  1449. A_RSB,
  1450. A_RSC,
  1451. A_SUB,
  1452. A_SBC,
  1453. A_AND,
  1454. A_BIC,
  1455. A_EOR,
  1456. A_ORR,
  1457. A_MLA,
  1458. A_MLS,
  1459. A_MUL:
  1460. begin
  1461. {
  1462. optimize
  1463. and reg2,reg1,const1
  1464. ...
  1465. }
  1466. if (taicpu(p).opcode = A_AND) and
  1467. (taicpu(p).ops>2) and
  1468. (taicpu(p).oper[1]^.typ = top_reg) and
  1469. (taicpu(p).oper[2]^.typ = top_const) then
  1470. begin
  1471. {
  1472. change
  1473. and reg2,reg1,const1
  1474. ...
  1475. and reg3,reg2,const2
  1476. to
  1477. and reg3,reg1,(const1 and const2)
  1478. }
  1479. if GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1480. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_None]) and
  1481. RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
  1482. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1483. (taicpu(hp1).oper[2]^.typ = top_const) then
  1484. begin
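{ fold the combined mask into whichever of the two and instructions is safe:
into p (moving the write of the final destination earlier) when that register
is unused between p and hp1, otherwise into hp1 when reg1 is unused in between }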
  1485. if not(RegUsedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) then
  1486. begin
  1487. DebugMsg('Peephole AndAnd2And done', p);
  1488. taicpu(p).loadConst(2,taicpu(p).oper[2]^.val and taicpu(hp1).oper[2]^.val);
  1489. taicpu(p).oppostfix:=taicpu(hp1).oppostfix;
  1490. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1491. asml.remove(hp1);
  1492. hp1.free;
  1493. Result:=true;
  1494. end
  1495. else if not(RegUsedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1496. begin
  1497. DebugMsg('Peephole AndAnd2And done', hp1);
  1498. taicpu(hp1).loadConst(2,taicpu(p).oper[2]^.val and taicpu(hp1).oper[2]^.val);
  1499. taicpu(hp1).oppostfix:=taicpu(p).oppostfix;
  1500. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1501. GetNextInstruction(p, hp1);
  1502. asml.remove(p);
  1503. p.free;
  1504. p:=hp1;
  1505. Result:=true;
  1506. end;
  1507. end
  1508. {
  1509. change
  1510. and reg2,reg1,$xxxxxxFF
  1511. strb reg2,[...]
  1512. dealloc reg2
  1513. to
  1514. strb reg1,[...]
  1515. }
  1516. else if ((taicpu(p).oper[2]^.val and $FF) = $FF) and
  1517. MatchInstruction(p, A_AND, [C_None], [PF_None]) and
  1518. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1519. MatchInstruction(hp1, A_STR, [C_None], [PF_B]) and
  1520. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
1521. { the reference in the strb must not use reg2 }
  1522. not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
1523. { reg1 must not be modified in between }
  1524. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1525. begin
  1526. DebugMsg('Peephole AndStrb2Strb done', p);
  1527. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1528. GetNextInstruction(p, hp1);
  1529. asml.remove(p);
  1530. p.free;
  1531. p:=hp1;
  1532. result:=true;
  1533. end
  1534. {
  1535. change
  1536. and reg2,reg1,255
  1537. uxtb/uxth reg3,reg2
  1538. dealloc reg2
  1539. to
1540. and reg3,reg1,255
  1541. }
  1542. else if (taicpu(p).oper[2]^.val = $FF) and
  1543. MatchInstruction(p, A_AND, [C_None], [PF_None]) and
  1544. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1545. MatchInstruction(hp1, [A_UXTB,A_UXTH], [C_None], [PF_None]) and
  1546. (taicpu(hp1).ops = 2) and
  1547. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
  1548. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
1549. { reg1 must not be modified in between }
  1550. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1551. begin
  1552. DebugMsg('Peephole AndUxt2And done', p);
  1553. taicpu(hp1).opcode:=A_AND;
  1554. taicpu(hp1).ops:=3;
  1555. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1556. taicpu(hp1).loadconst(2,255);
  1557. GetNextInstruction(p,hp1);
  1558. asml.remove(p);
  1559. p.Free;
  1560. p:=hp1;
  1561. result:=true;
  1562. end
  1563. {
  1564. from
  1565. and reg1,reg0,2^n-1
  1566. mov reg2,reg1, lsl imm1
  1567. (mov reg3,reg2, lsr/asr imm1)
1568. remove either the and or the lsl + lsr/asr sequence if possible
  1569. }
  1570. else if cutils.ispowerof2(taicpu(p).oper[2]^.val+1,i) and
  1571. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1572. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1573. (taicpu(hp1).ops=3) and
  1574. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1575. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1576. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) and
  1577. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1578. RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) then
  1579. begin
  1580. {
  1581. and reg1,reg0,2^n-1
  1582. mov reg2,reg1, lsl imm1
  1583. mov reg3,reg2, lsr/asr imm1
  1584. =>
  1585. and reg1,reg0,2^n-1
1586. if (lsr and n<=32-imm1) or (asr and n<32-imm1)
  1587. }
  1588. if GetNextInstructionUsingReg(hp1,hp2,taicpu(p).oper[0]^.reg) and
  1589. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1590. (taicpu(hp2).ops=3) and
  1591. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1592. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1593. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) and
  1594. (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1595. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=taicpu(hp2).oper[2]^.shifterop^.shiftimm) and
  1596. RegEndOfLife(taicpu(hp1).oper[0]^.reg,taicpu(hp2)) and
  1597. ((i<32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) or
  1598. ((i=32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) and
  1599. (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSR))) then
  1600. begin
  1601. DebugMsg('Peephole AndLslXsr2And done', p);
  1602. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1603. asml.Remove(hp1);
  1604. asml.Remove(hp2);
  1605. hp1.free;
  1606. hp2.free;
  1607. result:=true;
  1608. end
  1609. {
  1610. and reg1,reg0,2^n-1
  1611. mov reg2,reg1, lsl imm1
  1612. =>
  1613. mov reg2,reg0, lsl imm1
1614. if imm1>32-n
  1615. }
  1616. else if (i>32-taicpu(hp1).oper[2]^.shifterop^.shiftimm) and
  1617. not(RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) then
  1618. begin
  1619. DebugMsg('Peephole AndLsl2Lsl done', p);
  1620. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1621. GetNextInstruction(p, hp1);
  1622. asml.Remove(p);
  1623. p.free;
  1624. p:=hp1;
  1625. result:=true;
  1626. end
  1627. end;
  1628. end;
  1629. {
  1630. change
  1631. add/sub reg2,reg1,const1
  1632. str/ldr reg3,[reg2,const2]
  1633. dealloc reg2
  1634. to
  1635. str/ldr reg3,[reg1,const2+/-const1]
  1636. }
  1637. if (not GenerateThumbCode) and
  1638. (taicpu(p).opcode in [A_ADD,A_SUB]) and
  1639. (taicpu(p).ops>2) and
  1640. (taicpu(p).oper[1]^.typ = top_reg) and
  1641. (taicpu(p).oper[2]^.typ = top_const) then
  1642. begin
  1643. hp1:=p;
  1644. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
  1645. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  1646. MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
  1647. (taicpu(hp1).oper[1]^.typ = top_ref) and
  1648. (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
  1649. { don't optimize if the register is stored/overwritten }
  1650. (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
  1651. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  1652. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
1653. { new offset must be valid: either in the range of 8 or 12 bit, depending on the
  1654. ldr postfix }
  1655. (((taicpu(p).opcode=A_ADD) and
  1656. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  1657. ) or
  1658. ((taicpu(p).opcode=A_SUB) and
  1659. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  1660. )
  1661. ) do
  1662. begin
1663. { neither reg1 nor reg2 may be changed in between }
  1664. if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
  1665. RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
  1666. break;
1667. { reg2 must either be overwritten by the ldr or be deallocated afterwards }
  1668. if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
  1669. assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
  1670. begin
  1671. { remember last instruction }
  1672. hp2:=hp1;
  1673. DebugMsg('Peephole Add/SubLdr2Ldr done', p);
  1674. hp1:=p;
  1675. { fix all ldr/str }
  1676. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
  1677. begin
  1678. taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
  1679. if taicpu(p).opcode=A_ADD then
  1680. inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
  1681. else
  1682. dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
  1683. if hp1=hp2 then
  1684. break;
  1685. end;
  1686. GetNextInstruction(p,hp1);
  1687. asml.remove(p);
  1688. p.free;
  1689. p:=hp1;
  1690. result:=true;
  1691. break;
  1692. end;
  1693. end;
  1694. end;
  1695. {
  1696. change
  1697. add reg1, ...
  1698. mov reg2, reg1
  1699. to
  1700. add reg2, ...
  1701. }
  1702. if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1703. (taicpu(p).ops>=3) and
  1704. RemoveSuperfluousMove(p, hp1, 'DataMov2Data') then
  1705. Result:=true;
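{ an add/sub rX,rX,#imm directly followed by a load/store using [rX] can be merged
into a pre-indexed reference, e.g. (sketch; the matching and rewriting is assumed
to be done by LookForPreindexedPattern, defined earlier in this unit):
add r1, r1, #4
ldr r0, [r1]
->
ldr r0, [r1, #4]!
}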
  1706. if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
  1707. LookForPreindexedPattern(taicpu(p)) then
  1708. begin
  1709. GetNextInstruction(p,hp1);
  1710. DebugMsg('Peephole Add/Sub to Preindexed done', p);
  1711. asml.remove(p);
  1712. p.free;
  1713. p:=hp1;
  1714. Result:=true;
  1715. end;
  1716. {
  1717. Turn
  1718. mul reg0, z,w
  1719. sub/add x, y, reg0
  1720. dealloc reg0
  1721. into
  1722. mls/mla x,z,w,y
  1723. }
  1724. if MatchInstruction(p, [A_MUL], [C_None], [PF_None]) and
  1725. (taicpu(p).ops=3) and
  1726. (taicpu(p).oper[0]^.typ = top_reg) and
  1727. (taicpu(p).oper[1]^.typ = top_reg) and
  1728. (taicpu(p).oper[2]^.typ = top_reg) and
  1729. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1730. MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
  1731. (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
  1732. (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
  1733. (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
  1734. ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
1735. // On CPUs before ARMv6, Rd and Rm should not be the same register for MLA.
  1736. // TODO: A workaround would be to swap Rm and Rs
  1737. (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
  1738. (((taicpu(hp1).ops=3) and
  1739. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1740. ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
  1741. (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
  1742. ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1743. (taicpu(hp1).opcode=A_ADD) and
  1744. (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
  1745. ((taicpu(hp1).ops=2) and
  1746. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1747. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1748. (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
  1749. begin
  1750. if taicpu(hp1).opcode=A_ADD then
  1751. begin
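{ mul reg0,z,w ; add x,y,reg0 -> mla x,z,w,y:
pick the add operand that is not the mul result as the accumulator
before the operands are overwritten below }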
  1752. taicpu(hp1).opcode:=A_MLA;
  1753. if taicpu(hp1).ops=3 then
  1754. begin
  1755. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
  1756. oldreg:=taicpu(hp1).oper[2]^.reg
  1757. else
  1758. oldreg:=taicpu(hp1).oper[1]^.reg;
  1759. end
  1760. else
  1761. oldreg:=taicpu(hp1).oper[0]^.reg;
  1762. taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
  1763. taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
  1764. taicpu(hp1).loadreg(3,oldreg);
  1765. DebugMsg('MulAdd2MLA done', p);
  1766. taicpu(hp1).ops:=4;
  1767. asml.remove(p);
  1768. p.free;
  1769. p:=hp1;
  1770. end
  1771. else
  1772. begin
  1773. taicpu(hp1).opcode:=A_MLS;
  1774. taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
  1775. if taicpu(hp1).ops=2 then
  1776. taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
  1777. else
  1778. taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
  1779. taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
  1780. DebugMsg('MulSub2MLS done', p);
  1781. taicpu(hp1).ops:=4;
  1782. asml.remove(p);
  1783. p.free;
  1784. p:=hp1;
  1785. end;
  1786. result:=true;
  1787. end
  1788. end;
  1789. {$ifdef dummy}
  1790. A_MVN:
  1791. begin
  1792. {
  1793. change
  1794. mvn reg2,reg1
  1795. and reg3,reg4,reg2
  1796. dealloc reg2
  1797. to
  1798. bic reg3,reg4,reg1
  1799. }
  1800. if (taicpu(p).oper[1]^.typ = top_reg) and
  1801. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1802. MatchInstruction(hp1,A_AND,[],[]) and
  1803. (((taicpu(hp1).ops=3) and
  1804. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1805. (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
  1806. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
  1807. ((taicpu(hp1).ops=2) and
  1808. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1809. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1810. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
1811. { reg1 must not be modified in between }
  1812. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1813. begin
  1814. DebugMsg('Peephole MvnAnd2Bic done', p);
  1815. taicpu(hp1).opcode:=A_BIC;
  1816. if taicpu(hp1).ops=3 then
  1817. begin
  1818. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1819. taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
  1820. taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
  1821. end
  1822. else
  1823. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1824. GetNextInstruction(p, hp1);
  1825. asml.remove(p);
  1826. p.free;
  1827. p:=hp1;
  1828. end;
  1829. end;
  1830. {$endif dummy}
  1831. A_UXTB:
  1832. begin
  1833. {
  1834. change
  1835. uxtb reg2,reg1
  1836. strb reg2,[...]
  1837. dealloc reg2
  1838. to
  1839. strb reg1,[...]
  1840. }
  1841. if MatchInstruction(p, taicpu(p).opcode, [C_None], [PF_None]) and
  1842. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1843. MatchInstruction(hp1, A_STR, [C_None], [PF_B]) and
  1844. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
1845. { the reference in the strb must not use reg2 }
  1846. not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
1847. { reg1 must not be modified in between }
  1848. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1849. begin
  1850. DebugMsg('Peephole UxtbStrb2Strb done', p);
  1851. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1852. GetNextInstruction(p,hp2);
  1853. asml.remove(p);
  1854. p.free;
  1855. p:=hp2;
  1856. result:=true;
  1857. end
  1858. {
  1859. change
  1860. uxtb reg2,reg1
  1861. uxth reg3,reg2
  1862. dealloc reg2
  1863. to
  1864. uxtb reg3,reg1
  1865. }
  1866. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1867. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1868. MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
  1869. (taicpu(hp1).ops = 2) and
  1870. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1871. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
1872. { reg1 must not be modified in between }
  1873. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1874. begin
  1875. DebugMsg('Peephole UxtbUxth2Uxtb done', p);
  1876. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1877. asml.remove(hp1);
  1878. hp1.free;
  1879. result:=true;
  1880. end
  1881. {
  1882. change
  1883. uxtb reg2,reg1
  1884. uxtb reg3,reg2
  1885. dealloc reg2
  1886. to
  1887. uxtb reg3,reg1
  1888. }
  1889. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1890. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1891. MatchInstruction(hp1, A_UXTB, [C_None], [PF_None]) and
  1892. (taicpu(hp1).ops = 2) and
  1893. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1894. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
1895. { reg1 must not be modified in between }
  1896. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1897. begin
  1898. DebugMsg('Peephole UxtbUxtb2Uxtb done', p);
  1899. taicpu(p).loadReg(0,taicpu(hp1).oper[0]^.reg);
  1900. asml.remove(hp1);
  1901. hp1.free;
  1902. result:=true;
  1903. end
  1904. {
  1905. change
  1906. uxtb reg2,reg1
  1907. and reg3,reg2,#0x*FF
  1908. dealloc reg2
  1909. to
  1910. uxtb reg3,reg1
  1911. }
  1912. else if MatchInstruction(p, A_UXTB, [C_None], [PF_None]) and
  1913. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1914. (taicpu(p).ops=2) and
  1915. MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
  1916. (taicpu(hp1).ops=3) and
  1917. (taicpu(hp1).oper[2]^.typ=top_const) and
  1918. ((taicpu(hp1).oper[2]^.val and $FF)=$FF) and
  1919. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1920. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
1921. { reg1 must not be modified in between }
  1922. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1923. begin
  1924. DebugMsg('Peephole UxtbAndImm2Uxtb done', p);
  1925. taicpu(hp1).opcode:=A_UXTB;
  1926. taicpu(hp1).ops:=2;
  1927. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1928. GetNextInstruction(p,hp2);
  1929. asml.remove(p);
  1930. p.free;
  1931. p:=hp2;
  1932. result:=true;
  1933. end
  1934. else if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1935. RemoveSuperfluousMove(p, hp1, 'UxtbMov2Data') then
  1936. Result:=true;
  1937. end;
  1938. A_UXTH:
  1939. begin
  1940. {
  1941. change
  1942. uxth reg2,reg1
  1943. strh reg2,[...]
  1944. dealloc reg2
  1945. to
  1946. strh reg1,[...]
  1947. }
  1948. if MatchInstruction(p, taicpu(p).opcode, [C_None], [PF_None]) and
  1949. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1950. MatchInstruction(hp1, A_STR, [C_None], [PF_H]) and
  1951. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
1952. { the reference in the strh must not use reg2 }
  1953. not(RegInRef(taicpu(p).oper[0]^.reg,taicpu(hp1).oper[1]^.ref^)) and
1954. { reg1 must not be modified in between }
  1955. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1956. begin
  1957. DebugMsg('Peephole UXTHStrh2Strh done', p);
  1958. taicpu(hp1).loadReg(0,taicpu(p).oper[1]^.reg);
  1959. GetNextInstruction(p, hp1);
  1960. asml.remove(p);
  1961. p.free;
  1962. p:=hp1;
  1963. result:=true;
  1964. end
  1965. {
  1966. change
  1967. uxth reg2,reg1
  1968. uxth reg3,reg2
  1969. dealloc reg2
  1970. to
  1971. uxth reg3,reg1
  1972. }
  1973. else if MatchInstruction(p, A_UXTH, [C_None], [PF_None]) and
  1974. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1975. MatchInstruction(hp1, A_UXTH, [C_None], [PF_None]) and
  1976. (taicpu(hp1).ops=2) and
  1977. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1978. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
1979. { reg1 must not be modified in between }
  1980. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1981. begin
  1982. DebugMsg('Peephole UxthUxth2Uxth done', p);
  1983. taicpu(hp1).opcode:=A_UXTH;
  1984. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1985. GetNextInstruction(p, hp1);
  1986. asml.remove(p);
  1987. p.free;
  1988. p:=hp1;
  1989. result:=true;
  1990. end
  1991. {
  1992. change
  1993. uxth reg2,reg1
  1994. and reg3,reg2,#65535
  1995. dealloc reg2
  1996. to
  1997. uxth reg3,reg1
  1998. }
  1999. else if MatchInstruction(p, A_UXTH, [C_None], [PF_None]) and
  2000. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  2001. MatchInstruction(hp1, A_AND, [C_None], [PF_None]) and
  2002. (taicpu(hp1).ops=3) and
  2003. (taicpu(hp1).oper[2]^.typ=top_const) and
  2004. ((taicpu(hp1).oper[2]^.val and $FFFF)=$FFFF) and
  2005. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  2006. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp1)) and
2007. { reg1 must not be modified in between }
  2008. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  2009. begin
  2010. DebugMsg('Peephole UxthAndImm2Uxth done', p);
  2011. taicpu(hp1).opcode:=A_UXTH;
  2012. taicpu(hp1).ops:=2;
  2013. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  2014. GetNextInstruction(p, hp1);
  2015. asml.remove(p);
  2016. p.free;
  2017. p:=hp1;
  2018. result:=true;
  2019. end
  2020. else if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  2021. RemoveSuperfluousMove(p, hp1, 'UxthMov2Data') then
  2022. Result:=true;
  2023. end;
  2024. A_CMP:
  2025. begin
  2026. {
  2027. change
  2028. cmp reg,const1
  2029. moveq reg,const1
  2030. movne reg,const2
  2031. to
  2032. cmp reg,const1
  2033. movne reg,const2
  2034. }
  2035. if (taicpu(p).oper[1]^.typ = top_const) and
  2036. GetNextInstruction(p, hp1) and
  2037. MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  2038. (taicpu(hp1).oper[1]^.typ = top_const) and
  2039. GetNextInstruction(hp1, hp2) and
  2040. MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
2041. (taicpu(hp2).oper[1]^.typ = top_const) then
  2042. begin
  2043. Result:=RemoveRedundantMove(p, hp1, asml) or Result;
  2044. Result:=RemoveRedundantMove(p, hp2, asml) or Result;
  2045. end;
  2046. end;
  2047. A_STM:
  2048. begin
  2049. {
  2050. change
  2051. stmfd r13!,[r14]
  2052. sub r13,r13,#4
  2053. bl abc
  2054. add r13,r13,#4
  2055. ldmfd r13!,[r15]
  2056. into
  2057. b abc
  2058. }
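{ in effect this turns the call into a tail call: saving lr, adjusting the stack
around the bl and reloading pc become redundant }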
  2059. if not(ts_thumb_interworking in current_settings.targetswitches) and
  2060. MatchInstruction(p, A_STM, [C_None], [PF_FD]) and
  2061. GetNextInstruction(p, hp1) and
  2062. GetNextInstruction(hp1, hp2) and
  2063. SkipEntryExitMarker(hp2, hp2) and
  2064. GetNextInstruction(hp2, hp3) and
  2065. SkipEntryExitMarker(hp3, hp3) and
  2066. GetNextInstruction(hp3, hp4) and
  2067. (taicpu(p).oper[0]^.typ = top_ref) and
  2068. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2069. (taicpu(p).oper[0]^.ref^.base=NR_NO) and
  2070. (taicpu(p).oper[0]^.ref^.offset=0) and
  2071. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2072. (taicpu(p).oper[1]^.typ = top_regset) and
  2073. (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
  2074. MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
  2075. (taicpu(hp1).oper[0]^.typ = top_reg) and
  2076. (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
  2077. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
  2078. (taicpu(hp1).oper[2]^.typ = top_const) and
  2079. MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
  2080. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
  2081. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
  2082. MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
  2083. MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
  2084. (taicpu(hp2).oper[0]^.typ = top_ref) and
  2085. MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
  2086. MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
  2087. (taicpu(hp4).oper[1]^.typ = top_regset) and
  2088. (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
  2089. begin
  2090. asml.Remove(p);
  2091. asml.Remove(hp1);
  2092. asml.Remove(hp3);
  2093. asml.Remove(hp4);
  2094. taicpu(hp2).opcode:=A_B;
  2095. p.free;
  2096. hp1.free;
  2097. hp3.free;
  2098. hp4.free;
  2099. p:=hp2;
  2100. DebugMsg('Peephole Bl2B done', p);
  2101. end;
  2102. end;
  2103. A_VADD,
  2104. A_VMUL,
  2105. A_VDIV,
  2106. A_VSUB,
  2107. A_VSQRT,
  2108. A_VNEG,
  2109. A_VCVT,
  2110. A_VABS:
  2111. begin
  2112. if GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  2113. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp') then
  2114. Result:=true;
  2115. end
  2116. end;
  2117. end;
  2118. end;
  2119. end;
2120. { instructions modifying the CPSR may only be the last instruction of a block being conditionalised }
  2121. function MustBeLast(p : tai) : boolean;
  2122. begin
  2123. Result:=(p.typ=ait_instruction) and
  2124. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  2125. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  2126. (taicpu(p).oppostfix=PF_S));
  2127. end;
  2128. procedure TCpuAsmOptimizer.PeepHoleOptPass2;
  2129. var
  2130. p,hp1,hp2: tai;
  2131. l : longint;
  2132. condition : tasmcond;
  2133. hp3: tai;
  2134. WasLast: boolean;
  2135. { UsedRegs, TmpUsedRegs: TRegSet; }
  2136. begin
  2137. p := BlockStart;
  2138. { UsedRegs := []; }
  2139. while (p <> BlockEnd) Do
  2140. begin
  2141. { UpdateUsedRegs(UsedRegs, tai(p.next)); }
  2142. case p.Typ Of
  2143. Ait_Instruction:
  2144. begin
  2145. case taicpu(p).opcode Of
  2146. A_B:
  2147. if (taicpu(p).condition<>C_None) and
  2148. not(GenerateThumbCode) then
  2149. begin
  2150. { check for
  2151. Bxx xxx
  2152. <several instructions>
  2153. xxx:
  2154. }
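{ if the conditional branch only skips a few conditionalisable instructions,
predicate those instructions with the inverse condition and drop the branch }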
  2155. l:=0;
  2156. WasLast:=False;
  2157. GetNextInstruction(p, hp1);
  2158. while assigned(hp1) and
  2159. (l<=4) and
  2160. CanBeCond(hp1) and
  2161. { stop on labels }
  2162. not(hp1.typ=ait_label) do
  2163. begin
  2164. inc(l);
  2165. if MustBeLast(hp1) then
  2166. begin
  2167. WasLast:=True;
  2168. GetNextInstruction(hp1,hp1);
  2169. break;
  2170. end
  2171. else
  2172. GetNextInstruction(hp1,hp1);
  2173. end;
  2174. if assigned(hp1) then
  2175. begin
  2176. if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2177. begin
  2178. if (l<=4) and (l>0) then
  2179. begin
  2180. condition:=inverse_cond(taicpu(p).condition);
  2181. hp2:=p;
  2182. GetNextInstruction(p,hp1);
  2183. p:=hp1;
  2184. repeat
  2185. if hp1.typ=ait_instruction then
  2186. taicpu(hp1).condition:=condition;
  2187. if MustBeLast(hp1) then
  2188. begin
  2189. GetNextInstruction(hp1,hp1);
  2190. break;
  2191. end
  2192. else
  2193. GetNextInstruction(hp1,hp1);
  2194. until not(assigned(hp1)) or
  2195. not(CanBeCond(hp1)) or
  2196. (hp1.typ=ait_label);
2197. { delay the removal, otherwise GetNextInstruction could
2198. skip the label if the jump moved away was its
2199. only usage }
  2200. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2201. asml.remove(hp2);
  2202. hp2.free;
  2203. continue;
  2204. end;
  2205. end
  2206. else
2207. { do not perform further optimizations if there is an instruction
2208. in block #1 which cannot be optimized.
  2209. }
  2210. if not WasLast then
  2211. begin
  2212. { check further for
  2213. Bcc xxx
  2214. <several instructions 1>
  2215. B yyy
  2216. xxx:
  2217. <several instructions 2>
  2218. yyy:
  2219. }
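{ classic if/else diamond: predicate block #1 with the inverse condition and
block #2 with the original condition, then remove both branches }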
  2220. { hp2 points to jmp yyy }
  2221. hp2:=hp1;
  2222. { skip hp1 to xxx }
  2223. GetNextInstruction(hp1, hp1);
  2224. if assigned(hp2) and
  2225. assigned(hp1) and
  2226. (l<=3) and
  2227. (hp2.typ=ait_instruction) and
  2228. (taicpu(hp2).is_jmp) and
  2229. (taicpu(hp2).condition=C_None) and
  2230. { real label and jump, no further references to the
  2231. label are allowed }
  2232. (tasmlabel(taicpu(p).oper[0]^.ref^.symbol).getrefs=2) and
  2233. FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2234. begin
  2235. l:=0;
  2236. { skip hp1 to <several moves 2> }
  2237. GetNextInstruction(hp1, hp1);
  2238. while assigned(hp1) and
  2239. CanBeCond(hp1) do
  2240. begin
  2241. inc(l);
  2242. GetNextInstruction(hp1, hp1);
  2243. end;
  2244. { hp1 points to yyy: }
  2245. if assigned(hp1) and
  2246. FindLabel(tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol),hp1) then
  2247. begin
  2248. condition:=inverse_cond(taicpu(p).condition);
  2249. GetNextInstruction(p,hp1);
  2250. hp3:=p;
  2251. p:=hp1;
  2252. repeat
  2253. if hp1.typ=ait_instruction then
  2254. taicpu(hp1).condition:=condition;
  2255. GetNextInstruction(hp1,hp1);
  2256. until not(assigned(hp1)) or
  2257. not(CanBeCond(hp1));
  2258. { hp2 is still at jmp yyy }
  2259. GetNextInstruction(hp2,hp1);
  2260. { hp2 is now at xxx: }
  2261. condition:=inverse_cond(condition);
  2262. GetNextInstruction(hp1,hp1);
  2263. { hp1 is now at <several movs 2> }
  2264. repeat
  2265. taicpu(hp1).condition:=condition;
  2266. GetNextInstruction(hp1,hp1);
  2267. until not(assigned(hp1)) or
  2268. not(CanBeCond(hp1)) or
  2269. (hp1.typ=ait_label);
  2270. {
  2271. asml.remove(hp1.next)
  2272. hp1.next.free;
  2273. asml.remove(hp1);
  2274. hp1.free;
  2275. }
  2276. { remove Bcc }
  2277. tasmlabel(taicpu(hp3).oper[0]^.ref^.symbol).decrefs;
  2278. asml.remove(hp3);
  2279. hp3.free;
  2280. { remove jmp }
  2281. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2282. asml.remove(hp2);
  2283. hp2.free;
  2284. continue;
  2285. end;
  2286. end;
  2287. end;
  2288. end;
  2289. end;
  2290. end;
  2291. end;
  2292. end;
  2293. p := tai(p.next)
  2294. end;
  2295. end;
  2296. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  2297. begin
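{ a bl may use or clobber any register; ldr/str with the doubleword postfix
implicitly accesses the register following oper[0] as well }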
  2298. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  2299. Result:=true
  2300. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  2301. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  2302. Result:=true
  2303. else
  2304. Result:=inherited RegInInstruction(Reg, p1);
  2305. end;
  2306. const
2307. { set of opcodes which might or do write to memory }
  2308. { TODO : extend armins.dat to contain r/w info }
  2309. opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
  2310. A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
  2311. { adjust the register live information when swapping the two instructions p and hp1,
2312. which must directly follow one another }
  2313. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
  2314. procedure CheckLiveEnd(reg : tregister);
  2315. var
  2316. supreg : TSuperRegister;
  2317. regtype : TRegisterType;
  2318. begin
  2319. if reg=NR_NO then
  2320. exit;
  2321. regtype:=getregtype(reg);
  2322. supreg:=getsupreg(reg);
  2323. if (cg.rg[regtype].live_end[supreg]=hp1) and
  2324. RegInInstruction(reg,p) then
  2325. cg.rg[regtype].live_end[supreg]:=p;
  2326. end;
  2327. procedure CheckLiveStart(reg : TRegister);
  2328. var
  2329. supreg : TSuperRegister;
  2330. regtype : TRegisterType;
  2331. begin
  2332. if reg=NR_NO then
  2333. exit;
  2334. regtype:=getregtype(reg);
  2335. supreg:=getsupreg(reg);
  2336. if (cg.rg[regtype].live_start[supreg]=p) and
  2337. RegInInstruction(reg,hp1) then
  2338. cg.rg[regtype].live_start[supreg]:=hp1;
  2339. end;
  2340. var
  2341. i : longint;
  2342. r : TSuperRegister;
  2343. begin
  2344. { assumption: p is directly followed by hp1 }
  2345. { if live of any reg used by p starts at p and hp1 uses this register then
  2346. set live start to hp1 }
  2347. for i:=0 to p.ops-1 do
  2348. case p.oper[i]^.typ of
  2349. Top_Reg:
  2350. CheckLiveStart(p.oper[i]^.reg);
  2351. Top_Ref:
  2352. begin
  2353. CheckLiveStart(p.oper[i]^.ref^.base);
  2354. CheckLiveStart(p.oper[i]^.ref^.index);
  2355. end;
  2356. Top_Shifterop:
  2357. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2358. Top_RegSet:
  2359. for r:=RS_R0 to RS_R15 do
  2360. if r in p.oper[i]^.regset^ then
  2361. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2362. end;
  2363. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2364. set live end to p }
  2365. for i:=0 to hp1.ops-1 do
  2366. case hp1.oper[i]^.typ of
  2367. Top_Reg:
  2368. CheckLiveEnd(hp1.oper[i]^.reg);
  2369. Top_Ref:
  2370. begin
  2371. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2372. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2373. end;
  2374. Top_Shifterop:
2375. CheckLiveEnd(hp1.oper[i]^.shifterop^.rs);
  2376. Top_RegSet:
  2377. for r:=RS_R0 to RS_R15 do
  2378. if r in hp1.oper[i]^.regset^ then
  2379. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2380. end;
  2381. end;
  2382. function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
  2383. { TODO : schedule also forward }
  2384. { TODO : schedule distance > 1 }
  2385. { returns true if p might be a load of a pc relative tls offset }
  2386. function PossibleTLSLoad(const p: tai) : boolean;
  2387. begin
  2388. Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
  2389. (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
  2390. (taicpu(p).oper[1]^.ref^.index=NR_PC)));
  2391. end;
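{ main idea: move a load one instruction up; if the value loaded by hp1 is needed
by the instruction following it but not by the preceding instruction p, the two
are swapped so (part of) the load latency can be hidden }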
  2392. var
  2393. hp1,hp2,hp3,hp4,hp5,insertpos : tai;
  2394. list : TAsmList;
  2395. begin
  2396. result:=true;
  2397. list:=TAsmList.create;
  2398. p:=BlockStart;
  2399. while p<>BlockEnd Do
  2400. begin
  2401. if (p.typ=ait_instruction) and
  2402. GetNextInstruction(p,hp1) and
  2403. (hp1.typ=ait_instruction) and
  2404. (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
  2405. (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
2406. { for now we don't reschedule if the previous instruction potentially changes a memory location }
  2407. ( (not(taicpu(p).opcode in opcode_could_mem_write) and
  2408. not(RegModifiedByInstruction(NR_PC,p))
  2409. ) or
  2410. ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
  2411. ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
  2412. (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
  2413. (taicpu(hp1).oper[1]^.ref^.offset=0)
  2414. )
  2415. ) or
2416. { try to prove that the memory accesses don't overlap }
  2417. ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
  2418. (taicpu(p).oper[1]^.typ = top_ref) and
  2419. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  2420. (taicpu(p).oppostfix=PF_None) and
  2421. (taicpu(hp1).oppostfix=PF_None) and
  2422. (taicpu(p).oper[1]^.ref^.index=NR_NO) and
  2423. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
2424. { get operand sizes and check if the offset distance is large enough to ensure no overlap }
  2425. (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
  2426. )
  2427. )
  2428. ) and
  2429. GetNextInstruction(hp1,hp2) and
  2430. (hp2.typ=ait_instruction) and
  2431. { loaded register used by next instruction? }
  2432. (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
  2433. { loaded register not used by previous instruction? }
  2434. not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
  2435. { same condition? }
  2436. (taicpu(p).condition=taicpu(hp1).condition) and
2437. { the first instruction must not change the register used as base }
  2438. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
  2439. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
  2440. ) and
2441. { the first instruction must not change the register used as index }
  2442. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
  2443. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
  2444. ) and
2445. { if we modify the base reg AND the first instruction uses that reg, we cannot schedule }
  2446. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
  2447. not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
  2448. not(PossibleTLSLoad(p)) and
  2449. not(PossibleTLSLoad(hp1)) then
  2450. begin
  2451. hp3:=tai(p.Previous);
  2452. hp5:=tai(p.next);
  2453. asml.Remove(p);
2454. { if there are reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
2455. associated with p, move them together with p }
  2456. { before the instruction? }
  2457. { find reg allocs,deallocs and PIC labels }
  2458. while assigned(hp3) and (hp3.typ<>ait_instruction) do
  2459. begin
  2460. if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
  2461. RegInInstruction(tai_regalloc(hp3).reg,p) )
  2462. or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
  2463. then
  2464. begin
  2465. hp4:=hp3;
  2466. hp3:=tai(hp3.Previous);
  2467. asml.Remove(hp4);
  2468. list.Insert(hp4);
  2469. end
  2470. else
  2471. hp3:=tai(hp3.Previous);
  2472. end;
  2473. list.Concat(p);
  2474. SwapRegLive(taicpu(p),taicpu(hp1));
  2475. { after the instruction? }
  2476. { find reg deallocs and reg syncs }
  2477. while assigned(hp5) and (hp5.typ<>ait_instruction) do
  2478. begin
  2479. if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
  2480. RegInInstruction(tai_regalloc(hp5).reg,p) then
  2481. begin
  2482. hp4:=hp5;
  2483. hp5:=tai(hp5.next);
  2484. asml.Remove(hp4);
  2485. list.Concat(hp4);
  2486. end
  2487. else
  2488. hp5:=tai(hp5.Next);
  2489. end;
  2490. asml.Remove(hp1);
  2491. { if there are address labels associated with hp2, those must
  2492. stay with hp2 (e.g. for GOT-less PIC) }
  2493. insertpos:=hp2;
  2494. while assigned(hp2.previous) and
  2495. (tai(hp2.previous).typ<>ait_instruction) do
  2496. begin
  2497. hp2:=tai(hp2.previous);
  2498. if (hp2.typ=ait_label) and
  2499. (tai_label(hp2).labsym.typ=AT_ADDR) then
  2500. insertpos:=hp2;
  2501. end;
  2502. {$ifdef DEBUG_PREREGSCHEDULER}
  2503. asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
  2504. {$endif DEBUG_PREREGSCHEDULER}
  2505. asml.InsertBefore(hp1,insertpos);
  2506. asml.InsertListBefore(insertpos,list);
  2507. p:=tai(p.next);
  2508. end
  2509. else if p.typ=ait_instruction then
  2510. p:=hp1
  2511. else
  2512. p:=tai(p.next);
  2513. end;
  2514. list.Free;
  2515. end;
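{ shorten an IT block covering p by one condition slot (a bare IT is removed
entirely); used when an instruction covered by that block is removed }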
  2516. procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  2517. var
  2518. hp : tai;
  2519. l : longint;
  2520. begin
  2521. hp := tai(p.Previous);
  2522. l := 1;
  2523. while assigned(hp) and
  2524. (l <= 4) do
  2525. begin
  2526. if hp.typ=ait_instruction then
  2527. begin
  2528. if (taicpu(hp).opcode>=A_IT) and
  2529. (taicpu(hp).opcode <= A_ITTTT) then
  2530. begin
  2531. if (taicpu(hp).opcode = A_IT) and
  2532. (l=1) then
  2533. list.Remove(hp)
  2534. else
  2535. case taicpu(hp).opcode of
  2536. A_ITE:
  2537. if l=2 then taicpu(hp).opcode := A_IT;
  2538. A_ITT:
  2539. if l=2 then taicpu(hp).opcode := A_IT;
  2540. A_ITEE:
  2541. if l=3 then taicpu(hp).opcode := A_ITE;
  2542. A_ITTE:
  2543. if l=3 then taicpu(hp).opcode := A_ITT;
  2544. A_ITET:
  2545. if l=3 then taicpu(hp).opcode := A_ITE;
  2546. A_ITTT:
  2547. if l=3 then taicpu(hp).opcode := A_ITT;
  2548. A_ITEEE:
  2549. if l=4 then taicpu(hp).opcode := A_ITEE;
  2550. A_ITTEE:
  2551. if l=4 then taicpu(hp).opcode := A_ITTE;
  2552. A_ITETE:
  2553. if l=4 then taicpu(hp).opcode := A_ITET;
  2554. A_ITTTE:
  2555. if l=4 then taicpu(hp).opcode := A_ITTT;
  2556. A_ITEET:
  2557. if l=4 then taicpu(hp).opcode := A_ITEE;
  2558. A_ITTET:
  2559. if l=4 then taicpu(hp).opcode := A_ITTE;
  2560. A_ITETT:
  2561. if l=4 then taicpu(hp).opcode := A_ITET;
  2562. A_ITTTT:
  2563. if l=4 then taicpu(hp).opcode := A_ITTT;
  2564. end;
  2565. break;
  2566. end;
  2567. {else if (taicpu(hp).condition<>taicpu(p).condition) or
  2568. (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
  2569. break;}
  2570. inc(l);
  2571. end;
  2572. hp := tai(hp.Previous);
  2573. end;
  2574. end;
  2575. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2576. var
  2577. hp : taicpu;
  2578. //hp1,hp2 : tai;
  2579. begin
  2580. result:=false;
  2581. if inherited PeepHoleOptPass1Cpu(p) then
  2582. result:=true
  2583. else if (p.typ=ait_instruction) and
  2584. MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2585. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2586. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2587. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2588. begin
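{ an stmfd sp! of low registers (and possibly lr) is equivalent to push, which
usually has a shorter Thumb-2 encoding }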
  2589. DebugMsg('Peephole Stm2Push done', p);
  2590. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2591. AsmL.InsertAfter(hp, p);
  2592. asml.Remove(p);
  2593. p:=hp;
  2594. result:=true;
  2595. end
  2596. {else if (p.typ=ait_instruction) and
  2597. MatchInstruction(p, A_STR, [C_None], [PF_None]) and
  2598. (taicpu(p).oper[1]^.ref^.addressmode=AM_PREINDEXED) and
  2599. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2600. (taicpu(p).oper[1]^.ref^.offset=-4) and
  2601. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,14]) then
  2602. begin
  2603. DebugMsg('Peephole Str2Push done', p);
  2604. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2605. asml.InsertAfter(hp, p);
  2606. asml.Remove(p);
  2607. p.Free;
  2608. p:=hp;
  2609. result:=true;
  2610. end}
  2611. else if (p.typ=ait_instruction) and
  2612. MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2613. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2614. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2615. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2616. begin
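{ an ldmfd sp! of low registers (and possibly pc) is equivalent to pop, which
usually has a shorter Thumb-2 encoding }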
  2617. DebugMsg('Peephole Ldm2Pop done', p);
  2618. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2619. asml.InsertBefore(hp, p);
  2620. asml.Remove(p);
  2621. p.Free;
  2622. p:=hp;
  2623. result:=true;
  2624. end
  2625. {else if (p.typ=ait_instruction) and
  2626. MatchInstruction(p, A_LDR, [C_None], [PF_None]) and
  2627. (taicpu(p).oper[1]^.ref^.addressmode=AM_POSTINDEXED) and
  2628. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2629. (taicpu(p).oper[1]^.ref^.offset=4) and
  2630. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,15]) then
  2631. begin
  2632. DebugMsg('Peephole Ldr2Pop done', p);
  2633. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2634. asml.InsertBefore(hp, p);
  2635. asml.Remove(p);
  2636. p.Free;
  2637. p:=hp;
  2638. result:=true;
  2639. end}
  2640. else if (p.typ=ait_instruction) and
  2641. MatchInstruction(p, [A_AND], [], [PF_None]) and
  2642. (taicpu(p).ops = 2) and
  2643. (taicpu(p).oper[1]^.typ=top_const) and
  2644. ((taicpu(p).oper[1]^.val=255) or
  2645. (taicpu(p).oper[1]^.val=65535)) then
  2646. begin
  2647. DebugMsg('Peephole AndR2Uxt done', p);
  2648. if taicpu(p).oper[1]^.val=255 then
  2649. taicpu(p).opcode:=A_UXTB
  2650. else
  2651. taicpu(p).opcode:=A_UXTH;
  2652. taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
  2653. result := true;
  2654. end
  2655. else if (p.typ=ait_instruction) and
  2656. MatchInstruction(p, [A_AND], [], [PF_None]) and
  2657. (taicpu(p).ops = 3) and
  2658. (taicpu(p).oper[2]^.typ=top_const) and
  2659. ((taicpu(p).oper[2]^.val=255) or
  2660. (taicpu(p).oper[2]^.val=65535)) then
  2661. begin
  2662. DebugMsg('Peephole AndRR2Uxt done', p);
  2663. if taicpu(p).oper[2]^.val=255 then
  2664. taicpu(p).opcode:=A_UXTB
  2665. else
  2666. taicpu(p).opcode:=A_UXTH;
  2667. taicpu(p).ops:=2;
  2668. result := true;
  2669. end
  2670. {else if (p.typ=ait_instruction) and
  2671. MatchInstruction(p, [A_CMP], [C_None], [PF_None]) and
  2672. (taicpu(p).oper[1]^.typ=top_const) and
  2673. (taicpu(p).oper[1]^.val=0) and
  2674. GetNextInstruction(p,hp1) and
  2675. (taicpu(hp1).opcode=A_B) and
  2676. (taicpu(hp1).condition in [C_EQ,C_NE]) then
  2677. begin
  2678. if taicpu(hp1).condition = C_EQ then
  2679. hp2:=taicpu.op_reg_ref(A_CBZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^)
  2680. else
  2681. hp2:=taicpu.op_reg_ref(A_CBNZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^);
  2682. taicpu(hp2).is_jmp := true;
  2683. asml.InsertAfter(hp2, hp1);
  2684. asml.Remove(hp1);
  2685. hp1.Free;
  2686. asml.Remove(p);
  2687. p.Free;
  2688. p := hp2;
  2689. result := true;
  2690. end}
  2691. end;
  procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
    var
      p,hp1,hp2: tai;
      l : longint;
      condition : tasmcond;
      { UsedRegs, TmpUsedRegs: TRegSet; }
    begin
      p := BlockStart;
      { UsedRegs := []; }
      while (p <> BlockEnd) Do
        begin
          { UpdateUsedRegs(UsedRegs, tai(p.next)); }
          case p.Typ Of
            Ait_Instruction:
              begin
                case taicpu(p).opcode Of
                  A_B:
                    if taicpu(p).condition<>C_None then
                      begin
                         { check for
                                Bxx   xxx
                                <several instructions>
                             xxx:
                         }
                         l:=0;
                         GetNextInstruction(p, hp1);
                         while assigned(hp1) and
                           (l<=4) and
                           CanBeCond(hp1) and
                           { stop on labels }
                           not(hp1.typ=ait_label) do
                           begin
                              inc(l);
                              if MustBeLast(hp1) then
                                begin
                                  //hp1:=nil;
                                  GetNextInstruction(hp1,hp1);
                                  break;
                                end
                              else
                                GetNextInstruction(hp1,hp1);
                           end;
                         if assigned(hp1) then
                           begin
                              if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
                                begin
                                  if (l<=4) and (l>0) then
                                    begin
                                      condition:=inverse_cond(taicpu(p).condition);
                                      hp2:=p;
                                      GetNextInstruction(p,hp1);
                                      p:=hp1;
                                      repeat
                                        if hp1.typ=ait_instruction then
                                          taicpu(hp1).condition:=condition;
                                        if MustBeLast(hp1) then
                                          begin
                                            GetNextInstruction(hp1,hp1);
                                            break;
                                          end
                                        else
                                          GetNextInstruction(hp1,hp1);
                                      until not(assigned(hp1)) or
                                        not(CanBeCond(hp1)) or
                                        (hp1.typ=ait_label);
                                      { wait with removing else GetNextInstruction could
                                        ignore the label if it was the only usage in the
                                        jump moved away }
                                      asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
                                      DecrementPreceedingIT(asml, hp2);
                                      case l of
                                        1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
                                        2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
                                        3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
                                        4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
                                      end;
                                      tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                      asml.remove(hp2);
                                      hp2.free;
                                      continue;
                                    end;
                                end;
                           end;
                      end;
                end;
              end;
          end;
          p := tai(p.next)
        end;
    end;
  function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
    begin
      result:=false;
      if p.typ = ait_instruction then
        begin
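          { The rules below add the S postfix (plus flag register allocation
            information) to instructions whose flags result is otherwise
            unused, presumably so the assembler can pick the narrow 16-bit
            Thumb encodings, which set the flags outside IT blocks.
            First case, roughly:
                mov   r0,#1   ->   movs  r0,#1
            (immediate must be in 0..255; r0/#1 are only illustrative). }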
          if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
             (taicpu(p).oper[1]^.typ=top_const) and
             (taicpu(p).oper[1]^.val >= 0) and
             (taicpu(p).oper[1]^.val < 256) and
             (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mov2Movs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
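          { Same for a register-operand MVN, roughly:
                mvn   r0,r1   ->   mvns  r0,r1 }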
          else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
             (taicpu(p).oper[1]^.typ=top_reg) and
             (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mvn2Mvns done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
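          { RSB with a zero constant, i.e. a negation, roughly:
                rsb   r0,r1,#0   ->   rsbs  r0,r1,#0 }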
          else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
             (taicpu(p).ops = 3) and
             (taicpu(p).oper[2]^.typ=top_const) and
             (taicpu(p).oper[2]^.val=0) and
             (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Rsb2Rsbs done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
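          { ADD/SUB with a small constant and destination equal to the first
            source, folded into the two-operand S form, roughly:
                add   r0,r0,#1   ->   adds  r0,#1
            (not applied to the stack pointer; constant must be in 0..255). }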
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
             (taicpu(p).ops = 3) and
             MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
             (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
             (taicpu(p).oper[2]^.typ=top_const) and
             (taicpu(p).oper[2]^.val >= 0) and
             (taicpu(p).oper[2]^.val < 256) and
             (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
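          { Two-operand register ADD/SUB merely gains the S postfix, roughly:
                add   r0,r1   ->   adds  r0,r1
            (again skipped when the stack pointer is involved). }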
          else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
             (taicpu(p).ops = 2) and
             (taicpu(p).oper[1]^.typ=top_reg) and
             (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
             (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
             (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole AddSub2*s done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
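          { Three-operand register ADD with destination equal to the first
            source, reduced to the two-operand form, roughly:
                add   r0,r0,r1   ->   add   r0,r1 }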
          else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
             (taicpu(p).ops = 3) and
             MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
             (taicpu(p).oper[2]^.typ=top_reg) then
            begin
              DebugMsg('Peephole AddRRR2AddRR done', p);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              result:=true;
            end
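          { Logical and shift operations of the form "op rX,rX,rY", folded
            into the two-operand S form when the flags are free, roughly:
                and   r0,r0,r1   ->   ands  r0,r1 }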
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
             (taicpu(p).ops = 3) and
             MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
             (taicpu(p).oper[2]^.typ=top_reg) and
             (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole opXXY2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).ops := 2;
              taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
              taicpu(p).oppostfix:=PF_S;
              result:=true;
            end
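          { The same fold for instructions already carrying the S postfix;
            here the third operand may also be a constant, roughly:
                ands  r0,r0,#7   ->   ands  r0,#7 }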
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
             (taicpu(p).ops = 3) and
             MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
             (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
            begin
              DebugMsg('Peephole opXXY2opXY done', p);
              taicpu(p).ops := 2;
              if taicpu(p).oper[2]^.typ=top_reg then
                taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
              else
                taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
              result:=true;
            end
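          { Commutative operations whose destination equals the second source
            operand, roughly:
                and   r0,r1,r0   ->   ands  r0,r1 }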
          else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
             (taicpu(p).ops = 3) and
             MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
             (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole opXYX2opsXY done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              taicpu(p).ops := 2;
              result:=true;
            end
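          { MOV with a shifter operand becomes the corresponding shift
            instruction with the S postfix, roughly:
                mov   r0,r1,lsl #2   ->   lsls  r0,r1,#2
            (or the register-shifted variant when the shift amount is held in
            a register). }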
          else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
             (taicpu(p).ops=3) and
             (taicpu(p).oper[2]^.typ=top_shifterop) and
             (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
             //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
             (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
            begin
              DebugMsg('Peephole Mov2Shift done', p);
              asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
              asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
              IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
              taicpu(p).oppostfix:=PF_S;
              case taicpu(p).oper[2]^.shifterop^.shiftmode of
                SM_LSL: taicpu(p).opcode:=A_LSL;
                SM_LSR: taicpu(p).opcode:=A_LSR;
                SM_ASR: taicpu(p).opcode:=A_ASR;
                SM_ROR: taicpu(p).opcode:=A_ROR;
              end;
              if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
                taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
              else
                taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
              result:=true;
            end
        end;
    end;
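{ register the CPU specific optimizer and pre-register-allocation scheduler
  classes so the generic optimizer passes use them }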
begin
  casmoptimizer:=TCpuAsmOptimizer;
  cpreregallocscheduler:=TCpuPreRegallocScheduler;
End.