aoptcpu.pas

  1. {
  2. Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
  3. Development Team
  4. This unit implements the ARM optimizer object
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 2 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program; if not, write to the Free Software
  15. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  16. ****************************************************************************
  17. }
  18. Unit aoptcpu;
  19. {$i fpcdefs.inc}
  20. { $define DEBUG_PREREGSCHEDULER}
  21. { $define DEBUG_AOPTCPU}
  22. Interface
  23. uses
  24. cgbase, cgutils, cpubase, aasmtai,
  25. aasmcpu,
  26. aopt, aoptobj, aoptarm;
  27. Type
  28. TCpuAsmOptimizer = class(TARMAsmOptimizer)
  29. { Can't be done in some cases due to the limited range of jumps }
  30. function CanDoJumpOpts: Boolean; override;
  31. { uses the same constructor as TAopObj }
  32. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  33. procedure PeepHoleOptPass2;override;
  34. Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
  35. function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;
  36. { gets the next tai object after Current that contains info relevant
  37. to the optimizer, i.e. one that uses the given reference or causes a
  38. change in program flow; it is returned in Next.
  39. If there is none, it returns false and
  40. sets Next to nil }
  41. Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  42. { outputs a debug message into the assembler file }
  43. procedure DebugMsg(const s: string; p: tai);
  44. function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
  45. function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
  46. function OptPass1And(var p: tai): Boolean; override; { There's optimisation code that's general for all ARM platforms }
  47. protected
  48. function LookForPreindexedPattern(p: taicpu): boolean;
  49. function LookForPostindexedPattern(p: taicpu): boolean;
  50. { Individual optimisation routines }
  51. function OptPass1DataCheckMov(var p: tai): Boolean;
  52. function OptPass1ADDSUB(var p: tai): Boolean;
  53. function OptPass1CMP(var p: tai): Boolean;
  54. function OptPass1LDR(var p: tai): Boolean;
  55. function OptPass1STM(var p: tai): Boolean;
  56. function OptPass1STR(var p: tai): Boolean;
  57. function OptPass1MOV(var p: tai): Boolean;
  58. function OptPass1MUL(var p: tai): Boolean;
  59. function OptPass1MVN(var p: tai): Boolean;
  60. function OptPass1VMov(var p: tai): Boolean;
  61. function OptPass1VOp(var p: tai): Boolean;
  62. End;
  63. TCpuPreRegallocScheduler = class(TAsmScheduler)
  64. function SchedulerPass1Cpu(var p: tai): boolean;override;
  65. procedure SwapRegLive(p, hp1: taicpu);
  66. end;
  67. TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
  68. { uses the same constructor as TAopObj }
  69. function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
  70. procedure PeepHoleOptPass2;override;
  71. function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  72. End;
  73. function MustBeLast(p : tai) : boolean;
  74. Implementation
  75. uses
  76. cutils,verbose,globtype,globals,
  77. systems,
  78. cpuinfo,
  79. cgobj,procinfo,
  80. aasmbase,aasmdata;
  81. { Range check must be disabled explicitly as conversions between signed and unsigned
  82. 32-bit values are done without explicit typecasts }
  83. {$R-}
  84. function CanBeCond(p : tai) : boolean;
  85. begin
  86. result:=
  87. not(GenerateThumbCode) and
  88. (p.typ=ait_instruction) and
  89. (taicpu(p).condition=C_None) and
  90. ((taicpu(p).opcode<A_IT) or (taicpu(p).opcode>A_ITTTT)) and
  91. (taicpu(p).opcode<>A_CBZ) and
  92. (taicpu(p).opcode<>A_CBNZ) and
  93. (taicpu(p).opcode<>A_PLD) and
  94. (((taicpu(p).opcode<>A_BLX) and
  95. { BL may need to be converted into BLX by the linker -- could possibly
  96. be allowed if it targets a local symbol that we know uses the
  97. same instruction set as the current one }
  98. (taicpu(p).opcode<>A_BL)) or
  99. (taicpu(p).oper[0]^.typ=top_reg));
  100. end;
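  { Illustrative example (register and constant values arbitrary): after
      cmp   r0,#1
    a following
      moveq r0,#1
    is redundant -- when EQ holds, r0 already contains 1 -- so
    RemoveRedundantMove deletes the moveq. }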
  101. function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
  102. begin
  103. Result:=false;
  104. if (taicpu(movp).condition = C_EQ) and
  105. (taicpu(cmpp).oper[0]^.reg = taicpu(movp).oper[0]^.reg) and
  106. (taicpu(cmpp).oper[1]^.val = taicpu(movp).oper[1]^.val) then
  107. begin
  108. asml.insertafter(tai_comment.Create(strpnew('Peephole Optimization: CmpMovMov - Removed redundant moveq')), movp);
  109. asml.remove(movp);
  110. movp.free;
  111. Result:=true;
  112. end;
  113. end;
  114. function AlignedToQWord(const ref : treference) : boolean;
  115. begin
  116. { (safe) heuristics to ensure alignment }
  117. result:=(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) and
  118. (((ref.offset>=0) and
  119. ((ref.offset mod 8)=0) and
  120. ((ref.base=NR_R13) or
  121. (ref.index=NR_R13))
  122. ) or
  123. ((ref.offset<=0) and
  124. { when using NR_R11, it always has a value of <qword align>+4 }
  125. ((abs(ref.offset+4) mod 8)=0) and
  126. (current_procinfo.framepointer=NR_R11) and
  127. ((ref.base=NR_R11) or
  128. (ref.index=NR_R11))
  129. )
  130. );
  131. end;
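  { Rough summary of the ranges checked below: in ARM mode, plain and byte
    loads/stores (PF_None/PF_B) accept a 12-bit immediate offset, i.e.
    abs(offset)<4096, while the other postfixes only accept abs(offset)<256;
    in Thumb-2 mode offsets up to 4095 are accepted, but negative offsets
    only down to -255. }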
  132. function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
  133. begin
  134. if GenerateThumb2Code then
  135. result := (aoffset<4096) and (aoffset>-256)
  136. else
  137. result := ((pf in [PF_None,PF_B]) and
  138. (abs(aoffset)<4096)) or
  139. (abs(aoffset)<256);
  140. end;
  141. function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
  142. var
  143. p: taicpu;
  144. i: longint;
  145. begin
  146. instructionLoadsFromReg := false;
  147. if not (assigned(hp) and (hp.typ = ait_instruction)) then
  148. exit;
  149. p:=taicpu(hp);
  150. i:=1;
  151. {For these instructions we have to start on oper[0]}
  152. if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
  153. A_CMP, A_CMN, A_TST, A_TEQ,
  154. A_B, A_BL, A_BX, A_BLX,
  155. A_SMLAL, A_UMLAL, A_VSTM, A_VLDM]) then i:=0;
  156. while(i<p.ops) do
  157. begin
  158. case p.oper[I]^.typ of
  159. top_reg:
  160. instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
  161. { STRD }
  162. ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
  163. top_regset:
  164. instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
  165. top_shifterop:
  166. instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
  167. top_ref:
  168. instructionLoadsFromReg :=
  169. (p.oper[I]^.ref^.base = reg) or
  170. (p.oper[I]^.ref^.index = reg);
  171. else
  172. ;
  173. end;
  174. if (i=0) and (p.opcode in [A_LDM,A_VLDM]) then
  175. exit;
  176. if instructionLoadsFromReg then
  177. exit; {Bail out if we found something}
  178. Inc(I);
  179. end;
  180. end;
  181. function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
  182. var
  183. p: taicpu;
  184. begin
  185. Result := false;
  186. if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
  187. exit;
  188. p := taicpu(hp);
  189. case p.opcode of
  190. { These instructions do not write to a register at all }
  191. A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
  192. A_VCMP:
  193. exit;
  194. {Take care of post-/preincremented stores and loads; they will change their base register}
  195. A_STR, A_LDR:
  196. begin
  197. Result := false;
  198. { actually, this does not apply here because post-/preindexed does not mean that a register
  199. is loaded with a new value, it is only modified
  200. (taicpu(p).oper[1]^.typ=top_ref) and
  201. (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  202. (taicpu(p).oper[1]^.ref^.base = reg);
  203. }
  204. { STR does not load into its first register }
  205. if p.opcode = A_STR then
  206. exit;
  207. end;
  208. A_VSTR:
  209. begin
  210. Result := false;
  211. exit;
  212. end;
  213. { These four write into the first 2 registers; UMLAL and SMLAL will also read from them }
  214. A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
  215. Result :=
  216. (p.oper[1]^.typ = top_reg) and
  217. (p.oper[1]^.reg = reg);
  218. {Loads to oper2 from coprocessor}
  219. {
  220. MCR/MRC is currently not supported in FPC
  221. A_MRC:
  222. Result :=
  223. (p.oper[2]^.typ = top_reg) and
  224. (p.oper[2]^.reg = reg);
  225. }
  226. {Loads to all registers in the register set}
  227. A_LDM, A_VLDM:
  228. Result := (getsupreg(reg) in p.oper[1]^.regset^);
  229. A_POP:
  230. Result := (getsupreg(reg) in p.oper[0]^.regset^) or
  231. (reg=NR_STACK_POINTER_REG);
  232. else
  233. ;
  234. end;
  235. if Result then
  236. exit;
  237. case p.oper[0]^.typ of
  238. {This is the case}
  239. top_reg:
  240. Result := (p.oper[0]^.reg = reg) or
  241. { LDRD }
  242. (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
  243. {LDM/STM might write a new value to their index register}
  244. top_ref:
  245. Result :=
  246. (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
  247. (taicpu(p).oper[0]^.ref^.base = reg);
  248. else
  249. ;
  250. end;
  251. end;
  252. function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai;
  253. Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
  254. begin
  255. Next:=Current;
  256. repeat
  257. Result:=GetNextInstruction(Next,Next);
  258. if Result and
  259. (Next.typ=ait_instruction) and
  260. (taicpu(Next).opcode in [A_LDR, A_STR]) and
  261. (
  262. ((taicpu(Next).ops = 2) and
  263. (taicpu(Next).oper[1]^.typ = top_ref) and
  264. RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
  265. ((taicpu(Next).ops = 3) and { LDRD/STRD }
  266. (taicpu(Next).oper[2]^.typ = top_ref) and
  267. RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
  268. ) then
  269. {We've found an LDR or STR instruction with the same reference}
  270. exit;
  271. until not(Result) or
  272. (Next.typ<>ait_instruction) or
  273. not(cs_opt_level3 in current_settings.optimizerswitches) or
  274. is_calljmp(taicpu(Next).opcode) or
  275. (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
  276. RegModifiedByInstruction(NR_PC,Next);
  277. Result:=false;
  278. end;
  279. {$ifdef DEBUG_AOPTCPU}
  280. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
  281. begin
  282. asml.insertbefore(tai_comment.Create(strpnew(s)), p);
  283. end;
  284. {$else DEBUG_AOPTCPU}
  285. procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
  286. begin
  287. end;
  288. {$endif DEBUG_AOPTCPU}
  289. function TCpuAsmOptimizer.CanDoJumpOpts: Boolean;
  290. begin
  291. { Cannot perform these jump optimisations if the target only supports 16-bit Thumb code }
  292. Result := not (
  293. (current_settings.instructionset = is_thumb) and not (CPUARM_HAS_THUMB2 in cpu_capabilities[current_settings.cputype])
  294. );
  295. end;
  296. function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
  297. var
  298. alloc,
  299. dealloc : tai_regalloc;
  300. hp1 : tai;
  301. begin
  302. Result:=false;
  303. if ((MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  304. ((getregtype(taicpu(movp).oper[0]^.reg)=R_MMREGISTER) or (taicpu(p).opcode=A_VLDR))
  305. ) or
  306. (((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFD)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
  307. (((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFS)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
  308. ) and
  309. (taicpu(movp).ops=2) and
  310. MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
  311. { the destination register of the mov must not be used between p and movp }
  312. not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
  313. { Take care to only do this for instructions which REALLY load to the first register.
  314. Otherwise
  315. vstr reg0, [reg1]
  316. vmov reg2, reg0
  317. will be optimized to
  318. vstr reg2, [reg1]
  319. }
  320. regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
  321. begin
  322. dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
  323. if assigned(dealloc) then
  324. begin
  325. DebugMsg('Peephole Optimization: '+optimizer+' removed superfluous vmov', movp);
  326. result:=true;
  327. { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
  328. and remove it if possible }
  329. asml.Remove(dealloc);
  330. alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
  331. if assigned(alloc) then
  332. begin
  333. asml.Remove(alloc);
  334. alloc.free;
  335. dealloc.free;
  336. end
  337. else
  338. asml.InsertAfter(dealloc,p);
  339. { try to move the allocation of the target register }
  340. GetLastInstruction(movp,hp1);
  341. alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
  342. if assigned(alloc) then
  343. begin
  344. asml.Remove(alloc);
  345. asml.InsertBefore(alloc,p);
  346. { adjust used regs }
  347. IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
  348. end;
  349. { change
  350. vldr reg0,[reg1]
  351. vmov reg2,reg0
  352. into
  353. ldr reg2,[reg1]
  354. if reg2 is an int register
  355. }
  356. if (taicpu(p).opcode=A_VLDR) and (getregtype(taicpu(movp).oper[0]^.reg)=R_INTREGISTER) then
  357. taicpu(p).opcode:=A_LDR;
  358. { finally get rid of the mov }
  359. taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
  360. asml.remove(movp);
  361. movp.free;
  362. end;
  363. end;
  364. end;
  365. {
  366. optimize
  367. add/sub reg1,reg1,regY/const
  368. ...
  369. ldr/str regX,[reg1]
  370. into
  371. ldr/str regX,[reg1, regY/const]!
  372. }
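  { Illustrative instance (registers and constant arbitrary):
      add r1,r1,#8
      ...
      ldr r0,[r1]
    becomes
      ldr r0,[r1,#8]!
    (the add itself is removed by the caller, OptPass1ADDSUB) }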
  373. function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
  374. var
  375. hp1: tai;
  376. begin
  377. if GenerateARMCode and
  378. (p.ops=3) and
  379. MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
  380. GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
  381. (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
  382. MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
  383. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  384. (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
  385. (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
  386. (taicpu(hp1).oper[1]^.ref^.offset=0) and
  387. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  388. (((p.oper[2]^.typ=top_reg) and
  389. (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
  390. ((p.oper[2]^.typ=top_const) and
  391. ((abs(p.oper[2]^.val) < 256) or
  392. ((abs(p.oper[2]^.val) < 4096) and
  393. (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
  394. begin
  395. taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;
  396. if p.oper[2]^.typ=top_reg then
  397. begin
  398. taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
  399. if p.opcode=A_ADD then
  400. taicpu(hp1).oper[1]^.ref^.signindex:=1
  401. else
  402. taicpu(hp1).oper[1]^.ref^.signindex:=-1;
  403. end
  404. else
  405. begin
  406. if p.opcode=A_ADD then
  407. taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
  408. else
  409. taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
  410. end;
  411. result:=true;
  412. end
  413. else
  414. result:=false;
  415. end;
  416. {
  417. optimize
  418. ldr/str regX,[reg1]
  419. ...
  420. add/sub reg1,reg1,regY/const
  421. into
  422. ldr/str regX,[reg1], regY/const
  423. }
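  { Illustrative instance (registers and constant arbitrary):
      ldr r0,[r1]
      ...
      add r1,r1,#4
    becomes
      ldr r0,[r1],#4
    (the add is removed below once the pattern matches) }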
  424. function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
  425. var
  426. hp1 : tai;
  427. begin
  428. Result:=false;
  429. if (p.oper[1]^.typ = top_ref) and
  430. (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
  431. (p.oper[1]^.ref^.index=NR_NO) and
  432. (p.oper[1]^.ref^.offset=0) and
  433. GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
  434. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  435. MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
  436. (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
  437. (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
  438. (
  439. (taicpu(hp1).oper[2]^.typ=top_reg) or
  440. { valid offset? }
  441. ((taicpu(hp1).oper[2]^.typ=top_const) and
  442. ((abs(taicpu(hp1).oper[2]^.val)<256) or
  443. ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
  444. )
  445. )
  446. ) and
  447. { don't apply the optimization if the base register is loaded }
  448. (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
  449. not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
  450. { don't apply the optimization if the (new) index register is loaded }
  451. (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
  452. not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
  453. GenerateARMCode then
  454. begin
  455. DebugMsg('Peephole Optimization: Str/LdrAdd/Sub2Str/Ldr Postindex done', p);
  456. p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
  457. if taicpu(hp1).oper[2]^.typ=top_const then
  458. begin
  459. if taicpu(hp1).opcode=A_ADD then
  460. p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
  461. else
  462. p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
  463. end
  464. else
  465. begin
  466. p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
  467. if taicpu(hp1).opcode=A_ADD then
  468. p.oper[1]^.ref^.signindex:=1
  469. else
  470. p.oper[1]^.ref^.signindex:=-1;
  471. end;
  472. asml.Remove(hp1);
  473. hp1.Free;
  474. Result:=true;
  475. end;
  476. end;
  477. function TCpuAsmOptimizer.OptPass1ADDSUB(var p: tai): Boolean;
  478. var
  479. hp1,hp2: tai;
  480. begin
  481. Result := OptPass1DataCheckMov(p);
  482. {
  483. change
  484. add/sub reg2,reg1,const1
  485. str/ldr reg3,[reg2,const2]
  486. dealloc reg2
  487. to
  488. str/ldr reg3,[reg1,const2+/-const1]
  489. }
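  { Illustrative instance (registers and constants arbitrary):
      add r2,r1,#4
      str r3,[r2,#8]
      ; r2 deallocated
    becomes
      str r3,[r1,#12]
    provided the combined offset is still encodable (see
    isValidConstLoadStoreOffset). }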
  490. if (not GenerateThumbCode) and
  491. (taicpu(p).ops>2) and
  492. (taicpu(p).oper[1]^.typ = top_reg) and
  493. (taicpu(p).oper[2]^.typ = top_const) then
  494. begin
  495. hp1:=p;
  496. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
  497. { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
  498. MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
  499. (taicpu(hp1).oper[1]^.typ = top_ref) and
  500. (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
  501. { don't optimize if the register is stored/overwritten }
  502. (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
  503. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
  504. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  505. { new offset must be valid: either in the 8 or 12 bit range, depending on the
  506. ldr postfix }
  507. (((taicpu(p).opcode=A_ADD) and
  508. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  509. ) or
  510. ((taicpu(p).opcode=A_SUB) and
  511. isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
  512. )
  513. ) do
  514. begin
  515. { neither reg1 nor reg2 may be changed in between }
  516. if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
  517. RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
  518. break;
  519. { reg2 must either be overwritten by the ldr or be deallocated afterwards }
  520. if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
  521. assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
  522. begin
  523. { remember last instruction }
  524. hp2:=hp1;
  525. DebugMsg('Peephole Optimization: Add/SubLdr2Ldr done', p);
  526. hp1:=p;
  527. { fix all ldr/str }
  528. while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
  529. begin
  530. taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
  531. if taicpu(p).opcode=A_ADD then
  532. inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
  533. else
  534. dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
  535. if hp1=hp2 then
  536. break;
  537. end;
  538. RemoveCurrentP(p);
  539. result:=true;
  540. Exit;
  541. end;
  542. end;
  543. end;
  544. if (taicpu(p).condition = C_None) and
  545. (taicpu(p).oppostfix = PF_None) and
  546. LookForPreindexedPattern(taicpu(p)) then
  547. begin
  548. DebugMsg('Peephole Optimization: Add/Sub to Preindexed done', p);
  549. RemoveCurrentP(p);
  550. Result:=true;
  551. Exit;
  552. end;
  553. end;
  554. function TCpuAsmOptimizer.OptPass1MUL(var p: tai): Boolean;
  555. var
  556. hp1: tai;
  557. oldreg: tregister;
  558. begin
  559. Result := OptPass1DataCheckMov(p);
  560. {
  561. Turn
  562. mul reg0, z,w
  563. sub/add x, y, reg0
  564. dealloc reg0
  565. into
  566. mls/mla x,z,w,y
  567. }
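  { Illustrative instance (registers arbitrary):
      mul r0,r1,r2
      add r3,r4,r0     =>   mla r3,r1,r2,r4   (r3 := r1*r2+r4)
      mul r0,r1,r2
      sub r3,r4,r0     =>   mls r3,r1,r2,r4   (r3 := r4-r1*r2)
    assuming r0 is deallocated afterwards. }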
  568. if (taicpu(p).condition = C_None) and
  569. (taicpu(p).oppostfix = PF_None) and
  570. (taicpu(p).ops=3) and
  571. (taicpu(p).oper[0]^.typ = top_reg) and
  572. (taicpu(p).oper[1]^.typ = top_reg) and
  573. (taicpu(p).oper[2]^.typ = top_reg) and
  574. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  575. MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
  576. (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
  577. (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
  578. (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
  579. ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
  580. // On CPUs before ARMv6, using the same Rd and Rm for MLA is not recommended.
  581. // TODO: A workaround would be to swap Rm and Rs
  582. (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
  583. (((taicpu(hp1).ops=3) and
  584. (taicpu(hp1).oper[2]^.typ=top_reg) and
  585. ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
  586. (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
  587. ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  588. (taicpu(hp1).opcode=A_ADD) and
  589. (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
  590. ((taicpu(hp1).ops=2) and
  591. (taicpu(hp1).oper[1]^.typ=top_reg) and
  592. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  593. (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
  594. begin
  595. if taicpu(hp1).opcode=A_ADD then
  596. begin
  597. taicpu(hp1).opcode:=A_MLA;
  598. if taicpu(hp1).ops=3 then
  599. begin
  600. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
  601. oldreg:=taicpu(hp1).oper[2]^.reg
  602. else
  603. oldreg:=taicpu(hp1).oper[1]^.reg;
  604. end
  605. else
  606. oldreg:=taicpu(hp1).oper[0]^.reg;
  607. taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
  608. taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
  609. taicpu(hp1).loadreg(3,oldreg);
  610. DebugMsg('Peephole Optimization: MulAdd2MLA done', p);
  611. end
  612. else
  613. begin
  614. taicpu(hp1).opcode:=A_MLS;
  615. taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
  616. if taicpu(hp1).ops=2 then
  617. taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
  618. else
  619. taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
  620. taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
  621. DebugMsg('Peephole Optimization: MulSub2MLS done', p);
  622. AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,UsedRegs);
  623. AllocRegBetween(taicpu(hp1).oper[2]^.reg,p,hp1,UsedRegs);
  624. AllocRegBetween(taicpu(hp1).oper[3]^.reg,p,hp1,UsedRegs);
  625. end;
  626. taicpu(hp1).ops:=4;
  627. RemoveCurrentP(p);
  628. Result := True;
  629. Exit;
  630. end
  631. end;
  632. function TCpuAsmOptimizer.OptPass1And(var p: tai): Boolean;
  633. begin
  634. Result := OptPass1DataCheckMov(p);
  635. Result := inherited OptPass1And(p) or Result;
  636. end;
  637. function TCpuAsmOptimizer.OptPass1DataCheckMov(var p: tai): Boolean;
  638. var
  639. hp1: tai;
  640. begin
  641. {
  642. change
  643. op reg1, ...
  644. mov reg2, reg1
  645. to
  646. op reg2, ...
  647. }
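  { Illustrative instance (registers arbitrary):
      and r1,r2,#7
      mov r0,r1
    becomes
      and r0,r2,#7
    when r1 is not used afterwards. }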
  648. Result := (taicpu(p).ops >= 3) and
  649. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  650. RemoveSuperfluousMove(p, hp1, 'DataMov2Data');
  651. end;
  652. function TCpuAsmOptimizer.OptPass1CMP(var p: tai): Boolean;
  653. var
  654. hp1, hp2, hp_last: tai;
  655. MovRem1, MovRem2: Boolean;
  656. begin
  657. Result := False;
  658. { These optimizations can only be applied to the operations handled here, because
  659. the other operations do not update all flags and FPC does not track flag usage }
  660. if (taicpu(p).condition = C_None) and
  661. (taicpu(p).oper[1]^.typ = top_const) and
  662. GetNextInstruction(p, hp1) then
  663. begin
  664. {
  665. change
  666. cmp reg,const1
  667. moveq reg,const1
  668. movne reg,const2
  669. to
  670. cmp reg,const1
  671. movne reg,const2
  672. }
  673. if MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  674. (taicpu(hp1).oper[1]^.typ = top_const) and
  675. GetNextInstruction(hp1, hp2) and
  676. MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
  677. (taicpu(hp2).oper[1]^.typ = top_const) then
  678. begin
  679. MovRem1 := RemoveRedundantMove(p, hp1, asml);
  680. MovRem2 := RemoveRedundantMove(p, hp2, asml);
  681. Result:= MovRem1 or MovRem2;
  682. { Make sure that hp1 is still the next instruction after p }
  683. if MovRem1 then
  684. if MovRem2 then
  685. begin
  686. if not GetNextInstruction(p, hp1) then
  687. Exit;
  688. end
  689. else
  690. hp1 := hp2;
  691. end;
  692. {
  693. change
  694. <op> reg,x,y
  695. cmp reg,#0
  696. into
  697. <op>s reg,x,y
  698. }
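  { Illustrative instance (registers arbitrary):
      sub r0,r1,r2
      cmp r0,#0
      beq lbl
    becomes
      subs r0,r1,r2
      beq lbl }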
  699. if (taicpu(p).oppostfix = PF_None) and
  700. (taicpu(p).oper[1]^.val = 0) and
  701. { be careful here: following instructions could use other flags;
  702. however, after a jump FPC never depends on the value of the flags }
  703. { All above instructions set Z and N according to the following
  704. Z := result = 0;
  705. N := result[31];
  706. EQ = Z=1; NE = Z=0;
  707. MI = N=1; PL = N=0; }
  708. (MatchInstruction(hp1, A_B, [C_EQ,C_NE,C_MI,C_PL], []) or
  709. { mov is also possible, but only if there is no shifter operand; it could be an rrx, and
  710. we are too lazy to check whether it is rrx or something else }
  711. (MatchInstruction(hp1, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp1).ops=2))) and
  712. GetLastInstruction(p, hp_last) and
  713. MatchInstruction(hp_last, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,
  714. A_EOR,A_AND,A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
  715. (
  716. { mlas is only allowed in arm mode }
  717. (taicpu(hp_last).opcode<>A_MLA) or
  718. (current_settings.instructionset<>is_thumb)
  719. ) and
  720. (taicpu(hp_last).oper[0]^.reg = taicpu(p).oper[0]^.reg) and
  721. assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp1.Next))) then
  722. begin
  723. DebugMsg('Peephole Optimization: OpCmp2OpS done', hp_last);
  724. taicpu(hp_last).oppostfix:=PF_S;
  725. { move flag allocation if possible }
  726. hp1:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp_last.Next));
  727. if assigned(hp1) then
  728. begin
  729. asml.Remove(hp1);
  730. asml.insertbefore(hp1, hp_last);
  731. end;
  732. RemoveCurrentP(p);
  733. Result:=true;
  734. end;
  735. end;
  736. end;
  737. function TCpuAsmOptimizer.OptPass1LDR(var p: tai): Boolean;
  738. var
  739. hp1: tai;
  740. begin
  741. Result := False;
  742. { change
  743. ldr reg1,ref
  744. ldr reg2,ref
  745. into ...
  746. }
  747. if (taicpu(p).oper[1]^.typ = top_ref) and
  748. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  749. GetNextInstruction(p,hp1) and
  750. { ldrd is not allowed here }
  751. MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
  752. begin
  753. {
  754. ...
  755. ldr reg1,ref
  756. mov reg2,reg1
  757. }
  758. if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
  759. RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
  760. (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
  761. (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
  762. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
  763. begin
  764. if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
  765. begin
  766. DebugMsg('Peephole Optimization: LdrLdr2Ldr done', hp1);
  767. asml.remove(hp1);
  768. hp1.free;
  769. end
  770. else
  771. begin
  772. DebugMsg('Peephole Optimization: LdrLdr2LdrMov done', hp1);
  773. taicpu(hp1).opcode:=A_MOV;
  774. taicpu(hp1).oppostfix:=PF_None;
  775. taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
  776. end;
  777. result := true;
  778. end
  779. {
  780. ...
  781. ldrd reg1,reg1+1,ref
  782. }
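  { Illustrative instance (registers arbitrary; requires an even, consecutive
    register pair and a qword-aligned reference):
      ldr r4,[r13,#8]
      ldr r5,[r13,#12]
    becomes
      ldrd r4,r5,[r13,#8] }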
  783. else if (GenerateARMCode or GenerateThumb2Code) and
  784. (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
  785. { ldrd does not allow any postfixes ... }
  786. (taicpu(p).oppostfix=PF_None) and
  787. not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
  788. (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
  789. { ldr ensures that either base or index contains no register, else ldr wouldn't
  790. use an offset either
  791. }
  792. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  793. (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
  794. (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
  795. (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
  796. AlignedToQWord(taicpu(p).oper[1]^.ref^) then
  797. begin
  798. DebugMsg('Peephole Optimization: LdrLdr2Ldrd done', p);
  799. taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
  800. taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
  801. taicpu(p).ops:=3;
  802. taicpu(p).oppostfix:=PF_D;
  803. asml.remove(hp1);
  804. hp1.free;
  805. result:=true;
  806. end;
  807. end;
  808. {
  809. Change
  810. ldrb dst1, [REF]
  811. and dst2, dst1, #255
  812. into
  813. ldrb dst2, [ref]
  814. }
  815. if not(GenerateThumbCode) and
  816. (taicpu(p).oppostfix=PF_B) and
  817. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  818. MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
  819. (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
  820. (taicpu(hp1).oper[2]^.typ = top_const) and
  821. (taicpu(hp1).oper[2]^.val = $FF) and
  822. not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
  823. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
  824. begin
  825. DebugMsg('Peephole Optimization: LdrbAnd2Ldrb done', p);
  826. taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
  827. asml.remove(hp1);
  828. hp1.free;
  829. result:=true;
  830. end;
  831. Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  832. { Remove superfluous mov after ldr
  833. changes
  834. ldr reg1, ref
  835. mov reg2, reg1
  836. to
  837. ldr reg2, ref
  838. conditions are:
  839. * no ldrd usage
  840. * reg1 must be released after mov
  841. * mov cannot contain shifterops
  842. * ldr+mov have the same conditions
  843. * mov does not set flags
  844. }
  845. if (taicpu(p).oppostfix<>PF_D) and
  846. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  847. RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
  848. Result:=true;
  849. end;
  850. function TCpuAsmOptimizer.OptPass1STM(var p: tai): Boolean;
  851. var
  852. hp1, hp2, hp3, hp4: tai;
  853. begin
  854. Result := False;
  855. {
  856. change
  857. stmfd r13!,[r14]
  858. sub r13,r13,#4
  859. bl abc
  860. add r13,r13,#4
  861. ldmfd r13!,[r15]
  862. into
  863. b abc
  864. }
  865. if not(ts_thumb_interworking in current_settings.targetswitches) and
  866. (taicpu(p).condition = C_None) and
  867. (taicpu(p).oppostfix = PF_FD) and
  868. (taicpu(p).oper[0]^.typ = top_ref) and
  869. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  870. (taicpu(p).oper[0]^.ref^.base=NR_NO) and
  871. (taicpu(p).oper[0]^.ref^.offset=0) and
  872. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  873. (taicpu(p).oper[1]^.typ = top_regset) and
  874. (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
  875. GetNextInstruction(p, hp1) and
  876. MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
  877. (taicpu(hp1).oper[0]^.typ = top_reg) and
  878. (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
  879. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
  880. (taicpu(hp1).oper[2]^.typ = top_const) and
  881. GetNextInstruction(hp1, hp2) and
  882. SkipEntryExitMarker(hp2, hp2) and
  883. MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
  884. (taicpu(hp2).oper[0]^.typ = top_ref) and
  885. GetNextInstruction(hp2, hp3) and
  886. SkipEntryExitMarker(hp3, hp3) and
  887. MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
  888. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
  889. MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
  890. MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
  891. GetNextInstruction(hp3, hp4) and
  892. MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
  893. MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
  894. (taicpu(hp4).oper[1]^.typ = top_regset) and
  895. (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
  896. begin
  897. asml.Remove(hp1);
  898. asml.Remove(hp3);
  899. asml.Remove(hp4);
  900. taicpu(hp2).opcode:=A_B;
  901. hp1.free;
  902. hp3.free;
  903. hp4.free;
  904. RemoveCurrentp(p, hp2);
  905. DebugMsg('Peephole Optimization: Bl2B done', p);
  906. Result := True;
  907. end;
  908. end;
  909. function TCpuAsmOptimizer.OptPass1STR(var p: tai): Boolean;
  910. var
  911. hp1: tai;
  912. begin
  913. Result := False;
  914. { Common conditions }
  915. if (taicpu(p).oper[1]^.typ = top_ref) and
  916. (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
  917. (taicpu(p).oppostfix=PF_None) then
  918. begin
  919. { change
  920. str reg1,ref
  921. ldr reg2,ref
  922. into
  923. str reg1,ref
  924. mov reg2,reg1
  925. }
  926. if (taicpu(p).condition=C_None) and
  927. GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
  928. MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
  929. (taicpu(hp1).oper[1]^.typ=top_ref) and
  930. (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  931. not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
  932. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
  933. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
  934. begin
  935. if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
  936. begin
  937. DebugMsg('Peephole Optimization: StrLdr2StrMov 1 done', hp1);
  938. asml.remove(hp1);
  939. hp1.free;
  940. end
  941. else
  942. begin
  943. taicpu(hp1).opcode:=A_MOV;
  944. taicpu(hp1).oppostfix:=PF_None;
  945. taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
  946. DebugMsg('Peephole Optimization: StrLdr2StrMov 2 done', hp1);
  947. end;
  948. result := True;
  949. end
  950. { change
  951. str reg1,ref
  952. str reg2,ref
  953. into
  954. strd reg1,reg2,ref
  955. }
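  { Illustrative instance (registers arbitrary; requires an even, consecutive
    register pair and a qword-aligned reference):
      str r4,[r13,#8]
      str r5,[r13,#12]
    becomes
      strd r4,r5,[r13,#8] }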
  956. else if (GenerateARMCode or GenerateThumb2Code) and
  957. (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
  958. not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
  959. (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
  960. AlignedToQWord(taicpu(p).oper[1]^.ref^) and
  961. GetNextInstruction(p,hp1) and
  962. MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
  963. (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
  964. { str ensures that either base or index contains no register, else str wouldn't
  965. use an offset either
  966. }
  967. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  968. (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
  969. (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) then
  970. begin
  971. DebugMsg('Peephole Optimization: StrStr2Strd done', p);
  972. taicpu(p).oppostfix:=PF_D;
  973. taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
  974. taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
  975. taicpu(p).ops:=3;
  976. asml.remove(hp1);
  977. hp1.free;
  978. result:=true;
  979. end;
  980. end;
  981. Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  982. end;
  983. function TCpuAsmOptimizer.OptPass1MOV(var p: tai): Boolean;
  984. var
  985. hp1, hpfar1, hp2: tai;
  986. i, i2: longint;
  987. tempop: tasmop;
  988. dealloc: tai_regalloc;
  989. begin
  990. Result := False;
  991. hp1 := nil;
  992. { fold
  993. mov reg1,reg0, shift imm1
  994. mov reg1,reg1, shift imm2
  995. }
  996. if (taicpu(p).ops=3) and
  997. (taicpu(p).oper[2]^.typ = top_shifterop) and
  998. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  999. getnextinstruction(p,hp1) and
  1000. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1001. (taicpu(hp1).ops=3) and
  1002. MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
  1003. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1004. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1005. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
  1006. begin
  1007. { fold
  1008. mov reg1,reg0, lsl 16
  1009. mov reg1,reg1, lsr 16
  1010. strh reg1, ...
  1011. dealloc reg1
  1012. to
  1013. strh reg1, ...
  1014. dealloc reg1
  1015. }
  1016. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1017. (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
  1018. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
  1019. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
  1020. getnextinstruction(hp1,hp2) and
  1021. MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
  1022. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
  1023. begin
  1024. TransferUsedRegs(TmpUsedRegs);
  1025. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1026. UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
  1027. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
  1028. begin
  1029. DebugMsg('Peephole Optimization: removed superfluous 16 Bit zero extension', hp1);
  1030. taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
  1031. asml.remove(hp1);
  1032. hp1.free;
  1033. RemoveCurrentP(p, hp2);
  1034. Result:=true;
  1035. Exit;
  1036. end;
  1037. end
  1038. { fold
  1039. mov reg1,reg0, shift imm1
  1040. mov reg1,reg1, shift imm2
  1041. to
  1042. mov reg1,reg0, shift imm1+imm2
  1043. }
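  { Illustrative instance (registers arbitrary):
      mov r0,r1,lsl #2
      mov r0,r0,lsl #3
    becomes
      mov r0,r1,lsl #5 }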
  1044. else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
  1045. { an asr after an lsr behaves like an lsr, so the asr can be folded into the lsr }
  1046. ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR) ) then
  1047. begin
  1048. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1049. { avoid overflows }
  1050. if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
  1051. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  1052. SM_ROR:
  1053. taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
  1054. SM_ASR:
  1055. taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
  1056. SM_LSR,
  1057. SM_LSL:
  1058. begin
  1059. hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
  1060. InsertLLItem(p.previous, p.next, hp2);
  1061. p.free;
  1062. p:=hp2;
  1063. end;
  1064. else
  1065. internalerror(2008072803);
  1066. end;
  1067. DebugMsg('Peephole Optimization: ShiftShift2Shift 1 done', p);
  1068. asml.remove(hp1);
  1069. hp1.free;
  1070. hp1 := nil;
  1071. result := true;
  1072. end
  1073. { fold
  1074. mov reg1,reg0, shift imm1
  1075. mov reg1,reg1, shift imm2
  1076. mov reg1,reg1, shift imm3 ...
  1077. mov reg2,reg1, shift imm3 ...
  1078. }
  1079. else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
  1080. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1081. (taicpu(hp2).ops=3) and
  1082. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1083. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
  1084. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1085. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
  1086. begin
  1087. { mov reg1,reg0, lsl imm1
  1088. mov reg1,reg1, lsr/asr imm2
  1089. mov reg2,reg1, lsl imm3 ...
  1090. to
  1091. mov reg1,reg0, lsl imm1
  1092. mov reg2,reg1, lsr/asr imm2-imm3
  1093. if
  1094. imm1>=imm2
  1095. }
  1096. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1097. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1098. (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1099. begin
  1100. if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1101. begin
  1102. if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
  1103. not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1104. begin
  1105. DebugMsg('Peephole Optimization: ShiftShiftShift2ShiftShift 1a done', p);
  1106. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1107. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1108. asml.remove(hp1);
  1109. asml.remove(hp2);
  1110. hp1.free;
  1111. hp2.free;
  1112. if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
  1113. begin
  1114. taicpu(p).freeop(1);
  1115. taicpu(p).freeop(2);
  1116. taicpu(p).loadconst(1,0);
  1117. end;
  1118. result := true;
  1119. Exit;
  1120. end;
  1121. end
  1122. else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1123. begin
  1124. DebugMsg('Peephole Optimization: ShiftShiftShift2ShiftShift 1b done', p);
  1125. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
  1126. taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1127. asml.remove(hp2);
  1128. hp2.free;
  1129. result := true;
  1130. Exit;
  1131. end;
  1132. end
  1133. { mov reg1,reg0, lsr/asr imm1
  1134. mov reg1,reg1, lsl imm2
  1135. mov reg1,reg1, lsr/asr imm3 ...
  1136. if imm3>=imm1 and imm2>=imm1
  1137. to
  1138. mov reg1,reg0, lsl imm2-imm1
  1139. mov reg1,reg1, lsr/asr imm3 ...
  1140. }
  1141. else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1142. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1143. (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
  1144. (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1145. begin
  1146. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
  1147. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1148. DebugMsg('Peephole Optimization: ShiftShiftShift2ShiftShift 2 done', p);
  1149. if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
  1150. begin
  1151. taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
  1152. asml.remove(hp1);
  1153. hp1.free;
  1154. end;
  1155. RemoveCurrentp(p);
  1156. result := true;
  1157. Exit;
  1158. end;
  1159. end;
  1160. end;
  1161. { All the optimisations from this point on require GetNextInstructionUsingReg
  1162. to return True }
  1163. while (
  1164. GetNextInstructionUsingReg(p, hpfar1, taicpu(p).oper[0]^.reg) and
  1165. (hpfar1.typ = ait_instruction)
  1166. ) do
  1167. begin
  1168. { Change the common
  1169. mov r0, r0, lsr #xxx
  1170. and r0, r0, #yyy/bic r0, r0, #xxx
  1171. and remove the superfluous and/bic if possible
  1172. This could be extended to handle more cases.
  1173. }
  1174. { Change
  1175. mov rx, ry, lsr/ror #xxx
  1176. uxtb/uxth rz,rx/and rz,rx,0xFF
  1177. dealloc rx
  1178. to
  1179. uxtb/uxth rz,ry,ror #xxx
  1180. }
  1181. if (GenerateThumb2Code) and
  1182. (taicpu(p).ops=3) and
  1183. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1184. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1185. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
  1186. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1187. begin
  1188. if MatchInstruction(hpfar1, A_UXTB, [C_None], [PF_None]) and
  1189. (taicpu(hpfar1).ops = 2) and
  1190. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1191. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1192. begin
  1193. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1194. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1195. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1196. taicpu(hpfar1).ops := 3;
  1197. if not Assigned(hp1) then
  1198. GetNextInstruction(p,hp1);
  1199. RemoveCurrentP(p, hp1);
  1200. result:=true;
  1201. exit;
  1202. end
  1203. else if MatchInstruction(hpfar1, A_UXTH, [C_None], [PF_None]) and
  1204. (taicpu(hpfar1).ops=2) and
  1205. (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
  1206. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1207. begin
  1208. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1209. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1210. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1211. taicpu(hpfar1).ops := 3;
  1212. if not Assigned(hp1) then
  1213. GetNextInstruction(p,hp1);
  1214. RemoveCurrentP(p, hp1);
  1215. result:=true;
  1216. exit;
  1217. end
  1218. else if MatchInstruction(hpfar1, A_AND, [C_None], [PF_None]) and
  1219. (taicpu(hpfar1).ops = 3) and
  1220. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1221. (taicpu(hpfar1).oper[2]^.val = $FF) and
  1222. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1223. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1224. begin
  1225. taicpu(hpfar1).ops := 3;
  1226. taicpu(hpfar1).opcode := A_UXTB;
  1227. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1228. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1229. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1230. if not Assigned(hp1) then
  1231. GetNextInstruction(p,hp1);
  1232. RemoveCurrentP(p, hp1);
  1233. result:=true;
  1234. exit;
  1235. end;
  1236. end;
{ 2-operand mov optimisations }
  1238. if (taicpu(p).ops = 2) then
  1239. begin
  1240. {
  1241. This removes the mul from
  1242. mov rX,0
  1243. ...
  1244. mul ...,rX,...
  1245. }
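{ The variant that is actually enabled below only handles MLA:
mov rX,#0
...
mla rD,rA,rB,rX
is changed into
mul rD,rA,rB
and the mov is removed as well if rX is not used afterwards }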
  1246. if (taicpu(p).oper[1]^.typ = top_const) then
  1247. begin
  1248. (* if false and
  1249. (taicpu(p).oper[1]^.val=0) and
  1250. MatchInstruction(hpfar1, [A_MUL,A_MLA], [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1251. (((taicpu(hpfar1).oper[1]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^)) or
  1252. ((taicpu(hpfar1).oper[2]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[2]^))) then
  1253. begin
  1254. TransferUsedRegs(TmpUsedRegs);
  1255. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1256. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1257. DebugMsg('Peephole Optimization: MovMUL/MLA2Mov0 done', p);
  1258. if taicpu(hpfar1).opcode=A_MUL then
  1259. taicpu(hpfar1).loadconst(1,0)
  1260. else
  1261. taicpu(hpfar1).loadreg(1,taicpu(hpfar1).oper[3]^.reg);
  1262. taicpu(hpfar1).ops:=2;
  1263. taicpu(hpfar1).opcode:=A_MOV;
  1264. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1265. RemoveCurrentP(p);
  1266. Result:=true;
  1267. exit;
  1268. end
  1269. else*) if (taicpu(p).oper[1]^.val=0) and
  1270. MatchInstruction(hpfar1, A_MLA, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1271. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[3]^) then
  1272. begin
  1273. TransferUsedRegs(TmpUsedRegs);
  1274. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1275. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1276. DebugMsg('Peephole Optimization: MovMLA2MUL 1 done', p);
  1277. taicpu(hpfar1).ops:=3;
  1278. taicpu(hpfar1).opcode:=A_MUL;
  1279. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1280. begin
  1281. RemoveCurrentP(p);
  1282. Result:=true;
  1283. end;
  1284. exit;
  1285. end
  1286. {
  1287. This changes the very common
  1288. mov r0, #0
  1289. str r0, [...]
  1290. mov r0, #0
  1291. str r0, [...]
  1292. and removes all superfluous mov instructions
  1293. }
  1294. else if (taicpu(hpfar1).opcode=A_STR) then
  1295. begin
  1296. hp1 := hpfar1;
  1297. while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
  1298. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1299. GetNextInstruction(hp1, hp2) and
  1300. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1301. (taicpu(hp2).ops = 2) and
  1302. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
  1303. MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
  1304. begin
  1305. DebugMsg('Peephole Optimization: MovStrMov done', hp2);
  1306. GetNextInstruction(hp2,hp1);
  1307. asml.remove(hp2);
  1308. hp2.free;
  1309. result:=true;
  1310. if not assigned(hp1) then break;
  1311. end;
  1312. if Result then
  1313. Exit;
  1314. end;
  1315. end;
  1316. {
  1317. This removes the first mov from
  1318. mov rX,...
  1319. mov rX,...
  1320. }
  1321. if taicpu(hpfar1).opcode=A_MOV then
  1322. begin
  1323. hp1 := p;
  1324. while MatchInstruction(hpfar1, A_MOV, [taicpu(hp1).condition], [taicpu(hp1).oppostfix]) and
  1325. (taicpu(hpfar1).ops = 2) and
  1326. MatchOperand(taicpu(hp1).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1327. { don't remove the first mov if the second is a mov rX,rX }
  1328. not(MatchOperand(taicpu(hpfar1).oper[0]^, taicpu(hpfar1).oper[1]^)) do
  1329. begin
  1330. { Defer removing the first p until after the while loop }
  1331. if p <> hp1 then
  1332. begin
  1333. DebugMsg('Peephole Optimization: MovMov done', hp1);
  1334. asml.remove(hp1);
  1335. hp1.free;
  1336. end;
  1337. hp1:=hpfar1;
  1338. GetNextInstruction(hpfar1,hpfar1);
  1339. result:=true;
  1340. if not assigned(hpfar1) then
  1341. Break;
  1342. end;
  1343. if Result then
  1344. begin
  1345. DebugMsg('Peephole Optimization: MovMov done', p);
  1346. RemoveCurrentp(p);
  1347. Exit;
  1348. end;
  1349. end;
  1350. if RedundantMovProcess(p,hpfar1) then
  1351. begin
  1352. Result:=true;
  1353. { p might not point at a mov anymore }
  1354. exit;
  1355. end;
  1356. { If hpfar1 is nil after the call to RedundantMovProcess, it is
  1357. because it would have become a dangling pointer, so reinitialise it. }
  1358. if not Assigned(hpfar1) then
  1359. Continue;
  1360. { Fold the very common sequence
  1361. mov regA, regB
  1362. ldr* regA, [regA]
  1363. to
  1364. ldr* regA, [regB]
  1365. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1366. }
  1367. if
  1368. // Make sure that Thumb code doesn't propagate a high register into a reference
  1369. (
  1370. (
  1371. GenerateThumbCode and
  1372. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)
  1373. ) or (not GenerateThumbCode)
  1374. ) and
  1375. (taicpu(p).oper[1]^.typ = top_reg) and
  1376. (taicpu(p).oppostfix = PF_NONE) and
  1377. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1378. (taicpu(hpfar1).oper[1]^.typ = top_ref) and
  1379. { We can change the base register only when the instruction uses AM_OFFSET }
  1380. ((taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1381. ((taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1382. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1383. ) and
  1384. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1385. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1386. begin
  1387. DebugMsg('Peephole Optimization: MovLdr2Ldr done', hpfar1);
  1388. if (taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1389. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1390. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1391. if taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1392. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1393. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1394. if Assigned(dealloc) then
  1395. begin
  1396. asml.remove(dealloc);
  1397. asml.InsertAfter(dealloc,hpfar1);
  1398. end;
  1399. if (not Assigned(hp1)) or (p=hp1) then
  1400. GetNextInstruction(p, hp1);
  1401. RemoveCurrentP(p, hp1);
  1402. result:=true;
  1403. Exit;
  1404. end
  1405. end
{ 3-operand mov optimisations }
  1407. else if (taicpu(p).ops = 3) then
  1408. begin
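{ Remove a superfluous AND/BIC after a right shift, e.g.
mov r1, r0, lsr #imm
and r2, r1, #mask
to
mov r2, r0, lsr #imm
when the mask keeps every bit that can still be set after the shift
(the AND case additionally requires imm>=24, the BIC case requires
that the bits being cleared are already zeroed by the shift) }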
  1409. if (taicpu(p).oper[2]^.typ = top_shifterop) and
  1410. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1411. (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
  1412. (taicpu(hpfar1).ops>=1) and
  1413. (taicpu(hpfar1).oper[0]^.typ=top_reg) and
  1414. (not RegModifiedBetween(taicpu(hpfar1).oper[0]^.reg, p, hpfar1)) and
  1415. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1416. begin
  1417. if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24 ) and
  1418. MatchInstruction(hpfar1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1419. (taicpu(hpfar1).ops=3) and
  1420. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1421. (taicpu(hpfar1).oper[2]^.typ = top_const) and
{ Check if the AND would actually only mask out bits that are already zero because of the shift }
  1424. ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hpfar1).oper[2]^.val) =
  1425. ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
  1426. begin
  1427. DebugMsg('Peephole Optimization: LsrAnd2Lsr done', hpfar1);
  1428. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1429. asml.remove(hpfar1);
  1430. hpfar1.free;
  1431. result:=true;
  1432. Exit;
  1433. end
  1434. else if MatchInstruction(hpfar1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1435. (taicpu(hpfar1).ops=3) and
  1436. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1437. (taicpu(hpfar1).oper[2]^.typ = top_const) and
{ Check if the BIC would actually only mask out bits that are already zero because of the shift }
  1439. (taicpu(hpfar1).oper[2]^.val<>0) and
  1440. (BsfDWord(taicpu(hpfar1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1441. begin
  1442. DebugMsg('Peephole Optimization: LsrBic2Lsr done', hpfar1);
  1443. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1444. asml.remove(hpfar1);
  1445. hpfar1.free;
  1446. result:=true;
  1447. Exit;
  1448. end;
  1449. end;
  1450. { This folds shifterops into following instructions
  1451. mov r0, r1, lsl #8
  1452. add r2, r3, r0
  1453. to
  1454. add r2, r3, r1, lsl #8
  1455. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1456. }
  1457. if (taicpu(p).oper[1]^.typ = top_reg) and
  1458. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1459. (taicpu(p).oppostfix = PF_NONE) and
  1460. MatchInstruction(hpfar1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1461. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1462. A_CMP, A_CMN],
  1463. [taicpu(p).condition], [PF_None]) and
  1464. (not ((GenerateThumb2Code) and
  1465. (taicpu(hpfar1).opcode in [A_SBC]) and
  1466. (((taicpu(hpfar1).ops=3) and
  1467. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^.reg)) or
  1468. ((taicpu(hpfar1).ops=2) and
  1469. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^.reg))))) and
  1470. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) and
  1471. (taicpu(hpfar1).ops >= 2) and
  1472. {Currently we can't fold into another shifterop}
  1473. (taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^.typ = top_reg) and
  1474. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1475. NR_DEFAULTFLAGS for modification}
  1476. (
  1477. {Everything is fine if we don't use RRX}
  1478. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1479. (
  1480. {If it is RRX, then check if we're just accessing the next instruction}
  1481. Assigned(hp1) and
  1482. (hpfar1 = hp1)
  1483. )
  1484. ) and
{ reg1 must not be modified in between }
  1486. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
{ The shifterop can contain a register, which must then not be modified in between either }
  1488. (
  1489. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1490. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hpfar1))
  1491. ) and
  1492. (
  1493. {Only ONE of the two src operands is allowed to match}
  1494. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-2]^) xor
  1495. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^)
  1496. ) then
  1497. begin
  1498. if taicpu(hpfar1).opcode in [A_TST, A_TEQ, A_CMN] then
  1499. I2:=0
  1500. else
  1501. I2:=1;
  1502. for I:=I2 to taicpu(hpfar1).ops-1 do
  1503. if MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[I]^.reg) then
  1504. begin
{ If the parameter matched on the second operand from the RIGHT,
we have to swap the parameters; this will not happen for CMP,
where only the rightmost parameter is evaluated
}
  1509. if I <> taicpu(hpfar1).ops-1 then
  1510. begin
  1511. {The SUB operators need to be changed when we swap parameters}
  1512. case taicpu(hpfar1).opcode of
  1513. A_SUB: tempop:=A_RSB;
  1514. A_SBC: tempop:=A_RSC;
  1515. A_RSB: tempop:=A_SUB;
  1516. A_RSC: tempop:=A_SBC;
  1517. else tempop:=taicpu(hpfar1).opcode;
  1518. end;
  1519. if taicpu(hpfar1).ops = 3 then
  1520. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1521. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[2]^.reg,
  1522. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1523. else
  1524. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1525. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1526. taicpu(p).oper[2]^.shifterop^);
  1527. end
  1528. else
  1529. if taicpu(hpfar1).ops = 3 then
  1530. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1531. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[1]^.reg,
  1532. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1533. else
  1534. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1535. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1536. taicpu(p).oper[2]^.shifterop^);
  1537. if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
  1538. AllocRegBetween(taicpu(p).oper[2]^.shifterop^.rs,p,hpfar1,UsedRegs);
  1539. AllocRegBetween(taicpu(p).oper[1]^.reg,p,hpfar1,UsedRegs);
  1540. asml.insertbefore(hp2, hpfar1);
  1541. asml.remove(hpfar1);
  1542. hpfar1.free;
  1543. DebugMsg('Peephole Optimization: FoldShiftProcess done', hp2);
  1544. if not Assigned(hp1) then
  1545. GetNextInstruction(p, hp1)
  1546. else if hp1 = hpfar1 then
  1547. { If hp1 = hpfar1, then it's a dangling pointer }
  1548. hp1 := hp2;
  1549. RemoveCurrentP(p, hp1);
  1550. Result:=true;
  1551. Exit;
  1552. end;
  1553. end;
  1554. {
  1555. Fold
  1556. mov r1, r1, lsl #2
  1557. ldr/ldrb r0, [r0, r1]
  1558. to
  1559. ldr/ldrb r0, [r0, r1, lsl #2]
  1560. XXX: This still needs some work, as we quite often encounter something like
  1561. mov r1, r2, lsl #2
  1562. add r2, r3, #imm
  1563. ldr r0, [r2, r1]
  1564. which can't be folded because r2 is overwritten between the shift and the ldr.
We could try to shuffle the registers around and fold it into:
  1566. add r1, r3, #imm
  1567. ldr r0, [r1, r2, lsl #2]
  1568. }
  1569. if (not(GenerateThumbCode)) and
  1570. { thumb2 allows only lsl #0..#3 }
  1571. (not(GenerateThumb2Code) or
  1572. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1573. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1574. )
  1575. ) and
  1576. (taicpu(p).oper[1]^.typ = top_reg) and
  1577. (taicpu(p).oper[2]^.typ = top_shifterop) and
{ RRX is tough to handle, because it requires tracking the C-Flag;
it is also extremely unlikely to be emitted this way}
  1580. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1581. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1582. (taicpu(p).oppostfix = PF_NONE) and
  1583. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1584. (MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1585. (GenerateThumb2Code and
  1586. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1587. ) and
  1588. (
{If this uses offset addressing, either of the two registers can be used}
  1590. ((taicpu(hpfar1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1591. (
  1592. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1593. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1594. )
  1595. ) or
{For post- and pre-indexed addressing only the index register can be used}
  1597. ((taicpu(hpfar1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1598. (
  1599. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1600. (taicpu(hpfar1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1601. ) and
  1602. (not GenerateThumb2Code)
  1603. )
  1604. ) and
  1605. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1606. (taicpu(hpfar1).oper[1]^.ref^.index<>NR_NO) and
  1607. (taicpu(hpfar1).oper[1]^.ref^.base<>NR_NO) and
  1608. { Only fold if there isn't another shifterop already, and offset is zero. }
  1609. (taicpu(hpfar1).oper[1]^.ref^.offset = 0) and
  1610. (taicpu(hpfar1).oper[1]^.ref^.shiftmode = SM_None) and
  1611. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1612. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1613. begin
{ If the register we are folding the shift into sits in the base, move the other register into the base so the shifted operand can go into the index }
  1615. if (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1616. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(hpfar1).oper[1]^.ref^.index;
  1617. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1618. taicpu(hpfar1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1619. taicpu(hpfar1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1620. DebugMsg('Peephole Optimization: FoldShiftLdrStr done', hpfar1);
  1621. RemoveCurrentP(p);
  1622. Result:=true;
  1623. Exit;
  1624. end;
  1625. end;
  1626. {
  1627. Often we see shifts and then a superfluous mov to another register
  1628. In the future this might be handled in RedundantMovProcess when it uses RegisterTracking
  1629. }
  1630. if RemoveSuperfluousMove(p, hpfar1, 'MovMov2Mov') then
  1631. Result:=true;
  1632. Exit;
  1633. end;
  1634. end;
  1635. function TCpuAsmOptimizer.OptPass1MVN(var p: tai): Boolean;
  1636. var
  1637. hp1: tai;
  1638. begin
  1639. {
  1640. change
  1641. mvn reg2,reg1
  1642. and reg3,reg4,reg2
  1643. dealloc reg2
  1644. to
  1645. bic reg3,reg4,reg1
  1646. }
  1647. Result := False;
  1648. if (taicpu(p).oper[1]^.typ = top_reg) and
  1649. GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
  1650. MatchInstruction(hp1,A_AND,[],[]) and
  1651. (((taicpu(hp1).ops=3) and
  1652. (taicpu(hp1).oper[2]^.typ=top_reg) and
  1653. (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
  1654. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
  1655. ((taicpu(hp1).ops=2) and
  1656. (taicpu(hp1).oper[1]^.typ=top_reg) and
  1657. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
  1658. assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
{ reg1 must not be modified in between }
  1660. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
  1661. begin
  1662. DebugMsg('Peephole Optimization: MvnAnd2Bic done', p);
  1663. taicpu(hp1).opcode:=A_BIC;
  1664. if taicpu(hp1).ops=3 then
  1665. begin
  1666. if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1667. taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
  1668. taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
  1669. end
  1670. else
  1671. taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
  1672. RemoveCurrentp(p);
  1673. Result := True;
  1674. Exit;
  1675. end;
  1676. end;
  1677. function TCpuAsmOptimizer.OptPass1VMov(var p: tai): Boolean;
  1678. var
  1679. hp1: tai;
  1680. begin
  1681. {
  1682. change
  1683. vmov reg0,reg1,reg2
  1684. vmov reg1,reg2,reg0
  1685. into
  1686. vmov reg0,reg1,reg2
can be applied regardless of whether reg0 or reg2 is the vfp register
  1688. }
  1689. Result := False;
  1690. if (taicpu(p).ops = 3) then
  1691. while GetNextInstruction(p, hp1) and
  1692. MatchInstruction(hp1, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1693. (taicpu(hp1).ops = 3) and
  1694. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[2]^) and
  1695. MatchOperand(taicpu(p).oper[1]^, taicpu(hp1).oper[0]^) and
  1696. MatchOperand(taicpu(p).oper[2]^, taicpu(hp1).oper[1]^) do
  1697. begin
  1698. asml.Remove(hp1);
  1699. hp1.free;
  1700. DebugMsg('Peephole Optimization: VMovVMov2VMov done', p);
  1701. { Can we do it again? }
  1702. end;
  1703. end;
  1704. function TCpuAsmOptimizer.OptPass1VOp(var p: tai): Boolean;
  1705. var
  1706. hp1: tai;
  1707. begin
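{ Fold a following vmov of the result into the arithmetic instruction
itself (VOpVMov2VOp), roughly
vop reg0,...
vmov reg1,reg0
to
vop reg1,...
provided reg0 is not used afterwards }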
  1708. Result := GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1709. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp');
  1710. end;
  1711. function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  1712. begin
  1713. result := false;
  1714. if p.typ = ait_instruction then
  1715. begin
  1716. case taicpu(p).opcode of
  1717. A_CMP:
  1718. Result := OptPass1CMP(p);
  1719. A_STR:
  1720. Result := OptPass1STR(p);
  1721. A_LDR:
  1722. Result := OptPass1LDR(p);
  1723. A_MOV:
  1724. Result := OptPass1MOV(p);
  1725. A_AND:
  1726. Result := OptPass1And(p);
  1727. A_ADD,
  1728. A_SUB:
  1729. Result := OptPass1ADDSUB(p);
  1730. A_MUL:
Result := OptPass1MUL(p);
  1732. A_ADC,
  1733. A_RSB,
  1734. A_RSC,
  1735. A_SBC,
  1736. A_BIC,
  1737. A_EOR,
  1738. A_ORR,
  1739. A_MLA,
  1740. A_MLS,
  1741. A_QADD,A_QADD16,A_QADD8,
  1742. A_QSUB,A_QSUB16,A_QSUB8,
  1743. A_QDADD,A_QDSUB,A_QASX,A_QSAX,
  1744. A_SHADD16,A_SHADD8,A_UHADD16,A_UHADD8,
  1745. A_SHSUB16,A_SHSUB8,A_UHSUB16,A_UHSUB8,
  1746. A_PKHTB,A_PKHBT,
  1747. A_SMUAD,A_SMUSD:
  1748. Result := OptPass1DataCheckMov(p);
  1749. {$ifdef dummy}
  1750. A_MVN:
Result := OptPass1MVN(p);
  1752. {$endif dummy}
  1753. A_UXTB:
  1754. Result := OptPass1UXTB(p);
  1755. A_UXTH:
  1756. Result := OptPass1UXTH(p);
  1757. A_SXTB:
  1758. Result := OptPass1SXTB(p);
  1759. A_SXTH:
  1760. Result := OptPass1SXTH(p);
  1761. A_STM:
  1762. Result := OptPass1STM(p);
  1763. A_VMOV:
  1764. Result := OptPass1VMov(p);
  1765. A_VLDR,
  1766. A_VADD,
  1767. A_VMUL,
  1768. A_VDIV,
  1769. A_VSUB,
  1770. A_VSQRT,
  1771. A_VNEG,
  1772. A_VCVT,
  1773. A_VABS:
  1774. Result := OptPass1VOp(p);
  1775. else
  1776. ;
  1777. end;
  1778. end;
  1779. end;
{ instructions which must be the last one of a conditionalised block: branches, calls, SWI, compare/test and other CPSR-modifying instructions, and writes to PC }
  1781. function MustBeLast(p : tai) : boolean;
  1782. begin
  1783. Result:=(p.typ=ait_instruction) and
  1784. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  1785. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  1786. (taicpu(p).oppostfix=PF_S));
  1787. end;
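{ Pass 2 for ARM: replace short forward conditional branches by
conditionally executed instructions (Bcc2Cond), including the
if/else shape with an additional unconditional branch (BccB2Cond) }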
  1788. procedure TCpuAsmOptimizer.PeepHoleOptPass2;
  1789. var
  1790. p,hp1,hp2: tai;
  1791. l : longint;
  1792. condition : tasmcond;
  1793. hp3: tai;
  1794. WasLast: boolean;
  1795. { UsedRegs, TmpUsedRegs: TRegSet; }
  1796. begin
  1797. p := BlockStart;
  1798. { UsedRegs := []; }
  1799. while (p <> BlockEnd) Do
  1800. begin
  1801. { UpdateUsedRegs(UsedRegs, tai(p.next)); }
  1802. case p.Typ Of
  1803. Ait_Instruction:
  1804. begin
  1805. case taicpu(p).opcode Of
  1806. A_B:
  1807. if (taicpu(p).condition<>C_None) and
  1808. not(GenerateThumbCode) then
  1809. begin
  1810. { check for
  1811. Bxx xxx
  1812. <several instructions>
  1813. xxx:
  1814. }
  1815. l:=0;
  1816. WasLast:=False;
  1817. GetNextInstruction(p, hp1);
  1818. while assigned(hp1) and
  1819. (l<=4) and
  1820. CanBeCond(hp1) and
  1821. { stop on labels }
  1822. not(hp1.typ=ait_label) and
{ stop at branches so that the BccB2Cond case below can still be recognised }
  1824. not((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B)) do
  1825. begin
  1826. inc(l);
  1827. if MustBeLast(hp1) then
  1828. begin
  1829. WasLast:=True;
  1830. GetNextInstruction(hp1,hp1);
  1831. break;
  1832. end
  1833. else
  1834. GetNextInstruction(hp1,hp1);
  1835. end;
  1836. if assigned(hp1) then
  1837. begin
  1838. if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  1839. begin
  1840. if (l<=4) and (l>0) then
  1841. begin
  1842. condition:=inverse_cond(taicpu(p).condition);
  1843. hp2:=p;
  1844. GetNextInstruction(p,hp1);
  1845. p:=hp1;
  1846. repeat
  1847. if hp1.typ=ait_instruction then
  1848. taicpu(hp1).condition:=condition;
  1849. if MustBeLast(hp1) then
  1850. begin
  1851. GetNextInstruction(hp1,hp1);
  1852. break;
  1853. end
  1854. else
  1855. GetNextInstruction(hp1,hp1);
  1856. until not(assigned(hp1)) or
  1857. not(CanBeCond(hp1)) or
  1858. (hp1.typ=ait_label);
  1859. DebugMsg('Peephole Bcc2Cond done',hp2);
{ wait with the removal, otherwise GetNextInstruction could
ignore the label if the jump being moved away was its
only usage }
  1863. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  1864. asml.remove(hp2);
  1865. hp2.free;
  1866. continue;
  1867. end;
  1868. end
  1869. else
{ do not perform further optimizations if there is an instruction
in block #1 which cannot be optimized
}
  1873. if not WasLast then
  1874. begin
  1875. { check further for
  1876. Bcc xxx
  1877. <several instructions 1>
  1878. B yyy
  1879. xxx:
  1880. <several instructions 2>
  1881. yyy:
  1882. }
  1883. { hp2 points to jmp yyy }
  1884. hp2:=hp1;
  1885. { skip hp1 to xxx }
  1886. GetNextInstruction(hp1, hp1);
  1887. if assigned(hp2) and
  1888. assigned(hp1) and
  1889. (l<=3) and
  1890. (hp2.typ=ait_instruction) and
  1891. (taicpu(hp2).is_jmp) and
  1892. (taicpu(hp2).condition=C_None) and
  1893. { real label and jump, no further references to the
  1894. label are allowed }
  1895. (tasmlabel(taicpu(p).oper[0]^.ref^.symbol).getrefs=1) and
  1896. FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  1897. begin
  1898. l:=0;
{ skip hp1 to <several instructions 2> }
  1900. GetNextInstruction(hp1, hp1);
  1901. while assigned(hp1) and
  1902. CanBeCond(hp1) and
  1903. (l<=3) do
  1904. begin
  1905. inc(l);
  1906. if MustBeLast(hp1) then
  1907. begin
  1908. GetNextInstruction(hp1, hp1);
  1909. break;
  1910. end
  1911. else
  1912. GetNextInstruction(hp1, hp1);
  1913. end;
  1914. { hp1 points to yyy: }
  1915. if assigned(hp1) and
  1916. FindLabel(tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol),hp1) then
  1917. begin
  1918. condition:=inverse_cond(taicpu(p).condition);
  1919. GetNextInstruction(p,hp1);
  1920. hp3:=p;
  1921. p:=hp1;
  1922. repeat
  1923. if hp1.typ=ait_instruction then
  1924. taicpu(hp1).condition:=condition;
  1925. if MustBeLast(hp1) then
  1926. begin
  1927. GetNextInstruction(hp1, hp1);
  1928. break;
  1929. end
  1930. else
  1931. GetNextInstruction(hp1, hp1);
  1932. until not(assigned(hp1)) or
  1933. not(CanBeCond(hp1)) or
  1934. ((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B));
  1935. { hp2 is still at jmp yyy }
  1936. GetNextInstruction(hp2,hp1);
  1937. { hp1 is now at xxx: }
  1938. condition:=inverse_cond(condition);
  1939. GetNextInstruction(hp1,hp1);
{ hp1 is now at <several instructions 2> }
  1941. repeat
  1942. if hp1.typ=ait_instruction then
  1943. taicpu(hp1).condition:=condition;
  1944. GetNextInstruction(hp1,hp1);
  1945. until not(assigned(hp1)) or
  1946. not(CanBeCond(hp1)) or
  1947. (hp1.typ=ait_label);
  1948. DebugMsg('Peephole BccB2Cond done',hp3);
  1949. { remove Bcc }
  1950. tasmlabel(taicpu(hp3).oper[0]^.ref^.symbol).decrefs;
  1951. asml.remove(hp3);
  1952. hp3.free;
  1953. { remove B }
  1954. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  1955. asml.remove(hp2);
  1956. hp2.free;
  1957. continue;
  1958. end;
  1959. end;
  1960. end;
  1961. end;
  1962. end;
  1963. else
  1964. ;
  1965. end;
  1966. end;
  1967. else
  1968. ;
  1969. end;
  1970. p := tai(p.next)
  1971. end;
  1972. end;
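{ BL is conservatively reported as using any register; LDRD/STRD also
use the implicit second register of the pair (reg+1), which the
inherited check would not see }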
  1973. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  1974. begin
  1975. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  1976. Result:=true
  1977. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  1978. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  1979. Result:=true
  1980. else
  1981. Result:=inherited RegInInstruction(Reg, p1);
  1982. end;
  1983. const
{ set of opcodes which might or do write to memory }
  1985. { TODO : extend armins.dat to contain r/w info }
  1986. opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
  1987. A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
{ adjust the register live information when swapping the two instructions p and hp1;
they must directly follow one another }
  1990. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
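{ if the live range of reg ends at hp1 and p uses reg as well, the
live end has to move to p, which comes last after the swap }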
  1991. procedure CheckLiveEnd(reg : tregister);
  1992. var
  1993. supreg : TSuperRegister;
  1994. regtype : TRegisterType;
  1995. begin
  1996. if reg=NR_NO then
  1997. exit;
  1998. regtype:=getregtype(reg);
  1999. supreg:=getsupreg(reg);
  2000. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_end[supreg]=hp1) and
  2001. RegInInstruction(reg,p) then
  2002. cg.rg[regtype].live_end[supreg]:=p;
  2003. end;
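{ if the live range of reg starts at p and hp1 uses reg as well, the
live start has to move to hp1, which comes first after the swap }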
  2004. procedure CheckLiveStart(reg : TRegister);
  2005. var
  2006. supreg : TSuperRegister;
  2007. regtype : TRegisterType;
  2008. begin
  2009. if reg=NR_NO then
  2010. exit;
  2011. regtype:=getregtype(reg);
  2012. supreg:=getsupreg(reg);
  2013. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_start[supreg]=p) and
  2014. RegInInstruction(reg,hp1) then
  2015. cg.rg[regtype].live_start[supreg]:=hp1;
  2016. end;
  2017. var
  2018. i : longint;
  2019. r : TSuperRegister;
  2020. begin
  2021. { assumption: p is directly followed by hp1 }
  2022. { if live of any reg used by p starts at p and hp1 uses this register then
  2023. set live start to hp1 }
  2024. for i:=0 to p.ops-1 do
  2025. case p.oper[i]^.typ of
  2026. Top_Reg:
  2027. CheckLiveStart(p.oper[i]^.reg);
  2028. Top_Ref:
  2029. begin
  2030. CheckLiveStart(p.oper[i]^.ref^.base);
  2031. CheckLiveStart(p.oper[i]^.ref^.index);
  2032. end;
  2033. Top_Shifterop:
  2034. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2035. Top_RegSet:
  2036. for r:=RS_R0 to RS_R15 do
  2037. if r in p.oper[i]^.regset^ then
  2038. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2039. else
  2040. ;
  2041. end;
  2042. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2043. set live end to p }
  2044. for i:=0 to hp1.ops-1 do
  2045. case hp1.oper[i]^.typ of
  2046. Top_Reg:
  2047. CheckLiveEnd(hp1.oper[i]^.reg);
  2048. Top_Ref:
  2049. begin
  2050. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2051. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2052. end;
  2053. Top_Shifterop:
CheckLiveEnd(hp1.oper[i]^.shifterop^.rs);
  2055. Top_RegSet:
  2056. for r:=RS_R0 to RS_R15 do
  2057. if r in hp1.oper[i]^.regset^ then
  2058. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2059. else
  2060. ;
  2061. end;
  2062. end;
  2063. function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
  2064. { TODO : schedule also forward }
  2065. { TODO : schedule distance > 1 }
  2066. { returns true if p might be a load of a pc relative tls offset }
  2067. function PossibleTLSLoad(const p: tai) : boolean;
  2068. begin
  2069. Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
  2070. (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
  2071. (taicpu(p).oper[1]^.ref^.index=NR_PC)));
  2072. end;
  2073. var
  2074. hp1,hp2,hp3,hp4,hp5,insertpos : tai;
  2075. list : TAsmList;
  2076. begin
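{ if the instruction after p is a load whose result is needed by the
instruction immediately following it, try to move the load up in
front of p, so the load and its first use are no longer adjacent }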
  2077. result:=true;
  2078. list:=TAsmList.create;
  2079. p:=BlockStart;
  2080. while p<>BlockEnd Do
  2081. begin
  2082. if (p.typ=ait_instruction) and
  2083. GetNextInstruction(p,hp1) and
  2084. (hp1.typ=ait_instruction) and
  2085. (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
  2086. (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
{ for now we don't reschedule if the previous instruction potentially changes a memory location }
  2088. ( (not(taicpu(p).opcode in opcode_could_mem_write) and
  2089. not(RegModifiedByInstruction(NR_PC,p))
  2090. ) or
  2091. ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
  2092. ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
  2093. (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
  2094. (taicpu(hp1).oper[1]^.ref^.offset=0)
  2095. )
  2096. ) or
{ try to prove that the memory accesses don't overlap }
  2098. ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
  2099. (taicpu(p).oper[1]^.typ = top_ref) and
  2100. (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
  2101. (taicpu(p).oppostfix=PF_None) and
  2102. (taicpu(hp1).oppostfix=PF_None) and
  2103. (taicpu(p).oper[1]^.ref^.index=NR_NO) and
  2104. (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
{ get operand sizes and check if the offset distance is large enough to ensure no overlap }
  2106. (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
  2107. )
  2108. )
  2109. ) and
  2110. GetNextInstruction(hp1,hp2) and
  2111. (hp2.typ=ait_instruction) and
{ loaded register used by next instruction?
if we ever support labels here (they could be skipped in theory), the gnu2 tls general-dynamic code could get broken (the ldr before
the bl must not be scheduled away from the bl), so this case would need to be taken care of
}
  2116. (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
  2117. { loaded register not used by previous instruction? }
  2118. not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
  2119. { same condition? }
  2120. (taicpu(p).condition=taicpu(hp1).condition) and
{ first instruction must not change the register used as base }
  2122. ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
  2123. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
  2124. ) and
{ first instruction must not change the register used as index }
  2126. ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
  2127. not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
  2128. ) and
{ if we modify the base register AND the first instruction uses that register, we cannot schedule }
  2130. ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
  2131. not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
  2132. not(PossibleTLSLoad(p)) and
  2133. not(PossibleTLSLoad(hp1)) then
  2134. begin
  2135. hp3:=tai(p.Previous);
  2136. hp5:=tai(p.next);
  2137. asml.Remove(p);
{ if there are reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
associated with p, move them together with p }
  2140. { before the instruction? }
  2141. { find reg allocs,deallocs and PIC labels }
  2142. while assigned(hp3) and (hp3.typ<>ait_instruction) do
  2143. begin
  2144. if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
  2145. RegInInstruction(tai_regalloc(hp3).reg,p) )
  2146. or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
  2147. then
  2148. begin
  2149. hp4:=hp3;
  2150. hp3:=tai(hp3.Previous);
  2151. asml.Remove(hp4);
  2152. list.Insert(hp4);
  2153. end
  2154. else
  2155. hp3:=tai(hp3.Previous);
  2156. end;
  2157. list.Concat(p);
  2158. SwapRegLive(taicpu(p),taicpu(hp1));
  2159. { after the instruction? }
  2160. { find reg deallocs and reg syncs }
  2161. while assigned(hp5) and (hp5.typ<>ait_instruction) do
  2162. begin
  2163. if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
  2164. RegInInstruction(tai_regalloc(hp5).reg,p) then
  2165. begin
  2166. hp4:=hp5;
  2167. hp5:=tai(hp5.next);
  2168. asml.Remove(hp4);
  2169. list.Concat(hp4);
  2170. end
  2171. else
  2172. hp5:=tai(hp5.Next);
  2173. end;
  2174. asml.Remove(hp1);
  2175. { if there are address labels associated with hp2, those must
  2176. stay with hp2 (e.g. for GOT-less PIC) }
  2177. insertpos:=hp2;
  2178. while assigned(hp2.previous) and
  2179. (tai(hp2.previous).typ<>ait_instruction) do
  2180. begin
  2181. hp2:=tai(hp2.previous);
  2182. if (hp2.typ=ait_label) and
  2183. (tai_label(hp2).labsym.typ=AT_ADDR) then
  2184. insertpos:=hp2;
  2185. end;
  2186. {$ifdef DEBUG_PREREGSCHEDULER}
  2187. asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
  2188. {$endif DEBUG_PREREGSCHEDULER}
  2189. asml.InsertBefore(hp1,insertpos);
  2190. asml.InsertListBefore(insertpos,list);
  2191. p:=tai(p.next);
  2192. end
  2193. else if p.typ=ait_instruction then
  2194. p:=hp1
  2195. else
  2196. p:=tai(p.next);
  2197. end;
  2198. list.Free;
  2199. end;
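{ an instruction covered by a preceding IT block is being removed:
shorten that IT block by one slot, or remove a plain IT completely }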
  2200. procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  2201. var
  2202. hp : tai;
  2203. l : longint;
  2204. begin
  2205. hp := tai(p.Previous);
  2206. l := 1;
  2207. while assigned(hp) and
  2208. (l <= 4) do
  2209. begin
  2210. if hp.typ=ait_instruction then
  2211. begin
  2212. if (taicpu(hp).opcode>=A_IT) and
  2213. (taicpu(hp).opcode <= A_ITTTT) then
  2214. begin
  2215. if (taicpu(hp).opcode = A_IT) and
  2216. (l=1) then
  2217. list.Remove(hp)
  2218. else
  2219. case taicpu(hp).opcode of
  2220. A_ITE:
  2221. if l=2 then taicpu(hp).opcode := A_IT;
  2222. A_ITT:
  2223. if l=2 then taicpu(hp).opcode := A_IT;
  2224. A_ITEE:
  2225. if l=3 then taicpu(hp).opcode := A_ITE;
  2226. A_ITTE:
  2227. if l=3 then taicpu(hp).opcode := A_ITT;
  2228. A_ITET:
  2229. if l=3 then taicpu(hp).opcode := A_ITE;
  2230. A_ITTT:
  2231. if l=3 then taicpu(hp).opcode := A_ITT;
  2232. A_ITEEE:
  2233. if l=4 then taicpu(hp).opcode := A_ITEE;
  2234. A_ITTEE:
  2235. if l=4 then taicpu(hp).opcode := A_ITTE;
  2236. A_ITETE:
  2237. if l=4 then taicpu(hp).opcode := A_ITET;
  2238. A_ITTTE:
  2239. if l=4 then taicpu(hp).opcode := A_ITTT;
  2240. A_ITEET:
  2241. if l=4 then taicpu(hp).opcode := A_ITEE;
  2242. A_ITTET:
  2243. if l=4 then taicpu(hp).opcode := A_ITTE;
  2244. A_ITETT:
  2245. if l=4 then taicpu(hp).opcode := A_ITET;
  2246. A_ITTTT:
  2247. begin
  2248. if l=4 then taicpu(hp).opcode := A_ITTT;
  2249. end
  2250. else
  2251. ;
  2252. end;
  2253. break;
  2254. end;
  2255. {else if (taicpu(hp).condition<>taicpu(p).condition) or
  2256. (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
  2257. break;}
  2258. inc(l);
  2259. end;
  2260. hp := tai(hp.Previous);
  2261. end;
  2262. end;
  2263. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2264. var
  2265. hp : taicpu;
  2266. //hp1,hp2 : tai;
  2267. begin
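{ Thumb-2 specific pass 1 optimisations:
an stm with writeback on sp storing only r0-r7/lr becomes push,
an ldm with writeback on sp loading only r0-r7/pc becomes pop,
and an and with #255 or #65535 becomes uxtb resp. uxth }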
  2268. result:=false;
  2269. if inherited PeepHoleOptPass1Cpu(p) then
  2270. result:=true
  2271. else if (p.typ=ait_instruction) and
  2272. MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2273. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2274. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2275. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2276. begin
  2277. DebugMsg('Peephole Stm2Push done', p);
  2278. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2279. AsmL.InsertAfter(hp, p);
  2280. asml.Remove(p);
  2281. p:=hp;
  2282. result:=true;
  2283. end
  2284. {else if (p.typ=ait_instruction) and
  2285. MatchInstruction(p, A_STR, [C_None], [PF_None]) and
  2286. (taicpu(p).oper[1]^.ref^.addressmode=AM_PREINDEXED) and
  2287. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2288. (taicpu(p).oper[1]^.ref^.offset=-4) and
  2289. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,14]) then
  2290. begin
  2291. DebugMsg('Peephole Str2Push done', p);
  2292. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2293. asml.InsertAfter(hp, p);
  2294. asml.Remove(p);
  2295. p.Free;
  2296. p:=hp;
  2297. result:=true;
  2298. end}
  2299. else if (p.typ=ait_instruction) and
  2300. MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2301. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2302. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2303. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2304. begin
  2305. DebugMsg('Peephole Ldm2Pop done', p);
  2306. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2307. asml.InsertBefore(hp, p);
  2308. asml.Remove(p);
  2309. p.Free;
  2310. p:=hp;
  2311. result:=true;
  2312. end
  2313. {else if (p.typ=ait_instruction) and
  2314. MatchInstruction(p, A_LDR, [C_None], [PF_None]) and
  2315. (taicpu(p).oper[1]^.ref^.addressmode=AM_POSTINDEXED) and
  2316. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2317. (taicpu(p).oper[1]^.ref^.offset=4) and
  2318. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,15]) then
  2319. begin
  2320. DebugMsg('Peephole Ldr2Pop done', p);
  2321. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2322. asml.InsertBefore(hp, p);
  2323. asml.Remove(p);
  2324. p.Free;
  2325. p:=hp;
  2326. result:=true;
  2327. end}
  2328. else if (p.typ=ait_instruction) and
  2329. MatchInstruction(p, [A_AND], [], [PF_None]) and
  2330. (taicpu(p).ops = 2) and
  2331. (taicpu(p).oper[1]^.typ=top_const) and
  2332. ((taicpu(p).oper[1]^.val=255) or
  2333. (taicpu(p).oper[1]^.val=65535)) then
  2334. begin
  2335. DebugMsg('Peephole AndR2Uxt done', p);
  2336. if taicpu(p).oper[1]^.val=255 then
  2337. taicpu(p).opcode:=A_UXTB
  2338. else
  2339. taicpu(p).opcode:=A_UXTH;
  2340. taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
  2341. result := true;
  2342. end
  2343. else if (p.typ=ait_instruction) and
  2344. MatchInstruction(p, [A_AND], [], [PF_None]) and
  2345. (taicpu(p).ops = 3) and
  2346. (taicpu(p).oper[2]^.typ=top_const) and
  2347. ((taicpu(p).oper[2]^.val=255) or
  2348. (taicpu(p).oper[2]^.val=65535)) then
  2349. begin
  2350. DebugMsg('Peephole AndRR2Uxt done', p);
  2351. if taicpu(p).oper[2]^.val=255 then
  2352. taicpu(p).opcode:=A_UXTB
  2353. else
  2354. taicpu(p).opcode:=A_UXTH;
  2355. taicpu(p).ops:=2;
  2356. result := true;
  2357. end
  2358. {else if (p.typ=ait_instruction) and
  2359. MatchInstruction(p, [A_CMP], [C_None], [PF_None]) and
  2360. (taicpu(p).oper[1]^.typ=top_const) and
  2361. (taicpu(p).oper[1]^.val=0) and
  2362. GetNextInstruction(p,hp1) and
  2363. (taicpu(hp1).opcode=A_B) and
  2364. (taicpu(hp1).condition in [C_EQ,C_NE]) then
  2365. begin
  2366. if taicpu(hp1).condition = C_EQ then
  2367. hp2:=taicpu.op_reg_ref(A_CBZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^)
  2368. else
  2369. hp2:=taicpu.op_reg_ref(A_CBNZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^);
  2370. taicpu(hp2).is_jmp := true;
  2371. asml.InsertAfter(hp2, hp1);
  2372. asml.Remove(hp1);
  2373. hp1.Free;
  2374. asml.Remove(p);
  2375. p.Free;
  2376. p := hp2;
  2377. result := true;
  2378. end}
  2379. end;
  2380. procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
  2381. var
  2382. p,hp1,hp2: tai;
  2383. l : longint;
  2384. condition : tasmcond;
  2385. { UsedRegs, TmpUsedRegs: TRegSet; }
  2386. begin
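{ Thumb-2 version of Bcc2Cond: a short forward conditional branch is
replaced by an IT/ITT/ITTT/ITTTT block covering up to four
conditionally executed instructions }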
  2387. p := BlockStart;
  2388. { UsedRegs := []; }
  2389. while (p <> BlockEnd) Do
  2390. begin
  2391. { UpdateUsedRegs(UsedRegs, tai(p.next)); }
  2392. case p.Typ Of
  2393. Ait_Instruction:
  2394. begin
  2395. case taicpu(p).opcode Of
  2396. A_B:
  2397. if taicpu(p).condition<>C_None then
  2398. begin
  2399. { check for
  2400. Bxx xxx
  2401. <several instructions>
  2402. xxx:
  2403. }
  2404. l:=0;
  2405. GetNextInstruction(p, hp1);
  2406. while assigned(hp1) and
  2407. (l<=4) and
  2408. CanBeCond(hp1) and
  2409. { stop on labels }
  2410. not(hp1.typ=ait_label) do
  2411. begin
  2412. inc(l);
  2413. if MustBeLast(hp1) then
  2414. begin
  2415. //hp1:=nil;
  2416. GetNextInstruction(hp1,hp1);
  2417. break;
  2418. end
  2419. else
  2420. GetNextInstruction(hp1,hp1);
  2421. end;
  2422. if assigned(hp1) then
  2423. begin
  2424. if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2425. begin
  2426. if (l<=4) and (l>0) then
  2427. begin
  2428. condition:=inverse_cond(taicpu(p).condition);
  2429. hp2:=p;
  2430. GetNextInstruction(p,hp1);
  2431. p:=hp1;
  2432. repeat
  2433. if hp1.typ=ait_instruction then
  2434. taicpu(hp1).condition:=condition;
  2435. if MustBeLast(hp1) then
  2436. begin
  2437. GetNextInstruction(hp1,hp1);
  2438. break;
  2439. end
  2440. else
  2441. GetNextInstruction(hp1,hp1);
  2442. until not(assigned(hp1)) or
  2443. not(CanBeCond(hp1)) or
  2444. (hp1.typ=ait_label);
{ wait with the removal, otherwise GetNextInstruction could
ignore the label if the jump being moved away was its
only usage }
  2448. asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
  2449. DecrementPreceedingIT(asml, hp2);
  2450. case l of
  2451. 1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
  2452. 2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
  2453. 3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
  2454. 4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
  2455. end;
  2456. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2457. asml.remove(hp2);
  2458. hp2.free;
  2459. continue;
  2460. end;
  2461. end;
  2462. end;
  2463. end;
  2464. else
  2465. ;
  2466. end;
  2467. end;
  2468. else
  2469. ;
  2470. end;
  2471. p := tai(p.next)
  2472. end;
  2473. end;
  2474. function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
  2475. begin
  2476. result:=false;
  2477. if p.typ = ait_instruction then
  2478. begin
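{ rewrite instructions into flag-setting and/or two-operand forms
while the flags are not in use; these forms typically allow the
shorter 16 bit Thumb-2 encodings }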
  2479. if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
  2480. (taicpu(p).oper[1]^.typ=top_const) and
  2481. (taicpu(p).oper[1]^.val >= 0) and
  2482. (taicpu(p).oper[1]^.val < 256) and
  2483. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2484. begin
  2485. DebugMsg('Peephole Mov2Movs done', p);
  2486. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2487. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2488. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2489. taicpu(p).oppostfix:=PF_S;
  2490. result:=true;
  2491. end
  2492. else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
  2493. (taicpu(p).oper[1]^.typ=top_reg) and
  2494. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2495. begin
  2496. DebugMsg('Peephole Mvn2Mvns done', p);
  2497. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2498. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2499. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2500. taicpu(p).oppostfix:=PF_S;
  2501. result:=true;
  2502. end
  2503. else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
  2504. (taicpu(p).ops = 3) and
  2505. (taicpu(p).oper[2]^.typ=top_const) and
  2506. (taicpu(p).oper[2]^.val=0) and
  2507. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2508. begin
  2509. DebugMsg('Peephole Rsb2Rsbs done', p);
  2510. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2511. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2512. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2513. taicpu(p).oppostfix:=PF_S;
  2514. result:=true;
  2515. end
  2516. else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
  2517. (taicpu(p).ops = 3) and
  2518. MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
  2519. (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
  2520. (taicpu(p).oper[2]^.typ=top_const) and
  2521. (taicpu(p).oper[2]^.val >= 0) and
  2522. (taicpu(p).oper[2]^.val < 256) and
  2523. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2524. begin
  2525. DebugMsg('Peephole AddSub2*s done', p);
  2526. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2527. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2528. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2529. taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
  2530. taicpu(p).oppostfix:=PF_S;
  2531. taicpu(p).ops := 2;
  2532. result:=true;
  2533. end
  2534. else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
  2535. (taicpu(p).ops = 2) and
  2536. (taicpu(p).oper[1]^.typ=top_reg) and
  2537. (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
  2538. (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
  2539. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2540. begin
  2541. DebugMsg('Peephole AddSub2*s done', p);
  2542. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2543. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2544. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2545. taicpu(p).oppostfix:=PF_S;
  2546. result:=true;
  2547. end
  2548. else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
  2549. (taicpu(p).ops = 3) and
  2550. MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
  2551. (taicpu(p).oper[2]^.typ=top_reg) then
  2552. begin
  2553. DebugMsg('Peephole AddRRR2AddRR done', p);
  2554. taicpu(p).ops := 2;
  2555. taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
  2556. result:=true;
  2557. end
  2558. else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
  2559. (taicpu(p).ops = 3) and
  2560. MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
  2561. (taicpu(p).oper[2]^.typ=top_reg) and
  2562. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2563. begin
  2564. DebugMsg('Peephole opXXY2opsXY done', p);
  2565. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2566. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2567. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2568. taicpu(p).ops := 2;
  2569. taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
  2570. taicpu(p).oppostfix:=PF_S;
  2571. result:=true;
  2572. end
  2573. else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
  2574. (taicpu(p).ops = 3) and
  2575. MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
  2576. (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
  2577. begin
  2578. DebugMsg('Peephole opXXY2opXY done', p);
  2579. taicpu(p).ops := 2;
  2580. if taicpu(p).oper[2]^.typ=top_reg then
  2581. taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
  2582. else
  2583. taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
  2584. result:=true;
  2585. end
  2586. else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
  2587. (taicpu(p).ops = 3) and
  2588. MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
  2589. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2590. begin
  2591. DebugMsg('Peephole opXYX2opsXY done', p);
  2592. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2593. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2594. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2595. taicpu(p).oppostfix:=PF_S;
  2596. taicpu(p).ops := 2;
  2597. result:=true;
  2598. end
  2599. else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
  2600. (taicpu(p).ops=3) and
  2601. (taicpu(p).oper[2]^.typ=top_shifterop) and
  2602. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
  2603. //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
  2604. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2605. begin
  2606. DebugMsg('Peephole Mov2Shift done', p);
  2607. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2608. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2609. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2610. taicpu(p).oppostfix:=PF_S;
  2611. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  2612. SM_LSL: taicpu(p).opcode:=A_LSL;
  2613. SM_LSR: taicpu(p).opcode:=A_LSR;
  2614. SM_ASR: taicpu(p).opcode:=A_ASR;
  2615. SM_ROR: taicpu(p).opcode:=A_ROR;
  2616. else
  2617. internalerror(2019050912);
  2618. end;
  2619. if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
  2620. taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
  2621. else
  2622. taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
  2623. result:=true;
  2624. end
  2625. end;
  2626. end;
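{ register the ARM specific optimizer and pre-regalloc scheduler }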
  2627. begin
  2628. casmoptimizer:=TCpuAsmOptimizer;
  2629. cpreregallocscheduler:=TCpuPreRegallocScheduler;
  2630. End.