aoptcpu.pas 115 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860
  1. {
  2. Copyright (c) 1998-2002 by Jonas Maebe, member of the Free Pascal
  3. Development Team
  4. This unit implements the ARM optimizer object
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 2 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program; if not, write to the Free Software
  15. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  16. ****************************************************************************
  17. }
  18. Unit aoptcpu;
  19. {$i fpcdefs.inc}
  20. { $define DEBUG_PREREGSCHEDULER}
  21. { $define DEBUG_AOPTCPU}
  22. Interface
  23. uses
  24. cgbase, cgutils, cpubase, aasmtai,
  25. aasmcpu,
  26. aopt, aoptobj, aoptarm;
Type
  { ARM-specific peephole optimizer; the cross-platform ARM optimisations
    live in the TARMAsmOptimizer ancestor }
  TCpuAsmOptimizer = class(TARMAsmOptimizer)
    { Can't be done in some cases due to the limited range of jumps }
    function CanDoJumpOpts: Boolean; override;
    { uses the same constructor as TAopObj }
    function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
    procedure PeepHoleOptPass2;override;
    Function RegInInstruction(Reg: TRegister; p1: tai): Boolean;override;
    { removes a VMOV (movp) that merely copies the result of p into another
      register, rewriting p to target that register directly }
    function RemoveSuperfluousVMov(const p : tai; movp : tai; const optimizer : string) : boolean;
    { gets the next tai object after current that contains info relevant
      to the optimizer in p1 which used the given register or does a
      change in program flow.
      If there is none, it returns false and
      sets p1 to nil }
    Function GetNextInstructionUsingRef(Current: tai; Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
    { outputs a debug message into the assembler file }
    procedure DebugMsg(const s: string; p: tai);
    function InstructionLoadsFromReg(const reg : TRegister; const hp : tai) : boolean; override;
    function RegLoadedWithNewValue(reg : tregister; hp : tai) : boolean; override;
    { With these routines, there's optimisation code that's general for all ARM platforms }
    function OptPass1And(var p: tai): Boolean; override;
    function OptPass1LDR(var p: tai): Boolean; override;
    function OptPass1STR(var p: tai): Boolean; override;
  protected
    { folds "add/sub reg,reg,x; ...; ldr/str [reg]" into a pre-indexed load/store }
    function LookForPreindexedPattern(p: taicpu): boolean;
    { folds "ldr/str [reg]; ...; add/sub reg,reg,x" into a post-indexed load/store }
    function LookForPostindexedPattern(p: taicpu): boolean;
    { Individual optimisation routines }
    function OptPass1DataCheckMov(var p: tai): Boolean;
    function OptPass1ADDSUB(var p: tai): Boolean;
    function OptPass1CMP(var p: tai): Boolean;
    function OptPass1STM(var p: tai): Boolean;
    function OptPass1MOV(var p: tai): Boolean;
    function OptPass1MUL(var p: tai): Boolean;
    function OptPass1MVN(var p: tai): Boolean;
    function OptPass1VMov(var p: tai): Boolean;
    function OptPass1VOp(var p: tai): Boolean;
  End;

  { instruction scheduler that runs before register allocation }
  TCpuPreRegallocScheduler = class(TAsmScheduler)
    function SchedulerPass1Cpu(var p: tai): boolean;override;
    procedure SwapRegLive(p, hp1: taicpu);
  end;

  { Thumb-2 specialisation of the ARM peephole optimizer }
  TCpuThumb2AsmOptimizer = class(TCpuAsmOptimizer)
    { uses the same constructor as TAopObj }
    function PeepHoleOptPass1Cpu(var p: tai): boolean; override;
    procedure PeepHoleOptPass2;override;
    function PostPeepHoleOptsCpu(var p: tai): boolean; override;
  End;

function MustBeLast(p : tai) : boolean;
  75. Implementation
  76. uses
  77. cutils,verbose,globtype,globals,
  78. systems,
  79. cpuinfo,
  80. cgobj,procinfo,
  81. aasmbase,aasmdata;
  82. { Range check must be disabled explicitly as conversions between signed and unsigned
  83. 32-bit values are done without explicit typecasts }
  84. {$R-}
  85. function CanBeCond(p : tai) : boolean;
  86. begin
  87. result:=
  88. not(GenerateThumbCode) and
  89. (p.typ=ait_instruction) and
  90. (taicpu(p).condition=C_None) and
  91. ((taicpu(p).opcode<A_IT) or (taicpu(p).opcode>A_ITTTT)) and
  92. (taicpu(p).opcode<>A_CBZ) and
  93. (taicpu(p).opcode<>A_CBNZ) and
  94. (taicpu(p).opcode<>A_PLD) and
  95. (((taicpu(p).opcode<>A_BLX) and
  96. { BL may need to be converted into BLX by the linker -- could possibly
  97. be allowed in case it's to a local symbol of which we know that it
  98. uses the same instruction set as the current one }
  99. (taicpu(p).opcode<>A_BL)) or
  100. (taicpu(p).oper[0]^.typ=top_reg));
  101. end;
  102. function RemoveRedundantMove(const cmpp: tai; movp: tai; asml: TAsmList):Boolean;
  103. begin
  104. Result:=false;
  105. if (taicpu(movp).condition = C_EQ) and
  106. (taicpu(cmpp).oper[0]^.reg = taicpu(movp).oper[0]^.reg) and
  107. (taicpu(cmpp).oper[1]^.val = taicpu(movp).oper[1]^.val) then
  108. begin
  109. asml.insertafter(tai_comment.Create(strpnew('Peephole Optimization: CmpMovMov - Removed redundant moveq')), movp);
  110. asml.remove(movp);
  111. movp.free;
  112. Result:=true;
  113. end;
  114. end;
  115. function AlignedToQWord(const ref : treference) : boolean;
  116. begin
  117. { (safe) heuristics to ensure alignment }
  118. result:=(target_info.abi in [abi_eabi,abi_armeb,abi_eabihf]) and
  119. (((ref.offset>=0) and
  120. ((ref.offset mod 8)=0) and
  121. ((ref.base=NR_R13) or
  122. (ref.index=NR_R13))
  123. ) or
  124. ((ref.offset<=0) and
  125. { when using NR_R11, it has always a value of <qword align>+4 }
  126. ((abs(ref.offset+4) mod 8)=0) and
  127. (current_procinfo.framepointer=NR_R11) and
  128. ((ref.base=NR_R11) or
  129. (ref.index=NR_R11))
  130. )
  131. );
  132. end;
  133. function isValidConstLoadStoreOffset(const aoffset: longint; const pf: TOpPostfix) : boolean;
  134. begin
  135. if GenerateThumb2Code then
  136. result := (aoffset<4096) and (aoffset>-256)
  137. else
  138. result := ((pf in [PF_None,PF_B]) and
  139. (abs(aoffset)<4096)) or
  140. (abs(aoffset)<256);
  141. end;
{ Returns true if instruction hp reads register reg: as a source operand,
  as a shifter register, as base/index of a memory reference, or as a
  member of a stored register set. }
function TCpuAsmOptimizer.InstructionLoadsFromReg(const reg: TRegister; const hp: tai): boolean;
var
  p: taicpu;
  i: longint;
begin
  instructionLoadsFromReg := false;
  if not (assigned(hp) and (hp.typ = ait_instruction)) then
    exit;
  p:=taicpu(hp);
  i:=1;
  {For these instructions we have to start on oper[0]}
  if (p.opcode in [A_STR, A_LDM, A_STM, A_PLD,
                   A_CMP, A_CMN, A_TST, A_TEQ,
                   A_B, A_BL, A_BX, A_BLX,
                   A_SMLAL, A_UMLAL, A_VSTM, A_VLDM]) then
    i:=0;
  while(i<p.ops) do
    begin
      case p.oper[I]^.typ of
        top_reg:
          instructionLoadsFromReg := (p.oper[I]^.reg = reg) or
            { STRD also stores the register following oper[0] }
            ((i=0) and (p.opcode=A_STR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg)));
        top_regset:
          instructionLoadsFromReg := (getsupreg(reg) in p.oper[I]^.regset^);
        top_shifterop:
          instructionLoadsFromReg := p.oper[I]^.shifterop^.rs = reg;
        top_ref:
          { base and index registers of a memory reference are read }
          instructionLoadsFromReg :=
            (p.oper[I]^.ref^.base = reg) or
            (p.oper[I]^.ref^.index = reg);
        else
          ;
      end;
      { for LDM/VLDM only oper[0] (the base) is read; the register set
        operand is written, so stop after checking oper[0] }
      if (i=0) and (p.opcode in [A_LDM,A_VLDM]) then
        exit;
      if instructionLoadsFromReg then
        exit; {Bailout if we found something}
      Inc(I);
    end;
end;
{ Returns true if instruction hp overwrites reg with a completely new value,
  i.e. the previous contents of reg are dead after hp.  Post-/pre-indexed
  base register updates deliberately do not count (the value is only
  modified, not replaced). }
function TCpuAsmOptimizer.RegLoadedWithNewValue(reg: tregister; hp: tai): boolean;
var
  p: taicpu;
begin
  Result := false;
  if not ((assigned(hp)) and (hp.typ = ait_instruction)) then
    exit;
  p := taicpu(hp);
  case p.opcode of
    { These operands do not write into a register at all }
    A_CMP, A_CMN, A_TST, A_TEQ, A_B, A_BL, A_BX, A_BLX, A_SWI, A_MSR, A_PLD,
    A_VCMP:
      exit;
    {Take care of post/preincremented store and loads, they will change their base register}
    A_STR, A_LDR:
      begin
        Result := false;
        { actually, this does not apply here because post-/preindexed does not mean that a register
          is loaded with a new value, it is only modified
          (taicpu(p).oper[1]^.typ=top_ref) and
          (taicpu(p).oper[1]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
          (taicpu(p).oper[1]^.ref^.base = reg);
        }
        { STR does not load into it's first register }
        if p.opcode = A_STR then
          exit;
      end;
    A_VSTR:
      begin
        { VSTR only stores, it never writes a register }
        Result := false;
        exit;
      end;
    { These four are writing into the first 2 register, UMLAL and SMLAL will also read from them }
    A_UMLAL, A_UMULL, A_SMLAL, A_SMULL:
      Result :=
        (p.oper[1]^.typ = top_reg) and
        (p.oper[1]^.reg = reg);
    {Loads to oper2 from coprocessor}
    {
    MCR/MRC is currently not supported in FPC
    A_MRC:
      Result :=
        (p.oper[2]^.typ = top_reg) and
        (p.oper[2]^.reg = reg);
    }
    {Loads to all register in the registerset}
    A_LDM, A_VLDM:
      Result := (getsupreg(reg) in p.oper[1]^.regset^);
    A_POP:
      { POP writes every register in the set and always updates SP }
      Result := (getsupreg(reg) in p.oper[0]^.regset^) or
                (reg=NR_STACK_POINTER_REG);
    else
      ;
  end;
  if Result then
    exit;
  { fall through: generic check of the destination operand oper[0] }
  case p.oper[0]^.typ of
    {This is the case}
    top_reg:
      Result := (p.oper[0]^.reg = reg) or
        { LDRD also loads the register following oper[0] }
        (p.opcode=A_LDR) and (p.oppostfix=PF_D) and (getsupreg(p.oper[0]^.reg)+1=getsupreg(reg));
    {LDM/STM might write a new value to their index register}
    top_ref:
      Result :=
        (taicpu(p).oper[0]^.ref^.addressmode in [AM_PREINDEXED,AM_POSTINDEXED]) and
        (taicpu(p).oper[0]^.ref^.base = reg);
    else
      ;
  end;
end;
{ Searches forward from Current for the next LDR or STR whose memory operand
  equals ref.  Returns true with Next pointing at that instruction.  The
  search is abandoned (result false) on non-instructions, when -O3 is not
  enabled, on calls/jumps, on any store when StopOnStore is set, or when PC
  is modified. }
function TCpuAsmOptimizer.GetNextInstructionUsingRef(Current: tai;
  Out Next: tai; const ref: TReference; StopOnStore: Boolean = true): Boolean;
begin
  Next:=Current;
  repeat
    Result:=GetNextInstruction(Next,Next);
    if Result and
       (Next.typ=ait_instruction) and
       (taicpu(Next).opcode in [A_LDR, A_STR]) and
       (
        ((taicpu(Next).ops = 2) and
         (taicpu(Next).oper[1]^.typ = top_ref) and
         RefsEqual(taicpu(Next).oper[1]^.ref^,ref)) or
        ((taicpu(Next).ops = 3) and { LDRD/STRD }
         (taicpu(Next).oper[2]^.typ = top_ref) and
         RefsEqual(taicpu(Next).oper[2]^.ref^,ref))
       ) then
      {We've found an instruction LDR or STR with the same reference}
      exit;
  until not(Result) or
        (Next.typ<>ait_instruction) or
        { searching across several instructions is only allowed at -O3 }
        not(cs_opt_level3 in current_settings.optimizerswitches) or
        is_calljmp(taicpu(Next).opcode) or
        (StopOnStore and (taicpu(Next).opcode in [A_STR, A_STM])) or
        RegModifiedByInstruction(NR_PC,Next);
  Result:=false;
end;
{$ifdef DEBUG_AOPTCPU}
  { outputs the debug message s as an assembler comment before p }
  procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);
    begin
      asml.insertbefore(tai_comment.Create(strpnew(s)), p);
    end;
{$else DEBUG_AOPTCPU}
  { no-op variant used when debug output is disabled }
  procedure TCpuAsmOptimizer.DebugMsg(const s: string;p : tai);inline;
    begin
    end;
{$endif DEBUG_AOPTCPU}
  290. function TCpuAsmOptimizer.CanDoJumpOpts: Boolean;
  291. begin
  292. { Cannot perform these jump optimisations if the ARM architecture has 16-bit thumb codes }
  293. Result := not (
  294. (current_settings.instructionset = is_thumb) and not (CPUARM_HAS_THUMB2 in cpu_capabilities[current_settings.cputype])
  295. );
  296. end;
{ If movp is a VMOV that merely copies the result of p into another register
  and the source register dies immediately afterwards, rewrite p to target
  the VMOV's destination directly and delete the VMOV.  The register
  allocation markers are moved along.  optimizer names the calling
  optimisation for the debug message.  Returns true when the move was
  removed. }
function TCpuAsmOptimizer.RemoveSuperfluousVMov(const p: tai; movp: tai; const optimizer: string):boolean;
var
  alloc,
  dealloc : tai_regalloc;
  hp1 : tai;
begin
  Result:=false;
  if ((MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
       ((getregtype(taicpu(movp).oper[0]^.reg)=R_MMREGISTER) or (taicpu(p).opcode=A_VLDR))
      ) or
      { conversions producing a double may be followed by a VMOV.F64 }
      (((taicpu(p).oppostfix in [PF_F64F32,PF_F64S16,PF_F64S32,PF_F64U16,PF_F64U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFD)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F64])) or
      { conversions producing a single may be followed by a VMOV.F32 }
      (((taicpu(p).oppostfix in [PF_F32F64,PF_F32S16,PF_F32S32,PF_F32U16,PF_F32U32]) or (getsubreg(taicpu(p).oper[0]^.reg)=R_SUBFS)) and MatchInstruction(movp, A_VMOV, [taicpu(p).condition], [PF_F32]))
     ) and
     (taicpu(movp).ops=2) and
     MatchOperand(taicpu(movp).oper[1]^, taicpu(p).oper[0]^.reg) and
     { the destination register of the mov might not be used beween p and movp }
     not(RegUsedBetween(taicpu(movp).oper[0]^.reg,p,movp)) and
     { Take care to only do this for instructions which REALLY load to the first register.
       Otherwise
         vstr reg0, [reg1]
         vmov reg2, reg0
       will be optimized to
         vstr reg2, [reg1]
     }
     regLoadedWithNewValue(taicpu(p).oper[0]^.reg, p) then
    begin
      { the optimisation is only safe if the source register dies here }
      dealloc:=FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(movp.Next));
      if assigned(dealloc) then
        begin
          DebugMsg('Peephole Optimization: '+optimizer+' removed superfluous vmov', movp);
          result:=true;
          { taicpu(p).oper[0]^.reg is not used anymore, try to find its allocation
            and remove it if possible }
          asml.Remove(dealloc);
          alloc:=FindRegAllocBackward(taicpu(p).oper[0]^.reg,tai(p.previous));
          if assigned(alloc) then
            begin
              asml.Remove(alloc);
              alloc.free;
              dealloc.free;
            end
          else
            asml.InsertAfter(dealloc,p);
          { try to move the allocation of the target register }
          GetLastInstruction(movp,hp1);
          alloc:=FindRegAlloc(taicpu(movp).oper[0]^.reg,tai(hp1.Next));
          if assigned(alloc) then
            begin
              asml.Remove(alloc);
              asml.InsertBefore(alloc,p);
              { adjust used regs }
              IncludeRegInUsedRegs(taicpu(movp).oper[0]^.reg,UsedRegs);
            end;
          { change
              vldr reg0,[reg1]
              vmov reg2,reg0
            into
              ldr reg2,[reg1]
            if reg2 is an int register
          }
          if (taicpu(p).opcode=A_VLDR) and (getregtype(taicpu(movp).oper[0]^.reg)=R_INTREGISTER) then
            taicpu(p).opcode:=A_LDR;
          { finally get rid of the mov }
          taicpu(p).loadreg(0,taicpu(movp).oper[0]^.reg);
          asml.remove(movp);
          movp.free;
        end;
    end;
end;
{
  optimize
    add/sub reg1,reg1,regY/const
    ...
    ldr/str regX,[reg1]
  into
    ldr/str regX,[reg1, regY/const]!
}
function TCpuAsmOptimizer.LookForPreindexedPattern(p: taicpu): boolean;
var
  hp1: tai;
begin
  if GenerateARMCode and
     (p.ops=3) and
     { destination must equal the first source: reg1 := reg1 +/- x }
     MatchOperand(p.oper[0]^, p.oper[1]^.reg) and
     GetNextInstructionUsingReg(p, hp1, p.oper[0]^.reg) and
     (not RegModifiedBetween(p.oper[0]^.reg, p, hp1)) and
     MatchInstruction(hp1, [A_LDR,A_STR], [C_None], [PF_None,PF_B,PF_H,PF_SH,PF_SB]) and
     (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
     (taicpu(hp1).oper[1]^.ref^.base=p.oper[0]^.reg) and
     { the transferred register must differ from the base register }
     (taicpu(hp1).oper[0]^.reg<>p.oper[0]^.reg) and
     (taicpu(hp1).oper[1]^.ref^.offset=0) and
     (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
     (((p.oper[2]^.typ=top_reg) and
       (not RegModifiedBetween(p.oper[2]^.reg, p, hp1))) or
      { the constant must fit the immediate field: 12 bits for the plain and
        byte forms, 8 bits for the halfword/signed forms }
      ((p.oper[2]^.typ=top_const) and
       ((abs(p.oper[2]^.val) < 256) or
        ((abs(p.oper[2]^.val) < 4096) and
         (taicpu(hp1).oppostfix in [PF_None,PF_B]))))) then
    begin
      taicpu(hp1).oper[1]^.ref^.addressmode:=AM_PREINDEXED;
      if p.oper[2]^.typ=top_reg then
        begin
          taicpu(hp1).oper[1]^.ref^.index:=p.oper[2]^.reg;
          if p.opcode=A_ADD then
            taicpu(hp1).oper[1]^.ref^.signindex:=1
          else
            taicpu(hp1).oper[1]^.ref^.signindex:=-1;
        end
      else
        begin
          if p.opcode=A_ADD then
            taicpu(hp1).oper[1]^.ref^.offset:=p.oper[2]^.val
          else
            taicpu(hp1).oper[1]^.ref^.offset:=-p.oper[2]^.val;
        end;
      result:=true;
    end
  else
    result:=false;
end;
{
  optimize
    ldr/str regX,[reg1]
    ...
    add/sub reg1,reg1,regY/const
  into
    ldr/str regX,[reg1], regY/const
}
function TCpuAsmOptimizer.LookForPostindexedPattern(p: taicpu) : boolean;
var
  hp1 : tai;
begin
  Result:=false;
  if (p.oper[1]^.typ = top_ref) and
     (p.oper[1]^.ref^.addressmode=AM_OFFSET) and
     (p.oper[1]^.ref^.index=NR_NO) and
     (p.oper[1]^.ref^.offset=0) and
     GetNextInstructionUsingReg(p, hp1, p.oper[1]^.ref^.base) and
     { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
     MatchInstruction(hp1, [A_ADD, A_SUB], [C_None], [PF_None]) and
     (taicpu(hp1).oper[0]^.reg=p.oper[1]^.ref^.base) and
     (taicpu(hp1).oper[1]^.reg=p.oper[1]^.ref^.base) and
     (
      (taicpu(hp1).oper[2]^.typ=top_reg) or
      { valid offset? }
      ((taicpu(hp1).oper[2]^.typ=top_const) and
       ((abs(taicpu(hp1).oper[2]^.val)<256) or
        ((abs(taicpu(hp1).oper[2]^.val)<4096) and (p.oppostfix in [PF_None,PF_B]))
       )
      )
     ) and
     { don't apply the optimization if the base register is loaded }
     (p.oper[0]^.reg<>p.oper[1]^.ref^.base) and
     not(RegModifiedBetween(taicpu(hp1).oper[0]^.reg,p,hp1)) and
     { don't apply the optimization if the (new) index register is loaded }
     (p.oper[0]^.reg<>taicpu(hp1).oper[2]^.reg) and
     not(RegModifiedBetween(taicpu(hp1).oper[2]^.reg,p,hp1)) and
     GenerateARMCode then
    begin
      DebugMsg('Peephole Optimization: Str/LdrAdd/Sub2Str/Ldr Postindex done', p);
      p.oper[1]^.ref^.addressmode:=AM_POSTINDEXED;
      if taicpu(hp1).oper[2]^.typ=top_const then
        begin
          if taicpu(hp1).opcode=A_ADD then
            p.oper[1]^.ref^.offset:=taicpu(hp1).oper[2]^.val
          else
            p.oper[1]^.ref^.offset:=-taicpu(hp1).oper[2]^.val;
        end
      else
        begin
          p.oper[1]^.ref^.index:=taicpu(hp1).oper[2]^.reg;
          if taicpu(hp1).opcode=A_ADD then
            p.oper[1]^.ref^.signindex:=1
          else
            p.oper[1]^.ref^.signindex:=-1;
        end;
      { the add/sub is now folded into the addressing mode, drop it }
      asml.Remove(hp1);
      hp1.Free;
      Result:=true;
    end;
end;
{ Pass-1 optimisations for ADD/SUB: folds the constant into the offsets of
  subsequent loads/stores that address via the result register, and turns an
  add/sub followed by ldr/str into pre-indexed addressing.  Returns true
  when p was removed. }
function TCpuAsmOptimizer.OptPass1ADDSUB(var p: tai): Boolean;
var
  hp1,hp2: tai;
begin
  Result := OptPass1DataCheckMov(p);
  {
    change
    add/sub reg2,reg1,const1
    str/ldr reg3,[reg2,const2]
    dealloc reg2
    to
    str/ldr reg3,[reg1,const2+/-const1]
  }
  if (not GenerateThumbCode) and
     (taicpu(p).ops>2) and
     (taicpu(p).oper[1]^.typ = top_reg) and
     (taicpu(p).oper[2]^.typ = top_const) then
    begin
      hp1:=p;
      while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) and
            { we cannot check NR_DEFAULTFLAGS for modification yet so don't allow a condition }
            MatchInstruction(hp1, [A_LDR, A_STR], [C_None], []) and
            (taicpu(hp1).oper[1]^.typ = top_ref) and
            (taicpu(hp1).oper[1]^.ref^.base=taicpu(p).oper[0]^.reg) and
            { don't optimize if the register is stored/overwritten }
            (taicpu(hp1).oper[0]^.reg<>taicpu(p).oper[1]^.reg) and
            (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
            (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
            { new offset must be valid: either in the range of 8 or 12 bit, depend on the
              ldr postfix }
            (((taicpu(p).opcode=A_ADD) and
              isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset+taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
             ) or
             ((taicpu(p).opcode=A_SUB) and
              isValidConstLoadStoreOffset(taicpu(hp1).oper[1]^.ref^.offset-taicpu(p).oper[2]^.val, taicpu(hp1).oppostfix)
             )
            ) do
        begin
          { neither reg1 nor reg2 might be changed inbetween }
          if RegModifiedBetween(taicpu(p).oper[0]^.reg,p,hp1) or
             RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1) then
            break;
          { reg2 must be either overwritten by the ldr or it is deallocated afterwards }
          if ((taicpu(hp1).opcode=A_LDR) and (taicpu(p).oper[0]^.reg=taicpu(hp1).oper[0]^.reg)) or
             assigned(FindRegDeAlloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) then
            begin
              { remember last instruction }
              hp2:=hp1;
              DebugMsg('Peephole Optimization: Add/SubLdr2Ldr done', p);
              hp1:=p;
              { fix all ldr/str }
              while GetNextInstructionUsingReg(hp1, hp1, taicpu(p).oper[0]^.reg) do
                begin
                  { rebase onto reg1 and fold the add/sub constant into the offset }
                  taicpu(hp1).oper[1]^.ref^.base:=taicpu(p).oper[1]^.reg;
                  if taicpu(p).opcode=A_ADD then
                    inc(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val)
                  else
                    dec(taicpu(hp1).oper[1]^.ref^.offset,taicpu(p).oper[2]^.val);
                  if hp1=hp2 then
                    break;
                end;
              RemoveCurrentP(p);
              result:=true;
              Exit;
            end;
        end;
    end;
  { add/sub followed by a matching load/store can become pre-indexed }
  if (taicpu(p).condition = C_None) and
     (taicpu(p).oppostfix = PF_None) and
     LookForPreindexedPattern(taicpu(p)) then
    begin
      DebugMsg('Peephole Optimization: Add/Sub to Preindexed done', p);
      RemoveCurrentP(p);
      Result:=true;
      Exit;
    end;
end;
{ Tries to fold a multiply and a following add/sub of its result into a
  single multiply-accumulate instruction:

      mul reg0,z,w
      add/sub x,y,reg0     (reg0 dead after the add/sub)
  -->
      mla/mls x,z,w,y

  The ADD form (MLA) is only generated on ARMv4 and later, the SUB form
  (MLS) only on ARMv6T2/ARMv7 variants, matching the cputype checks below.
  Returns True if the instruction stream was changed. }
function TCpuAsmOptimizer.OptPass1MUL(var p: tai): Boolean;
  var
    hp1: tai;
    oldreg: tregister;
  begin
    { first try to fold a trailing "mov reg2,reg1" into the mul itself }
    Result := OptPass1DataCheckMov(p);
    {
      Turn
      mul reg0, z,w
      sub/add x, y, reg0
      dealloc reg0
      into
      mls/mla x,z,w,y
    }
    if (taicpu(p).condition = C_None) and
       (taicpu(p).oppostfix = PF_None) and
       (taicpu(p).ops=3) and
       (taicpu(p).oper[0]^.typ = top_reg) and
       (taicpu(p).oper[1]^.typ = top_reg) and
       (taicpu(p).oper[2]^.typ = top_reg) and
       GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
       MatchInstruction(hp1,[A_ADD,A_SUB],[C_None],[PF_None]) and
       { the mul factors must still hold the same values when the add/sub
         executes, otherwise the fused instruction would compute garbage }
       (not RegModifiedBetween(taicpu(p).oper[1]^.reg, p, hp1)) and
       (not RegModifiedBetween(taicpu(p).oper[2]^.reg, p, hp1)) and
       (((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype>=cpu_armv4)) or
        ((taicpu(hp1).opcode=A_SUB) and (current_settings.cputype in [cpu_armv6t2,cpu_armv7,cpu_armv7a,cpu_armv7r,cpu_armv7m,cpu_armv7em]))) and
       // CPUs before ARMv6 don't recommend having the same Rd and Rm for MLA.
       // TODO: A workaround would be to swap Rm and Rs
       (not ((taicpu(hp1).opcode=A_ADD) and (current_settings.cputype<=cpu_armv6) and MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[1]^))) and
       { the mul result must be one of the add/sub source operands; for the
         3-operand form it may be either source (ADD only for operand 1,
         since SUB is not commutative), for the 2-operand form operand 1 }
       (((taicpu(hp1).ops=3) and
         (taicpu(hp1).oper[2]^.typ=top_reg) and
         ((MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) and
           (not RegModifiedBetween(taicpu(hp1).oper[1]^.reg, p, hp1))) or
          ((MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
           (taicpu(hp1).opcode=A_ADD) and
           (not RegModifiedBetween(taicpu(hp1).oper[2]^.reg, p, hp1)))))) or
        ((taicpu(hp1).ops=2) and
         (taicpu(hp1).oper[1]^.typ=top_reg) and
         MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
       (RegEndOfLife(taicpu(p).oper[0]^.reg,taicpu(hp1))) then
      begin
        if taicpu(hp1).opcode=A_ADD then
          begin
            taicpu(hp1).opcode:=A_MLA;
            { remember the addend: whichever source operand is NOT the mul
              result (it will become operand 3 of the MLA) }
            if taicpu(hp1).ops=3 then
              begin
                if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^) then
                  oldreg:=taicpu(hp1).oper[2]^.reg
                else
                  oldreg:=taicpu(hp1).oper[1]^.reg;
              end
            else
              oldreg:=taicpu(hp1).oper[0]^.reg;
            { rebuild as: mla x, z, w, y }
            taicpu(hp1).loadreg(1,taicpu(p).oper[1]^.reg);
            taicpu(hp1).loadreg(2,taicpu(p).oper[2]^.reg);
            taicpu(hp1).loadreg(3,oldreg);
            DebugMsg('Peephole Optimization: MulAdd2MLA done', p);
          end
        else
          begin
            { rebuild as: mls x, z, w, y — note operand 3 (the minuend) is
              taken from the sub before operands 1/2 are overwritten }
            taicpu(hp1).opcode:=A_MLS;
            taicpu(hp1).loadreg(3,taicpu(hp1).oper[1]^.reg);
            if taicpu(hp1).ops=2 then
              taicpu(hp1).loadreg(1,taicpu(hp1).oper[0]^.reg)
            else
              taicpu(hp1).loadreg(1,taicpu(p).oper[2]^.reg);
            taicpu(hp1).loadreg(2,taicpu(p).oper[1]^.reg);
            DebugMsg('Peephole Optimization: MulSub2MLS done', p);
            { the source registers now live from the (removed) mul to the
              mls; extend their allocation accordingly }
            AllocRegBetween(taicpu(hp1).oper[1]^.reg,p,hp1,UsedRegs);
            AllocRegBetween(taicpu(hp1).oper[2]^.reg,p,hp1,UsedRegs);
            AllocRegBetween(taicpu(hp1).oper[3]^.reg,p,hp1,UsedRegs);
          end;
        taicpu(hp1).ops:=4;
        RemoveCurrentP(p);
        Result := True;
        Exit;
      end
  end;
  633. function TCpuAsmOptimizer.OptPass1And(var p: tai): Boolean;
  634. begin
  635. Result := OptPass1DataCheckMov(p);
  636. Result := inherited OptPass1And(p) or Result;
  637. end;
  638. function TCpuAsmOptimizer.OptPass1DataCheckMov(var p: tai): Boolean;
  639. var
  640. hp1: tai;
  641. begin
  642. {
  643. change
  644. op reg1, ...
  645. mov reg2, reg1
  646. to
  647. op reg2, ...
  648. }
  649. Result := (taicpu(p).ops >= 3) and
  650. GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  651. RemoveSuperfluousMove(p, hp1, 'DataMov2Data');
  652. end;
{ Pass-1 handler for CMP against a constant.  Two rewrites are attempted:
  1) drop a "moveq reg,const1" that follows "cmp reg,const1" (the move is a
     no-op when the comparison succeeded);
  2) turn "<op> reg,x,y; cmp reg,#0" into "<op>s reg,x,y", letting the data
     instruction set the flags itself so the cmp can be removed.
  Returns True if the instruction stream was changed. }
function TCpuAsmOptimizer.OptPass1CMP(var p: tai): Boolean;
  var
    hp1, hp2, hp_last: tai;
    MovRem1, MovRem2: Boolean;
  begin
    Result := False;
    { These optimizations can be applied only to the currently enabled operations because
      the other operations do not update all flags and FPC does not track flag usage }
    if (taicpu(p).condition = C_None) and
      (taicpu(p).oper[1]^.typ = top_const) and
      GetNextInstruction(p, hp1) then
      begin
        {
          change
          cmp reg,const1
          moveq reg,const1
          movne reg,const2
          to
          cmp reg,const1
          movne reg,const2
        }
        if MatchInstruction(hp1, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
          (taicpu(hp1).oper[1]^.typ = top_const) and
          GetNextInstruction(hp1, hp2) and
          MatchInstruction(hp2, A_MOV, [C_EQ, C_NE], [PF_NONE]) and
          (taicpu(hp2).oper[1]^.typ = top_const) then
          begin
            { each conditional mov may independently turn out redundant }
            MovRem1 := RemoveRedundantMove(p, hp1, asml);
            MovRem2 := RemoveRedundantMove(p, hp2, asml);
            Result:= MovRem1 or MovRem2;
            { Make sure that hp1 is still the next instruction after p }
            if MovRem1 then
              if MovRem2 then
                begin
                  { both movs gone: re-fetch the instruction after the cmp }
                  if not GetNextInstruction(p, hp1) then
                    Exit;
                end
              else
                { only the first mov was removed; hp2 is now next after p }
                hp1 := hp2;
          end;
        {
          change
          <op> reg,x,y
          cmp reg,#0
          into
          <op>s reg,x,y
        }
        if (taicpu(p).oppostfix = PF_None) and
          (taicpu(p).oper[1]^.val = 0) and
          { be careful here, following instructions could use other flags
            however after a jump fpc never depends on the value of flags }
          { All above instructions set Z and N according to the following
            Z := result = 0;
            N := result[31];
            EQ = Z=1; NE = Z=0;
            MI = N=1; PL = N=0; }
          (MatchInstruction(hp1, A_B, [C_EQ,C_NE,C_MI,C_PL], []) or
          { mov is also possible, but only if there is no shifter operand, it could be an rxx,
            we are too lazy to check if it is rxx or something else }
          (MatchInstruction(hp1, A_MOV, [C_EQ,C_NE,C_MI,C_PL], []) and (taicpu(hp1).ops=2))) and
          GetLastInstruction(p, hp_last) and
          MatchInstruction(hp_last, [A_ADC,A_ADD,A_BIC,A_SUB,A_MUL,A_MVN,A_MOV,A_ORR,
            A_EOR,A_AND,A_RSB,A_RSC,A_SBC,A_MLA], [C_None], [PF_None]) and
          (
            { mlas is only allowed in arm mode }
            (taicpu(hp_last).opcode<>A_MLA) or
            (current_settings.instructionset<>is_thumb)
          ) and
          (taicpu(hp_last).oper[0]^.reg = taicpu(p).oper[0]^.reg) and
          { the flags must not be live past the conditional user of hp1 }
          assigned(FindRegDealloc(NR_DEFAULTFLAGS,tai(hp1.Next))) then
          begin
            DebugMsg('Peephole Optimization: OpCmp2OpS done', hp_last);
            taicpu(hp_last).oppostfix:=PF_S;
            { move flag allocation if possible }
            hp1:=FindRegAlloc(NR_DEFAULTFLAGS,tai(hp_last.Next));
            if assigned(hp1) then
              begin
                asml.Remove(hp1);
                asml.insertbefore(hp1, hp_last);
              end;
            RemoveCurrentP(p);
            Result:=true;
          end;
      end;
  end;
{ Pass-1 handler for LDR.  After the inherited pass, tries in order:
  1) ldr reg1,ref; ldr reg2,ref  ->  ldr reg1,ref  (+ mov reg2,reg1 if the
     destinations differ), or fuse two adjacent loads into an ldrd;
  2) ldrb dst1,[ref]; and dst2,dst1,#255  ->  ldrb dst2,[ref]  (the load
     already zero-extends the byte);
  3) fold a following add/sub of the base register into a post-indexed
     addressing mode;
  4) remove a superfluous mov after the ldr.
  Returns True if the instruction stream was changed. }
function TCpuAsmOptimizer.OptPass1LDR(var p: tai): Boolean;
  var
    hp1: tai;
  begin
    Result := inherited OptPass1LDR(p);
    if Result then
      Exit;
    { change
      ldr reg1,ref
      ldr reg2,ref
      into ...
    }
    if (taicpu(p).oper[1]^.typ = top_ref) and
       (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
       GetNextInstruction(p,hp1) and
       { ldrd is not allowed here }
       MatchInstruction(hp1, A_LDR, [taicpu(p).condition, C_None], [taicpu(p).oppostfix,PF_None]-[PF_D]) then
      begin
        {
          ...
          ldr reg1,ref
          mov reg2,reg1
        }
        if (taicpu(p).oppostfix=taicpu(hp1).oppostfix) and
           RefsEqual(taicpu(p).oper[1]^.ref^,taicpu(hp1).oper[1]^.ref^) and
           { reg1 must not be part of the second reference, else the second
             load would read from a different address }
           (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.index) and
           (taicpu(p).oper[0]^.reg<>taicpu(hp1).oper[1]^.ref^.base) and
           (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) then
          begin
            if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
              begin
                { identical load: second one is completely redundant }
                DebugMsg('Peephole Optimization: LdrLdr2Ldr done', hp1);
                asml.remove(hp1);
                hp1.free;
              end
            else
              begin
                { different destination: replace the reload by a reg move }
                DebugMsg('Peephole Optimization: LdrLdr2LdrMov done', hp1);
                taicpu(hp1).opcode:=A_MOV;
                taicpu(hp1).oppostfix:=PF_None;
                taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
              end;
            result := true;
          end
        {
          ...
          ldrd reg1,reg1+1,ref
        }
        else if (GenerateARMCode or GenerateThumb2Code) and
           (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
           { ldrd does not allow any postfixes ... }
           (taicpu(p).oppostfix=PF_None) and
           { ldrd needs an even-numbered first register and the consecutive
             odd register as second destination }
           not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
           (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
           { ldr ensures that either base or index contain no register, else ldr wouldn't
             use an offset either
           }
           (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
           (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
           (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) and
           { ldrd only supports an 8 bit immediate offset }
           (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
           AlignedToQWord(taicpu(p).oper[1]^.ref^) then
          begin
            DebugMsg('Peephole Optimization: LdrLdr2Ldrd done', p);
            { turn p into "ldrd reg1,reg2,ref" and drop the second load }
            taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
            taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
            taicpu(p).ops:=3;
            taicpu(p).oppostfix:=PF_D;
            asml.remove(hp1);
            hp1.free;
            result:=true;
          end;
      end;
    {
      Change
        ldrb dst1, [REF]
        and  dst2, dst1, #255
      into
        ldrb dst2, [ref]
    }
    if not(GenerateThumbCode) and
       (taicpu(p).oppostfix=PF_B) and
       GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
       MatchInstruction(hp1, A_AND, [taicpu(p).condition], [PF_NONE]) and
       (taicpu(hp1).oper[1]^.reg = taicpu(p).oper[0]^.reg) and
       (taicpu(hp1).oper[2]^.typ = top_const) and
       (taicpu(hp1).oper[2]^.val = $FF) and
       not(RegUsedBetween(taicpu(hp1).oper[0]^.reg, p, hp1)) and
       RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
      begin
        DebugMsg('Peephole Optimization: LdrbAnd2Ldrb done', p);
        taicpu(p).oper[0]^.reg := taicpu(hp1).oper[0]^.reg;
        asml.remove(hp1);
        hp1.free;
        result:=true;
      end;
    Result:=LookForPostindexedPattern(taicpu(p)) or Result;
    { Remove superfluous mov after ldr
      changes
      ldr reg1, ref
      mov reg2, reg1
      to
      ldr reg2, ref
      conditions are:
        * no ldrd usage
        * reg1 must be released after mov
        * mov can not contain shifterops
        * ldr+mov have the same conditions
        * mov does not set flags
    }
    if (taicpu(p).oppostfix<>PF_D) and
       GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
       RemoveSuperfluousMove(p, hp1, 'LdrMov2Ldr') then
      Result:=true;
  end;
{ Pass-1 handler for STM.  Recognises the full call frame
      stmfd r13!,[r14]
      sub   r13,r13,#imm
      bl    abc
      add   r13,r13,#imm
      ldmfd r13!,[r15]
  where the pushed LR and the stack adjustment exist only for the call, and
  replaces the whole sequence by a plain tail branch "b abc".  Not done when
  Thumb interworking is active (the bx-style return would be lost).
  Returns True if the instruction stream was changed. }
function TCpuAsmOptimizer.OptPass1STM(var p: tai): Boolean;
  var
    hp1, hp2, hp3, hp4: tai;
  begin
    Result := False;
    {
      change
      stmfd r13!,[r14]
      sub r13,r13,#4
      bl abc
      add r13,r13,#4
      ldmfd r13!,[r15]
      into
      b abc
    }
    if not(ts_thumb_interworking in current_settings.targetswitches) and
      { p must be exactly "stmfd sp!,[lr]" }
      (taicpu(p).condition = C_None) and
      (taicpu(p).oppostfix = PF_FD) and
      (taicpu(p).oper[0]^.typ = top_ref) and
      (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
      (taicpu(p).oper[0]^.ref^.base=NR_NO) and
      (taicpu(p).oper[0]^.ref^.offset=0) and
      (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
      (taicpu(p).oper[1]^.typ = top_regset) and
      (taicpu(p).oper[1]^.regset^ = [RS_R14]) and
      { hp1: "sub sp,sp,#const" }
      GetNextInstruction(p, hp1) and
      MatchInstruction(hp1, A_SUB, [C_None], [PF_NONE]) and
      (taicpu(hp1).oper[0]^.typ = top_reg) and
      (taicpu(hp1).oper[0]^.reg = NR_STACK_POINTER_REG) and
      MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp1).oper[1]^) and
      (taicpu(hp1).oper[2]^.typ = top_const) and
      { hp2: the call itself (entry/exit markers may intervene) }
      GetNextInstruction(hp1, hp2) and
      SkipEntryExitMarker(hp2, hp2) and
      MatchInstruction(hp2, [A_BL,A_BLX], [C_None], [PF_NONE]) and
      (taicpu(hp2).oper[0]^.typ = top_ref) and
      { hp3: "add sp,sp,#const" undoing exactly the sub above }
      GetNextInstruction(hp2, hp3) and
      SkipEntryExitMarker(hp3, hp3) and
      MatchInstruction(hp3, A_ADD, [C_None], [PF_NONE]) and
      MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[0]^) and
      MatchOperand(taicpu(hp1).oper[0]^,taicpu(hp3).oper[1]^) and
      MatchOperand(taicpu(hp1).oper[2]^,taicpu(hp3).oper[2]^) and
      { hp4: "ldmfd sp!,[pc]" popping the saved LR into PC }
      GetNextInstruction(hp3, hp4) and
      MatchInstruction(hp4, A_LDM, [C_None], [PF_FD]) and
      MatchOperand(taicpu(p).oper[0]^,taicpu(hp4).oper[0]^) and
      (taicpu(hp4).oper[1]^.typ = top_regset) and
      (taicpu(hp4).oper[1]^.regset^ = [RS_R15]) then
      begin
        { drop frame setup/teardown and turn the call into a tail branch }
        asml.Remove(hp1);
        asml.Remove(hp3);
        asml.Remove(hp4);
        taicpu(hp2).opcode:=A_B;
        hp1.free;
        hp3.free;
        hp4.free;
        RemoveCurrentp(p, hp2);
        DebugMsg('Peephole Optimization: Bl2B done', p);
        Result := True;
      end;
  end;
{ Pass-1 handler for STR.  After the inherited pass, tries in order:
  1) str reg1,ref; ldr reg2,ref  ->  str reg1,ref (+ mov reg2,reg1), i.e.
     forward the stored value instead of reloading it;
  2) str reg1,ref; str reg2,ref+4  ->  strd reg1,reg2,ref on EDSP-capable
     CPUs when the register pair and alignment constraints are met;
  3) fold a following add/sub of the base register into a post-indexed
     addressing mode.
  Returns True if the instruction stream was changed. }
function TCpuAsmOptimizer.OptPass1STR(var p: tai): Boolean;
  var
    hp1: tai;
  begin
    Result := inherited OptPass1STR(p);
    if Result then
      Exit;
    { Common conditions }
    if (taicpu(p).oper[1]^.typ = top_ref) and
      (taicpu(p).oper[1]^.ref^.addressmode=AM_OFFSET) and
      (taicpu(p).oppostfix=PF_None) then
      begin
        { change
          str reg1,ref
          ldr reg2,ref
          into
          str reg1,ref
          mov reg2,reg1
        }
        if (taicpu(p).condition=C_None) and
          GetNextInstructionUsingRef(p,hp1,taicpu(p).oper[1]^.ref^) and
          MatchInstruction(hp1, A_LDR, [taicpu(p).condition], [PF_None]) and
          (taicpu(hp1).oper[1]^.typ=top_ref) and
          (taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
          { neither the stored value nor the address registers may change
            between the store and the load, else the forwarded value or the
            address would be wrong }
          not(RegModifiedBetween(taicpu(p).oper[0]^.reg, p, hp1)) and
          ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.index, p, hp1))) and
          ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or not (RegModifiedBetween(taicpu(hp1).oper[1]^.ref^.base, p, hp1))) then
          begin
            if taicpu(hp1).oper[0]^.reg=taicpu(p).oper[0]^.reg then
              begin
                { reloading into the same register: drop the load entirely }
                DebugMsg('Peephole Optimization: StrLdr2StrMov 1 done', hp1);
                asml.remove(hp1);
                hp1.free;
              end
            else
              begin
                { different destination: replace the load by a reg move }
                taicpu(hp1).opcode:=A_MOV;
                taicpu(hp1).oppostfix:=PF_None;
                taicpu(hp1).loadreg(1,taicpu(p).oper[0]^.reg);
                DebugMsg('Peephole Optimization: StrLdr2StrMov 2 done', hp1);
              end;
            result := True;
          end
        { change
          str reg1,ref
          str reg2,ref
          into
          strd reg1,reg2,ref
        }
        else if (GenerateARMCode or GenerateThumb2Code) and
           (CPUARM_HAS_EDSP in cpu_capabilities[current_settings.cputype]) and
           { strd needs an even-numbered first register, an offset that fits
             in 8 bits and a qword-aligned reference }
           not(odd(getsupreg(taicpu(p).oper[0]^.reg))) and
           (abs(taicpu(p).oper[1]^.ref^.offset)<256) and
           AlignedToQWord(taicpu(p).oper[1]^.ref^) and
           GetNextInstruction(p,hp1) and
           MatchInstruction(hp1, A_STR, [taicpu(p).condition, C_None], [PF_None]) and
           (getsupreg(taicpu(p).oper[0]^.reg)+1=getsupreg(taicpu(hp1).oper[0]^.reg)) and
           { str ensures that either base or index contain no register, else ldr wouldn't
             use an offset either
           }
           (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
           (taicpu(p).oper[1]^.ref^.index=taicpu(hp1).oper[1]^.ref^.index) and
           (taicpu(p).oper[1]^.ref^.offset+4=taicpu(hp1).oper[1]^.ref^.offset) then
          begin
            DebugMsg('Peephole Optimization: StrStr2Strd done', p);
            { turn p into "strd reg1,reg2,ref" and drop the second store }
            taicpu(p).oppostfix:=PF_D;
            taicpu(p).loadref(2,taicpu(p).oper[1]^.ref^);
            taicpu(p).loadreg(1, taicpu(hp1).oper[0]^.reg);
            taicpu(p).ops:=3;
            asml.remove(hp1);
            hp1.free;
            result:=true;
          end;
      end;
    Result:=LookForPostindexedPattern(taicpu(p)) or Result;
  end;
  988. function TCpuAsmOptimizer.OptPass1MOV(var p: tai): Boolean;
  989. var
  990. hp1, hpfar1, hp2: tai;
  991. i, i2: longint;
  992. tempop: tasmop;
  993. dealloc: tai_regalloc;
  994. begin
  995. Result := False;
  996. hp1 := nil;
  997. { fold
  998. mov reg1,reg0, shift imm1
  999. mov reg1,reg1, shift imm2
  1000. }
  1001. if (taicpu(p).ops=3) and
  1002. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1003. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1004. getnextinstruction(p,hp1) and
  1005. MatchInstruction(hp1, A_MOV, [taicpu(p).condition], [PF_None]) and
  1006. (taicpu(hp1).ops=3) and
  1007. MatchOperand(taicpu(hp1).oper[0]^, taicpu(p).oper[0]^.reg) and
  1008. MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) and
  1009. (taicpu(hp1).oper[2]^.typ = top_shifterop) and
  1010. (taicpu(hp1).oper[2]^.shifterop^.rs = NR_NO) then
  1011. begin
  1012. { fold
  1013. mov reg1,reg0, lsl 16
  1014. mov reg1,reg1, lsr 16
  1015. strh reg1, ...
  1016. dealloc reg1
  1017. to
  1018. strh reg1, ...
  1019. dealloc reg1
  1020. }
  1021. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1022. (taicpu(p).oper[2]^.shifterop^.shiftimm=16) and
  1023. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ASR]) and
  1024. (taicpu(hp1).oper[2]^.shifterop^.shiftimm=16) and
  1025. getnextinstruction(hp1,hp2) and
  1026. MatchInstruction(hp2, A_STR, [taicpu(p).condition], [PF_H]) and
  1027. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^.reg) then
  1028. begin
  1029. TransferUsedRegs(TmpUsedRegs);
  1030. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1031. UpdateUsedRegs(TmpUsedRegs, tai(hp1.next));
  1032. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hp2,TmpUsedRegs)) then
  1033. begin
  1034. DebugMsg('Peephole Optimization: removed superfluous 16 Bit zero extension', hp1);
  1035. taicpu(hp2).loadreg(0,taicpu(p).oper[1]^.reg);
  1036. asml.remove(hp1);
  1037. hp1.free;
  1038. RemoveCurrentP(p, hp2);
  1039. Result:=true;
  1040. Exit;
  1041. end;
  1042. end
  1043. { fold
  1044. mov reg1,reg0, shift imm1
  1045. mov reg1,reg1, shift imm2
  1046. to
  1047. mov reg1,reg0, shift imm1+imm2
  1048. }
  1049. else if (taicpu(p).oper[2]^.shifterop^.shiftmode=taicpu(hp1).oper[2]^.shifterop^.shiftmode) or
  1050. { asr makes no use after a lsr, the asr can be foled into the lsr }
  1051. ((taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSR) and (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_ASR) ) then
  1052. begin
  1053. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1054. { avoid overflows }
  1055. if taicpu(p).oper[2]^.shifterop^.shiftimm>31 then
  1056. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  1057. SM_ROR:
  1058. taicpu(p).oper[2]^.shifterop^.shiftimm:=taicpu(p).oper[2]^.shifterop^.shiftimm and 31;
  1059. SM_ASR:
  1060. taicpu(p).oper[2]^.shifterop^.shiftimm:=31;
  1061. SM_LSR,
  1062. SM_LSL:
  1063. begin
  1064. hp2:=taicpu.op_reg_const(A_MOV,taicpu(p).oper[0]^.reg,0);
  1065. InsertLLItem(p.previous, p.next, hp2);
  1066. p.free;
  1067. p:=hp2;
  1068. end;
  1069. else
  1070. internalerror(2008072803);
  1071. end;
  1072. DebugMsg('Peephole Optimization: ShiftShift2Shift 1 done', p);
  1073. asml.remove(hp1);
  1074. hp1.free;
  1075. hp1 := nil;
  1076. result := true;
  1077. end
  1078. { fold
  1079. mov reg1,reg0, shift imm1
  1080. mov reg1,reg1, shift imm2
  1081. mov reg1,reg1, shift imm3 ...
  1082. mov reg2,reg1, shift imm3 ...
  1083. }
  1084. else if GetNextInstructionUsingReg(hp1,hp2, taicpu(hp1).oper[0]^.reg) and
  1085. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1086. (taicpu(hp2).ops=3) and
  1087. MatchOperand(taicpu(hp2).oper[1]^, taicpu(hp1).oper[0]^.reg) and
  1088. RegEndofLife(taicpu(p).oper[0]^.reg,taicpu(hp2)) and
  1089. (taicpu(hp2).oper[2]^.typ = top_shifterop) and
  1090. (taicpu(hp2).oper[2]^.shifterop^.rs = NR_NO) then
  1091. begin
  1092. { mov reg1,reg0, lsl imm1
  1093. mov reg1,reg1, lsr/asr imm2
  1094. mov reg2,reg1, lsl imm3 ...
  1095. to
  1096. mov reg1,reg0, lsl imm1
  1097. mov reg2,reg1, lsr/asr imm2-imm3
  1098. if
  1099. imm1>=imm2
  1100. }
  1101. if (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1102. (taicpu(hp1).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1103. (taicpu(p).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1104. begin
  1105. if (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(hp1).oper[2]^.shifterop^.shiftimm) then
  1106. begin
  1107. if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,p,hp1)) and
  1108. not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1109. begin
  1110. DebugMsg('Peephole Optimization: ShiftShiftShift2ShiftShift 1a done', p);
  1111. inc(taicpu(p).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm-taicpu(hp1).oper[2]^.shifterop^.shiftimm);
  1112. taicpu(p).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1113. asml.remove(hp1);
  1114. asml.remove(hp2);
  1115. hp1.free;
  1116. hp2.free;
  1117. if taicpu(p).oper[2]^.shifterop^.shiftimm>=32 then
  1118. begin
  1119. taicpu(p).freeop(1);
  1120. taicpu(p).freeop(2);
  1121. taicpu(p).loadconst(1,0);
  1122. end;
  1123. result := true;
  1124. Exit;
  1125. end;
  1126. end
  1127. else if not(RegUsedBetween(taicpu(hp2).oper[0]^.reg,hp1,hp2)) then
  1128. begin
  1129. DebugMsg('Peephole Optimization: ShiftShiftShift2ShiftShift 1b done', p);
  1130. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(hp2).oper[2]^.shifterop^.shiftimm);
  1131. taicpu(hp1).oper[0]^.reg:=taicpu(hp2).oper[0]^.reg;
  1132. asml.remove(hp2);
  1133. hp2.free;
  1134. result := true;
  1135. Exit;
  1136. end;
  1137. end
  1138. { mov reg1,reg0, lsr/asr imm1
  1139. mov reg1,reg1, lsl imm2
  1140. mov reg1,reg1, lsr/asr imm3 ...
  1141. if imm3>=imm1 and imm2>=imm1
  1142. to
  1143. mov reg1,reg0, lsl imm2-imm1
  1144. mov reg1,reg1, lsr/asr imm3 ...
  1145. }
  1146. else if (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and (taicpu(hp2).oper[2]^.shifterop^.shiftmode in [SM_ASR,SM_LSR]) and
  1147. (taicpu(hp1).oper[2]^.shifterop^.shiftmode=SM_LSL) and
  1148. (taicpu(hp2).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) and
  1149. (taicpu(hp1).oper[2]^.shifterop^.shiftimm>=taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1150. begin
  1151. dec(taicpu(hp1).oper[2]^.shifterop^.shiftimm,taicpu(p).oper[2]^.shifterop^.shiftimm);
  1152. taicpu(hp1).oper[1]^.reg:=taicpu(p).oper[1]^.reg;
  1153. DebugMsg('Peephole Optimization: ShiftShiftShift2ShiftShift 2 done', p);
  1154. if taicpu(hp1).oper[2]^.shifterop^.shiftimm=0 then
  1155. begin
  1156. taicpu(hp2).oper[1]^.reg:=taicpu(hp1).oper[1]^.reg;
  1157. asml.remove(hp1);
  1158. hp1.free;
  1159. end;
  1160. RemoveCurrentp(p);
  1161. result := true;
  1162. Exit;
  1163. end;
  1164. end;
  1165. end;
  1166. { All the optimisations from this point on require GetNextInstructionUsingReg
  1167. to return True }
  1168. while (
  1169. GetNextInstructionUsingReg(p, hpfar1, taicpu(p).oper[0]^.reg) and
  1170. (hpfar1.typ = ait_instruction)
  1171. ) do
  1172. begin
  1173. { Change the common
  1174. mov r0, r0, lsr #xxx
  1175. and r0, r0, #yyy/bic r0, r0, #xxx
  1176. and remove the superfluous and/bic if possible
  1177. This could be extended to handle more cases.
  1178. }
  1179. { Change
  1180. mov rx, ry, lsr/ror #xxx
  1181. uxtb/uxth rz,rx/and rz,rx,0xFF
  1182. dealloc rx
  1183. to
  1184. uxtb/uxth rz,ry,ror #xxx
  1185. }
  1186. if (GenerateThumb2Code) and
  1187. (taicpu(p).ops=3) and
  1188. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1189. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1190. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSR,SM_ROR]) and
  1191. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1192. begin
  1193. if MatchInstruction(hpfar1, A_UXTB, [C_None], [PF_None]) and
  1194. (taicpu(hpfar1).ops = 2) and
  1195. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1196. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1197. begin
  1198. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1199. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1200. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1201. taicpu(hpfar1).ops := 3;
  1202. if not Assigned(hp1) then
  1203. GetNextInstruction(p,hp1);
  1204. RemoveCurrentP(p, hp1);
  1205. result:=true;
  1206. exit;
  1207. end
  1208. else if MatchInstruction(hpfar1, A_UXTH, [C_None], [PF_None]) and
  1209. (taicpu(hpfar1).ops=2) and
  1210. (taicpu(p).oper[2]^.shifterop^.shiftimm in [16]) and
  1211. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1212. begin
  1213. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1214. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1215. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1216. taicpu(hpfar1).ops := 3;
  1217. if not Assigned(hp1) then
  1218. GetNextInstruction(p,hp1);
  1219. RemoveCurrentP(p, hp1);
  1220. result:=true;
  1221. exit;
  1222. end
  1223. else if MatchInstruction(hpfar1, A_AND, [C_None], [PF_None]) and
  1224. (taicpu(hpfar1).ops = 3) and
  1225. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1226. (taicpu(hpfar1).oper[2]^.val = $FF) and
  1227. (taicpu(p).oper[2]^.shifterop^.shiftimm in [8,16,24]) and
  1228. MatchOperand(taicpu(hpfar1).oper[1]^, taicpu(p).oper[0]^.reg) then
  1229. begin
  1230. taicpu(hpfar1).ops := 3;
  1231. taicpu(hpfar1).opcode := A_UXTB;
  1232. taicpu(hpfar1).oper[1]^.reg := taicpu(p).oper[1]^.reg;
  1233. taicpu(hpfar1).loadshifterop(2,taicpu(p).oper[2]^.shifterop^);
  1234. taicpu(hpfar1).oper[2]^.shifterop^.shiftmode:=SM_ROR;
  1235. if not Assigned(hp1) then
  1236. GetNextInstruction(p,hp1);
  1237. RemoveCurrentP(p, hp1);
  1238. result:=true;
  1239. exit;
  1240. end;
  1241. end;
  1242. { 2-operald mov optimisations }
  1243. if (taicpu(p).ops = 2) then
  1244. begin
  1245. {
  1246. This removes the mul from
  1247. mov rX,0
  1248. ...
  1249. mul ...,rX,...
  1250. }
  1251. if (taicpu(p).oper[1]^.typ = top_const) then
  1252. begin
  1253. (* if false and
  1254. (taicpu(p).oper[1]^.val=0) and
  1255. MatchInstruction(hpfar1, [A_MUL,A_MLA], [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1256. (((taicpu(hpfar1).oper[1]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^)) or
  1257. ((taicpu(hpfar1).oper[2]^.typ=top_reg) and MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[2]^))) then
  1258. begin
  1259. TransferUsedRegs(TmpUsedRegs);
  1260. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1261. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1262. DebugMsg('Peephole Optimization: MovMUL/MLA2Mov0 done', p);
  1263. if taicpu(hpfar1).opcode=A_MUL then
  1264. taicpu(hpfar1).loadconst(1,0)
  1265. else
  1266. taicpu(hpfar1).loadreg(1,taicpu(hpfar1).oper[3]^.reg);
  1267. taicpu(hpfar1).ops:=2;
  1268. taicpu(hpfar1).opcode:=A_MOV;
  1269. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1270. RemoveCurrentP(p);
  1271. Result:=true;
  1272. exit;
  1273. end
  1274. else*) if (taicpu(p).oper[1]^.val=0) and
  1275. MatchInstruction(hpfar1, A_MLA, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1276. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[3]^) then
  1277. begin
  1278. TransferUsedRegs(TmpUsedRegs);
  1279. UpdateUsedRegs(TmpUsedRegs, tai(p.next));
  1280. UpdateUsedRegs(TmpUsedRegs, tai(hpfar1.next));
  1281. DebugMsg('Peephole Optimization: MovMLA2MUL 1 done', p);
  1282. taicpu(hpfar1).ops:=3;
  1283. taicpu(hpfar1).opcode:=A_MUL;
  1284. if not(RegUsedAfterInstruction(taicpu(p).oper[0]^.reg,hpfar1,TmpUsedRegs)) then
  1285. begin
  1286. RemoveCurrentP(p);
  1287. Result:=true;
  1288. end;
  1289. exit;
  1290. end
  1291. {
  1292. This changes the very common
  1293. mov r0, #0
  1294. str r0, [...]
  1295. mov r0, #0
  1296. str r0, [...]
  1297. and removes all superfluous mov instructions
  1298. }
  1299. else if (taicpu(hpfar1).opcode=A_STR) then
  1300. begin
  1301. hp1 := hpfar1;
  1302. while MatchInstruction(hp1, A_STR, [taicpu(p).condition], []) and
  1303. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1304. GetNextInstruction(hp1, hp2) and
  1305. MatchInstruction(hp2, A_MOV, [taicpu(p).condition], [PF_None]) and
  1306. (taicpu(hp2).ops = 2) and
  1307. MatchOperand(taicpu(hp2).oper[0]^, taicpu(p).oper[0]^) and
  1308. MatchOperand(taicpu(hp2).oper[1]^, taicpu(p).oper[1]^) do
  1309. begin
  1310. DebugMsg('Peephole Optimization: MovStrMov done', hp2);
  1311. GetNextInstruction(hp2,hp1);
  1312. asml.remove(hp2);
  1313. hp2.free;
  1314. result:=true;
  1315. if not assigned(hp1) then break;
  1316. end;
  1317. if Result then
  1318. Exit;
  1319. end;
  1320. end;
  1321. {
  1322. This removes the first mov from
  1323. mov rX,...
  1324. mov rX,...
  1325. }
  1326. if taicpu(hpfar1).opcode=A_MOV then
  1327. begin
  1328. hp1 := p;
  1329. while MatchInstruction(hpfar1, A_MOV, [taicpu(hp1).condition], [taicpu(hp1).oppostfix]) and
  1330. (taicpu(hpfar1).ops = 2) and
  1331. MatchOperand(taicpu(hp1).oper[0]^, taicpu(hpfar1).oper[0]^) and
  1332. { don't remove the first mov if the second is a mov rX,rX }
  1333. not(MatchOperand(taicpu(hpfar1).oper[0]^, taicpu(hpfar1).oper[1]^)) do
  1334. begin
  1335. { Defer removing the first p until after the while loop }
  1336. if p <> hp1 then
  1337. begin
  1338. DebugMsg('Peephole Optimization: MovMov done', hp1);
  1339. asml.remove(hp1);
  1340. hp1.free;
  1341. end;
  1342. hp1:=hpfar1;
  1343. GetNextInstruction(hpfar1,hpfar1);
  1344. result:=true;
  1345. if not assigned(hpfar1) then
  1346. Break;
  1347. end;
  1348. if Result then
  1349. begin
  1350. DebugMsg('Peephole Optimization: MovMov done', p);
  1351. RemoveCurrentp(p);
  1352. Exit;
  1353. end;
  1354. end;
  1355. if RedundantMovProcess(p,hpfar1) then
  1356. begin
  1357. Result:=true;
  1358. { p might not point at a mov anymore }
  1359. exit;
  1360. end;
  1361. { If hpfar1 is nil after the call to RedundantMovProcess, it is
  1362. because it would have become a dangling pointer, so reinitialise it. }
  1363. if not Assigned(hpfar1) then
  1364. Continue;
  1365. { Fold the very common sequence
  1366. mov regA, regB
  1367. ldr* regA, [regA]
  1368. to
  1369. ldr* regA, [regB]
  1370. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1371. }
  1372. if
  1373. // Make sure that Thumb code doesn't propagate a high register into a reference
  1374. (
  1375. (
  1376. GenerateThumbCode and
  1377. (getsupreg(taicpu(p).oper[1]^.reg) < RS_R8)
  1378. ) or (not GenerateThumbCode)
  1379. ) and
  1380. (taicpu(p).oper[1]^.typ = top_reg) and
  1381. (taicpu(p).oppostfix = PF_NONE) and
  1382. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], []) and
  1383. (taicpu(hpfar1).oper[1]^.typ = top_ref) and
  1384. { We can change the base register only when the instruction uses AM_OFFSET }
  1385. ((taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) or
  1386. ((taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1387. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg))
  1388. ) and
  1389. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1390. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1391. begin
  1392. DebugMsg('Peephole Optimization: MovLdr2Ldr done', hpfar1);
  1393. if (taicpu(hpfar1).oper[1]^.ref^.addressmode = AM_OFFSET) and
  1394. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1395. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(p).oper[1]^.reg;
  1396. if taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg then
  1397. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1398. dealloc:=FindRegDeAlloc(taicpu(p).oper[1]^.reg, tai(p.Next));
  1399. if Assigned(dealloc) then
  1400. begin
  1401. asml.remove(dealloc);
  1402. asml.InsertAfter(dealloc,hpfar1);
  1403. end;
  1404. if (not Assigned(hp1)) or (p=hp1) then
  1405. GetNextInstruction(p, hp1);
  1406. RemoveCurrentP(p, hp1);
  1407. result:=true;
  1408. Exit;
  1409. end
  1410. end
  1411. { 3-operald mov optimisations }
  1412. else if (taicpu(p).ops = 3) then
  1413. begin
  1414. if (taicpu(p).oper[2]^.typ = top_shifterop) and
  1415. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) and
  1416. (taicpu(p).oper[2]^.shifterop^.shiftmode = SM_LSR) and
  1417. (taicpu(hpfar1).ops>=1) and
  1418. (taicpu(hpfar1).oper[0]^.typ=top_reg) and
  1419. (not RegModifiedBetween(taicpu(hpfar1).oper[0]^.reg, p, hpfar1)) and
  1420. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1421. begin
  1422. if (taicpu(p).oper[2]^.shifterop^.shiftimm >= 24 ) and
  1423. MatchInstruction(hpfar1, A_AND, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1424. (taicpu(hpfar1).ops=3) and
  1425. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1426. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1427. { Check if the AND actually would only mask out bits being already zero because of the shift
  1428. }
  1429. ((($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm) and taicpu(hpfar1).oper[2]^.val) =
  1430. ($ffffffff shr taicpu(p).oper[2]^.shifterop^.shiftimm)) then
  1431. begin
  1432. DebugMsg('Peephole Optimization: LsrAnd2Lsr done', hpfar1);
  1433. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1434. asml.remove(hpfar1);
  1435. hpfar1.free;
  1436. result:=true;
  1437. Exit;
  1438. end
  1439. else if MatchInstruction(hpfar1, A_BIC, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1440. (taicpu(hpfar1).ops=3) and
  1441. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^) and
  1442. (taicpu(hpfar1).oper[2]^.typ = top_const) and
  1443. { Check if the BIC actually would only mask out bits beeing already zero because of the shift }
  1444. (taicpu(hpfar1).oper[2]^.val<>0) and
  1445. (BsfDWord(taicpu(hpfar1).oper[2]^.val)>=32-taicpu(p).oper[2]^.shifterop^.shiftimm) then
  1446. begin
  1447. DebugMsg('Peephole Optimization: LsrBic2Lsr done', hpfar1);
  1448. taicpu(p).oper[0]^.reg:=taicpu(hpfar1).oper[0]^.reg;
  1449. asml.remove(hpfar1);
  1450. hpfar1.free;
  1451. result:=true;
  1452. Exit;
  1453. end;
  1454. end;
  1455. { This folds shifterops into following instructions
  1456. mov r0, r1, lsl #8
  1457. add r2, r3, r0
  1458. to
  1459. add r2, r3, r1, lsl #8
  1460. CAUTION! If this one is successful p might not be a mov instruction anymore!
  1461. }
  1462. if (taicpu(p).oper[1]^.typ = top_reg) and
  1463. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1464. (taicpu(p).oppostfix = PF_NONE) and
  1465. MatchInstruction(hpfar1, [A_ADD, A_ADC, A_RSB, A_RSC, A_SUB, A_SBC,
  1466. A_AND, A_BIC, A_EOR, A_ORR, A_TEQ, A_TST,
  1467. A_CMP, A_CMN],
  1468. [taicpu(p).condition], [PF_None]) and
  1469. (not ((GenerateThumb2Code) and
  1470. (taicpu(hpfar1).opcode in [A_SBC]) and
  1471. (((taicpu(hpfar1).ops=3) and
  1472. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[1]^.reg)) or
  1473. ((taicpu(hpfar1).ops=2) and
  1474. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[0]^.reg))))) and
  1475. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) and
  1476. (taicpu(hpfar1).ops >= 2) and
  1477. {Currently we can't fold into another shifterop}
  1478. (taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^.typ = top_reg) and
  1479. {Folding rrx is problematic because of the C-Flag, as we currently can't check
  1480. NR_DEFAULTFLAGS for modification}
  1481. (
  1482. {Everything is fine if we don't use RRX}
  1483. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) or
  1484. (
  1485. {If it is RRX, then check if we're just accessing the next instruction}
  1486. Assigned(hp1) and
  1487. (hpfar1 = hp1)
  1488. )
  1489. ) and
  1490. { reg1 might not be modified inbetween }
  1491. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1492. { The shifterop can contain a register, might not be modified}
  1493. (
  1494. (taicpu(p).oper[2]^.shifterop^.rs = NR_NO) or
  1495. not(RegModifiedBetween(taicpu(p).oper[2]^.shifterop^.rs, p, hpfar1))
  1496. ) and
  1497. (
  1498. {Only ONE of the two src operands is allowed to match}
  1499. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-2]^) xor
  1500. MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[taicpu(hpfar1).ops-1]^)
  1501. ) then
  1502. begin
  1503. if taicpu(hpfar1).opcode in [A_TST, A_TEQ, A_CMN] then
  1504. I2:=0
  1505. else
  1506. I2:=1;
  1507. for I:=I2 to taicpu(hpfar1).ops-1 do
  1508. if MatchOperand(taicpu(p).oper[0]^, taicpu(hpfar1).oper[I]^.reg) then
  1509. begin
  1510. { If the parameter matched on the second op from the RIGHT
  1511. we have to switch the parameters, this will not happen for CMP
  1512. were we're only evaluating the most right parameter
  1513. }
  1514. if I <> taicpu(hpfar1).ops-1 then
  1515. begin
  1516. {The SUB operators need to be changed when we swap parameters}
  1517. case taicpu(hpfar1).opcode of
  1518. A_SUB: tempop:=A_RSB;
  1519. A_SBC: tempop:=A_RSC;
  1520. A_RSB: tempop:=A_SUB;
  1521. A_RSC: tempop:=A_SBC;
  1522. else tempop:=taicpu(hpfar1).opcode;
  1523. end;
  1524. if taicpu(hpfar1).ops = 3 then
  1525. hp2:=taicpu.op_reg_reg_reg_shifterop(tempop,
  1526. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[2]^.reg,
  1527. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1528. else
  1529. hp2:=taicpu.op_reg_reg_shifterop(tempop,
  1530. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1531. taicpu(p).oper[2]^.shifterop^);
  1532. end
  1533. else
  1534. if taicpu(hpfar1).ops = 3 then
  1535. hp2:=taicpu.op_reg_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1536. taicpu(hpfar1).oper[0]^.reg, taicpu(hpfar1).oper[1]^.reg,
  1537. taicpu(p).oper[1]^.reg, taicpu(p).oper[2]^.shifterop^)
  1538. else
  1539. hp2:=taicpu.op_reg_reg_shifterop(taicpu(hpfar1).opcode,
  1540. taicpu(hpfar1).oper[0]^.reg, taicpu(p).oper[1]^.reg,
  1541. taicpu(p).oper[2]^.shifterop^);
  1542. if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
  1543. AllocRegBetween(taicpu(p).oper[2]^.shifterop^.rs,p,hpfar1,UsedRegs);
  1544. AllocRegBetween(taicpu(p).oper[1]^.reg,p,hpfar1,UsedRegs);
  1545. asml.insertbefore(hp2, hpfar1);
  1546. asml.remove(hpfar1);
  1547. hpfar1.free;
  1548. DebugMsg('Peephole Optimization: FoldShiftProcess done', hp2);
  1549. if not Assigned(hp1) then
  1550. GetNextInstruction(p, hp1)
  1551. else if hp1 = hpfar1 then
  1552. { If hp1 = hpfar1, then it's a dangling pointer }
  1553. hp1 := hp2;
  1554. RemoveCurrentP(p, hp1);
  1555. Result:=true;
  1556. Exit;
  1557. end;
  1558. end;
  1559. {
  1560. Fold
  1561. mov r1, r1, lsl #2
  1562. ldr/ldrb r0, [r0, r1]
  1563. to
  1564. ldr/ldrb r0, [r0, r1, lsl #2]
  1565. XXX: This still needs some work, as we quite often encounter something like
  1566. mov r1, r2, lsl #2
  1567. add r2, r3, #imm
  1568. ldr r0, [r2, r1]
  1569. which can't be folded because r2 is overwritten between the shift and the ldr.
  1570. We could try to shuffle the registers around and fold it into.
  1571. add r1, r3, #imm
  1572. ldr r0, [r1, r2, lsl #2]
  1573. }
  1574. if (not(GenerateThumbCode)) and
  1575. { thumb2 allows only lsl #0..#3 }
  1576. (not(GenerateThumb2Code) or
  1577. ((taicpu(p).oper[2]^.shifterop^.shiftimm in [0..3]) and
  1578. (taicpu(p).oper[2]^.shifterop^.shiftmode=SM_LSL)
  1579. )
  1580. ) and
  1581. (taicpu(p).oper[1]^.typ = top_reg) and
  1582. (taicpu(p).oper[2]^.typ = top_shifterop) and
  1583. { RRX is tough to handle, because it requires tracking the C-Flag,
  1584. it is also extremly unlikely to be emitted this way}
  1585. (taicpu(p).oper[2]^.shifterop^.shiftmode <> SM_RRX) and
  1586. (taicpu(p).oper[2]^.shifterop^.shiftimm <> 0) and
  1587. (taicpu(p).oppostfix = PF_NONE) and
  1588. {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
  1589. (MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
  1590. (GenerateThumb2Code and
  1591. MatchInstruction(hpfar1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
  1592. ) and
  1593. (
  1594. {If this is address by offset, one of the two registers can be used}
  1595. ((taicpu(hpfar1).oper[1]^.ref^.addressmode=AM_OFFSET) and
  1596. (
  1597. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) xor
  1598. (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg)
  1599. )
  1600. ) or
  1601. {For post and preindexed only the index register can be used}
  1602. ((taicpu(hpfar1).oper[1]^.ref^.addressmode in [AM_POSTINDEXED, AM_PREINDEXED]) and
  1603. (
  1604. (taicpu(hpfar1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
  1605. (taicpu(hpfar1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
  1606. ) and
  1607. (not GenerateThumb2Code)
  1608. )
  1609. ) and
  1610. { Only fold if both registers are used. Otherwise we are folding p with itself }
  1611. (taicpu(hpfar1).oper[1]^.ref^.index<>NR_NO) and
  1612. (taicpu(hpfar1).oper[1]^.ref^.base<>NR_NO) and
  1613. { Only fold if there isn't another shifterop already, and offset is zero. }
  1614. (taicpu(hpfar1).oper[1]^.ref^.offset = 0) and
  1615. (taicpu(hpfar1).oper[1]^.ref^.shiftmode = SM_None) and
  1616. not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hpfar1)) and
  1617. RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hpfar1)) then
  1618. begin
  1619. { If the register we want to do the shift for resides in base, we need to swap that}
  1620. if (taicpu(hpfar1).oper[1]^.ref^.base = taicpu(p).oper[0]^.reg) then
  1621. taicpu(hpfar1).oper[1]^.ref^.base := taicpu(hpfar1).oper[1]^.ref^.index;
  1622. taicpu(hpfar1).oper[1]^.ref^.index := taicpu(p).oper[1]^.reg;
  1623. taicpu(hpfar1).oper[1]^.ref^.shiftmode := taicpu(p).oper[2]^.shifterop^.shiftmode;
  1624. taicpu(hpfar1).oper[1]^.ref^.shiftimm := taicpu(p).oper[2]^.shifterop^.shiftimm;
  1625. DebugMsg('Peephole Optimization: FoldShiftLdrStr done', hpfar1);
  1626. RemoveCurrentP(p);
  1627. Result:=true;
  1628. Exit;
  1629. end;
  1630. end;
  1631. {
  1632. Often we see shifts and then a superfluous mov to another register
  1633. In the future this might be handled in RedundantMovProcess when it uses RegisterTracking
  1634. }
  1635. if RemoveSuperfluousMove(p, hpfar1, 'MovMov2Mov') then
  1636. Result:=true;
  1637. Exit;
  1638. end;
  1639. end;
function TCpuAsmOptimizer.OptPass1MVN(var p: tai): Boolean;
  var
    hp1: tai;
  begin
    {
      change
      mvn reg2,reg1
      and reg3,reg4,reg2
      dealloc reg2
      to
      bic reg3,reg4,reg1
    }
    Result := False;
    { the mvn source must be a plain register (no shifter operand form) }
    if (taicpu(p).oper[1]^.typ = top_reg) and
      { find the next instruction using reg2 (the mvn destination) }
      GetNextInstructionUsingReg(p,hp1,taicpu(p).oper[0]^.reg) and
      { it must be an AND, any condition/postfix }
      MatchInstruction(hp1,A_AND,[],[]) and
      { ... with reg2 as one of its source operands; the ops checks must stay
        before the oper[] accesses — short-circuit evaluation guards them }
      (((taicpu(hp1).ops=3) and
        (taicpu(hp1).oper[2]^.typ=top_reg) and
        (MatchOperand(taicpu(hp1).oper[2]^, taicpu(p).oper[0]^.reg) or
         MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) or
       ((taicpu(hp1).ops=2) and
        (taicpu(hp1).oper[1]^.typ=top_reg) and
        MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg))) and
      { reg2 must be deallocated right after the and, i.e. nobody else uses it }
      assigned(FindRegDealloc(taicpu(p).oper[0]^.reg,tai(hp1.Next))) and
      { reg1 might not be modified inbetween }
      not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) then
      begin
        DebugMsg('Peephole Optimization: MvnAnd2Bic done', p);
        taicpu(hp1).opcode:=A_BIC;
        if taicpu(hp1).ops=3 then
          begin
            { BIC complements only its last operand, so the mvn'd register
              must end up in oper[2]; swap if it currently sits in oper[1] }
            if MatchOperand(taicpu(hp1).oper[1]^, taicpu(p).oper[0]^.reg) then
              taicpu(hp1).loadReg(1,taicpu(hp1).oper[2]^.reg); // Swap operands
            taicpu(hp1).loadReg(2,taicpu(p).oper[1]^.reg);
          end
        else
          taicpu(hp1).loadReg(1,taicpu(p).oper[1]^.reg);
        { the mvn itself is now superfluous }
        RemoveCurrentp(p);
        Result := True;
        Exit;
      end;
  end;
  1682. function TCpuAsmOptimizer.OptPass1VMov(var p: tai): Boolean;
  1683. var
  1684. hp1: tai;
  1685. begin
  1686. {
  1687. change
  1688. vmov reg0,reg1,reg2
  1689. vmov reg1,reg2,reg0
  1690. into
  1691. vmov reg0,reg1,reg2
  1692. can be applied regardless if reg0 or reg2 is the vfp register
  1693. }
  1694. Result := False;
  1695. if (taicpu(p).ops = 3) then
  1696. while GetNextInstruction(p, hp1) and
  1697. MatchInstruction(hp1, A_VMOV, [taicpu(p).condition], [taicpu(p).oppostfix]) and
  1698. (taicpu(hp1).ops = 3) and
  1699. MatchOperand(taicpu(p).oper[0]^, taicpu(hp1).oper[2]^) and
  1700. MatchOperand(taicpu(p).oper[1]^, taicpu(hp1).oper[0]^) and
  1701. MatchOperand(taicpu(p).oper[2]^, taicpu(hp1).oper[1]^) do
  1702. begin
  1703. asml.Remove(hp1);
  1704. hp1.free;
  1705. DebugMsg('Peephole Optimization: VMovVMov2VMov done', p);
  1706. { Can we do it again? }
  1707. end;
  1708. end;
  1709. function TCpuAsmOptimizer.OptPass1VOp(var p: tai): Boolean;
  1710. var
  1711. hp1: tai;
  1712. begin
  1713. Result := GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
  1714. RemoveSuperfluousVMov(p, hp1, 'VOpVMov2VOp');
  1715. end;
  1716. function TCpuAsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  1717. begin
  1718. result := false;
  1719. if p.typ = ait_instruction then
  1720. begin
  1721. case taicpu(p).opcode of
  1722. A_CMP:
  1723. Result := OptPass1CMP(p);
  1724. A_STR:
  1725. Result := OptPass1STR(p);
  1726. A_LDR:
  1727. Result := OptPass1LDR(p);
  1728. A_MOV:
  1729. Result := OptPass1MOV(p);
  1730. A_AND:
  1731. Result := OptPass1And(p);
  1732. A_ADD,
  1733. A_SUB:
  1734. Result := OptPass1ADDSUB(p);
  1735. A_MUL:
  1736. REsult := OptPass1MUL(p);
  1737. A_ADC,
  1738. A_RSB,
  1739. A_RSC,
  1740. A_SBC,
  1741. A_BIC,
  1742. A_EOR,
  1743. A_ORR,
  1744. A_MLA,
  1745. A_MLS,
  1746. A_QADD,A_QADD16,A_QADD8,
  1747. A_QSUB,A_QSUB16,A_QSUB8,
  1748. A_QDADD,A_QDSUB,A_QASX,A_QSAX,
  1749. A_SHADD16,A_SHADD8,A_UHADD16,A_UHADD8,
  1750. A_SHSUB16,A_SHSUB8,A_UHSUB16,A_UHSUB8,
  1751. A_PKHTB,A_PKHBT,
  1752. A_SMUAD,A_SMUSD:
  1753. Result := OptPass1DataCheckMov(p);
  1754. {$ifdef dummy}
  1755. A_MVN:
  1756. Result := OPtPass1MVN(p);
  1757. {$endif dummy}
  1758. A_UXTB:
  1759. Result := OptPass1UXTB(p);
  1760. A_UXTH:
  1761. Result := OptPass1UXTH(p);
  1762. A_SXTB:
  1763. Result := OptPass1SXTB(p);
  1764. A_SXTH:
  1765. Result := OptPass1SXTH(p);
  1766. A_STM:
  1767. Result := OptPass1STM(p);
  1768. A_VMOV:
  1769. Result := OptPass1VMov(p);
  1770. A_VLDR,
  1771. A_VADD,
  1772. A_VMUL,
  1773. A_VDIV,
  1774. A_VSUB,
  1775. A_VSQRT,
  1776. A_VNEG,
  1777. A_VCVT,
  1778. A_VABS:
  1779. Result := OptPass1VOp(p);
  1780. else
  1781. ;
  1782. end;
  1783. end;
  1784. end;
  1785. { instructions modifying the CPSR can be only the last instruction }
  1786. function MustBeLast(p : tai) : boolean;
  1787. begin
  1788. Result:=(p.typ=ait_instruction) and
  1789. ((taicpu(p).opcode in [A_BL,A_BLX,A_CMP,A_CMN,A_SWI,A_TEQ,A_TST,A_CMF,A_CMFE {,A_MSR}]) or
  1790. ((taicpu(p).ops>=1) and (taicpu(p).oper[0]^.typ=top_reg) and (taicpu(p).oper[0]^.reg=NR_PC)) or
  1791. (taicpu(p).oppostfix=PF_S));
  1792. end;
{ Pass 2 of the peephole optimizer.  The only transformation done here is
  turning short forward conditional branches into conditionally executed
  instructions (ARM-mode predication), in two shapes:
    Bcc2Cond  — Bcc skipping a short block,
    BccB2Cond — Bcc over block #1 followed by an unconditional B over block #2. }
procedure TCpuAsmOptimizer.PeepHoleOptPass2;
  var
    p,hp1,hp2: tai;
    l : longint;
    condition : tasmcond;
    hp3: tai;
    WasLast: boolean;
    { UsedRegs, TmpUsedRegs: TRegSet; }
  begin
    p := BlockStart;
    { UsedRegs := []; }
    while (p <> BlockEnd) Do
      begin
        { UpdateUsedRegs(UsedRegs, tai(p.next)); }
        case p.Typ Of
          Ait_Instruction:
            begin
              case taicpu(p).opcode Of
                A_B:
                  { only conditional branches; plain Thumb cannot predicate
                    ordinary instructions, so it is excluded here }
                  if (taicpu(p).condition<>C_None) and
                    not(GenerateThumbCode) then
                    begin
                      { check for
                        Bxx xxx
                        <several instructions>
                        xxx:
                      }
                      l:=0;
                      WasLast:=False;
                      GetNextInstruction(p, hp1);
                      { count the predicable instructions (at most 5) between
                        the branch and its target label }
                      while assigned(hp1) and
                        (l<=4) and
                        CanBeCond(hp1) and
                        { stop on labels }
                        not(hp1.typ=ait_label) and
                        { avoid that we cannot recognize the case BccB2Cond }
                        not((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B)) do
                        begin
                          inc(l);
                          if MustBeLast(hp1) then
                            begin
                              WasLast:=True;
                              GetNextInstruction(hp1,hp1);
                              break;
                            end
                          else
                            GetNextInstruction(hp1,hp1);
                        end;
                      if assigned(hp1) then
                        begin
                          if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
                            begin
                              if (l<=4) and (l>0) then
                                begin
                                  { Bcc2Cond: predicate the skipped block with
                                    the inverse condition, then drop the branch }
                                  condition:=inverse_cond(taicpu(p).condition);
                                  hp2:=p;
                                  GetNextInstruction(p,hp1);
                                  p:=hp1;
                                  repeat
                                    if hp1.typ=ait_instruction then
                                      taicpu(hp1).condition:=condition;
                                    if MustBeLast(hp1) then
                                      begin
                                        GetNextInstruction(hp1,hp1);
                                        break;
                                      end
                                    else
                                      GetNextInstruction(hp1,hp1);
                                  until not(assigned(hp1)) or
                                    not(CanBeCond(hp1)) or
                                    (hp1.typ=ait_label);
                                  DebugMsg('Peephole Bcc2Cond done',hp2);
                                  { wait with removing else GetNextInstruction could
                                    ignore the label if it was the only usage in the
                                    jump moved away }
                                  tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                  asml.remove(hp2);
                                  hp2.free;
                                  continue;
                                end;
                            end
                          else
                            { do not perform further optimizations if there is an
                              instruction in block #1 which can not be optimized.
                            }
                            if not WasLast then
                              begin
                                { check further for
                                  Bcc xxx
                                  <several instructions 1>
                                  B yyy
                                  xxx:
                                  <several instructions 2>
                                  yyy:
                                }
                                { hp2 points to jmp yyy }
                                hp2:=hp1;
                                { skip hp1 to xxx }
                                GetNextInstruction(hp1, hp1);
                                if assigned(hp2) and
                                  assigned(hp1) and
                                  (l<=3) and
                                  (hp2.typ=ait_instruction) and
                                  (taicpu(hp2).is_jmp) and
                                  (taicpu(hp2).condition=C_None) and
                                  { real label and jump, no further references to the
                                    label are allowed }
                                  (tasmlabel(taicpu(p).oper[0]^.ref^.symbol).getrefs=1) and
                                  FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
                                  begin
                                    l:=0;
                                    { skip hp1 to <several moves 2> }
                                    GetNextInstruction(hp1, hp1);
                                    { block #2 must also be short and predicable }
                                    while assigned(hp1) and
                                      CanBeCond(hp1) and
                                      (l<=3) do
                                      begin
                                        inc(l);
                                        if MustBeLast(hp1) then
                                          begin
                                            GetNextInstruction(hp1, hp1);
                                            break;
                                          end
                                        else
                                          GetNextInstruction(hp1, hp1);
                                      end;
                                    { hp1 points to yyy: }
                                    if assigned(hp1) and
                                      FindLabel(tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol),hp1) then
                                      begin
                                        { BccB2Cond: block #1 gets the inverse
                                          condition of the Bcc ... }
                                        condition:=inverse_cond(taicpu(p).condition);
                                        GetNextInstruction(p,hp1);
                                        hp3:=p;
                                        p:=hp1;
                                        repeat
                                          if hp1.typ=ait_instruction then
                                            taicpu(hp1).condition:=condition;
                                          if MustBeLast(hp1) then
                                            begin
                                              GetNextInstruction(hp1, hp1);
                                              break;
                                            end
                                          else
                                            GetNextInstruction(hp1, hp1);
                                        until not(assigned(hp1)) or
                                          not(CanBeCond(hp1)) or
                                          ((hp1.typ=ait_instruction) and (taicpu(hp1).opcode=A_B));
                                        { hp2 is still at jmp yyy }
                                        GetNextInstruction(hp2,hp1);
                                        { hp1 is now at xxx: }
                                        { ... and block #2 the original condition }
                                        condition:=inverse_cond(condition);
                                        GetNextInstruction(hp1,hp1);
                                        { hp1 is now at <several movs 2> }
                                        repeat
                                          if hp1.typ=ait_instruction then
                                            taicpu(hp1).condition:=condition;
                                          GetNextInstruction(hp1,hp1);
                                        until not(assigned(hp1)) or
                                          not(CanBeCond(hp1)) or
                                          (hp1.typ=ait_label);
                                        DebugMsg('Peephole BccB2Cond done',hp3);
                                        { remove Bcc }
                                        tasmlabel(taicpu(hp3).oper[0]^.ref^.symbol).decrefs;
                                        asml.remove(hp3);
                                        hp3.free;
                                        { remove B }
                                        tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
                                        asml.remove(hp2);
                                        hp2.free;
                                        continue;
                                      end;
                                  end;
                              end;
                        end;
                    end;
                else
                  ;
              end;
            end;
          else
            ;
        end;
        p := tai(p.next)
      end;
  end;
  1978. function TCpuAsmOptimizer.RegInInstruction(Reg: TRegister; p1: tai): Boolean;
  1979. begin
  1980. If (p1.typ = ait_instruction) and (taicpu(p1).opcode=A_BL) then
  1981. Result:=true
  1982. else If MatchInstruction(p1, [A_LDR, A_STR], [], [PF_D]) and
  1983. (getsupreg(taicpu(p1).oper[0]^.reg)+1=getsupreg(reg)) then
  1984. Result:=true
  1985. else
  1986. Result:=inherited RegInInstruction(Reg, p1);
  1987. end;
const
  { set of opcode which might or do write to memory }
  { branch/call opcodes are included as well — presumably conservatively,
    since the code they transfer to may perform stores (TODO confirm) }
  { TODO : extend armins.dat to contain r/w info }
  opcode_could_mem_write = [A_B,A_BL,A_BLX,A_BKPT,A_BX,A_STR,A_STRB,A_STRBT,
    A_STRH,A_STRT,A_STF,A_SFM,A_STM,A_FSTS,A_FSTD,A_VSTR,A_VSTM];
  1993. { adjust the register live information when swapping the two instructions p and hp1,
  1994. they must follow one after the other }
  1995. procedure TCpuPreRegallocScheduler.SwapRegLive(p,hp1 : taicpu);
  1996. procedure CheckLiveEnd(reg : tregister);
  1997. var
  1998. supreg : TSuperRegister;
  1999. regtype : TRegisterType;
  2000. begin
  2001. if reg=NR_NO then
  2002. exit;
  2003. regtype:=getregtype(reg);
  2004. supreg:=getsupreg(reg);
  2005. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_end[supreg]=hp1) and
  2006. RegInInstruction(reg,p) then
  2007. cg.rg[regtype].live_end[supreg]:=p;
  2008. end;
  2009. procedure CheckLiveStart(reg : TRegister);
  2010. var
  2011. supreg : TSuperRegister;
  2012. regtype : TRegisterType;
  2013. begin
  2014. if reg=NR_NO then
  2015. exit;
  2016. regtype:=getregtype(reg);
  2017. supreg:=getsupreg(reg);
  2018. if assigned(cg.rg[regtype]) and (cg.rg[regtype].live_start[supreg]=p) and
  2019. RegInInstruction(reg,hp1) then
  2020. cg.rg[regtype].live_start[supreg]:=hp1;
  2021. end;
  2022. var
  2023. i : longint;
  2024. r : TSuperRegister;
  2025. begin
  2026. { assumption: p is directly followed by hp1 }
  2027. { if live of any reg used by p starts at p and hp1 uses this register then
  2028. set live start to hp1 }
  2029. for i:=0 to p.ops-1 do
  2030. case p.oper[i]^.typ of
  2031. Top_Reg:
  2032. CheckLiveStart(p.oper[i]^.reg);
  2033. Top_Ref:
  2034. begin
  2035. CheckLiveStart(p.oper[i]^.ref^.base);
  2036. CheckLiveStart(p.oper[i]^.ref^.index);
  2037. end;
  2038. Top_Shifterop:
  2039. CheckLiveStart(p.oper[i]^.shifterop^.rs);
  2040. Top_RegSet:
  2041. for r:=RS_R0 to RS_R15 do
  2042. if r in p.oper[i]^.regset^ then
  2043. CheckLiveStart(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2044. else
  2045. ;
  2046. end;
  2047. { if live of any reg used by hp1 ends at hp1 and p uses this register then
  2048. set live end to p }
  2049. for i:=0 to hp1.ops-1 do
  2050. case hp1.oper[i]^.typ of
  2051. Top_Reg:
  2052. CheckLiveEnd(hp1.oper[i]^.reg);
  2053. Top_Ref:
  2054. begin
  2055. CheckLiveEnd(hp1.oper[i]^.ref^.base);
  2056. CheckLiveEnd(hp1.oper[i]^.ref^.index);
  2057. end;
  2058. Top_Shifterop:
  2059. CheckLiveStart(hp1.oper[i]^.shifterop^.rs);
  2060. Top_RegSet:
  2061. for r:=RS_R0 to RS_R15 do
  2062. if r in hp1.oper[i]^.regset^ then
  2063. CheckLiveEnd(newreg(R_INTREGISTER,r,R_SUBWHOLE));
  2064. else
  2065. ;
  2066. end;
  2067. end;
{ Pre-register-allocation scheduler: when a load (hp1) directly follows an
  unrelated instruction (p) and the instruction after the load (hp2) consumes
  the loaded register, swap p and hp1 so the load is issued earlier and its
  latency is (partially) hidden. }
function TCpuPreRegallocScheduler.SchedulerPass1Cpu(var p: tai): boolean;
{ TODO : schedule also forward }
{ TODO : schedule distance > 1 }

  { returns true if p might be a load of a pc relative tls offset }
  function PossibleTLSLoad(const p: tai) : boolean;
    begin
      Result:=(p.typ=ait_instruction) and (taicpu(p).opcode=A_LDR) and (taicpu(p).oper[1]^.typ=top_ref) and (((taicpu(p).oper[1]^.ref^.base=NR_PC) and
        (taicpu(p).oper[1]^.ref^.index<>NR_NO)) or ((taicpu(p).oper[1]^.ref^.base<>NR_NO) and
        (taicpu(p).oper[1]^.ref^.index=NR_PC)));
    end;

  var
    hp1,hp2,hp3,hp4,hp5,insertpos : tai;
    list : TAsmList;
  begin
    result:=true;
    { temporary holding area for p and its associated regalloc/label tais
      while they are moved behind the load }
    list:=TAsmList.create;
    p:=BlockStart;
    while p<>BlockEnd Do
      begin
        if (p.typ=ait_instruction) and
          GetNextInstruction(p,hp1) and
          (hp1.typ=ait_instruction) and
          (taicpu(hp1).opcode in [A_LDR,A_LDRB,A_LDRH,A_LDRSB,A_LDRSH]) and
          (taicpu(hp1).oppostfix in [PF_NONE, PF_B, PF_H, PF_SB, PF_SH]) and
          { for now we don't reschedule if the previous instruction changes potentially a memory location }
          ( (not(taicpu(p).opcode in opcode_could_mem_write) and
             not(RegModifiedByInstruction(NR_PC,p))
            ) or
            ((taicpu(p).opcode in [A_STM,A_STRB,A_STRH,A_STR]) and
             ((taicpu(hp1).oper[1]^.ref^.base=NR_PC) or
              (assigned(taicpu(hp1).oper[1]^.ref^.symboldata) and
              (taicpu(hp1).oper[1]^.ref^.offset=0)
              )
             ) or
             { try to prove that the memory accesses don't overlapp }
             ((taicpu(p).opcode in [A_STRB,A_STRH,A_STR]) and
              (taicpu(p).oper[1]^.typ = top_ref) and
              (taicpu(p).oper[1]^.ref^.base=taicpu(hp1).oper[1]^.ref^.base) and
              (taicpu(p).oppostfix=PF_None) and
              (taicpu(hp1).oppostfix=PF_None) and
              (taicpu(p).oper[1]^.ref^.index=NR_NO) and
              (taicpu(hp1).oper[1]^.ref^.index=NR_NO) and
              { get operand sizes and check if the offset distance is large enough to ensure no overlapp }
              (abs(taicpu(p).oper[1]^.ref^.offset-taicpu(hp1).oper[1]^.ref^.offset)>=max(tcgsize2size[reg_cgsize(taicpu(p).oper[0]^.reg)],tcgsize2size[reg_cgsize(taicpu(hp1).oper[0]^.reg)]))
             )
            )
          ) and
          GetNextInstruction(hp1,hp2) and
          (hp2.typ=ait_instruction) and
          { loaded register used by next instruction?
            if we ever support labels (they could be skipped in theory) here, the gnu2 tls general-dynamic code could get broken (the ldr before
            the bl may not be scheduled away from the bl) and it needs to be taken care of this case
          }
          (RegInInstruction(taicpu(hp1).oper[0]^.reg,hp2)) and
          { loaded register not used by previous instruction? }
          not(RegInInstruction(taicpu(hp1).oper[0]^.reg,p)) and
          { same condition? }
          (taicpu(p).condition=taicpu(hp1).condition) and
          { first instruction might not change the register used as base }
          ((taicpu(hp1).oper[1]^.ref^.base=NR_NO) or
           not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.base,p))
          ) and
          { first instruction might not change the register used as index }
          ((taicpu(hp1).oper[1]^.ref^.index=NR_NO) or
           not(RegModifiedByInstruction(taicpu(hp1).oper[1]^.ref^.index,p))
          ) and
          { if we modify the basereg AND the first instruction used that reg, we can not schedule }
          ((taicpu(hp1).oper[1]^.ref^.addressmode = AM_OFFSET) or
           not(instructionLoadsFromReg(taicpu(hp1).oper[1]^.ref^.base,p))) and
          not(PossibleTLSLoad(p)) and
          not(PossibleTLSLoad(hp1)) then
          begin
            hp3:=tai(p.Previous);
            hp5:=tai(p.next);
            asml.Remove(p);
            { if there is a reg. alloc/dealloc/sync instructions or address labels (e.g. for GOT-less PIC)
              associated with p, move it together with p }

            { before the instruction? }
            { find reg allocs,deallocs and PIC labels }
            while assigned(hp3) and (hp3.typ<>ait_instruction) do
              begin
                if ( (hp3.typ=ait_regalloc) and (tai_regalloc(hp3).ratype in [ra_alloc, ra_dealloc]) and
                  RegInInstruction(tai_regalloc(hp3).reg,p) )
                  or ( (hp3.typ=ait_label) and (tai_label(hp3).labsym.typ=AT_ADDR) )
                then
                  begin
                    hp4:=hp3;
                    hp3:=tai(hp3.Previous);
                    asml.Remove(hp4);
                    { Insert prepends, so the relative order of the moved tais
                      in front of p is preserved }
                    list.Insert(hp4);
                  end
                else
                  hp3:=tai(hp3.Previous);
              end;
            list.Concat(p);
            SwapRegLive(taicpu(p),taicpu(hp1));
            { after the instruction? }
            { find reg deallocs and reg syncs }
            while assigned(hp5) and (hp5.typ<>ait_instruction) do
              begin
                if (hp5.typ=ait_regalloc) and (tai_regalloc(hp5).ratype in [ra_dealloc, ra_sync]) and
                  RegInInstruction(tai_regalloc(hp5).reg,p) then
                  begin
                    hp4:=hp5;
                    hp5:=tai(hp5.next);
                    asml.Remove(hp4);
                    list.Concat(hp4);
                  end
                else
                  hp5:=tai(hp5.Next);
              end;
            asml.Remove(hp1);
            { if there are address labels associated with hp2, those must
              stay with hp2 (e.g. for GOT-less PIC) }
            insertpos:=hp2;
            while assigned(hp2.previous) and
              (tai(hp2.previous).typ<>ait_instruction) do
              begin
                hp2:=tai(hp2.previous);
                if (hp2.typ=ait_label) and
                  (tai_label(hp2).labsym.typ=AT_ADDR) then
                  insertpos:=hp2;
              end;
{$ifdef DEBUG_PREREGSCHEDULER}
            asml.insertbefore(tai_comment.Create(strpnew('Rescheduled')),insertpos);
{$endif DEBUG_PREREGSCHEDULER}
            { final order before hp2: hp1 (the load), then p and its
              companions — i.e. p and hp1 have been swapped }
            asml.InsertBefore(hp1,insertpos);
            asml.InsertListBefore(insertpos,list);
            p:=tai(p.next);
          end
        else if p.typ=ait_instruction then
          { NOTE(review): relies on GetNextInstruction having set hp1 even
            when the big condition above failed — verify against its contract }
          p:=hp1
        else
          p:=tai(p.next);
      end;
    list.Free;
  end;
  2205. procedure DecrementPreceedingIT(list: TAsmList; p: tai);
  2206. var
  2207. hp : tai;
  2208. l : longint;
  2209. begin
  2210. hp := tai(p.Previous);
  2211. l := 1;
  2212. while assigned(hp) and
  2213. (l <= 4) do
  2214. begin
  2215. if hp.typ=ait_instruction then
  2216. begin
  2217. if (taicpu(hp).opcode>=A_IT) and
  2218. (taicpu(hp).opcode <= A_ITTTT) then
  2219. begin
  2220. if (taicpu(hp).opcode = A_IT) and
  2221. (l=1) then
  2222. list.Remove(hp)
  2223. else
  2224. case taicpu(hp).opcode of
  2225. A_ITE:
  2226. if l=2 then taicpu(hp).opcode := A_IT;
  2227. A_ITT:
  2228. if l=2 then taicpu(hp).opcode := A_IT;
  2229. A_ITEE:
  2230. if l=3 then taicpu(hp).opcode := A_ITE;
  2231. A_ITTE:
  2232. if l=3 then taicpu(hp).opcode := A_ITT;
  2233. A_ITET:
  2234. if l=3 then taicpu(hp).opcode := A_ITE;
  2235. A_ITTT:
  2236. if l=3 then taicpu(hp).opcode := A_ITT;
  2237. A_ITEEE:
  2238. if l=4 then taicpu(hp).opcode := A_ITEE;
  2239. A_ITTEE:
  2240. if l=4 then taicpu(hp).opcode := A_ITTE;
  2241. A_ITETE:
  2242. if l=4 then taicpu(hp).opcode := A_ITET;
  2243. A_ITTTE:
  2244. if l=4 then taicpu(hp).opcode := A_ITTT;
  2245. A_ITEET:
  2246. if l=4 then taicpu(hp).opcode := A_ITEE;
  2247. A_ITTET:
  2248. if l=4 then taicpu(hp).opcode := A_ITTE;
  2249. A_ITETT:
  2250. if l=4 then taicpu(hp).opcode := A_ITET;
  2251. A_ITTTT:
  2252. begin
  2253. if l=4 then taicpu(hp).opcode := A_ITTT;
  2254. end
  2255. else
  2256. ;
  2257. end;
  2258. break;
  2259. end;
  2260. {else if (taicpu(hp).condition<>taicpu(p).condition) or
  2261. (taicpu(hp).condition<>inverse_cond(taicpu(p).condition)) then
  2262. break;}
  2263. inc(l);
  2264. end;
  2265. hp := tai(hp.Previous);
  2266. end;
  2267. end;
  2268. function TCpuThumb2AsmOptimizer.PeepHoleOptPass1Cpu(var p: tai): boolean;
  2269. var
  2270. hp : taicpu;
  2271. //hp1,hp2 : tai;
  2272. begin
  2273. result:=false;
  2274. if inherited PeepHoleOptPass1Cpu(p) then
  2275. result:=true
  2276. else if (p.typ=ait_instruction) and
  2277. MatchInstruction(p, A_STM, [C_None], [PF_FD,PF_DB]) and
  2278. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2279. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2280. ((taicpu(p).oper[1]^.regset^*[8..13,15])=[]) then
  2281. begin
  2282. DebugMsg('Peephole Stm2Push done', p);
  2283. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2284. AsmL.InsertAfter(hp, p);
  2285. asml.Remove(p);
  2286. p:=hp;
  2287. result:=true;
  2288. end
  2289. {else if (p.typ=ait_instruction) and
  2290. MatchInstruction(p, A_STR, [C_None], [PF_None]) and
  2291. (taicpu(p).oper[1]^.ref^.addressmode=AM_PREINDEXED) and
  2292. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2293. (taicpu(p).oper[1]^.ref^.offset=-4) and
  2294. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,14]) then
  2295. begin
  2296. DebugMsg('Peephole Str2Push done', p);
  2297. hp := taicpu.op_regset(A_PUSH, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2298. asml.InsertAfter(hp, p);
  2299. asml.Remove(p);
  2300. p.Free;
  2301. p:=hp;
  2302. result:=true;
  2303. end}
  2304. else if (p.typ=ait_instruction) and
  2305. MatchInstruction(p, A_LDM, [C_None], [PF_FD,PF_IA]) and
  2306. (taicpu(p).oper[0]^.ref^.addressmode=AM_PREINDEXED) and
  2307. (taicpu(p).oper[0]^.ref^.index=NR_STACK_POINTER_REG) and
  2308. ((taicpu(p).oper[1]^.regset^*[8..14])=[]) then
  2309. begin
  2310. DebugMsg('Peephole Ldm2Pop done', p);
  2311. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, taicpu(p).oper[1]^.regset^);
  2312. asml.InsertBefore(hp, p);
  2313. asml.Remove(p);
  2314. p.Free;
  2315. p:=hp;
  2316. result:=true;
  2317. end
  2318. {else if (p.typ=ait_instruction) and
  2319. MatchInstruction(p, A_LDR, [C_None], [PF_None]) and
  2320. (taicpu(p).oper[1]^.ref^.addressmode=AM_POSTINDEXED) and
  2321. (taicpu(p).oper[1]^.ref^.index=NR_STACK_POINTER_REG) and
  2322. (taicpu(p).oper[1]^.ref^.offset=4) and
  2323. (getsupreg(taicpu(p).oper[0]^.reg) in [0..7,15]) then
  2324. begin
  2325. DebugMsg('Peephole Ldr2Pop done', p);
  2326. hp := taicpu.op_regset(A_POP, R_INTREGISTER, R_SUBWHOLE, [getsupreg(taicpu(p).oper[0]^.reg)]);
  2327. asml.InsertBefore(hp, p);
  2328. asml.Remove(p);
  2329. p.Free;
  2330. p:=hp;
  2331. result:=true;
  2332. end}
  2333. else if (p.typ=ait_instruction) and
  2334. MatchInstruction(p, [A_AND], [], [PF_None]) and
  2335. (taicpu(p).ops = 2) and
  2336. (taicpu(p).oper[1]^.typ=top_const) and
  2337. ((taicpu(p).oper[1]^.val=255) or
  2338. (taicpu(p).oper[1]^.val=65535)) then
  2339. begin
  2340. DebugMsg('Peephole AndR2Uxt done', p);
  2341. if taicpu(p).oper[1]^.val=255 then
  2342. taicpu(p).opcode:=A_UXTB
  2343. else
  2344. taicpu(p).opcode:=A_UXTH;
  2345. taicpu(p).loadreg(1, taicpu(p).oper[0]^.reg);
  2346. result := true;
  2347. end
  2348. else if (p.typ=ait_instruction) and
  2349. MatchInstruction(p, [A_AND], [], [PF_None]) and
  2350. (taicpu(p).ops = 3) and
  2351. (taicpu(p).oper[2]^.typ=top_const) and
  2352. ((taicpu(p).oper[2]^.val=255) or
  2353. (taicpu(p).oper[2]^.val=65535)) then
  2354. begin
  2355. DebugMsg('Peephole AndRR2Uxt done', p);
  2356. if taicpu(p).oper[2]^.val=255 then
  2357. taicpu(p).opcode:=A_UXTB
  2358. else
  2359. taicpu(p).opcode:=A_UXTH;
  2360. taicpu(p).ops:=2;
  2361. result := true;
  2362. end
  2363. {else if (p.typ=ait_instruction) and
  2364. MatchInstruction(p, [A_CMP], [C_None], [PF_None]) and
  2365. (taicpu(p).oper[1]^.typ=top_const) and
  2366. (taicpu(p).oper[1]^.val=0) and
  2367. GetNextInstruction(p,hp1) and
  2368. (taicpu(hp1).opcode=A_B) and
  2369. (taicpu(hp1).condition in [C_EQ,C_NE]) then
  2370. begin
  2371. if taicpu(hp1).condition = C_EQ then
  2372. hp2:=taicpu.op_reg_ref(A_CBZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^)
  2373. else
  2374. hp2:=taicpu.op_reg_ref(A_CBNZ, taicpu(p).oper[0]^.reg, taicpu(hp1).oper[0]^.ref^);
  2375. taicpu(hp2).is_jmp := true;
  2376. asml.InsertAfter(hp2, hp1);
  2377. asml.Remove(hp1);
  2378. hp1.Free;
  2379. asml.Remove(p);
  2380. p.Free;
  2381. p := hp2;
  2382. result := true;
  2383. end}
  2384. end;
  2385. procedure TCpuThumb2AsmOptimizer.PeepHoleOptPass2;
  2386. var
  2387. p,hp1,hp2: tai;
  2388. l : longint;
  2389. condition : tasmcond;
  2390. { UsedRegs, TmpUsedRegs: TRegSet; }
  2391. begin
  2392. p := BlockStart;
  2393. { UsedRegs := []; }
  2394. while (p <> BlockEnd) Do
  2395. begin
  2396. { UpdateUsedRegs(UsedRegs, tai(p.next)); }
  2397. case p.Typ Of
  2398. Ait_Instruction:
  2399. begin
  2400. case taicpu(p).opcode Of
  2401. A_B:
  2402. if taicpu(p).condition<>C_None then
  2403. begin
  2404. { check for
  2405. Bxx xxx
  2406. <several instructions>
  2407. xxx:
  2408. }
  2409. l:=0;
  2410. GetNextInstruction(p, hp1);
  2411. while assigned(hp1) and
  2412. (l<=4) and
  2413. CanBeCond(hp1) and
  2414. { stop on labels }
  2415. not(hp1.typ=ait_label) do
  2416. begin
  2417. inc(l);
  2418. if MustBeLast(hp1) then
  2419. begin
  2420. //hp1:=nil;
  2421. GetNextInstruction(hp1,hp1);
  2422. break;
  2423. end
  2424. else
  2425. GetNextInstruction(hp1,hp1);
  2426. end;
  2427. if assigned(hp1) then
  2428. begin
  2429. if FindLabel(tasmlabel(taicpu(p).oper[0]^.ref^.symbol),hp1) then
  2430. begin
  2431. if (l<=4) and (l>0) then
  2432. begin
  2433. condition:=inverse_cond(taicpu(p).condition);
  2434. hp2:=p;
  2435. GetNextInstruction(p,hp1);
  2436. p:=hp1;
  2437. repeat
  2438. if hp1.typ=ait_instruction then
  2439. taicpu(hp1).condition:=condition;
  2440. if MustBeLast(hp1) then
  2441. begin
  2442. GetNextInstruction(hp1,hp1);
  2443. break;
  2444. end
  2445. else
  2446. GetNextInstruction(hp1,hp1);
  2447. until not(assigned(hp1)) or
  2448. not(CanBeCond(hp1)) or
  2449. (hp1.typ=ait_label);
  2450. { wait with removing else GetNextInstruction could
  2451. ignore the label if it was the only usage in the
  2452. jump moved away }
  2453. asml.InsertAfter(tai_comment.create(strpnew('Collapsed')), hp2);
  2454. DecrementPreceedingIT(asml, hp2);
  2455. case l of
  2456. 1: asml.InsertAfter(taicpu.op_cond(A_IT,condition), hp2);
  2457. 2: asml.InsertAfter(taicpu.op_cond(A_ITT,condition), hp2);
  2458. 3: asml.InsertAfter(taicpu.op_cond(A_ITTT,condition), hp2);
  2459. 4: asml.InsertAfter(taicpu.op_cond(A_ITTTT,condition), hp2);
  2460. end;
  2461. tasmlabel(taicpu(hp2).oper[0]^.ref^.symbol).decrefs;
  2462. asml.remove(hp2);
  2463. hp2.free;
  2464. continue;
  2465. end;
  2466. end;
  2467. end;
  2468. end;
  2469. else
  2470. ;
  2471. end;
  2472. end;
  2473. else
  2474. ;
  2475. end;
  2476. p := tai(p.next)
  2477. end;
  2478. end;
  2479. function TCpuThumb2AsmOptimizer.PostPeepHoleOptsCpu(var p: tai): boolean;
  2480. begin
  2481. result:=false;
  2482. if p.typ = ait_instruction then
  2483. begin
  2484. if MatchInstruction(p, A_MOV, [C_None], [PF_None]) and
  2485. (taicpu(p).oper[1]^.typ=top_const) and
  2486. (taicpu(p).oper[1]^.val >= 0) and
  2487. (taicpu(p).oper[1]^.val < 256) and
  2488. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2489. begin
  2490. DebugMsg('Peephole Mov2Movs done', p);
  2491. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2492. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2493. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2494. taicpu(p).oppostfix:=PF_S;
  2495. result:=true;
  2496. end
  2497. else if MatchInstruction(p, A_MVN, [C_None], [PF_None]) and
  2498. (taicpu(p).oper[1]^.typ=top_reg) and
  2499. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2500. begin
  2501. DebugMsg('Peephole Mvn2Mvns done', p);
  2502. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2503. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2504. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2505. taicpu(p).oppostfix:=PF_S;
  2506. result:=true;
  2507. end
  2508. else if MatchInstruction(p, A_RSB, [C_None], [PF_None]) and
  2509. (taicpu(p).ops = 3) and
  2510. (taicpu(p).oper[2]^.typ=top_const) and
  2511. (taicpu(p).oper[2]^.val=0) and
  2512. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2513. begin
  2514. DebugMsg('Peephole Rsb2Rsbs done', p);
  2515. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2516. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2517. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2518. taicpu(p).oppostfix:=PF_S;
  2519. result:=true;
  2520. end
  2521. else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
  2522. (taicpu(p).ops = 3) and
  2523. MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
  2524. (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
  2525. (taicpu(p).oper[2]^.typ=top_const) and
  2526. (taicpu(p).oper[2]^.val >= 0) and
  2527. (taicpu(p).oper[2]^.val < 256) and
  2528. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2529. begin
  2530. DebugMsg('Peephole AddSub2*s done', p);
  2531. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2532. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2533. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2534. taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
  2535. taicpu(p).oppostfix:=PF_S;
  2536. taicpu(p).ops := 2;
  2537. result:=true;
  2538. end
  2539. else if MatchInstruction(p, [A_ADD,A_SUB], [C_None], [PF_None]) and
  2540. (taicpu(p).ops = 2) and
  2541. (taicpu(p).oper[1]^.typ=top_reg) and
  2542. (not MatchOperand(taicpu(p).oper[0]^, NR_STACK_POINTER_REG)) and
  2543. (not MatchOperand(taicpu(p).oper[1]^, NR_STACK_POINTER_REG)) and
  2544. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2545. begin
  2546. DebugMsg('Peephole AddSub2*s done', p);
  2547. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2548. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2549. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2550. taicpu(p).oppostfix:=PF_S;
  2551. result:=true;
  2552. end
  2553. else if MatchInstruction(p, [A_ADD], [C_None], [PF_None]) and
  2554. (taicpu(p).ops = 3) and
  2555. MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
  2556. (taicpu(p).oper[2]^.typ=top_reg) then
  2557. begin
  2558. DebugMsg('Peephole AddRRR2AddRR done', p);
  2559. taicpu(p).ops := 2;
  2560. taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
  2561. result:=true;
  2562. end
  2563. else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_None]) and
  2564. (taicpu(p).ops = 3) and
  2565. MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
  2566. (taicpu(p).oper[2]^.typ=top_reg) and
  2567. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2568. begin
  2569. DebugMsg('Peephole opXXY2opsXY done', p);
  2570. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2571. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2572. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2573. taicpu(p).ops := 2;
  2574. taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg);
  2575. taicpu(p).oppostfix:=PF_S;
  2576. result:=true;
  2577. end
  2578. else if MatchInstruction(p, [A_AND,A_ORR,A_EOR,A_BIC,A_LSL,A_LSR,A_ASR,A_ROR], [C_None], [PF_S]) and
  2579. (taicpu(p).ops = 3) and
  2580. MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
  2581. (taicpu(p).oper[2]^.typ in [top_reg,top_const]) then
  2582. begin
  2583. DebugMsg('Peephole opXXY2opXY done', p);
  2584. taicpu(p).ops := 2;
  2585. if taicpu(p).oper[2]^.typ=top_reg then
  2586. taicpu(p).loadreg(1,taicpu(p).oper[2]^.reg)
  2587. else
  2588. taicpu(p).loadconst(1,taicpu(p).oper[2]^.val);
  2589. result:=true;
  2590. end
  2591. else if MatchInstruction(p, [A_AND,A_ORR,A_EOR], [C_None], [PF_None,PF_S]) and
  2592. (taicpu(p).ops = 3) and
  2593. MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[2]^) and
  2594. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2595. begin
  2596. DebugMsg('Peephole opXYX2opsXY done', p);
  2597. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2598. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2599. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2600. taicpu(p).oppostfix:=PF_S;
  2601. taicpu(p).ops := 2;
  2602. result:=true;
  2603. end
  2604. else if MatchInstruction(p, [A_MOV], [C_None], [PF_None]) and
  2605. (taicpu(p).ops=3) and
  2606. (taicpu(p).oper[2]^.typ=top_shifterop) and
  2607. (taicpu(p).oper[2]^.shifterop^.shiftmode in [SM_LSL,SM_LSR,SM_ASR,SM_ROR]) and
  2608. //MatchOperand(taicpu(p).oper[0]^, taicpu(p).oper[1]^) and
  2609. (not RegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs)) then
  2610. begin
  2611. DebugMsg('Peephole Mov2Shift done', p);
  2612. asml.InsertBefore(tai_regalloc.alloc(NR_DEFAULTFLAGS,p), p);
  2613. asml.InsertAfter(tai_regalloc.dealloc(NR_DEFAULTFLAGS,p), p);
  2614. IncludeRegInUsedRegs(NR_DEFAULTFLAGS,UsedRegs);
  2615. taicpu(p).oppostfix:=PF_S;
  2616. case taicpu(p).oper[2]^.shifterop^.shiftmode of
  2617. SM_LSL: taicpu(p).opcode:=A_LSL;
  2618. SM_LSR: taicpu(p).opcode:=A_LSR;
  2619. SM_ASR: taicpu(p).opcode:=A_ASR;
  2620. SM_ROR: taicpu(p).opcode:=A_ROR;
  2621. else
  2622. internalerror(2019050912);
  2623. end;
  2624. if taicpu(p).oper[2]^.shifterop^.rs<>NR_NO then
  2625. taicpu(p).loadreg(2, taicpu(p).oper[2]^.shifterop^.rs)
  2626. else
  2627. taicpu(p).loadconst(2, taicpu(p).oper[2]^.shifterop^.shiftimm);
  2628. result:=true;
  2629. end
  2630. end;
  2631. end;
  2632. begin
  2633. casmoptimizer:=TCpuAsmOptimizer;
  2634. cpreregallocscheduler:=TCpuPreRegallocScheduler;
  2635. End.