{
    Copyright (c) 1998-2002 by Florian Klaempfl

    Generate ARM assembler for math nodes

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit narmmat;

{$i fpcdefs.inc}

interface

    uses
      node,nmat,ncgmat;

    type
      tarmmoddivnode = class(tmoddivnode)
        function first_moddivint: tnode;override;
        procedure pass_generate_code;override;
      end;

      tarmnotnode = class(tcgnotnode)
        procedure second_boolean;override;
      end;

      tarmunaryminusnode = class(tcgunaryminusnode)
        function pass_1: tnode; override;
        procedure second_float;override;
      end;

      tarmshlshrnode = class(tcgshlshrnode)
        procedure second_64bit;override;
        function first_shlshr64bitint: tnode; override;
      end;
implementation

    uses
      globtype,
      cutils,verbose,globals,constexp,
      aasmbase,aasmcpu,aasmtai,aasmdata,
      defutil,
      symtype,symconst,symtable,
      cgbase,cgobj,hlcgobj,cgutils,
      pass_2,procinfo,
      ncon,ncnv,ncal,ninl,
      cpubase,cpuinfo,
      ncgutil,
      nadd,pass_1,symdef;
{*****************************************************************************
                             TARMMODDIVNODE
*****************************************************************************}
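    { first_moddivint picks the cheapest way to evaluate div/mod at pass-1 time:
      - 32 bit div by 1, -1 or a power of two: handled by the generic
        power-of-two code, so return nil;
      - cores with the Thumb integer divide extension (CPUARM_HAS_THUMB_IDIV):
        keep 32 bit div nodes for the hardware divide, and rewrite mod either
        as a mask (small power-of-two divisor) or as left-(left div right)*right;
      - signed "x mod 2": replaced by a branch-free and/shift node tree;
      everything else falls back to the inherited, helper based implementation. }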
    function tarmmoddivnode.first_moddivint: tnode;
      var
        power : longint;
      begin
        if not(cs_check_overflow in current_settings.localswitches) and
          (right.nodetype=ordconstn) and
          (nodetype=divn) and
          (ispowerof2(tordconstnode(right).value,power) or
           (tordconstnode(right).value=1) or
           (tordconstnode(right).value=int64(-1))
          ) and
          not(is_64bitint(resultdef)) then
          result:=nil
        else if ((GenerateThumbCode or GenerateThumb2Code) and (CPUARM_HAS_THUMB_IDIV in cpu_capabilities[current_settings.cputype])) and
          (nodetype=divn) and
          not(is_64bitint(resultdef)) then
          result:=nil
        else if ((GenerateThumbCode or GenerateThumb2Code) and (CPUARM_HAS_THUMB_IDIV in cpu_capabilities[current_settings.cputype])) and
          (nodetype=modn) and
          not(is_64bitint(resultdef)) then
          begin
            if (right.nodetype=ordconstn) and
              ispowerof2(tordconstnode(right).value,power) and
              (tordconstnode(right).value<=256) and
              (tordconstnode(right).value>0) then
              result:=caddnode.create_internal(andn,left,cordconstnode.create(tordconstnode(right).value-1,sinttype,false))
            else
              begin
                result:=caddnode.create_internal(subn,left,caddnode.create_internal(muln,right,cmoddivnode.Create(divn,left.getcopy,right.getcopy)));
                right:=nil;
              end;
            left:=nil;
            firstpass(result);
          end
        else if (nodetype=modn) and
          (is_signed(left.resultdef)) and
          (right.nodetype=ordconstn) and
          (tordconstnode(right).value=2) then
          begin
            { branch-free signed x mod 2:
                result:=(0-(left and 1)) and (1+(sarlongint(left,31) shl 1))
              the first operand is 0 for even x and all-ones for odd x, the
              second is +1 for x>=0 and -1 for x<0, so the "and" yields 0, 1
              or -1 with the sign of the dividend }
            result:=caddnode.create_internal(andn,caddnode.create_internal(subn,cordconstnode.create(0,sinttype,false),caddnode.create_internal(andn,left,cordconstnode.create(1,sinttype,false))),
              caddnode.create_internal(addn,cordconstnode.create(1,sinttype,false),
                cshlshrnode.create(shln,cinlinenode.create(in_sar_x_y,false,ccallparanode.create(cordconstnode.create(31,sinttype,false),ccallparanode.Create(left.getcopy,nil))),cordconstnode.create(1,sinttype,false))));
            left:=nil;
            firstpass(result);
          end
        else
          result:=inherited first_moddivint;

        { we may not change the result type here }
        if assigned(result) and (torddef(result.resultdef).ordtype<>torddef(resultdef).ordtype) then
          inserttypeconv(result,resultdef);
      end;
    procedure tarmmoddivnode.pass_generate_code;
      var
        power : longint;
        numerator,
        helper1,
        helper2,
        resultreg : tregister;
        size : Tcgsize;
        so : tshifterop;
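      { genOrdConstNodeDiv emits inline code for division by a constant.
        For signed division by 2^power a bias of 2^power-1 is added to negative
        numerators before the arithmetic shift, so the quotient is truncated
        towards zero as Pascal requires; roughly (ARM encoding shown, the Thumb
        path does the logical shift in a separate instruction):
          asr  helper1,numerator,#31                       ; 0 or -1
          add  helper2,numerator,helper1,lsr #(32-power)   ; +0 or +(2^power-1)
          asr  resultreg,helper2,#power }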
      procedure genOrdConstNodeDiv;
        begin
          if tordconstnode(right).value=0 then
            internalerror(2005061701)
          else if tordconstnode(right).value=1 then
            cg.a_load_reg_reg(current_asmdata.CurrAsmList, OS_INT, OS_INT, numerator, resultreg)
          else if (tordconstnode(right).value = int64(-1)) then
            begin
              // note: only possible in the signed case..., may overflow
              if cs_check_overflow in current_settings.localswitches then
                cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
              { x div -1 = -x }
              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg_const(A_RSB,
                resultreg,numerator,0),toppostfix(ord(cs_check_overflow in current_settings.localswitches)*ord(PF_S))));
            end
          else if ispowerof2(tordconstnode(right).value,power) then
            begin
              if (is_signed(right.resultdef)) then
                begin
                  helper1:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
                  helper2:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
                  if power = 1 then
                    cg.a_load_reg_reg(current_asmdata.CurrAsmList,OS_INT,OS_INT,numerator,helper1)
                  else
                    cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SAR,OS_INT,31,numerator,helper1);
                  if GenerateThumbCode then
                    begin
                      { logical shift: turns the sign mask into the 2^power-1 bias }
                      cg.a_op_const_reg(current_asmdata.CurrAsmList,OP_SHR,OS_INT,32-power,helper1);
                      current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_ADD,helper2,numerator,helper1));
                    end
                  else
                    begin
                      shifterop_reset(so);
                      so.shiftmode:=SM_LSR;
                      so.shiftimm:=32-power;
                      current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg_shifterop(A_ADD,helper2,numerator,helper1,so));
                    end;
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SAR,OS_INT,power,helper2,resultreg);
                end
              else
                cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SHR,OS_INT,power,numerator,resultreg)
            end;
        end;
{
      procedure genOrdConstNodeMod;
        var
          modreg, maskreg, tempreg : tregister;
        begin
          if (tordconstnode(right).value = 0) then begin
            internalerror(2005061702);
          end
          else if (abs(tordconstnode(right).value.svalue) = 1) then
            begin
              // x mod +/-1 is always zero
              cg.a_load_const_reg(current_asmdata.CurrAsmList, OS_INT, 0, resultreg);
            end
          else if (ispowerof2(tordconstnode(right).value, power)) then
            begin
              if (is_signed(right.resultdef)) then begin
                tempreg := cg.getintregister(current_asmdata.CurrAsmList, OS_INT);
                maskreg := cg.getintregister(current_asmdata.CurrAsmList, OS_INT);
                modreg := cg.getintregister(current_asmdata.CurrAsmList, OS_INT);

                cg.a_load_const_reg(current_asmdata.CurrAsmList, OS_INT, abs(tordconstnode(right).value.svalue)-1, modreg);
                cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_SAR, OS_INT, 31, numerator, maskreg);
                cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_AND, OS_INT, numerator, modreg, tempreg);

                current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_ANDC, maskreg, maskreg, modreg));
                current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const(A_SUBFIC, modreg, tempreg, 0));
                current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_SUBFE, modreg, modreg, modreg));
                cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_AND, OS_INT, modreg, maskreg, maskreg);
                cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_OR, OS_INT, maskreg, tempreg, resultreg);
              end else begin
                cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_AND, OS_INT, tordconstnode(right).value.svalue-1, numerator, resultreg);
              end;
            end else begin
              genOrdConstNodeDiv();
              cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_MUL, OS_INT, tordconstnode(right).value.svalue, resultreg, resultreg);
              cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_SUB, OS_INT, resultreg, numerator, resultreg);
            end;
        end;
}
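      { note: the disabled routine above still uses A_ANDC/A_SUBFIC/A_SUBFE,
        which are PowerPC opcodes rather than ARM ones, so it cannot simply be
        enabled here; the call site below is commented out accordingly }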
      begin
        secondpass(left);
        secondpass(right);

        if ((GenerateThumbCode or GenerateThumb2Code) and (CPUARM_HAS_THUMB_IDIV in cpu_capabilities[current_settings.cputype])) and
          (nodetype=divn) and
          not(is_64bitint(resultdef)) then
          begin
            size:=def_cgsize(left.resultdef);
            hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);
            location_copy(location,left.location);
            location.loc := LOC_REGISTER;
            location.register := cg.getintregister(current_asmdata.CurrAsmList,size);
            resultreg:=location.register;
            if (right.nodetype=ordconstn) and
              ((tordconstnode(right).value=1) or
               (tordconstnode(right).value=int64(-1)) or
               (tordconstnode(right).value=0) or
               ispowerof2(tordconstnode(right).value,power)) then
              begin
                numerator:=left.location.register;
                genOrdConstNodeDiv;
              end
            else
              begin
                hlcg.location_force_reg(current_asmdata.CurrAsmList,right.location,right.resultdef,left.resultdef,true);
                if is_signed(left.resultdef) or
                   is_signed(right.resultdef) then
                  cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList,OP_IDIV,OS_INT,right.location.register,left.location.register,location.register)
                else
                  cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList,OP_DIV,OS_INT,right.location.register,left.location.register,location.register);
              end;
          end
        else
          begin
            location_copy(location,left.location);

            { put numerator in register }
            size:=def_cgsize(left.resultdef);
            hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,
              left.resultdef,left.resultdef,true);
            location_copy(location,left.location);
            numerator:=location.register;
            resultreg:=location.register;
            if location.loc=LOC_CREGISTER then
              begin
                location.loc := LOC_REGISTER;
                location.register := cg.getintregister(current_asmdata.CurrAsmList,size);
                resultreg:=location.register;
              end
            else if (nodetype=modn) or (right.nodetype=ordconstn) then
              begin
                // for a modulus op, and for const nodes we need the result register
                // to be an extra register
                resultreg:=cg.getintregister(current_asmdata.CurrAsmList,size);
              end;

            if right.nodetype=ordconstn then
              begin
                if nodetype=divn then
                  genOrdConstNodeDiv
                else
                  // genOrdConstNodeMod;
              end;

            location.register:=resultreg;
          end;

        { unsigned division/modulo can only overflow in case of division by zero }
        { (but checking this overflow flag is more convoluted than performing a  }
        { simple comparison with 0)                                              }
        if is_signed(right.resultdef) then
          cg.g_overflowcheck(current_asmdata.CurrAsmList,location,resultdef);
      end;
{*****************************************************************************
                               TARMNOTNODE
*****************************************************************************}
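    { second_boolean implements boolean "not": if the operand is evaluated as a
      jump, its true and false labels are swapped around the secondpass; if it
      ends up in the flags, the flags are inverted; otherwise the value is
      forced into a register, compared with 0, and the result is the EQ flag. }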
    procedure tarmnotnode.second_boolean;
      var
        hl : tasmlabel;
      begin
        { if the location is LOC_JUMP, we do the secondpass after the
          labels are allocated
        }
        if left.expectloc=LOC_JUMP then
          begin
            hl:=current_procinfo.CurrTrueLabel;
            current_procinfo.CurrTrueLabel:=current_procinfo.CurrFalseLabel;
            current_procinfo.CurrFalseLabel:=hl;
            secondpass(left);
            if left.location.loc<>LOC_JUMP then
              internalerror(2012081305);
            maketojumpbool(current_asmdata.CurrAsmList,left,lr_load_regvars);
            hl:=current_procinfo.CurrTrueLabel;
            current_procinfo.CurrTrueLabel:=current_procinfo.CurrFalseLabel;
            current_procinfo.CurrFalseLabel:=hl;
            location.loc:=LOC_JUMP;
          end
        else
          begin
            secondpass(left);
            case left.location.loc of
              LOC_FLAGS :
                begin
                  location_copy(location,left.location);
                  inverse_flags(location.resflags);
                end;
              LOC_REGISTER,LOC_CREGISTER,LOC_REFERENCE,LOC_CREFERENCE,
              LOC_SUBSETREG,LOC_CSUBSETREG,LOC_SUBSETREF,LOC_CSUBSETREF :
                begin
                  hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);
                  cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_const(A_CMP,left.location.register,0));
                  location_reset(location,LOC_FLAGS,OS_NO);
                  location.resflags:=F_EQ;
                end;
              else
                internalerror(2003042401);
            end;
          end;
      end;
{*****************************************************************************
                           TARMUNARYMINUSNODE
*****************************************************************************}
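    { pass_1: the single precision only FPv4 unit (fpu_fpv4_s16) cannot negate
      a double in hardware, so for s64real operands the node is rewritten into
      a softfloat call computing 0.0-x via float64_sub; all other cases
      (including s32real operands) keep the inherited handling. }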
    function tarmunaryminusnode.pass_1: tnode;
      var
        procname: string[31];
        fdef : tdef;
      begin
        if (current_settings.fputype<>fpu_fpv4_s16) or
          (tfloatdef(resultdef).floattype=s32real) then
          exit(inherited pass_1);
        result:=nil;
        firstpass(left);
        if codegenerror then
          exit;

        if (left.resultdef.typ=floatdef) then
          begin
            case tfloatdef(resultdef).floattype of
              s64real:
                begin
                  procname:='float64_sub';
                  fdef:=search_system_type('FLOAT64').typedef;
                end;
              else
                internalerror(2005082801);
            end;
            result:=ctypeconvnode.create_internal(ccallnode.createintern(procname,ccallparanode.create(
              ctypeconvnode.create_internal(left,fdef),
              ccallparanode.create(ctypeconvnode.create_internal(crealconstnode.create(0,resultdef),fdef),nil))),resultdef);

            left:=nil;
          end
        else
          begin
            if (left.resultdef.typ=floatdef) then
              expectloc:=LOC_FPUREGISTER
            else if (left.resultdef.typ=orddef) then
              expectloc:=LOC_REGISTER;
          end;
      end;
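    { second_float: FPA negates with RSF (reverse subtract from 0), VFP uses
      FNEGS/FNEGD and FPv4 uses VNEG.F32; for VFP/FPv4 a fresh mm register is
      allocated when the operand lives in a register variable (LOC_CMMREGISTER)
      so that the variable itself is not overwritten. }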
    procedure tarmunaryminusnode.second_float;
      var
        op: tasmop;
      begin
        secondpass(left);
        case current_settings.fputype of
          fpu_fpa,
          fpu_fpa10,
          fpu_fpa11:
            begin
              location_force_fpureg(current_asmdata.CurrAsmList,left.location,false);
              location:=left.location;
              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg_const(A_RSF,
                location.register,left.location.register,0),
                cgsize2fpuoppostfix[def_cgsize(resultdef)]));
            end;
          fpu_vfpv2,
          fpu_vfpv3,
          fpu_vfpv3_d16:
            begin
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
              location:=left.location;
              if (left.location.loc=LOC_CMMREGISTER) then
                location.register:=cg.getmmregister(current_asmdata.CurrAsmList,location.size);
              if (location.size=OS_F32) then
                op:=A_FNEGS
              else
                op:=A_FNEGD;
              current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(op,
                location.register,left.location.register));
            end;
          fpu_fpv4_s16:
            begin
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
              location:=left.location;
              if (left.location.loc=LOC_CMMREGISTER) then
                location.register:=cg.getmmregister(current_asmdata.CurrAsmList,location.size);
              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg(A_VNEG,
                location.register,left.location.register), PF_F32));
            end
          else
            internalerror(2009112602);
        end;
      end;
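    { first_shlshr64bitint: in ARM mode 64 bit shifts are expanded inline by
      second_64bit below (return nil); in Thumb/Thumb-2 mode the inherited
      handling is used instead. }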
    function tarmshlshrnode.first_shlshr64bitint: tnode;
      begin
        if GenerateThumbCode or GenerateThumb2Code then //or
//          (right.nodetype <> ordconstn) then
          result:=inherited
        else
          result := nil;
      end;
    procedure tarmshlshrnode.second_64bit;
      var
        v : TConstExprInt;
        l1,l2,l3 : Tasmlabel;
        so : tshifterop;
        lreg, resreg : TRegister64;

      procedure emit_instr(p: tai);
        begin
          current_asmdata.CurrAsmList.concat(p);
        end;
      {This code is written as if it were always called with sm=SM_LSR; for SM_LSL the dst* and src* registers have to be reversed}
      procedure shift_less_than_32(srchi, srclo, dsthi, dstlo: TRegister; shiftval: Byte; sm: TShiftMode);
        begin
          shifterop_reset(so);
          so.shiftimm:=shiftval;
          so.shiftmode:=sm;
          emit_instr(taicpu.op_reg_reg_shifterop(A_MOV, dstlo, srclo, so));
          emit_instr(taicpu.op_reg_reg_shifterop(A_MOV, dsthi, srchi, so));
          if sm = SM_LSR then so.shiftmode:=SM_LSL else so.shiftmode:=SM_LSR;
          so.shiftimm:=32-shiftval;
          emit_instr(taicpu.op_reg_reg_reg_shifterop(A_ORR, dstlo, dstlo, srchi, so));
        end;
      {This code is written as if it were always called with sm=SM_LSR; for SM_LSL the dst* and src* registers have to be reversed.
       It generates:
         mov   shiftval1, shiftval
         cmp   shiftval1, #64
         movcs shiftval1, #64
         rsb   shiftval2, shiftval1, #32
         mov   dstlo, srclo, lsr shiftval1
         mov   dsthi, srchi, lsr shiftval1
         orr   dstlo, srchi, lsl shiftval2
         subs  shiftval2, shiftval1, #32
         movpl dstlo, srchi, lsr shiftval2
      }
      procedure shift_by_variable(srchi, srclo, dsthi, dstlo, shiftval: TRegister; sm: TShiftMode);
        var
          shiftval1,shiftval2 : TRegister;
        begin
          shifterop_reset(so);
          shiftval1:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
          shiftval2:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
          cg.a_load_reg_reg(current_asmdata.CurrAsmList, OS_INT, OS_INT, shiftval, shiftval1);

          {The ARM barrel shifter only considers the lower 8 bits of a register for the shift}
          cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
          emit_instr(taicpu.op_reg_const(A_CMP, shiftval1, 64));
          emit_instr(setcondition(taicpu.op_reg_const(A_MOV, shiftval1, 64), C_CS));
          cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);

          {Calculate how much the upper register needs to be shifted left}
          emit_instr(taicpu.op_reg_reg_const(A_RSB, shiftval2, shiftval1, 32));

          so.shiftmode:=sm;
          so.rs:=shiftval1;
          {Shift and zerofill the hi+lo register}
          emit_instr(taicpu.op_reg_reg_shifterop(A_MOV, dstlo, srclo, so));
          emit_instr(taicpu.op_reg_reg_shifterop(A_MOV, dsthi, srchi, so));

          {Fold in the lower 32-shiftval bits}
          if sm = SM_LSR then so.shiftmode:=SM_LSL else so.shiftmode:=SM_LSR;
          so.rs:=shiftval2;
          emit_instr(taicpu.op_reg_reg_reg_shifterop(A_ORR, dstlo, dstlo, srchi, so));

          cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
          emit_instr(setoppostfix(taicpu.op_reg_reg_const(A_SUB, shiftval2, shiftval1, 32), PF_S));
          so.shiftmode:=sm;
          emit_instr(setcondition(taicpu.op_reg_reg_shifterop(A_MOV, dstlo, srchi, so), C_PL));
          cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
        end;
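      { main body: constant shift counts are expanded without loops or helpers:
        a shift by 1 uses ADDS/ADC (shl) or LSR #1 followed by an RRX move
        (shr), shifts by 32..63 clear one half and shift the other with plain
        32 bit code, and shifts by 2..31 use shift_less_than_32; non-constant
        counts go through shift_by_variable. }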
      begin
        if GenerateThumbCode or GenerateThumb2Code then
          begin
            inherited;
            exit;
          end;

        location_reset(location,LOC_REGISTER,def_cgsize(resultdef));
        location.register64.reghi:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
        location.register64.reglo:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);

        { load left operator in a register }
        if not(left.location.loc in [LOC_CREGISTER,LOC_REGISTER]) or
           (left.location.size<>OS_64) then
          hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,true);

        lreg := left.location.register64;
        resreg := location.register64;
        shifterop_reset(so);

        { shifting by a constant directly coded: }
        if (right.nodetype=ordconstn) then
          begin
            v:=Tordconstnode(right).value and 63;
            {Single bit shift}
            if v = 1 then
              if nodetype=shln then
                begin
                  {Shift left by one via 2 simple 32bit additions}
                  cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                  emit_instr(setoppostfix(taicpu.op_reg_reg_reg(A_ADD, resreg.reglo, lreg.reglo, lreg.reglo), PF_S));
                  emit_instr(taicpu.op_reg_reg_reg(A_ADC, resreg.reghi, lreg.reghi, lreg.reghi));
                  cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                end
              else
                begin
                  {Shift right by first shifting hi by one and then using RRX (rotate right extended), which rotates through the carry}
                  shifterop_reset(so); so.shiftmode:=SM_LSR; so.shiftimm:=1;
                  cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                  emit_instr(setoppostfix(taicpu.op_reg_reg_shifterop(A_MOV, resreg.reghi, lreg.reghi, so), PF_S));
                  so.shiftmode:=SM_RRX; so.shiftimm:=0; {RRX does NOT have a shift amount}
                  emit_instr(taicpu.op_reg_reg_shifterop(A_MOV, resreg.reglo, lreg.reglo, so));
                  cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                end
            {Clear one register and use the cg to generate a normal 32-bit shift}
            else if v >= 32 then
              if nodetype=shln then
                begin
                  emit_instr(taicpu.op_reg_const(A_MOV, resreg.reglo, 0));
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SHL,OS_32,v.uvalue-32,lreg.reglo,resreg.reghi);
                end
              else
                begin
                  emit_instr(taicpu.op_reg_const(A_MOV, resreg.reghi, 0));
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SHR,OS_32,v.uvalue-32,lreg.reghi,resreg.reglo);
                end
            {Shift LESS than 32, that's the tricky one}
            else if (v < 32) and (v > 1) then
              if nodetype=shln then
                shift_less_than_32(lreg.reglo, lreg.reghi, resreg.reglo, resreg.reghi, v.uvalue, SM_LSL)
              else
                shift_less_than_32(lreg.reghi, lreg.reglo, resreg.reghi, resreg.reglo, v.uvalue, SM_LSR);
          end
        else
          begin
            { force right operator into a register }
            if not(right.location.loc in [LOC_CREGISTER,LOC_REGISTER]) or
               (right.location.size<>OS_32) then
              hlcg.location_force_reg(current_asmdata.CurrAsmList,right.location,right.resultdef,u32inttype,true);

            if nodetype = shln then
              shift_by_variable(lreg.reglo, lreg.reghi, resreg.reglo, resreg.reghi, right.location.register, SM_LSL)
            else
              shift_by_variable(lreg.reghi, lreg.reglo, resreg.reghi, resreg.reglo, right.location.register, SM_LSR);
          end;
      end;
begin
  cmoddivnode:=tarmmoddivnode;
  cnotnode:=tarmnotnode;
  cunaryminusnode:=tarmunaryminusnode;
  cshlshrnode:=tarmshlshrnode;
end.