{
    Copyright (c) 1998-2002 by Florian Klaempfl

    Generate ARM assembler for math nodes

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit narmmat;

{$i fpcdefs.inc}

interface

    uses
      node,nmat,ncgmat;

    type
      tarmmoddivnode = class(tmoddivnode)
        function first_moddivint: tnode;override;
        procedure pass_generate_code;override;
      end;

      tarmnotnode = class(tcgnotnode)
        procedure second_boolean;override;
      end;

      tarmunaryminusnode = class(tcgunaryminusnode)
        function pass_1: tnode; override;
        procedure second_float;override;
      end;

      tarmshlshrnode = class(tcgshlshrnode)
        procedure second_64bit;override;
        function first_shlshr64bitint: tnode; override;
      end;

implementation

    uses
      globtype,compinnr,
      cutils,verbose,globals,constexp,
      aasmbase,aasmcpu,aasmtai,aasmdata,
      defutil,systems,
      symtype,symconst,symtable,
      cgbase,cgobj,hlcgobj,cgutils,
      pass_2,procinfo,
      ncon,ncnv,ncal,ninl,
      cpubase,cpuinfo,
      ncgutil,
      nadd,pass_1,symdef;
{*****************************************************************************
                             TARMMODDIVNODE
*****************************************************************************}

    function tarmmoddivnode.first_moddivint: tnode;
      var
        power : longint;
      begin
        { We can handle all cases of constant division }
        if not(cs_check_overflow in current_settings.localswitches) and
           (right.nodetype=ordconstn) and
           (nodetype=divn) and
           not(is_64bit(resultdef)) and
           { Only the ARM and Thumb-2 ISAs support umull and smull, which are
             required for the arbitrary division-by-constant optimization }
           (GenerateArmCode or
            GenerateThumb2Code or
            (ispowerof2(tordconstnode(right).value,power) or
             (tordconstnode(right).value=1) or
             (tordconstnode(right).value=int64(-1))
            )
           ) then
          result:=nil
        else if ((GenerateThumbCode or GenerateThumb2Code) and (CPUARM_HAS_THUMB_IDIV in cpu_capabilities[current_settings.cputype])) and
           (nodetype=divn) and
           not(is_64bit(resultdef)) then
          result:=nil
        else if ((GenerateThumbCode or GenerateThumb2Code) and (CPUARM_HAS_THUMB_IDIV in cpu_capabilities[current_settings.cputype])) and
           (nodetype=modn) and
           not(is_64bit(resultdef)) then
          begin
            if (right.nodetype=ordconstn) and
               ispowerof2(tordconstnode(right).value,power) and
               (tordconstnode(right).value<=256) and
               (tordconstnode(right).value>0) then
              result:=caddnode.create_internal(andn,left,cordconstnode.create(tordconstnode(right).value-1,sinttype,false))
            else
              begin
                result:=caddnode.create_internal(subn,left,caddnode.create_internal(muln,right,cmoddivnode.Create(divn,left.getcopy,right.getcopy)));
                right:=nil;
              end;
            left:=nil;
            firstpass(result);
          end
        else if (nodetype=modn) and
           (is_signed(left.resultdef)) and
           (right.nodetype=ordconstn) and
           (tordconstnode(right).value=2) then
          begin
            // result:=(0-(left and 1)) and (1+(sarlongint(left,31) shl 1))
            result:=caddnode.create_internal(andn,caddnode.create_internal(subn,cordconstnode.create(0,sinttype,false),caddnode.create_internal(andn,left,cordconstnode.create(1,sinttype,false))),
              caddnode.create_internal(addn,cordconstnode.create(1,sinttype,false),
                cshlshrnode.create(shln,cinlinenode.create(in_sar_x_y,false,ccallparanode.create(cordconstnode.create(31,sinttype,false),ccallparanode.Create(left.getcopy,nil))),cordconstnode.create(1,sinttype,false))));
            left:=nil;
            firstpass(result);
          end
        else
          result:=inherited first_moddivint;

        { we may not change the result type here }
        if assigned(result) and (torddef(result.resultdef).ordtype<>torddef(resultdef).ordtype) then
          inserttypeconv(result,resultdef);
      end;
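    { Worked example of the signed "x mod 2" rewrite above: (0-(x and 1)) is 0
      for even x and -1 (all bits set) for odd x, while (1+(sar(x,31) shl 1))
      is +1 for x >= 0 and -1 for x < 0. Anding the two yields the correctly
      signed remainder:
        x = 3  -> (-1) and (+1) = +1
        x = -3 -> (-1) and (-1) = -1
        x = -4 ->   0  and (-1) =  0 }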
    procedure tarmmoddivnode.pass_generate_code;
      var
        power : longint;
        numerator,
        helper1,
        helper2,
        resultreg : tregister;
        size : Tcgsize;
        so : tshifterop;

      procedure genOrdConstNodeDiv;
        begin
          if tordconstnode(right).value=0 then
            internalerror(2005061701)
          else if tordconstnode(right).value=1 then
            cg.a_load_reg_reg(current_asmdata.CurrAsmList, OS_INT, OS_INT, numerator, resultreg)
          else if (tordconstnode(right).value = int64(-1)) then
            begin
              { note: only possible in the signed case, may overflow }
              { x div -1 = -x: rsb computes 0-numerator }
              if cs_check_overflow in current_settings.localswitches then
                cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg_const(A_RSB,
                resultreg,numerator,0),toppostfix(ord(cs_check_overflow in current_settings.localswitches)*ord(PF_S))));
            end
          else if ispowerof2(tordconstnode(right).value,power) then
            begin
              if (is_signed(right.resultdef)) then
                begin
                  helper1:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
                  helper2:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
                  if power = 1 then
                    cg.a_load_reg_reg(current_asmdata.CurrAsmList,OS_INT,OS_INT,numerator,helper1)
                  else
                    cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SAR,OS_INT,31,numerator,helper1);
                  if GenerateThumbCode then
                    begin
                      cg.a_op_const_reg(current_asmdata.CurrAsmList,OP_SHR,OS_INT,32-power,helper1);
                      current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_ADD,helper2,numerator,helper1));
                    end
                  else
                    begin
                      shifterop_reset(so);
                      so.shiftmode:=SM_LSR;
                      so.shiftimm:=32-power;
                      current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg_shifterop(A_ADD,helper2,numerator,helper1,so));
                    end;
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SAR,OS_INT,power,helper2,resultreg);
                end
              else
                cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SHR,OS_INT,power,numerator,resultreg)
            end
          else if CPUARM_HAS_UMULL in cpu_capabilities[current_settings.cputype] then
            { Everything else is handled by the generic code }
            cg.g_div_const_reg_reg(current_asmdata.CurrAsmList,def_cgsize(resultdef),
              tordconstnode(right).value.svalue,numerator,resultreg)
          else
            internalerror(2019012601);
        end;
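      { The signed power-of-two path above implements round-towards-zero
        division by adding a bias of (2^power)-1 to negative numerators before
        the arithmetic shift, e.g. for -7 div 4 (power=2):
          helper1 := sar(-7,31)            = $FFFFFFFF
          helper2 := -7 + (helper1 shr 30) = -7 + 3 = -4
          result  := sar(-4,2)             = -1 }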
{
      procedure genOrdConstNodeMod;
        var
          modreg, maskreg, tempreg : tregister;
        begin
          if (tordconstnode(right).value = 0) then
            internalerror(2005061702)
          else if (abs(tordconstnode(right).value.svalue) = 1) then
            begin
              // x mod +/-1 is always zero
              cg.a_load_const_reg(current_asmdata.CurrAsmList, OS_INT, 0, resultreg);
            end
          else if (ispowerof2(tordconstnode(right).value, power)) then
            begin
              if (is_signed(right.resultdef)) then
                begin
                  tempreg := cg.getintregister(current_asmdata.CurrAsmList, OS_INT);
                  maskreg := cg.getintregister(current_asmdata.CurrAsmList, OS_INT);
                  modreg := cg.getintregister(current_asmdata.CurrAsmList, OS_INT);

                  cg.a_load_const_reg(current_asmdata.CurrAsmList, OS_INT, abs(tordconstnode(right).value.svalue)-1, modreg);
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_SAR, OS_INT, 31, numerator, maskreg);
                  cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_AND, OS_INT, numerator, modreg, tempreg);
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_ANDC, maskreg, maskreg, modreg));
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const(A_SUBFIC, modreg, tempreg, 0));
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_SUBFE, modreg, modreg, modreg));
                  cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_AND, OS_INT, modreg, maskreg, maskreg);
                  cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_OR, OS_INT, maskreg, tempreg, resultreg);
                end
              else
                begin
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_AND, OS_INT, tordconstnode(right).value.svalue-1, numerator, resultreg);
                end;
            end
          else
            begin
              genOrdConstNodeDiv();
              cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_MUL, OS_INT, tordconstnode(right).value.svalue, resultreg, resultreg);
              cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_SUB, OS_INT, resultreg, numerator, resultreg);
            end;
        end;
}
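      { Note: the disabled genOrdConstNodeMod above uses PowerPC mnemonics
        (A_ANDC, A_SUBFIC, A_SUBFE); it appears to have been carried over from
        the PowerPC code generator and cannot be assembled for ARM as-is. }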
      begin
        secondpass(left);
        secondpass(right);

        if ((GenerateThumbCode or GenerateThumb2Code) and (CPUARM_HAS_THUMB_IDIV in cpu_capabilities[current_settings.cputype])) and
           (nodetype=divn) and
           not(is_64bitint(resultdef)) then
          begin
            size:=def_cgsize(left.resultdef);
            hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);
            location_copy(location,left.location);
            location.loc := LOC_REGISTER;
            location.register := cg.getintregister(current_asmdata.CurrAsmList,size);
            resultreg:=location.register;
            if (right.nodetype=ordconstn) and
               ((tordconstnode(right).value=1) or
                (tordconstnode(right).value=int64(-1)) or
                (tordconstnode(right).value=0) or
                ispowerof2(tordconstnode(right).value,power)) then
              begin
                numerator:=left.location.register;
                genOrdConstNodeDiv;
              end
            else
              begin
                hlcg.location_force_reg(current_asmdata.CurrAsmList,right.location,right.resultdef,left.resultdef,true);
                if is_signed(left.resultdef) or
                   is_signed(right.resultdef) then
                  cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList,OP_IDIV,OS_INT,right.location.register,left.location.register,location.register)
                else
                  cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList,OP_DIV,OS_INT,right.location.register,left.location.register,location.register);
              end;
          end
        else
          begin
            location_copy(location,left.location);

            { put numerator in register }
            size:=def_cgsize(left.resultdef);
            hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,
              left.resultdef,left.resultdef,true);
            location_copy(location,left.location);
            numerator:=location.register;
            resultreg:=location.register;
            if location.loc=LOC_CREGISTER then
              begin
                location.loc := LOC_REGISTER;
                location.register := cg.getintregister(current_asmdata.CurrAsmList,size);
                resultreg:=location.register;
              end
            else if (nodetype=modn) or (right.nodetype=ordconstn) then
              begin
                { for a modulus op, and for const nodes we need the result
                  register to be an extra register }
                resultreg:=cg.getintregister(current_asmdata.CurrAsmList,size);
              end;

            if (right.nodetype=ordconstn) then
              begin
                if nodetype=divn then
                  genOrdConstNodeDiv
                else
                  { genOrdConstNodeMod is disabled (see the commented-out
                    implementation above); constant modn nodes are expected to
                    have been rewritten in first_moddivint }
                  ;
              end;

            location.register:=resultreg;
          end;

        { unsigned division/modulo can only overflow in case of division by
          zero (but checking this overflow flag is more convoluted than
          performing a simple comparison with 0) }
        if is_signed(right.resultdef) then
          cg.g_overflowcheck(current_asmdata.CurrAsmList,location,resultdef);
      end;
{*****************************************************************************
                               TARMNOTNODE
*****************************************************************************}

    procedure tarmnotnode.second_boolean;
      var
        tmpreg : TRegister;
      begin
        { if the location is LOC_JUMP, we do the secondpass after the
          labels are allocated }
        if not handle_locjump then
          begin
            secondpass(left);
            case left.location.loc of
              LOC_FLAGS :
                begin
                  location_copy(location,left.location);
                  inverse_flags(location.resflags);
                end;
              LOC_REGISTER,LOC_CREGISTER,LOC_REFERENCE,LOC_CREFERENCE,
              LOC_SUBSETREG,LOC_CSUBSETREG,LOC_SUBSETREF,LOC_CSUBSETREF :
                begin
                  hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);
                  cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                  if is_64bit(resultdef) then
                    begin
                      tmpreg:=cg.GetIntRegister(current_asmdata.CurrAsmList,OS_INT);
                      { OR low and high parts together }
                      current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg_reg(A_ORR,tmpreg,left.location.register64.reglo,left.location.register64.reghi),PF_S));
                    end
                  else
                    current_asmdata.CurrAsmList.concat(taicpu.op_reg_const(A_CMP,left.location.register,0));
                  location_reset(location,LOC_FLAGS,OS_NO);
                  location.resflags:=F_EQ;
                end;
              else
                internalerror(2003042401);
            end;
          end;
      end;
{*****************************************************************************
                             TARMUNARYMINUSNODE
*****************************************************************************}

    function tarmunaryminusnode.pass_1: tnode;
      var
        procname: string[31];
        fdef : tdef;
      begin
        if (FPUARM_HAS_VFP_DOUBLE in fpu_capabilities[current_settings.fputype]) or
           (target_info.system = system_arm_wince) or
           is_single(resultdef) then
          exit(inherited pass_1);

        result:=nil;
        firstpass(left);
        if codegenerror then
          exit;

        { if we get here and VFP support is on, no 64-bit VFP operations are
          available, so in this case the software version needs to be called }
        if (left.resultdef.typ=floatdef) and ((current_settings.fputype=fpu_soft) or
           (FPUARM_HAS_VFP_EXTENSION in fpu_capabilities[current_settings.fputype])) then
          begin
            case tfloatdef(resultdef).floattype of
              s64real:
                begin
                  procname:='float64_sub';
                  fdef:=search_system_type('FLOAT64').typedef;
                end;
              else
                internalerror(2005082801);
            end;
            { the parameter list is built in reverse order, so this calls
              float64_sub(0.0,left), i.e. the negation of left }
            result:=ctypeconvnode.create_internal(ccallnode.createintern(procname,ccallparanode.create(
              ctypeconvnode.create_internal(left,fdef),
              ccallparanode.create(ctypeconvnode.create_internal(crealconstnode.create(0,resultdef),fdef),nil))),resultdef);
            left:=nil;
          end
        else
          begin
            if (left.resultdef.typ=floatdef) then
              expectloc:=LOC_FPUREGISTER
            else if (left.resultdef.typ=orddef) then
              expectloc:=LOC_REGISTER;
          end;
      end;
    procedure tarmunaryminusnode.second_float;
      var
        pf: TOpPostfix;
      begin
        secondpass(left);
        case current_settings.fputype of
          fpu_fpa,
          fpu_fpa10,
          fpu_fpa11:
            begin
              hlcg.location_force_fpureg(current_asmdata.CurrAsmList,left.location,left.resultdef,false);
              location:=left.location;
              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg_const(A_RSF,
                location.register,left.location.register,0),
                cgsize2fpuoppostfix[def_cgsize(resultdef)]));
            end;
          fpu_soft:
            begin
              hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,false);
              location:=left.location;
              case location.size of
                OS_32:
                  cg.a_op_const_reg(current_asmdata.CurrAsmList,OP_XOR,OS_32,tcgint($80000000),location.register);
                OS_64:
                  { the sign bit lives in the high word of the double }
                  cg.a_op_const_reg(current_asmdata.CurrAsmList,OP_XOR,OS_32,tcgint($80000000),location.registerhi);
                else
                  internalerror(2014033101);
              end;
            end
          else if FPUARM_HAS_VFP_DOUBLE in fpu_capabilities[init_settings.fputype] then
            begin
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
              location:=left.location;
              if (left.location.loc=LOC_CMMREGISTER) then
                location.register:=cg.getmmregister(current_asmdata.CurrAsmList,location.size);
              if (tfloatdef(left.resultdef).floattype=s32real) then
                pf:=PF_F32
              else
                pf:=PF_F64;
              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg(A_VNEG,
                location.register,left.location.register), pf));
              cg.maybe_check_for_fpu_exception(current_asmdata.CurrAsmList);
            end
          else if FPUARM_HAS_VFP_EXTENSION in fpu_capabilities[init_settings.fputype] then
            begin
              hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
              location:=left.location;
              if (left.location.loc=LOC_CMMREGISTER) then
                location.register:=cg.getmmregister(current_asmdata.CurrAsmList,location.size);
              { without the double extension only single precision is available }
              current_asmdata.CurrAsmList.concat(setoppostfix(taicpu.op_reg_reg(A_VNEG,
                location.register,left.location.register), PF_F32));
              cg.maybe_check_for_fpu_exception(current_asmdata.CurrAsmList);
            end
          else
            internalerror(2009112602);
        end;
      end;
    function tarmshlshrnode.first_shlshr64bitint: tnode;
      begin
        if GenerateThumbCode or GenerateThumb2Code then
          result:=inherited
        else
          result:=nil;
      end;
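    { Returning nil above means the 64-bit shift is generated inline by
      second_64bit below; in Thumb mode the generic handling is used instead. }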
    procedure tarmshlshrnode.second_64bit;
      var
        v : TConstExprInt;
        so: tshifterop;
        lreg, resreg: TRegister64;

      procedure emit_instr(p: tai);
        begin
          current_asmdata.CurrAsmList.concat(p);
        end;

      { This code is built as if it were always called with sm=SM_LSR; for
        SM_LSL the dst* and src* arguments have to be reversed }
      procedure shift_less_than_32(srchi, srclo, dsthi, dstlo: TRegister; shiftval: Byte; sm: TShiftMode);
        begin
          shifterop_reset(so);
          so.shiftimm:=shiftval;
          so.shiftmode:=sm;
          emit_instr(taicpu.op_reg_reg_shifterop(A_MOV, dstlo, srclo, so));
          emit_instr(taicpu.op_reg_reg_shifterop(A_MOV, dsthi, srchi, so));
          if sm = SM_LSR then so.shiftmode:=SM_LSL else so.shiftmode:=SM_LSR;
          so.shiftimm:=32-shiftval;
          emit_instr(taicpu.op_reg_reg_reg_shifterop(A_ORR, dstlo, dstlo, srchi, so));
        end;
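      { For a 64-bit logical shift right by n (1 < n < 32) this expands to
          dstlo := (srclo shr n) or (srchi shl (32-n));
          dsthi := srchi shr n;
        i.e. the bits shifted out of the high word are folded into the low
        word. }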
      { This code is built as if it were always called with sm=SM_LSR; for
        SM_LSL the dst* and src* arguments have to be reversed.
        This will generate:
          mov   shiftval1, shiftval
          cmp   shiftval1, #64
          movcs shiftval1, #64
          rsb   shiftval2, shiftval1, #32
          mov   dstlo, srclo, lsr shiftval1
          mov   dsthi, srchi, lsr shiftval1
          orr   dstlo, dstlo, srchi, lsl shiftval2
          subs  shiftval2, shiftval1, #32
          movpl dstlo, srchi, lsr shiftval2
      }
      procedure shift_by_variable(srchi, srclo, dsthi, dstlo, shiftval: TRegister; sm: TShiftMode);
        var
          shiftval1,shiftval2 : TRegister;
        begin
          shifterop_reset(so);
          shiftval1:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
          shiftval2:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
          cg.a_load_reg_reg(current_asmdata.CurrAsmList, OS_INT, OS_INT, shiftval, shiftval1);
          { The ARM barrel shifter only considers the lower 8 bits of a
            register shift amount, so clamp shift counts >= 64 to 64 }
          cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
          emit_instr(taicpu.op_reg_const(A_CMP, shiftval1, 64));
          emit_instr(setcondition(taicpu.op_reg_const(A_MOV, shiftval1, 64), C_CS));
          cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
          { Calculate how much the upper register needs to be shifted left }
          emit_instr(taicpu.op_reg_reg_const(A_RSB, shiftval2, shiftval1, 32));
          so.shiftmode:=sm;
          so.rs:=shiftval1;
          { Shift and zero-fill the hi and lo registers }
          emit_instr(taicpu.op_reg_reg_shifterop(A_MOV, dstlo, srclo, so));
          emit_instr(taicpu.op_reg_reg_shifterop(A_MOV, dsthi, srchi, so));
          { Fold in the lower 32-shiftval bits }
          if sm = SM_LSR then so.shiftmode:=SM_LSL else so.shiftmode:=SM_LSR;
          so.rs:=shiftval2;
          emit_instr(taicpu.op_reg_reg_reg_shifterop(A_ORR, dstlo, dstlo, srchi, so));
          { For shift counts >= 32 the result comes entirely from the other
            source word: shiftval1-32 is then non-negative and the movpl applies }
          cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
          emit_instr(setoppostfix(taicpu.op_reg_reg_const(A_SUB, shiftval2, shiftval1, 32), PF_S));
          so.shiftmode:=sm;
          emit_instr(setcondition(taicpu.op_reg_reg_shifterop(A_MOV, dstlo, srchi, so), C_PL));
          cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
        end;
      begin
        if GenerateThumbCode or GenerateThumb2Code then
          begin
            inherited;
            exit;
          end;

        location_reset(location,LOC_REGISTER,def_cgsize(resultdef));
        location.register64.reghi:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
        location.register64.reglo:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);

        { load left operand in a register }
        if not(left.location.loc in [LOC_CREGISTER,LOC_REGISTER]) or
           (left.location.size<>OS_64) then
          hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,true);
        lreg := left.location.register64;
        resreg := location.register64;
        shifterop_reset(so);

        { shifting by a constant is directly coded: }
        if (right.nodetype=ordconstn) then
          begin
            v:=Tordconstnode(right).value and 63;
            { Single-bit shift }
            if v = 1 then
              if nodetype=shln then
                begin
                  { Shift left by one via two simple 32-bit additions }
                  cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                  emit_instr(setoppostfix(taicpu.op_reg_reg_reg(A_ADD, resreg.reglo, lreg.reglo, lreg.reglo), PF_S));
                  emit_instr(taicpu.op_reg_reg_reg(A_ADC, resreg.reghi, lreg.reghi, lreg.reghi));
                  cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                end
              else
                begin
                  { Shift right by first shifting hi by one and then using RRX
                    (rotate right extended), which rotates through the carry }
                  shifterop_reset(so); so.shiftmode:=SM_LSR; so.shiftimm:=1;
                  cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                  emit_instr(setoppostfix(taicpu.op_reg_reg_shifterop(A_MOV, resreg.reghi, lreg.reghi, so), PF_S));
                  so.shiftmode:=SM_RRX; so.shiftimm:=0; { RRX does NOT have a shift amount }
                  emit_instr(taicpu.op_reg_reg_shifterop(A_MOV, resreg.reglo, lreg.reglo, so));
                  cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                end
            { Shift by 32 or more: clear one register and use the cg to
              generate a normal 32-bit shift for the other }
            else if v >= 32 then
              if nodetype=shln then
                begin
                  emit_instr(taicpu.op_reg_const(A_MOV, resreg.reglo, 0));
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SHL,OS_32,v.uvalue-32,lreg.reglo,resreg.reghi);
                end
              else
                begin
                  emit_instr(taicpu.op_reg_const(A_MOV, resreg.reghi, 0));
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SHR,OS_32,v.uvalue-32,lreg.reghi,resreg.reglo);
                end
            { Shift by less than 32, that's the tricky one }
            else if (v < 32) and (v > 1) then
              if nodetype=shln then
                shift_less_than_32(lreg.reglo, lreg.reghi, resreg.reglo, resreg.reghi, v.uvalue, SM_LSL)
              else
                shift_less_than_32(lreg.reghi, lreg.reglo, resreg.reghi, resreg.reglo, v.uvalue, SM_LSR);
          end
        else
          begin
            { force the right operand into a register }
            if not(right.location.loc in [LOC_CREGISTER,LOC_REGISTER]) or
               (right.location.size<>OS_32) then
              hlcg.location_force_reg(current_asmdata.CurrAsmList,right.location,right.resultdef,u32inttype,true);
            if nodetype = shln then
              shift_by_variable(lreg.reglo, lreg.reghi, resreg.reglo, resreg.reghi, right.location.register, SM_LSL)
            else
              shift_by_variable(lreg.reghi, lreg.reglo, resreg.reghi, resreg.reglo, right.location.register, SM_LSR);
          end;
      end;
begin
  cmoddivnode:=tarmmoddivnode;
  cnotnode:=tarmnotnode;
  cunaryminusnode:=tarmunaryminusnode;
  cshlshrnode:=tarmshlshrnode;
end.