{
    Copyright (c) 1998-2002 by Florian Klaempfl

    Generate x86 code for math nodes

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit nx86mat;

{$i fpcdefs.inc}

interface

  uses
    node,ncgmat;

  type
    tx86unaryminusnode = class(tcgunaryminusnode)
{$ifdef SUPPORT_MMX}
      procedure second_mmx;override;
{$endif SUPPORT_MMX}
      procedure second_float;override;
      function pass_1:tnode;override;
    end;

    tx86notnode = class(tcgnotnode)
      procedure second_boolean;override;
{$ifdef SUPPORT_MMX}
      procedure second_mmx;override;
{$endif SUPPORT_MMX}
    end;

    tx86moddivnode = class(tcgmoddivnode)
      procedure pass_generate_code;override;
    end;

    tx86shlshrnode = class(tcgshlshrnode)
{$ifdef SUPPORT_MMX}
      procedure second_mmx;override;
{$endif SUPPORT_MMX}
    end;

implementation

  uses
    globtype,
    constexp,
    cutils,verbose,globals,
    symconst,symdef,
    aasmbase,aasmtai,aasmcpu,aasmdata,defutil,
    cgbase,pass_1,pass_2,
    ncon,
    cpubase,cpuinfo,
    cga,cgobj,hlcgobj,cgx86,cgutils,
    tgobj;

{*****************************************************************************
                           TX86UNARYMINUSNODE
*****************************************************************************}

  function tx86unaryminusnode.pass_1 : tnode;
    begin
      result:=nil;
      firstpass(left);
      if codegenerror then
        exit;

      if (left.resultdef.typ=floatdef) then
        begin
          if use_vectorfpu(left.resultdef) then
            expectloc:=LOC_MMREGISTER
          else
            expectloc:=LOC_FPUREGISTER;
        end
{$ifdef SUPPORT_MMX}
      else
        if (cs_mmx in current_settings.localswitches) and
           is_mmx_able_array(left.resultdef) then
          begin
            expectloc:=LOC_MMXREGISTER;
          end
{$endif SUPPORT_MMX}
      else
        inherited pass_1;
    end;

{$ifdef SUPPORT_MMX}
  procedure tx86unaryminusnode.second_mmx;
    var
      op : tasmop;
      hreg : tregister;
    begin
      op:=A_NONE;
      secondpass(left);
      location_reset(location,LOC_MMXREGISTER,OS_NO);
      hreg:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
      emit_reg_reg(A_PXOR,S_NO,hreg,hreg);
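      { the negation is computed as 0-x: hreg has just been zeroed with PXOR,
        and the packed subtract selected further down subtracts the operand
        from it }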
      case left.location.loc of
        LOC_MMXREGISTER:
          begin
            location.register:=left.location.register;
          end;
        LOC_CMMXREGISTER:
          begin
            location.register:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
            emit_reg_reg(A_MOVQ,S_NO,left.location.register,location.register);
          end;
        LOC_REFERENCE,
        LOC_CREFERENCE:
          begin
            location.register:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
            emit_ref_reg(A_MOVQ,S_NO,left.location.reference,location.register);
          end;
        else
          internalerror(200203225);
      end;
      if cs_mmx_saturation in current_settings.localswitches then
        case mmx_type(resultdef) of
          mmxs8bit:
            op:=A_PSUBSB;
          mmxu8bit:
            op:=A_PSUBUSB;
          mmxs16bit,mmxfixed16:
            op:=A_PSUBSW;
          mmxu16bit:
            op:=A_PSUBUSW;
        end
      else
        case mmx_type(resultdef) of
          mmxs8bit,mmxu8bit:
            op:=A_PSUBB;
          mmxs16bit,mmxu16bit,mmxfixed16:
            op:=A_PSUBW;
          mmxs32bit,mmxu32bit:
            op:=A_PSUBD;
        end;
      if op = A_NONE then
        internalerror(201408202);
      emit_reg_reg(op,S_NO,location.register,hreg);
      emit_reg_reg(A_MOVQ,S_NO,hreg,location.register);
    end;
{$endif SUPPORT_MMX}

  procedure tx86unaryminusnode.second_float;
    var
      reg : tregister;
      href : treference;
      l1 : tasmlabel;
    begin
      secondpass(left);
      if expectloc=LOC_MMREGISTER then
        begin
          hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
          location_reset(location,LOC_MMREGISTER,def_cgsize(resultdef));
          { make life of register allocator easier }
          location.register:=cg.getmmregister(current_asmdata.CurrAsmList,def_cgsize(resultdef));

          current_asmdata.getglobaldatalabel(l1);
          new_section(current_asmdata.asmlists[al_typedconsts],sec_rodata_norel,l1.name,const_align(sizeof(pint)));
          current_asmdata.asmlists[al_typedconsts].concat(Tai_label.Create(l1));
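          { the constant emitted below is the IEEE sign-bit mask: $80000000 for
            single, $8000000000000000 for double (stored as two little-endian
            32-bit words); XORing the value with it flips only the sign bit }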
          case def_cgsize(resultdef) of
            OS_F32:
              current_asmdata.asmlists[al_typedconsts].concat(tai_const.create_32bit(longint(1 shl 31)));
            OS_F64:
              begin
                current_asmdata.asmlists[al_typedconsts].concat(tai_const.create_32bit(0));
                current_asmdata.asmlists[al_typedconsts].concat(tai_const.create_32bit(-(1 shl 31)));
              end
            else
              internalerror(2004110215);
          end;
          reference_reset_symbol(href,l1,0,resultdef.alignment,[]);
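          { with AVX the three-operand XOR can combine the in-memory mask and
            the source register directly into the destination register; the
            non-AVX path below first loads the mask into a scratch mm register }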
          if UseAVX then
            cg.a_opmm_ref_reg_reg(current_asmdata.CurrAsmList,OP_XOR,left.location.size,href,left.location.register,location.register,nil)
          else
            begin
              reg:=cg.getmmregister(current_asmdata.CurrAsmList,def_cgsize(resultdef));
              cg.a_loadmm_ref_reg(current_asmdata.CurrAsmList,def_cgsize(resultdef),def_cgsize(resultdef),href,reg,mms_movescalar);
              cg.a_loadmm_reg_reg(current_asmdata.CurrAsmList,def_cgsize(resultdef),def_cgsize(resultdef),left.location.register,location.register,mms_movescalar);
              cg.a_opmm_reg_reg(current_asmdata.CurrAsmList,OP_XOR,left.location.size,reg,location.register,nil);
            end;
        end
      else
        begin
          location_reset(location,LOC_FPUREGISTER,def_cgsize(resultdef));
          case left.location.loc of
            LOC_REFERENCE,
            LOC_CREFERENCE:
              begin
                location.register:=NR_ST;
                cg.a_loadfpu_ref_reg(current_asmdata.CurrAsmList,
                  left.location.size,location.size,
                  left.location.reference,location.register);
                emit_none(A_FCHS,S_NO);
              end;
            LOC_FPUREGISTER,
            LOC_CFPUREGISTER:
              begin
                { "load st,st" is ignored by the code generator }
                cg.a_loadfpu_reg_reg(current_asmdata.CurrAsmList,left.location.size,location.size,left.location.register,NR_ST);
                location.register:=NR_ST;
                emit_none(A_FCHS,S_NO);
              end;
            else
              internalerror(200312241);
          end;
        end;
    end;

{*****************************************************************************
                               TX86NOTNODE
*****************************************************************************}

  procedure tx86notnode.second_boolean;
    var
      opsize : tcgsize;
{$if defined(cpu32bitalu) or defined(cpu16bitalu)}
      hreg : tregister;
{$endif}
    begin
      opsize:=def_cgsize(resultdef);

      if not handle_locjump then
        begin
          { the second pass could change the location of left if it is a
            register variable, so we have to do this before the case statement }
          secondpass(left);
          case left.location.loc of
            LOC_FLAGS :
              begin
                location_reset(location,LOC_FLAGS,OS_NO);
                location.resflags:=left.location.resflags;
                inverse_flags(location.resflags);
              end;
            LOC_CREFERENCE,
            LOC_REFERENCE:
              begin
{$if defined(cpu32bitalu)}
                if is_64bit(resultdef) then
                  begin
                    hreg:=cg.GetIntRegister(current_asmdata.CurrAsmList,OS_32);
                    tcgx86(cg).make_simple_ref(current_asmdata.CurrAsmList,left.location.reference);
                    cg.a_load_ref_reg(current_asmdata.CurrAsmList,OS_32,OS_32,left.location.reference,hreg);
                    inc(left.location.reference.offset,4);
                    cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                    cg.a_op_ref_reg(current_asmdata.CurrAsmList,OP_OR,OS_32,left.location.reference,hreg);
                  end
                else
{$elseif defined(cpu16bitalu)}
                if is_64bit(resultdef) then
                  begin
                    hreg:=cg.GetIntRegister(current_asmdata.CurrAsmList,OS_16);
                    tcgx86(cg).make_simple_ref(current_asmdata.CurrAsmList,left.location.reference);
                    cg.a_load_ref_reg(current_asmdata.CurrAsmList,OS_16,OS_16,left.location.reference,hreg);
                    inc(left.location.reference.offset,2);
                    cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                    cg.a_op_ref_reg(current_asmdata.CurrAsmList,OP_OR,OS_16,left.location.reference,hreg);
                    inc(left.location.reference.offset,2);
                    cg.a_op_ref_reg(current_asmdata.CurrAsmList,OP_OR,OS_16,left.location.reference,hreg);
                    inc(left.location.reference.offset,2);
                    cg.a_op_ref_reg(current_asmdata.CurrAsmList,OP_OR,OS_16,left.location.reference,hreg);
                  end
                else if is_32bit(resultdef) then
                  begin
                    hreg:=cg.GetIntRegister(current_asmdata.CurrAsmList,OS_16);
                    tcgx86(cg).make_simple_ref(current_asmdata.CurrAsmList,left.location.reference);
                    cg.a_load_ref_reg(current_asmdata.CurrAsmList,OS_16,OS_16,left.location.reference,hreg);
                    inc(left.location.reference.offset,2);
                    cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                    cg.a_op_ref_reg(current_asmdata.CurrAsmList,OP_OR,OS_16,left.location.reference,hreg);
                  end
                else
{$endif}
                  begin
                    cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                    emit_const_ref(A_CMP, TCGSize2Opsize[opsize], 0, left.location.reference);
                  end;
                location_reset(location,LOC_FLAGS,OS_NO);
                location.resflags:=F_E;
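                { ZF is set exactly when the operand was 0, i.e. the boolean was
                  false, so F_E describes "not left" }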
              end;
            LOC_CONSTANT,
            LOC_REGISTER,
            LOC_CREGISTER,
            LOC_SUBSETREG,
            LOC_CSUBSETREG,
            LOC_SUBSETREF,
            LOC_CSUBSETREF :
              begin
{$if defined(cpu32bitalu)}
                if is_64bit(resultdef) then
                  begin
                    hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,false);
                    cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                    emit_reg_reg(A_OR,S_L,left.location.register64.reghi,left.location.register64.reglo);
                  end
                else
{$elseif defined(cpu16bitalu)}
                if is_64bit(resultdef) then
                  begin
                    hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,false);
                    cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                    emit_reg_reg(A_OR,S_W,cg.GetNextReg(left.location.register64.reghi),left.location.register64.reghi);
                    emit_reg_reg(A_OR,S_W,cg.GetNextReg(left.location.register64.reglo),left.location.register64.reglo);
                    emit_reg_reg(A_OR,S_W,left.location.register64.reghi,left.location.register64.reglo);
                  end
                else if is_32bit(resultdef) then
                  begin
                    hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,false);
                    cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                    emit_reg_reg(A_OR,S_W,cg.GetNextReg(left.location.register),left.location.register);
                  end
                else
{$endif}
                  begin
                    hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,true);
                    cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                    emit_reg_reg(A_TEST,TCGSize2Opsize[opsize],left.location.register,left.location.register);
                  end;
                location_reset(location,LOC_FLAGS,OS_NO);
                location.resflags:=F_E;
              end;
            else
              internalerror(200203224);
          end;
        end;
    end;

{$ifdef SUPPORT_MMX}
  procedure tx86notnode.second_mmx;
    var
      hreg,r : Tregister;
    begin
      secondpass(left);
      location_reset(location,LOC_MMXREGISTER,OS_NO);
      r:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
      emit_const_reg(A_MOV,S_L,longint($ffffffff),r);
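      { MMX has no packed NOT instruction, so the 64-bit register is inverted
        by XORing an all-ones 32-bit mask onto its lower half and, after
        shifting the mask up by 32 bits, onto its upper half }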
      { load operand }
      case left.location.loc of
        LOC_MMXREGISTER:
          location_copy(location,left.location);
        LOC_CMMXREGISTER:
          begin
            location.register:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
            emit_reg_reg(A_MOVQ,S_NO,left.location.register,location.register);
          end;
        LOC_REFERENCE,
        LOC_CREFERENCE:
          begin
            location.register:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
            emit_ref_reg(A_MOVQ,S_NO,left.location.reference,location.register);
          end;
      end;
      { load mask }
      hreg:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
      emit_reg_reg(A_MOVD,S_NO,r,hreg);
      { lower 32 bit }
      emit_reg_reg(A_PXOR,S_NO,hreg,location.register);
      { shift mask }
      emit_const_reg(A_PSLLQ,S_B,32,hreg);
      { higher 32 bit }
      emit_reg_reg(A_PXOR,S_NO,hreg,location.register);
    end;
{$endif SUPPORT_MMX}


{*****************************************************************************
                             TX86MODDIVNODE
*****************************************************************************}

  procedure tx86moddivnode.pass_generate_code;
    var
      hreg1,hreg2,hreg3,rega,regd,tempreg : Tregister;
      power : longint;
      instr : TAiCpu;
      op : Tasmop;
      cgsize : TCgSize;
      opsize : topsize;
      e, sm : aint;
      d,m : aword;
      m_add, invertsign : boolean;
      s : byte;
    label
      DefaultDiv;
    begin
      secondpass(left);
      if codegenerror then
        exit;
      secondpass(right);
      if codegenerror then
        exit;

      { put numerator in register }
      cgsize:=def_cgsize(resultdef);
      opsize:=TCGSize2OpSize[cgsize];
      if not (cgsize in [OS_32,OS_S32,OS_64,OS_S64]) then
        InternalError(2013102702);

      rega:=newreg(R_INTREGISTER,RS_EAX,cgsize2subreg(R_INTREGISTER,cgsize));
      regd:=newreg(R_INTREGISTER,RS_EDX,cgsize2subreg(R_INTREGISTER,cgsize));

      location_reset(location,LOC_REGISTER,cgsize);
      hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,false);
      hreg1:=left.location.register;

      if (nodetype=divn) and (right.nodetype=ordconstn) then
        begin
          if isabspowerof2(tordconstnode(right).value,power) then
            begin
              { for signed numbers, the numerator must be adjusted before the
                shift instruction, but not with unsigned numbers! Otherwise,
                "Cardinal($ffffffff) div 16" overflows! (JM) }
              if is_signed(left.resultdef) then
                begin
                  invertsign:=tordconstnode(right).value<0;
                  { use a sequence without jumps, saw this in
                    comp.compilers (JM) }
                  { no jumps, but more operations }
                  hreg2:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                  emit_reg_reg(A_MOV,opsize,hreg1,hreg2);
                  if power=1 then
                    begin
                      { If the left value is negative, hreg2=(1 shl power)-1=1, otherwise 0. }
                      cg.a_op_const_reg(current_asmdata.CurrAsmList,OP_SHR,cgsize,resultdef.size*8-1,hreg2);
                    end
                  else
                    begin
                      { If the left value is negative, hreg2=$ffffffff, otherwise 0. }
                      cg.a_op_const_reg(current_asmdata.CurrAsmList,OP_SAR,cgsize,resultdef.size*8-1,hreg2);
                      { If negative, hreg2=(1 shl power)-1, otherwise 0. }
                      { (don't use emit_const_reg, because if value>high(longint)
                        then it must first be loaded into a register) }
                      cg.a_op_const_reg(current_asmdata.CurrAsmList,OP_AND,cgsize,(aint(1) shl power)-1,hreg2);
                    end;
                  { add to the left value }
                  emit_reg_reg(A_ADD,opsize,hreg2,hreg1);
                  { do the shift }
                  cg.a_op_const_reg(current_asmdata.CurrAsmList,OP_SAR,cgsize,power,hreg1);
                  if invertsign then
                    emit_reg(A_NEG,opsize,hreg1);
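                  { in total: x div 2^power, truncating toward zero, is computed
                    as SAR(x + (2^power-1 if x<0, else 0), power); e.g. -7 div 4:
                    -7+3=-4, -4 sar 2 = -1, whereas a plain SAR would give -2 }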
                end
              else
                cg.a_op_const_reg(current_asmdata.CurrAsmList,OP_SHR,cgsize,power,hreg1);
              location.register:=hreg1;
            end
          else
            begin
              if is_signed(left.resultdef) then
                begin
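                  { signed division by an arbitrary constant is turned into a
                    multiplication by a precomputed "magic" reciprocal: the
                    quotient comes from the high half of x*sm, corrected by
                    adding/subtracting the dividend and by the extracted sign
                    bit so that the result rounds toward zero
                    (see calc_divconst_magic_signed) }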
                  e:=tordconstnode(right).value.svalue;
                  calc_divconst_magic_signed(resultdef.size*8,e,sm,s);
                  cg.getcpuregister(current_asmdata.CurrAsmList,rega);
                  emit_const_reg(A_MOV,opsize,sm,rega);
                  cg.getcpuregister(current_asmdata.CurrAsmList,regd);
                  emit_reg(A_IMUL,opsize,hreg1);
                  { only the high half of result is used }
                  cg.ungetcpuregister(current_asmdata.CurrAsmList,rega);
                  { add or subtract dividend }
                  if (e>0) and (sm<0) then
                    emit_reg_reg(A_ADD,opsize,hreg1,regd)
                  else if (e<0) and (sm>0) then
                    emit_reg_reg(A_SUB,opsize,hreg1,regd);
                  { shift if necessary }
                  if (s<>0) then
                    emit_const_reg(A_SAR,opsize,s,regd);
                  { extract and add the sign bit }
                  if (e<0) then
                    emit_reg_reg(A_MOV,opsize,regd,hreg1);
                  { if e>=0, hreg1 still contains dividend }
                  emit_const_reg(A_SHR,opsize,left.resultdef.size*8-1,hreg1);
                  emit_reg_reg(A_ADD,opsize,hreg1,regd);
                  cg.ungetcpuregister(current_asmdata.CurrAsmList,regd);
                  location.register:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                  cg.a_load_reg_reg(current_asmdata.CurrAsmList,cgsize,cgsize,regd,location.register)
                end
              else
                begin
                  d:=tordconstnode(right).value.svalue;
                  if d>=aword(1) shl (left.resultdef.size*8-1) then
                    begin
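                      { the divisor has its top bit set, so the unsigned
                        quotient can only be 0 or 1: a compare plus SETAE is
                        enough }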
                      location.register:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                      { Ensure that the whole register is 0, since SETcc only sets the lowest byte }
                      { If the operands are 64 bits, this XOR routine will be shrunk by the
                        peephole optimizer. [Kit] }
                      emit_reg_reg(A_XOR,opsize,location.register,location.register);
                      if (cgsize in [OS_64,OS_S64]) then { Cannot use 64-bit constants in CMP }
                        begin
                          hreg2:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                          emit_const_reg(A_MOV,opsize,aint(d),hreg2);
                          cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                          emit_reg_reg(A_CMP,opsize,hreg2,hreg1);
                        end
                      else
                        begin
                          cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                          emit_const_reg(A_CMP,opsize,aint(d),hreg1);
                        end;
                      { NOTE: SBB and SETAE are both 3 bytes long without the REX prefix,
                        both use an ALU for their execution and take a single cycle to
                        run. The only difference is that SETAE does not modify the flags,
                        allowing for some possible reuse. [Kit] }
                      { Emit a SETcc instruction that depends on the carry bit being zero,
                        that is, the numerator is greater than or equal to the denominator. }
                      tempreg:=cg.makeregsize(current_asmdata.CurrAsmList,location.register,OS_8);
                      instr:=TAiCpu.op_reg(A_SETcc,S_B,tempreg);
                      instr.condition:=C_AE;
                      current_asmdata.CurrAsmList.concat(instr);
                      cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                    end
                  else
                    begin
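                      { general case: convert the unsigned division into a
                        multiplication by the magic constant m followed by a
                        right shift of the high half of the product, with an
                        extra add-and-rotate step when m_add is set }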
                      calc_divconst_magic_unsigned(resultdef.size*8,d,m,m_add,s);
                      cg.getcpuregister(current_asmdata.CurrAsmList,rega);
                      emit_const_reg(A_MOV,opsize,aint(m),rega);
                      cg.getcpuregister(current_asmdata.CurrAsmList,regd);
                      emit_reg(A_MUL,opsize,hreg1);
                      cg.ungetcpuregister(current_asmdata.CurrAsmList,rega);
                      if m_add then
                        begin
                          { addition can overflow, shift first bit considering carry,
                            then shift remaining bits in regular way. }
                          emit_reg_reg(A_ADD,opsize,hreg1,regd);
                          emit_const_reg(A_RCR,opsize,1,regd);
                          dec(s);
                        end;
                      if s<>0 then
                        emit_const_reg(A_SHR,opsize,aint(s),regd);
                      cg.ungetcpuregister(current_asmdata.CurrAsmList,regd);
                      location.register:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                      cg.a_load_reg_reg(current_asmdata.CurrAsmList,cgsize,cgsize,regd,location.register)
                    end;
                end;
            end;
        end
      else if (nodetype=modn) and (right.nodetype=ordconstn) and not(is_signed(left.resultdef)) then
        begin
          { unsigned modulus by a (+/-)power-of-2 constant? }
          if isabspowerof2(tordconstnode(right).value,power) then
            begin
              emit_const_reg(A_AND,opsize,(aint(1) shl power)-1,hreg1);
              location.register:=hreg1;
            end
          else
            begin
              d:=tordconstnode(right).value.svalue;
              if d>=aword(1) shl (left.resultdef.size*8-1) then
                begin
                  if not (CPUX86_HAS_CMOV in cpu_capabilities[current_settings.cputype]) then
                    goto DefaultDiv;
                  location.register:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                  hreg3:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                  m := aword(-aint(d)); { Two's complement of d }
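                  { since d has its top bit set, x mod d is either x (when x<d)
                    or x-d (when x>=d); the CMOV below selects 0 or -d
                    branchlessly and the final ADD of x yields the remainder }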
                  if (cgsize in [OS_64,OS_S64]) then { Cannot use 64-bit constants in CMP }
                    begin
                      hreg2:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                      emit_const_reg(A_MOV,opsize,aint(d),hreg2);
                      emit_const_reg(A_MOV,opsize,aint(m),hreg3);
                      emit_reg_reg(A_XOR,opsize,location.register,location.register);
                      cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                      emit_reg_reg(A_CMP,opsize,hreg2,hreg1);
                    end
                  else
                    begin
                      emit_const_reg(A_MOV,opsize,aint(m),hreg3);
                      emit_reg_reg(A_XOR,opsize,location.register,location.register);
                      cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                      emit_const_reg(A_CMP,opsize,aint(d),hreg1);
                    end;
                  { Emit conditional move that depends on the carry flag being zero,
                    that is, the comparison result is above or equal }
                  instr:=TAiCpu.op_reg_reg(A_CMOVcc,opsize,hreg3,location.register);
                  instr.condition:=C_AE;
                  current_asmdata.CurrAsmList.concat(instr);
                  cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                  emit_reg_reg(A_ADD,opsize,hreg1,location.register);
                end
              else
                begin
                  { Convert the division to a multiplication }
                  calc_divconst_magic_unsigned(resultdef.size*8,d,m,m_add,s);
                  cg.getcpuregister(current_asmdata.CurrAsmList,rega);
                  emit_const_reg(A_MOV,opsize,aint(m),rega);
                  cg.getcpuregister(current_asmdata.CurrAsmList,regd);
                  emit_reg(A_MUL,opsize,hreg1);
                  cg.ungetcpuregister(current_asmdata.CurrAsmList,rega);
                  hreg2:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                  emit_reg_reg(A_MOV,opsize,hreg1,hreg2);
                  if m_add then
                    begin
                      { addition can overflow, shift first bit considering carry,
                        then shift remaining bits in regular way. }
                      cg.a_reg_alloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                      emit_reg_reg(A_ADD,opsize,hreg1,regd);
                      emit_const_reg(A_RCR,opsize,1,regd);
                      cg.a_reg_dealloc(current_asmdata.CurrAsmList,NR_DEFAULTFLAGS);
                      dec(s);
                    end;
                  if s<>0 then
                    emit_const_reg(A_SHR,opsize,aint(s),regd); { R/EDX now contains the quotient }
                  { Now multiply the quotient by the original denominator and
                    subtract the product from the original numerator to get
                    the remainder. }
                  if (cgsize in [OS_64,OS_S64]) then { Cannot use 64-bit constants in IMUL }
                    begin
                      hreg3:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                      emit_const_reg(A_MOV,opsize,aint(d),hreg3);
                      emit_reg_reg(A_IMUL,opsize,hreg3,regd);
                    end
                  else
                    emit_const_reg(A_IMUL,opsize,aint(d),regd);
                  emit_reg_reg(A_SUB,opsize,regd,hreg2);
                  cg.ungetcpuregister(current_asmdata.CurrAsmList,regd);
                  location.register:=hreg2;
                end;
            end;
        end
      else
        begin
        DefaultDiv:
          { Bring the numerator into (R|E)AX. }
          cg.getcpuregister(current_asmdata.CurrAsmList,rega);
          emit_reg_reg(A_MOV,opsize,hreg1,rega);
          cg.getcpuregister(current_asmdata.CurrAsmList,regd);
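          { x86 (I)DIV expects the dividend in (R|E)DX:(R|E)AX and returns the
            quotient in (R|E)AX and the remainder in (R|E)DX }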
          { Sign extension depends on the left type. }
          if is_signed(left.resultdef) then
            case left.resultdef.size of
{$ifdef x86_64}
              8:
                emit_none(A_CQO,S_NO);
{$endif x86_64}
              4:
                emit_none(A_CDQ,S_NO);
              else
                internalerror(2013102701);
            end
          else
            emit_reg_reg(A_XOR,opsize,regd,regd);

          { Division depends on the result type }
          if is_signed(resultdef) then
            op:=A_IDIV
          else
            op:=A_DIV;

          if right.location.loc in [LOC_REFERENCE,LOC_CREFERENCE] then
            emit_ref(op,opsize,right.location.reference)
          else if right.location.loc in [LOC_REGISTER,LOC_CREGISTER] then
            emit_reg(op,opsize,right.location.register)
          else
            begin
              hreg1:=cg.getintregister(current_asmdata.CurrAsmList,right.location.size);
              hlcg.a_load_loc_reg(current_asmdata.CurrAsmList,right.resultdef,right.resultdef,right.location,hreg1);
              emit_reg(op,opsize,hreg1);
            end;

          { Copy the result into a new register. Release R/EAX & R/EDX. }
          cg.ungetcpuregister(current_asmdata.CurrAsmList,regd);
          cg.ungetcpuregister(current_asmdata.CurrAsmList,rega);
          location.register:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
          if nodetype=divn then
            cg.a_load_reg_reg(current_asmdata.CurrAsmList,cgsize,cgsize,rega,location.register)
          else
            cg.a_load_reg_reg(current_asmdata.CurrAsmList,cgsize,cgsize,regd,location.register);
        end;
    end;

{$ifdef SUPPORT_MMX}
  procedure tx86shlshrnode.second_mmx;
    var
      op : TAsmOp;
      cmpop : boolean;
      mmxbase : tmmxtype;
      hreg,
      hregister : tregister;
    begin
      secondpass(left);
      if codegenerror then
        exit;
      secondpass(right);
      if codegenerror then
        exit;

      cmpop:=false;
      op:=A_NOP;
      mmxbase:=mmx_type(left.resultdef);
      location_reset(location,LOC_MMXREGISTER,def_cgsize(resultdef));
      case nodetype of
        shrn :
          case mmxbase of
            mmxs16bit,mmxu16bit,mmxfixed16:
              op:=A_PSRLW;
            mmxs32bit,mmxu32bit:
              op:=A_PSRLD;
            mmxs64bit,mmxu64bit:
              op:=A_PSRLQ;
            else
              Internalerror(2018022504);
          end;
        shln :
          case mmxbase of
            mmxs16bit,mmxu16bit,mmxfixed16:
              op:=A_PSLLW;
            mmxs32bit,mmxu32bit:
              op:=A_PSLLD;
            mmxs64bit,mmxu64bit:
              op:=A_PSLLQ;
            else
              Internalerror(2018022503);
          end;
        else
          internalerror(2018022502);
      end;
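      { the MMX shifts take the count either as an MMX register, a memory
        operand or an immediate, matching the right-operand locations handled
        below }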
      { if the left operand is not already in an MMX register,
        force it into one }
      if (left.location.loc<>LOC_MMXREGISTER) then
        begin
          { register variable ? }
          if (left.location.loc=LOC_CMMXREGISTER) then
            begin
              hregister:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
              emit_reg_reg(A_MOVQ,S_NO,left.location.register,hregister);
            end
          else
            begin
              if not(left.location.loc in [LOC_REFERENCE,LOC_CREFERENCE]) then
                internalerror(2018022505);
              hregister:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
              tcgx86(cg).make_simple_ref(current_asmdata.CurrAsmList,left.location.reference);
              emit_ref_reg(A_MOVQ,S_NO,left.location.reference,hregister);
            end;

          location_reset(left.location,LOC_MMXREGISTER,OS_NO);
          left.location.register:=hregister;
        end;

      { at this point, left.location.loc should be LOC_MMXREGISTER }
      case right.location.loc of
        LOC_MMXREGISTER,LOC_CMMXREGISTER:
          begin
            emit_reg_reg(op,S_NO,right.location.register,left.location.register);
            location.register:=left.location.register;
          end;
        LOC_CONSTANT:
          emit_const_reg(op,S_NO,right.location.value,left.location.register);
        LOC_REFERENCE,LOC_CREFERENCE:
          begin
            tcgx86(cg).make_simple_ref(current_asmdata.CurrAsmList,right.location.reference);
            emit_ref_reg(op,S_NO,right.location.reference,left.location.register);
          end;
        else
          internalerror(2018022506);
      end;
      location.register:=left.location.register;
      location_freetemp(current_asmdata.CurrAsmList,right.location);
    end;
{$endif SUPPORT_MMX}

end.