nx86mat.pas

{
    Copyright (c) 1998-2002 by Florian Klaempfl

    Generate x86 code for math nodes

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit nx86mat;

{$i fpcdefs.inc}

interface

  uses
    node,nmat,ncgmat;

  type
    tx86unaryminusnode = class(tcgunaryminusnode)
{$ifdef SUPPORT_MMX}
      procedure second_mmx;override;
{$endif SUPPORT_MMX}
      procedure second_float;override;
      function pass_1:tnode;override;
    end;

    tx86notnode = class(tcgnotnode)
      procedure second_boolean;override;
{$ifdef SUPPORT_MMX}
      procedure second_mmx;override;
{$endif SUPPORT_MMX}
    end;

    tx86moddivnode = class(tcgmoddivnode)
      procedure pass_generate_code;override;
    end;

implementation

  uses
    globtype,
    systems,constexp,
    cutils,verbose,globals,
    symconst,symdef,
    aasmbase,aasmtai,aasmdata,defutil,
    cgbase,pass_1,pass_2,
    ncon,
    cpubase,procinfo,
    cga,ncgutil,cgobj,hlcgobj,cgx86,cgutils;
{*****************************************************************************
                             TX86UNARYMINUSNODE
*****************************************************************************}
    function tx86unaryminusnode.pass_1 : tnode;
      begin
        result:=nil;
        firstpass(left);
        if codegenerror then
          exit;

        if (left.resultdef.typ=floatdef) then
          begin
            if use_vectorfpu(left.resultdef) then
              expectloc:=LOC_MMREGISTER
            else
              expectloc:=LOC_FPUREGISTER;
          end
{$ifdef SUPPORT_MMX}
        else
          if (cs_mmx in current_settings.localswitches) and
             is_mmx_able_array(left.resultdef) then
            begin
              expectloc:=LOC_MMXREGISTER;
            end
{$endif SUPPORT_MMX}
        else
          inherited pass_1;
      end;


{$ifdef SUPPORT_MMX}
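    { negate an MMX vector by computing 0 - x: clear a scratch register with
      PXOR, subtract the operand element-wise with the packed subtract that
      matches the element type (saturating variants when cs_mmx_saturation
      is set) and move the result back with MOVQ }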
    procedure tx86unaryminusnode.second_mmx;
      var
        op : tasmop;
        hreg : tregister;
      begin
        secondpass(left);
        location_reset(location,LOC_MMXREGISTER,OS_NO);
        hreg:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
        emit_reg_reg(A_PXOR,S_NO,hreg,hreg);
        case left.location.loc of
          LOC_MMXREGISTER:
            begin
              location.register:=left.location.register;
            end;
          LOC_CMMXREGISTER:
            begin
              location.register:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
              emit_reg_reg(A_MOVQ,S_NO,left.location.register,location.register);
            end;
          LOC_REFERENCE,
          LOC_CREFERENCE:
            begin
              location.register:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
              emit_ref_reg(A_MOVQ,S_NO,left.location.reference,location.register);
            end;
          else
            internalerror(200203225);
        end;
        if cs_mmx_saturation in current_settings.localswitches then
          case mmx_type(resultdef) of
            mmxs8bit:
              op:=A_PSUBSB;
            mmxu8bit:
              op:=A_PSUBUSB;
            mmxs16bit,mmxfixed16:
              op:=A_PSUBSW;
            mmxu16bit:
              op:=A_PSUBUSW;
          end
        else
          case mmx_type(resultdef) of
            mmxs8bit,mmxu8bit:
              op:=A_PSUBB;
            mmxs16bit,mmxu16bit,mmxfixed16:
              op:=A_PSUBW;
            mmxs32bit,mmxu32bit:
              op:=A_PSUBD;
          end;
        emit_reg_reg(op,S_NO,location.register,hreg);
        emit_reg_reg(A_MOVQ,S_NO,hreg,location.register);
      end;
{$endif SUPPORT_MMX}

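    { negate a float by flipping its sign bit: on the vector FPU path this is
      an XOR with a constant that only has the sign bit set ($80000000 for
      OS_F32, $8000000000000000 for OS_F64) emitted into a read-only data
      section; on the x87 path FCHS negates ST directly }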
    procedure tx86unaryminusnode.second_float;
      var
        reg : tregister;
        href : treference;
        l1 : tasmlabel;
      begin
        secondpass(left);
        if expectloc=LOC_MMREGISTER then
          begin
            hlcg.location_force_mmregscalar(current_asmdata.CurrAsmList,left.location,left.resultdef,true);
            location_reset(location,LOC_MMREGISTER,def_cgsize(resultdef));

            { make life of register allocator easier }
            location.register:=cg.getmmregister(current_asmdata.CurrAsmList,def_cgsize(resultdef));

            current_asmdata.getdatalabel(l1);
            new_section(current_asmdata.asmlists[al_typedconsts],sec_rodata_norel,l1.name,const_align(sizeof(pint)));
            current_asmdata.asmlists[al_typedconsts].concat(Tai_label.Create(l1));
            case def_cgsize(resultdef) of
              OS_F32:
                current_asmdata.asmlists[al_typedconsts].concat(tai_const.create_32bit(longint(1 shl 31)));
              OS_F64:
                begin
                  current_asmdata.asmlists[al_typedconsts].concat(tai_const.create_32bit(0));
                  current_asmdata.asmlists[al_typedconsts].concat(tai_const.create_32bit(-(1 shl 31)));
                end
              else
                internalerror(2004110215);
            end;

            reference_reset_symbol(href,l1,0,resultdef.alignment);
            if UseAVX then
              cg.a_opmm_ref_reg_reg(current_asmdata.CurrAsmList,OP_XOR,left.location.size,href,left.location.register,location.register,nil)
            else
              begin
                reg:=cg.getmmregister(current_asmdata.CurrAsmList,def_cgsize(resultdef));
                cg.a_loadmm_ref_reg(current_asmdata.CurrAsmList,def_cgsize(resultdef),def_cgsize(resultdef),href,reg,mms_movescalar);
                cg.a_loadmm_reg_reg(current_asmdata.CurrAsmList,def_cgsize(resultdef),def_cgsize(resultdef),left.location.register,location.register,mms_movescalar);
                cg.a_opmm_reg_reg(current_asmdata.CurrAsmList,OP_XOR,left.location.size,reg,location.register,nil);
              end;
          end
        else
          begin
            location_reset(location,LOC_FPUREGISTER,def_cgsize(resultdef));
            case left.location.loc of
              LOC_REFERENCE,
              LOC_CREFERENCE:
                begin
                  location.register:=NR_ST;
                  cg.a_loadfpu_ref_reg(current_asmdata.CurrAsmList,
                    left.location.size,location.size,
                    left.location.reference,location.register);
                  emit_none(A_FCHS,S_NO);
                end;
              LOC_FPUREGISTER,
              LOC_CFPUREGISTER:
                begin
                  { "load st,st" is ignored by the code generator }
                  cg.a_loadfpu_reg_reg(current_asmdata.CurrAsmList,left.location.size,location.size,left.location.register,NR_ST);
                  location.register:=NR_ST;
                  emit_none(A_FCHS,S_NO);
                end;
              else
                internalerror(200312241);
            end;
          end;
      end;


{*****************************************************************************
                                TX86NOTNODE
*****************************************************************************}

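    { boolean "not": if the operand is already in the flags, just invert the
      condition; otherwise compare it against zero (OR-ing the halves together
      first when the value is wider than the ALU) and return an F_E (zero)
      flags location }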
    procedure tx86notnode.second_boolean;
      var
        opsize : tcgsize;
        hreg : tregister;
      begin
        opsize:=def_cgsize(resultdef);

        if not handle_locjump then
          begin
            { the second pass could change the location of left if it is a
              register variable, so do this before the case statement }
            secondpass(left);
            case left.expectloc of
              LOC_FLAGS :
                begin
                  location_reset(location,LOC_FLAGS,OS_NO);
                  location.resflags:=left.location.resflags;
                  inverse_flags(location.resflags);
                end;
              LOC_CREFERENCE,
              LOC_REFERENCE:
                begin
{$if defined(cpu32bitalu)}
                  if is_64bit(resultdef) then
                    begin
                      hreg:=cg.GetIntRegister(current_asmdata.CurrAsmList,OS_32);
                      tcgx86(cg).make_simple_ref(current_asmdata.CurrAsmList,left.location.reference);
                      cg.a_load_ref_reg(current_asmdata.CurrAsmList,OS_32,OS_32,left.location.reference,hreg);
                      inc(left.location.reference.offset,4);
                      cg.a_op_ref_reg(current_asmdata.CurrAsmList,OP_OR,OS_32,left.location.reference,hreg);
                    end
                  else
{$elseif defined(cpu16bitalu)}
                  if is_64bit(resultdef) then
                    begin
                      hreg:=cg.GetIntRegister(current_asmdata.CurrAsmList,OS_16);
                      tcgx86(cg).make_simple_ref(current_asmdata.CurrAsmList,left.location.reference);
                      cg.a_load_ref_reg(current_asmdata.CurrAsmList,OS_16,OS_16,left.location.reference,hreg);
                      inc(left.location.reference.offset,2);
                      cg.a_op_ref_reg(current_asmdata.CurrAsmList,OP_OR,OS_16,left.location.reference,hreg);
                      inc(left.location.reference.offset,2);
                      cg.a_op_ref_reg(current_asmdata.CurrAsmList,OP_OR,OS_16,left.location.reference,hreg);
                      inc(left.location.reference.offset,2);
                      cg.a_op_ref_reg(current_asmdata.CurrAsmList,OP_OR,OS_16,left.location.reference,hreg);
                    end
                  else if is_32bit(resultdef) then
                    begin
                      hreg:=cg.GetIntRegister(current_asmdata.CurrAsmList,OS_16);
                      tcgx86(cg).make_simple_ref(current_asmdata.CurrAsmList,left.location.reference);
                      cg.a_load_ref_reg(current_asmdata.CurrAsmList,OS_16,OS_16,left.location.reference,hreg);
                      inc(left.location.reference.offset,2);
                      cg.a_op_ref_reg(current_asmdata.CurrAsmList,OP_OR,OS_16,left.location.reference,hreg);
                    end
                  else
{$endif}
                    emit_const_ref(A_CMP, TCGSize2Opsize[opsize], 0, left.location.reference);
                  location_reset(location,LOC_FLAGS,OS_NO);
                  location.resflags:=F_E;
                end;
              LOC_CONSTANT,
              LOC_REGISTER,
              LOC_CREGISTER,
              LOC_SUBSETREG,
              LOC_CSUBSETREG,
              LOC_SUBSETREF,
              LOC_CSUBSETREF :
                begin
{$if defined(cpu32bitalu)}
                  if is_64bit(resultdef) then
                    begin
                      hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,false);
                      emit_reg_reg(A_OR,S_L,left.location.register64.reghi,left.location.register64.reglo);
                    end
                  else
{$elseif defined(cpu16bitalu)}
                  if is_64bit(resultdef) then
                    begin
                      hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,false);
                      emit_reg_reg(A_OR,S_W,GetNextReg(left.location.register64.reghi),left.location.register64.reghi);
                      emit_reg_reg(A_OR,S_W,GetNextReg(left.location.register64.reglo),left.location.register64.reglo);
                      emit_reg_reg(A_OR,S_W,left.location.register64.reghi,left.location.register64.reglo);
                    end
                  else if is_32bit(resultdef) then
                    begin
                      hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,false);
                      emit_reg_reg(A_OR,S_L,GetNextReg(left.location.register),left.location.register);
                    end
                  else
{$endif}
                    begin
                      hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,true);
                      emit_reg_reg(A_TEST,TCGSize2Opsize[opsize],left.location.register,left.location.register);
                    end;
                  location_reset(location,LOC_FLAGS,OS_NO);
                  location.resflags:=F_E;
                end;
              else
                internalerror(200203224);
            end;
          end;
      end;


{$ifdef SUPPORT_MMX}
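    { bitwise "not" of an MMX value: XOR with an all-ones mask; MOVD only
      fills the low dword, so XOR the low half, shift the mask up by 32 bits
      with PSLLQ and XOR again for the high half }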
    procedure tx86notnode.second_mmx;
      var
        hreg,r : tregister;
      begin
        secondpass(left);
        location_reset(location,LOC_MMXREGISTER,OS_NO);
        r:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
        emit_const_reg(A_MOV,S_L,longint($ffffffff),r);
        { load operand }
        case left.location.loc of
          LOC_MMXREGISTER:
            location_copy(location,left.location);
          LOC_CMMXREGISTER:
            begin
              location.register:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
              emit_reg_reg(A_MOVQ,S_NO,left.location.register,location.register);
            end;
          LOC_REFERENCE,
          LOC_CREFERENCE:
            begin
              location.register:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
              emit_ref_reg(A_MOVQ,S_NO,left.location.reference,location.register);
            end;
        end;
        { load mask }
        hreg:=tcgx86(cg).getmmxregister(current_asmdata.CurrAsmList);
        emit_reg_reg(A_MOVD,S_NO,r,hreg);
        { lower 32 bit }
        emit_reg_reg(A_PXOR,S_NO,hreg,location.register);
        { shift mask }
        emit_const_reg(A_PSLLQ,S_B,32,hreg);
        { higher 32 bit }
        emit_reg_reg(A_PXOR,S_NO,hreg,location.register);
      end;
{$endif SUPPORT_MMX}


{*****************************************************************************
                              TX86MODDIVNODE
*****************************************************************************}

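    { division strategy: a power-of-two constant divisor becomes a shift
      (with a numerator adjustment for signed operands), other constant
      divisors are handled by multiplying with a magic reciprocal computed by
      calc_divconst_magic_signed/unsigned, and the general case uses DIV/IDIV,
      which takes the dividend in (R/E)DX:(R/E)AX and returns the quotient in
      (R/E)AX and the remainder in (R/E)DX }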
    procedure tx86moddivnode.pass_generate_code;
      var
        hreg1,hreg2,rega,regd : tregister;
        power : longint;
        op : tasmop;
        cgsize : TCgSize;
        opsize : topsize;
        e, sm : aint;
        d, m : aword;
        m_add : boolean;
        s : byte;
      begin
        secondpass(left);
        if codegenerror then
          exit;
        secondpass(right);
        if codegenerror then
          exit;

        { put numerator in register }
        cgsize:=def_cgsize(resultdef);
        opsize:=TCGSize2OpSize[cgsize];
        if not (cgsize in [OS_32,OS_S32,OS_64,OS_S64]) then
          InternalError(2013102702);

        rega:=newreg(R_INTREGISTER,RS_EAX,cgsize2subreg(R_INTREGISTER,cgsize));
        regd:=newreg(R_INTREGISTER,RS_EDX,cgsize2subreg(R_INTREGISTER,cgsize));

        location_reset(location,LOC_REGISTER,cgsize);

        hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,resultdef,false);
        hreg1:=left.location.register;

        if (nodetype=divn) and (right.nodetype=ordconstn) then
          begin
            if ispowerof2(int64(tordconstnode(right).value),power) then
              begin
                { for signed numbers, the numerator must be adjusted before the
                  shift instruction, but not with unsigned numbers! Otherwise,
                  "Cardinal($ffffffff) div 16" overflows! (JM) }
                if is_signed(left.resultdef) then
                  begin
                    { use a sequence without jumps, saw this in
                      comp.compilers (JM) }
                    { no jumps, but more operations }
                    hreg2:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                    emit_reg_reg(A_MOV,opsize,hreg1,hreg2);
                    { if the left value is negative, hreg2=$ffffffff, otherwise 0 }
                    emit_const_reg(A_SAR,opsize,resultdef.size*8-1,hreg2);
                    { if negative, hreg2=right value-1, otherwise 0 }
                    { (don't use emit_const_reg, because if value>high(longint)
                       then it must first be loaded into a register) }
                    cg.a_op_const_reg(current_asmdata.CurrAsmList,OP_AND,cgsize,tordconstnode(right).value-1,hreg2);
                    { add to the left value }
                    emit_reg_reg(A_ADD,opsize,hreg2,hreg1);
                    { do the shift }
                    emit_const_reg(A_SAR,opsize,power,hreg1);
                  end
                else
                  emit_const_reg(A_SHR,opsize,power,hreg1);
                location.register:=hreg1;
              end
            else
              begin
                if is_signed(left.resultdef) then
                  begin
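                    { signed division by an arbitrary constant: multiply by
                      the magic value sm and keep only the high half of the
                      product (left in EDX by the single-operand IMUL),
                      correct it with the dividend where the signs require it,
                      shift arithmetically by s and add the extracted sign bit
                      so the quotient is truncated towards zero }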
                    e:=tordconstnode(right).value.svalue;
                    calc_divconst_magic_signed(resultdef.size*8,e,sm,s);
                    cg.getcpuregister(current_asmdata.CurrAsmList,rega);
                    emit_const_reg(A_MOV,opsize,sm,rega);
                    cg.getcpuregister(current_asmdata.CurrAsmList,regd);
                    emit_reg(A_IMUL,opsize,hreg1);
                    { only the high half of result is used }
                    cg.ungetcpuregister(current_asmdata.CurrAsmList,rega);
                    { add or subtract dividend }
                    if (e>0) and (sm<0) then
                      emit_reg_reg(A_ADD,opsize,hreg1,regd)
                    else if (e<0) and (sm>0) then
                      emit_reg_reg(A_SUB,opsize,hreg1,regd);
                    { shift if necessary }
                    if (s<>0) then
                      emit_const_reg(A_SAR,opsize,s,regd);
                    { extract and add the sign bit }
                    if (e<0) then
                      emit_reg_reg(A_MOV,opsize,regd,hreg1);
                    { if e>=0, hreg1 still contains dividend }
                    emit_const_reg(A_SHR,opsize,left.resultdef.size*8-1,hreg1);
                    emit_reg_reg(A_ADD,opsize,hreg1,regd);
                    cg.ungetcpuregister(current_asmdata.CurrAsmList,regd);
                    location.register:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                    cg.a_load_reg_reg(current_asmdata.CurrAsmList,cgsize,cgsize,regd,location.register)
                  end
                else
                  begin
                    d:=tordconstnode(right).value.svalue;
                    if d>=aword(1) shl (left.resultdef.size*8-1) then
                      begin
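                        { a divisor with the top bit set means the unsigned
                          quotient can only be 0 or 1, so compare and
                          materialise the carry flag: 0-(-1)-CF = 1-CF }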
                        if (cgsize in [OS_64,OS_S64]) then
                          begin
                            hreg2:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                            emit_const_reg(A_MOV,opsize,aint(d),hreg2);
                            emit_reg_reg(A_CMP,opsize,hreg2,hreg1);
                          end
                        else
                          emit_const_reg(A_CMP,opsize,aint(d),hreg1);
                        location.register:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                        emit_const_reg(A_MOV,opsize,0,location.register);
                        emit_const_reg(A_SBB,opsize,-1,location.register);
                      end
                    else
                      begin
                        calc_divconst_magic_unsigned(resultdef.size*8,d,m,m_add,s);
                        cg.getcpuregister(current_asmdata.CurrAsmList,rega);
                        emit_const_reg(A_MOV,opsize,aint(m),rega);
                        cg.getcpuregister(current_asmdata.CurrAsmList,regd);
                        emit_reg(A_MUL,opsize,hreg1);
                        cg.ungetcpuregister(current_asmdata.CurrAsmList,rega);
                        if m_add then
                          begin
                            { addition can overflow, shift first bit considering carry,
                              then shift remaining bits in regular way. }
                            emit_reg_reg(A_ADD,opsize,hreg1,regd);
                            emit_const_reg(A_RCR,opsize,1,regd);
                            dec(s);
                          end;
                        if s<>0 then
                          emit_const_reg(A_SHR,opsize,aint(s),regd);
                        cg.ungetcpuregister(current_asmdata.CurrAsmList,regd);
                        location.register:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
                        cg.a_load_reg_reg(current_asmdata.CurrAsmList,cgsize,cgsize,regd,location.register)
                      end;
                  end;
              end;
          end
        else
          begin
            { put the numerator in EAX }
            cg.getcpuregister(current_asmdata.CurrAsmList,rega);
            emit_reg_reg(A_MOV,opsize,hreg1,rega);
            cg.getcpuregister(current_asmdata.CurrAsmList,regd);
            { sign extension depends on the left type }
            if is_signed(left.resultdef) then
              case left.resultdef.size of
{$ifdef x86_64}
                8:
                  emit_none(A_CQO,S_NO);
{$endif x86_64}
                4:
                  emit_none(A_CDQ,S_NO);
                else
                  internalerror(2013102701);
              end
            else
              emit_reg_reg(A_XOR,opsize,regd,regd);

            { division depends on the right type }
            if is_signed(right.resultdef) then
              op:=A_IDIV
            else
              op:=A_DIV;

            if right.location.loc in [LOC_REFERENCE,LOC_CREFERENCE] then
              emit_ref(op,opsize,right.location.reference)
            else if right.location.loc in [LOC_REGISTER,LOC_CREGISTER] then
              emit_reg(op,opsize,right.location.register)
            else
              begin
                hreg1:=cg.getintregister(current_asmdata.CurrAsmList,right.location.size);
                hlcg.a_load_loc_reg(current_asmdata.CurrAsmList,right.resultdef,right.resultdef,right.location,hreg1);
                emit_reg(op,opsize,hreg1);
              end;

            { copy the result into a new register; this releases R/EAX and R/EDX }
            cg.ungetcpuregister(current_asmdata.CurrAsmList,regd);
            cg.ungetcpuregister(current_asmdata.CurrAsmList,rega);
            location.register:=cg.getintregister(current_asmdata.CurrAsmList,cgsize);
            if nodetype=divn then
              cg.a_load_reg_reg(current_asmdata.CurrAsmList,cgsize,cgsize,rega,location.register)
            else
              cg.a_load_reg_reg(current_asmdata.CurrAsmList,cgsize,cgsize,regd,location.register);
          end;
      end;

end.