{ n386add.pas }
  1. {
  2. Copyright (c) 2000-2002 by Florian Klaempfl
  3. Code generation for add nodes on the i386
  4. This program is free software; you can redistribute it and/or modify
  5. it under the terms of the GNU General Public License as published by
  6. the Free Software Foundation; either version 2 of the License, or
  7. (at your option) any later version.
  8. This program is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. GNU General Public License for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with this program; if not, write to the Free Software
  14. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  15. ****************************************************************************
  16. }
unit n386add;

{$i fpcdefs.inc}

interface

    uses
       node,nadd,cpubase,nx86add;

    type
       { i386-specific code generation for add nodes (arithmetic,
         logical and comparison binary operators); extends the common
         x86 implementation with 64-bit support and with MUL/IMUL
         based multiplication }
       ti386addnode = class(tx86addnode)
          { returning false makes the compiler use this unit's own
            32->64 bit multiplication instead of an RTL helper }
          function use_generic_mul32to64: boolean; override;
          function use_generic_mul64bit: boolean; override;
          { routes suitable multiplications to second_mul }
          procedure second_addordinal; override;
          procedure second_add64bit;override;
          procedure second_cmp64bit;override;
          { emits a one-operand MUL/IMUL (EDX:EAX := EAX * operand) }
          procedure second_mul(unsigned: boolean);
          { emits an inline 64x64->64 bit multiplication }
          procedure second_mul64bit;
        protected
          { moves a product out of EDX:EAX into virtual registers }
          procedure set_mul_result_location;
       end;
  34. implementation
  35. uses
  36. globtype,systems,
  37. cutils,verbose,globals,
  38. symconst,symdef,paramgr,defutil,
  39. aasmbase,aasmtai,aasmdata,aasmcpu,
  40. cgbase,procinfo,
  41. ncon,nset,cgutils,tgobj,
  42. cga,ncgutil,cgobj,cg64f32,cgx86,
  43. hlcgobj;
  44. {*****************************************************************************
  45. use_generic_mul32to64
  46. *****************************************************************************}
  47. function ti386addnode.use_generic_mul32to64: boolean;
  48. begin
  49. result := False;
  50. end;
  51. function ti386addnode.use_generic_mul64bit: boolean;
  52. begin
  53. result:=(cs_check_overflow in current_settings.localswitches) or
  54. (cs_opt_size in current_settings.optimizerswitches);
  55. end;
  56. { handles all unsigned multiplications, and 32->64 bit signed ones.
  57. 32bit-only signed mul is handled by generic codegen }
  58. procedure ti386addnode.second_addordinal;
  59. var
  60. unsigned: boolean;
  61. begin
  62. unsigned:=not(is_signed(left.resultdef)) or
  63. not(is_signed(right.resultdef));
  64. { use IMUL instead of MUL in case overflow checking is off and we're
  65. doing a 32->32-bit multiplication }
  66. if not (cs_check_overflow in current_settings.localswitches) and
  67. not is_64bit(resultdef) then
  68. unsigned:=false;
  69. if (nodetype=muln) and (unsigned or is_64bit(resultdef)) then
  70. second_mul(unsigned)
  71. else
  72. inherited second_addordinal;
  73. end;
  74. {*****************************************************************************
  75. Add64bit
  76. *****************************************************************************}
{ Generates code for 64-bit add/sub/and/or/xor on i386, where a 64-bit
  value occupies a register pair (reglo/reghi), a memory location or a
  constant.  Multiplication is delegated to second_mul64bit; the result
  always ends up in left.location. }
procedure ti386addnode.second_add64bit;
  var
    op : TOpCG;
    op1,op2 : TAsmOp;
    opsize : TOpSize;
    hregister,
    hregister2 : tregister;
    hl4 : tasmlabel;
    mboverflow,
    unsigned:boolean;
    r:Tregister;
  begin
    pass_left_right;

    op1:=A_NONE;
    op2:=A_NONE;
    mboverflow:=false;
    opsize:=S_L;
    { the operation is treated as unsigned as soon as one operand is
      u64bit; this only influences which flag is tested by the
      overflow check at the end }
    unsigned:=((left.resultdef.typ=orddef) and
               (torddef(left.resultdef).ordtype=u64bit)) or
              ((right.resultdef.typ=orddef) and
               (torddef(right.resultdef).ordtype=u64bit));
    case nodetype of
      addn :
        begin
          op:=OP_ADD;
          mboverflow:=true;
        end;
      subn :
        begin
          op:=OP_SUB;
          { explicit SUB/SBB pair needed for the swapped-subtraction
            path below, where the carry must be chained by hand }
          op1:=A_SUB;
          op2:=A_SBB;
          mboverflow:=true;
        end;
      xorn:
        op:=OP_XOR;
      orn:
        op:=OP_OR;
      andn:
        op:=OP_AND;
      muln:
        begin
          { 64x64 bit multiplication has its own code path, which also
            sets the result location itself }
          second_mul64bit;
          exit;
        end
      else
        begin
          { everything should be handled in pass_1 (JM) }
          internalerror(200109051);
        end;
    end;

    { left and right no register?  }
    { then one must be demanded    }
    if (left.location.loc<>LOC_REGISTER) then
      begin
        if (right.location.loc<>LOC_REGISTER) then
          begin
            { neither operand is in registers: load left into a fresh
              register pair }
            hregister:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
            hregister2:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
            cg64.a_load64_loc_reg(current_asmdata.CurrAsmList,left.location,joinreg64(hregister,hregister2));
            location_reset(left.location,LOC_REGISTER,left.location.size);
            left.location.register64.reglo:=hregister;
            left.location.register64.reghi:=hregister2;
          end
        else
          begin
            { right is already in registers: swap operands and remember
              the swap (matters only for the non-commutative subn) }
            location_swap(left.location,right.location);
            toggleflag(nf_swapped);
          end;
      end;

    { at this point, left.location.loc should be LOC_REGISTER }
    if right.location.loc=LOC_REGISTER then
      begin
        { when swapped another result register }
        if (nodetype=subn) and (nf_swapped in flags) then
          begin
            { operands were swapped, so compute right-left into right's
              register pair and swap the locations back so that the
              final location_copy below picks up the result }
            cg64.a_op64_reg_reg(current_asmdata.CurrAsmList,op,location.size,
              left.location.register64,
              right.location.register64);
            location_swap(left.location,right.location);
            toggleflag(nf_swapped);
          end
        else
          begin
            { dst := dst op src, i.e. left := left op right }
            cg64.a_op64_reg_reg(current_asmdata.CurrAsmList,op,location.size,
              right.location.register64,
              left.location.register64);
          end;
      end
    else
      begin
        { right.location<>LOC_REGISTER }
        if (nodetype=subn) and (nf_swapped in flags) then
          begin
            { compute right-left via a scratch register: SUB on the low
              dword sets the carry that the SBB on the high dword
              consumes, so no flag-clobbering instruction may be
              scheduled between them (the loads/moves below are plain
              MOVs, which leave the flags untouched) }
            r:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
            cg64.a_load64low_loc_reg(current_asmdata.CurrAsmList,right.location,r);
            emit_reg_reg(op1,opsize,left.location.register64.reglo,r);
            emit_reg_reg(A_MOV,opsize,r,left.location.register64.reglo);
            cg64.a_load64high_loc_reg(current_asmdata.CurrAsmList,right.location,r);
            { the carry flag is still ok }
            emit_reg_reg(op2,opsize,left.location.register64.reghi,r);
            emit_reg_reg(A_MOV,opsize,r,left.location.register64.reghi);
          end
        else
          begin
            cg64.a_op64_loc_reg(current_asmdata.CurrAsmList,op,location.size,right.location,
              left.location.register64);
          end;
        location_freetemp(current_asmdata.CurrAsmList,right.location);
      end;

    { only in case of overflow operations }
    { produce overflow code }
    { we must put it here directly, because sign of operation }
    { is in unsigned VAR!!                                    }
    if mboverflow then
      begin
        if cs_check_overflow in current_settings.localswitches then
          begin
            current_asmdata.getjumplabel(hl4);
            if unsigned then
              { unsigned overflow is signalled by the carry flag;
                F_AE = carry clear = no overflow }
              cg.a_jmp_flags(current_asmdata.CurrAsmList,F_AE,hl4)
            else
              cg.a_jmp_flags(current_asmdata.CurrAsmList,F_NO,hl4);
            cg.a_call_name(current_asmdata.CurrAsmList,'FPC_OVERFLOW',false);
            cg.a_label(current_asmdata.CurrAsmList,hl4);
          end;
      end;

    location_copy(location,left.location);
  end;
{ Generates code for 64-bit comparisons on i386.  The comparison is
  done in two steps: the high dwords are compared first and
  firstjmp64bitcmp decides as much as possible from that; only when
  the high dwords are equal are the low dwords compared, and
  secondjmp64bitcmp settles the result.  The node's result is
  delivered as LOC_JUMP (branches to CurrTrueLabel/CurrFalseLabel). }
procedure ti386addnode.second_cmp64bit;
  var
    hregister,
    hregister2 : tregister;
    href : treference;
    unsigned : boolean;

  { emits the conditional jumps that can be decided from the
    comparison of the high dwords alone }
  procedure firstjmp64bitcmp;
    var
      oldnodetype : tnodetype;
    begin
{$ifdef OLDREGVARS}
      load_all_regvars(current_asmdata.CurrAsmList);
{$endif OLDREGVARS}
      { the jump sequence is a little bit hairy }
      case nodetype of
        ltn,gtn:
          begin
            cg.a_jmp_flags(current_asmdata.CurrAsmList,getresflags(unsigned),current_procinfo.CurrTrueLabel);
            { cheat a little bit for the negative test: temporarily
              pretend the operands were swapped so getresflags yields
              the opposite condition }
            toggleflag(nf_swapped);
            cg.a_jmp_flags(current_asmdata.CurrAsmList,getresflags(unsigned),current_procinfo.CurrFalseLabel);
            toggleflag(nf_swapped);
          end;
        lten,gten:
          begin
            { when the high dwords already differ, <= behaves like <
              and >= like >, so test the strict variants here }
            oldnodetype:=nodetype;
            if nodetype=lten then
              nodetype:=ltn
            else
              nodetype:=gtn;
            cg.a_jmp_flags(current_asmdata.CurrAsmList,getresflags(unsigned),current_procinfo.CurrTrueLabel);
            { cheat for the negative test }
            if nodetype=ltn then
              nodetype:=gtn
            else
              nodetype:=ltn;
            cg.a_jmp_flags(current_asmdata.CurrAsmList,getresflags(unsigned),current_procinfo.CurrFalseLabel);
            nodetype:=oldnodetype;
          end;
        equaln:
          { different high dwords -> the values cannot be equal }
          cg.a_jmp_flags(current_asmdata.CurrAsmList,F_NE,current_procinfo.CurrFalseLabel);
        unequaln:
          { different high dwords -> the values are certainly unequal }
          cg.a_jmp_flags(current_asmdata.CurrAsmList,F_NE,current_procinfo.CurrTrueLabel);
      end;
    end;

  { emits the jumps that decide the result once the low dwords have
    been compared (only reached when the high dwords were equal) }
  procedure secondjmp64bitcmp;
    begin
      { the jump sequence is a little bit hairy }
      case nodetype of
        ltn,gtn,lten,gten:
          begin
            { the comparison of the low dword always has to be }
            { unsigned!                                        }
            cg.a_jmp_flags(current_asmdata.CurrAsmList,getresflags(true),current_procinfo.CurrTrueLabel);
            cg.a_jmp_always(current_asmdata.CurrAsmList,current_procinfo.CurrFalseLabel);
          end;
        equaln:
          begin
            cg.a_jmp_flags(current_asmdata.CurrAsmList,F_NE,current_procinfo.CurrFalseLabel);
            cg.a_jmp_always(current_asmdata.CurrAsmList,current_procinfo.CurrTrueLabel);
          end;
        unequaln:
          begin
            cg.a_jmp_flags(current_asmdata.CurrAsmList,F_NE,current_procinfo.CurrTrueLabel);
            cg.a_jmp_always(current_asmdata.CurrAsmList,current_procinfo.CurrFalseLabel);
          end;
      end;
    end;

  begin
    pass_left_right;

    { the comparison is unsigned as soon as one operand is u64bit }
    unsigned:=((left.resultdef.typ=orddef) and
               (torddef(left.resultdef).ordtype=u64bit)) or
              ((right.resultdef.typ=orddef) and
               (torddef(right.resultdef).ordtype=u64bit));

    { left and right no register?  }
    { then one must be demanded    }
    if (left.location.loc<>LOC_REGISTER) then
      begin
        if (right.location.loc<>LOC_REGISTER) then
          begin
            { we can reuse a CREGISTER for comparison }
            if (left.location.loc<>LOC_CREGISTER) then
              begin
                hregister:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
                hregister2:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
                cg64.a_load64_loc_reg(current_asmdata.CurrAsmList,left.location,joinreg64(hregister,hregister2));
                location_freetemp(current_asmdata.CurrAsmList,left.location);
                location_reset(left.location,LOC_REGISTER,left.location.size);
                left.location.register64.reglo:=hregister;
                left.location.register64.reghi:=hregister2;
              end;
          end
        else
          begin
            { right is in registers: swap the operands (the nf_swapped
              flag makes getresflags return the mirrored condition) }
            location_swap(left.location,right.location);
            toggleflag(nf_swapped);
          end;
      end;

    { at this point, left.location.loc should be LOC_REGISTER }
    if right.location.loc=LOC_REGISTER then
      begin
        emit_reg_reg(A_CMP,S_L,right.location.register64.reghi,left.location.register64.reghi);
        firstjmp64bitcmp;
        emit_reg_reg(A_CMP,S_L,right.location.register64.reglo,left.location.register64.reglo);
        secondjmp64bitcmp;
      end
    else
      begin
        case right.location.loc of
          LOC_CREGISTER :
            begin
              emit_reg_reg(A_CMP,S_L,right.location.register64.reghi,left.location.register64.reghi);
              firstjmp64bitcmp;
              emit_reg_reg(A_CMP,S_L,right.location.register64.reglo,left.location.register64.reglo);
              secondjmp64bitcmp;
            end;
          LOC_CREFERENCE,
          LOC_REFERENCE :
            begin
              tcgx86(cg).make_simple_ref(current_asmdata.CurrAsmList,right.location.reference);
              { the high dword lives 4 bytes above the low dword }
              href:=right.location.reference;
              inc(href.offset,4);
              emit_ref_reg(A_CMP,S_L,href,left.location.register64.reghi);
              firstjmp64bitcmp;
              emit_ref_reg(A_CMP,S_L,right.location.reference,left.location.register64.reglo);
              secondjmp64bitcmp;
              { NOTE(review): secondjmp64bitcmp ends every handled node
                type with an unconditional jump, so this extra jump to
                the false label looks unreachable -- confirm before
                removing }
              cg.a_jmp_always(current_asmdata.CurrAsmList,current_procinfo.CurrFalseLabel);
              location_freetemp(current_asmdata.CurrAsmList,right.location);
            end;
          LOC_CONSTANT :
            begin
              current_asmdata.CurrAsmList.concat(taicpu.op_const_reg(A_CMP,S_L,aint(hi(right.location.value64)),left.location.register64.reghi));
              firstjmp64bitcmp;
              current_asmdata.CurrAsmList.concat(taicpu.op_const_reg(A_CMP,S_L,aint(lo(right.location.value64)),left.location.register64.reglo));
              secondjmp64bitcmp;
            end;
          else
            internalerror(200203282);
        end;
      end;

    { we have LOC_JUMP as result }
    location_reset(location,LOC_JUMP,OS_NO)
  end;
  349. {*****************************************************************************
  350. x86 MUL
  351. *****************************************************************************}
{ Moves a multiplication result out of the fixed CPU registers
  (EDX:EAX for 64-bit results, EAX for 32-bit ones) into freshly
  allocated virtual register(s), releases EAX/EDX back to the
  register allocator and frees any temps held by the operands.
  Each CPU register is released immediately before/after its value
  is copied away, keeping its reserved window as short as possible. }
procedure ti386addnode.set_mul_result_location;
begin
  location_reset(location,LOC_REGISTER,def_cgsize(resultdef));
  {Free EAX,EDX}
  cg.ungetcpuregister(current_asmdata.CurrAsmList,NR_EDX);
  if is_64bit(resultdef) then
  begin
    {Allocate a couple of registers and store EDX:EAX into it}
    location.register64.reghi := cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
    cg.a_load_reg_reg(current_asmdata.CurrAsmList, OS_INT, OS_INT, NR_EDX, location.register64.reghi);
    cg.ungetcpuregister(current_asmdata.CurrAsmList,NR_EAX);
    location.register64.reglo := cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
    cg.a_load_reg_reg(current_asmdata.CurrAsmList, OS_INT, OS_INT, NR_EAX, location.register64.reglo);
  end
  else
  begin
    {Allocate a new register and store the result in EAX in it.}
    location.register:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
    cg.ungetcpuregister(current_asmdata.CurrAsmList,NR_EAX);
    cg.a_load_reg_reg(current_asmdata.CurrAsmList,OS_INT,OS_INT,NR_EAX,location.register);
  end;
  location_freetemp(current_asmdata.CurrAsmList,left.location);
  location_freetemp(current_asmdata.CurrAsmList,right.location);
end;
{ Emits a one-operand MUL/IMUL, i.e. EDX:EAX := EAX * operand.  Used
  for all unsigned multiplications and for signed 32->64 bit ones
  (per second_addordinal's dispatch); the result location is set by
  set_mul_result_location. }
procedure ti386addnode.second_mul(unsigned: boolean);

var
  reg:Tregister;
  ref:Treference;
  use_ref:boolean;
  hl4 : tasmlabel;

const
  { indexed by the 'unsigned' flag: false -> IMUL (signed),
    true -> MUL (unsigned) }
  asmops: array[boolean] of tasmop = (A_IMUL, A_MUL);

begin
  pass_left_right;

  { Mul supports registers and references, so if not register/reference,
    load the location into a register.
    The variant of IMUL which is capable of doing 32->64 bits has the same restrictions. }
  use_ref:=false;
  if left.location.loc in [LOC_REGISTER,LOC_CREGISTER] then
    reg:=left.location.register
  else if left.location.loc in [LOC_REFERENCE,LOC_CREFERENCE] then
    begin
      tcgx86(cg).make_simple_ref(current_asmdata.CurrAsmList,left.location.reference);
      ref:=left.location.reference;
      use_ref:=true;
    end
  else
    begin
      {LOC_CONSTANT for example.}
      reg:=cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
      hlcg.a_load_loc_reg(current_asmdata.CurrAsmList,left.resultdef,osuinttype,left.location,reg);
    end;
  {Allocate EAX.}
  cg.getcpuregister(current_asmdata.CurrAsmList,NR_EAX);
  {Load the right value.}
  hlcg.a_load_loc_reg(current_asmdata.CurrAsmList,right.resultdef,osuinttype,right.location,NR_EAX);
  {Also allocate EDX, since it is also modified by a mul (JM).}
  cg.getcpuregister(current_asmdata.CurrAsmList,NR_EDX);
  if use_ref then
    emit_ref(asmops[unsigned],S_L,ref)
  else
    emit_reg(asmops[unsigned],S_L,reg);
  if (cs_check_overflow in current_settings.localswitches) and
    { 32->64 bit cannot overflow }
    (not is_64bit(resultdef)) then
    begin
      { one-operand MUL/IMUL set CF=OF when the upper half of the
        product is significant, so a carry-clear test (F_AE) means
        "no overflow" and skips the runtime error call }
      current_asmdata.getjumplabel(hl4);
      cg.a_jmp_flags(current_asmdata.CurrAsmList,F_AE,hl4);
      cg.a_call_name(current_asmdata.CurrAsmList,'FPC_OVERFLOW',false);
      cg.a_label(current_asmdata.CurrAsmList,hl4);
    end;
  set_mul_result_location;
end;
{ Emits an inline 64x64 bit multiplication using the schoolbook
  decomposition
    lo64(l*r) = lo(l)*lo(r) + ((lo(l)*hi(r) + hi(l)*lo(r)) shl 32)
  The cross terms only contribute to the high dword and only their
  low 32 bits are needed, so plain IMUL works for them regardless of
  operand signs. }
procedure ti386addnode.second_mul64bit;
var
  list: TAsmList;
  hreg1,hreg2: tregister;
begin
  { 64x64 multiplication yields 128-bit result, but we're only
    interested in its lower 64 bits. This lower part is independent
    of operand signs, and so is the generated code. }
  { pass_left_right already called from second_add64bit }
  list:=current_asmdata.CurrAsmList;
  if left.location.loc in [LOC_REFERENCE,LOC_CREFERENCE] then
    tcgx86(cg).make_simple_ref(list,left.location.reference);
  if right.location.loc in [LOC_REFERENCE,LOC_CREFERENCE] then
    tcgx86(cg).make_simple_ref(list,right.location.reference);
  { calculate 32-bit cross terms lo(right)*hi(left) -> hreg1 and
    hi(right)*lo(left) -> hreg2 }
  if (right.location.loc=LOC_CONSTANT) then
    begin
      { Omit zero terms, if any }
      hreg1:=NR_NO;
      hreg2:=NR_NO;
      if lo(right.location.value64)<>0 then
        hreg1:=cg.getintregister(list,OS_INT);
      if hi(right.location.value64)<>0 then
        hreg2:=cg.getintregister(list,OS_INT);
      { NOTE(review): if both halves of the constant are zero, neither
        term register is allocated and the final add below raises an
        internal error; presumably multiplication by a constant 0 is
        folded away in earlier passes -- confirm }
      { Take advantage of 3-operand form of IMUL }
      case left.location.loc of
        LOC_REGISTER,LOC_CREGISTER:
          begin
            if hreg1<>NR_NO then
              emit_const_reg_reg(A_IMUL,S_L,longint(lo(right.location.value64)),left.location.register64.reghi,hreg1);
            if hreg2<>NR_NO then
              emit_const_reg_reg(A_IMUL,S_L,longint(hi(right.location.value64)),left.location.register64.reglo,hreg2);
          end;
        LOC_REFERENCE,LOC_CREFERENCE:
          begin
            if hreg2<>NR_NO then
              list.concat(taicpu.op_const_ref_reg(A_IMUL,S_L,longint(hi(right.location.value64)),left.location.reference,hreg2));
            { the high dword of left lives 4 bytes above its low dword }
            inc(left.location.reference.offset,4);
            if hreg1<>NR_NO then
              list.concat(taicpu.op_const_ref_reg(A_IMUL,S_L,longint(lo(right.location.value64)),left.location.reference,hreg1));
            dec(left.location.reference.offset,4);
          end;
        else
          InternalError(2014011602);
      end;
    end
  else
    begin
      hreg1:=cg.getintregister(list,OS_INT);
      hreg2:=cg.getintregister(list,OS_INT);
      cg64.a_load64low_loc_reg(list,left.location,hreg1);
      cg64.a_load64high_loc_reg(list,left.location,hreg2);
      case right.location.loc of
        LOC_REGISTER,LOC_CREGISTER:
          begin
            { hreg1 := lo(left)*hi(right), hreg2 := hi(left)*lo(right) }
            emit_reg_reg(A_IMUL,S_L,right.location.register64.reghi,hreg1);
            emit_reg_reg(A_IMUL,S_L,right.location.register64.reglo,hreg2);
          end;
        LOC_REFERENCE,LOC_CREFERENCE:
          begin
            emit_ref_reg(A_IMUL,S_L,right.location.reference,hreg2);
            { temporarily point the reference at right's high dword }
            inc(right.location.reference.offset,4);
            emit_ref_reg(A_IMUL,S_L,right.location.reference,hreg1);
            dec(right.location.reference.offset,4);
          end;
        else
          InternalError(2014011603);
      end;
    end;
  { add hi*lo and lo*hi terms together }
  if (hreg1<>NR_NO) and (hreg2<>NR_NO) then
    emit_reg_reg(A_ADD,S_L,hreg2,hreg1);
  { load lo(right) into EAX }
  cg.getcpuregister(list,NR_EAX);
  cg64.a_load64low_loc_reg(list,right.location,NR_EAX);
  { multiply EAX by lo(left), producing 64-bit value in EDX:EAX }
  cg.getcpuregister(list,NR_EDX);
  if (left.location.loc in [LOC_REGISTER,LOC_CREGISTER]) then
    emit_reg(A_MUL,S_L,left.location.register64.reglo)
  else if (left.location.loc in [LOC_REFERENCE,LOC_CREFERENCE]) then
    emit_ref(A_MUL,S_L,left.location.reference)
  else
    InternalError(2014011604);
  { add previously calculated terms to the high half }
  if (hreg1<>NR_NO) then
    emit_reg_reg(A_ADD,S_L,hreg1,NR_EDX)
  else if (hreg2<>NR_NO) then
    emit_reg_reg(A_ADD,S_L,hreg2,NR_EDX)
  else
    { NOTE(review): duplicates the error id used a few lines above }
    InternalError(2014011604);
  { Result is now in EDX:EAX. Copy it to virtual registers. }
  set_mul_result_location;
end;
begin
  { register this class as the implementation used for add nodes
    by the code generator }
  caddnode:=ti386addnode;
end.