{
    Copyright (c) 1998-2002 by Florian Klaempfl

    This unit implements the code generator for the i386

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit cgcpu;

{$i fpcdefs.inc}

interface

  uses
    globtype,
    cgbase,cgobj,cg64f32,cgx86,
    aasmbase,aasmtai,aasmdata,aasmcpu,
    cpubase,parabase,cgutils,
    symconst,symdef,symsym
    ;

  type
    tcg386 = class(tcgx86)
      procedure init_register_allocators;override;

      { passing parameter using push instead of mov }
      procedure a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);override;
      procedure a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);override;
      procedure a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);override;
      procedure a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);override;

      procedure g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);override;
      procedure g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
      procedure g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);

      procedure g_maybe_got_init(list: TAsmList); override;
    end;

    tcg64f386 = class(tcg64f32)
      procedure a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);override;
      procedure a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);override;
      procedure a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);override;
      procedure a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);override;
      procedure a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);override;
      procedure a_op64_ref(list : TAsmList;op:TOpCG;size : tcgsize;const ref: treference);override;
    private
      procedure get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
    end;

  procedure create_codegen;

implementation

  uses
    globals,verbose,systems,cutils,
    paramgr,procinfo,fmodule,
    rgcpu,rgx86,cpuinfo;
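
  { Returns true when no fixed stack is used and the (single) parameter
    location is a stack reference addressed through ESP, so the value can be
    passed with a PUSH instead of a MOV to the final stack location. }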
  function use_push(const cgpara:tcgpara):boolean;
    begin
      result:=(not paramanager.use_fixed_stack) and
              assigned(cgpara.location) and
              (cgpara.location^.loc=LOC_REFERENCE) and
              (cgpara.location^.reference.index=NR_STACK_POINTER_REG);
    end;

  procedure tcg386.init_register_allocators;
    begin
      inherited init_register_allocators;
      if assigned(current_procinfo) and (current_procinfo.framepointer<>NR_EBP) then
        begin
          { Sometimes, whole program optimization will forego a frame pointer on leaf functions }
          if (cs_useebp in current_settings.optimizerswitches) then
            rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI,RS_EBP],first_int_imreg,[])
          else
            rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI],first_int_imreg,[]);
        end
      else
        rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI],first_int_imreg,[RS_EBP]);
      rg[R_MMXREGISTER]:=trgcpu.create(R_MMXREGISTER,R_SUBNONE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
      rg[R_MMREGISTER]:=trgcpu.create(R_MMREGISTER,R_SUBWHOLE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
      rgfpu:=Trgx86fpu.create;
    end;

  procedure tcg386.a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);
    var
      pushsize : tcgsize;
    begin
      check_register_size(size,r);
      if use_push(cgpara) then
        begin
          cgpara.check_simple_location;
          if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
            pushsize:=cgpara.location^.size
          else
            pushsize:=int_cgsize(cgpara.alignment);
          list.concat(taicpu.op_reg(A_PUSH,tcgsize2opsize[pushsize],makeregsize(list,r,pushsize)));
        end
      else
        inherited a_load_reg_cgpara(list,size,r,cgpara);
    end;

  procedure tcg386.a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);
    var
      pushsize : tcgsize;
    begin
      if use_push(cgpara) then
        begin
          cgpara.check_simple_location;
          if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
            pushsize:=cgpara.location^.size
          else
            pushsize:=int_cgsize(cgpara.alignment);
          list.concat(taicpu.op_const(A_PUSH,tcgsize2opsize[pushsize],a));
        end
      else
        inherited a_load_const_cgpara(list,size,a,cgpara);
    end;

  procedure tcg386.a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);

    procedure pushdata(paraloc:pcgparalocation;ofs:tcgint);
      var
        pushsize : tcgsize;
        opsize : topsize;
        tmpreg : tregister;
        href : treference;
      begin
        if not assigned(paraloc) then
          exit;
        if (paraloc^.loc<>LOC_REFERENCE) or
           (paraloc^.reference.index<>NR_STACK_POINTER_REG) or
           (tcgsize2size[paraloc^.size]>sizeof(aint)) then
          internalerror(200501162);
        { Pushes are needed in reverse order, so add the size of the
          current location to the offset to load from. This
          prevents wrong calculations for the last location when
          the size is not a power of 2 }
        if assigned(paraloc^.next) then
          pushdata(paraloc^.next,ofs+tcgsize2size[paraloc^.size]);
        { Push the data starting at ofs }
        href:=r;
        inc(href.offset,ofs);
        if tcgsize2size[paraloc^.size]>cgpara.alignment then
          pushsize:=paraloc^.size
        else
          pushsize:=int_cgsize(cgpara.alignment);
        opsize:=TCgsize2opsize[pushsize];
        { for go32v2 we obtain OS_F32,
          but pushs is not valid, we need pushl }
        if opsize=S_FS then
          opsize:=S_L;
        if tcgsize2size[paraloc^.size]<cgpara.alignment then
          begin
            tmpreg:=getintregister(list,pushsize);
            a_load_ref_reg(list,paraloc^.size,pushsize,href,tmpreg);
            list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
          end
        else
          begin
            make_simple_ref(list,href);
            list.concat(taicpu.op_ref(A_PUSH,opsize,href));
          end;
      end;

    var
      len : tcgint;
      href : treference;
    begin
      { cgpara.size=OS_NO requires a copy on the stack }
      if use_push(cgpara) then
        begin
          { Record copy? }
          if (cgpara.size=OS_NO) or (size=OS_NO) then
            begin
              cgpara.check_simple_location;
              len:=align(cgpara.intsize,cgpara.alignment);
              g_stackpointer_alloc(list,len);
              reference_reset_base(href,NR_STACK_POINTER_REG,0,ctempposinvalid,4,[]);
              g_concatcopy(list,r,href,len);
            end
          else
            begin
              if tcgsize2size[cgpara.size]<>tcgsize2size[size] then
                internalerror(200501161);
              if (cgpara.size=OS_F64) then
                begin
                  href:=r;
                  make_simple_ref(list,href);
                  inc(href.offset,4);
                  list.concat(taicpu.op_ref(A_PUSH,S_L,href));
                  dec(href.offset,4);
                  list.concat(taicpu.op_ref(A_PUSH,S_L,href));
                end
              else
                { We need to push the data in reverse order,
                  therefore we use a recursive algorithm }
                pushdata(cgpara.location,0);
            end
        end
      else
        begin
          href:=r;
          make_simple_ref(list,href);
          inherited a_load_ref_cgpara(list,size,href,cgpara);
        end;
    end;

  procedure tcg386.a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);
    var
      tmpreg : tregister;
      opsize : topsize;
      tmpref,dirref : treference;
    begin
      dirref:=r;
      { this could probably be done in a more optimized way, but for now this
        is sufficient }
      make_direct_ref(list,dirref);
      with dirref do
        begin
          if use_push(cgpara) then
            begin
              cgpara.check_simple_location;
              opsize:=tcgsize2opsize[OS_ADDR];
              if (segment=NR_NO) and (base=NR_NO) and (index=NR_NO) then
                begin
                  if assigned(symbol) then
                    begin
                      if (target_info.system in [system_i386_darwin,system_i386_iphonesim]) and
                         ((dirref.symbol.bind in [AB_EXTERNAL,AB_WEAK_EXTERNAL]) or
                          (cs_create_pic in current_settings.moduleswitches)) then
                        begin
                          tmpreg:=getaddressregister(list);
                          a_loadaddr_ref_reg(list,dirref,tmpreg);
                          list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
                        end
                      else if cs_create_pic in current_settings.moduleswitches then
                        begin
                          if offset<>0 then
                            begin
                              tmpreg:=getaddressregister(list);
                              a_loadaddr_ref_reg(list,dirref,tmpreg);
                              list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
                            end
                          else
                            begin
                              reference_reset_symbol(tmpref,dirref.symbol,0,sizeof(pint),[]);
                              tmpref.refaddr:=addr_pic;
                              tmpref.base:=current_procinfo.got;
                              include(current_procinfo.flags,pi_needs_got);
                              list.concat(taicpu.op_ref(A_PUSH,S_L,tmpref));
                            end
                        end
                      else
                        list.concat(Taicpu.Op_sym_ofs(A_PUSH,opsize,symbol,offset));
                    end
                  else
                    list.concat(Taicpu.Op_const(A_PUSH,opsize,offset));
                end
              else if (segment=NR_NO) and (base=NR_NO) and (index<>NR_NO) and
                      (offset=0) and (scalefactor=0) and (symbol=nil) then
                list.concat(Taicpu.Op_reg(A_PUSH,opsize,index))
              else if (segment=NR_NO) and (base<>NR_NO) and (index=NR_NO) and
                      (offset=0) and (symbol=nil) then
                list.concat(Taicpu.Op_reg(A_PUSH,opsize,base))
              else
                begin
                  tmpreg:=getaddressregister(list);
                  a_loadaddr_ref_reg(list,dirref,tmpreg);
                  list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
                end;
            end
          else
            inherited a_loadaddr_ref_cgpara(list,dirref,cgpara);
        end;
    end;
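
  { Emits the procedure epilogue: removes the stack frame (unless the routine
    has none or is marked noreturn), restores the saved registers and
    generates the RET, RET imm16 or IRET appropriate for the calling
    convention. }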
  procedure tcg386.g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);

    procedure increase_sp(a : tcgint);
      var
        href : treference;
      begin
        reference_reset_base(href,NR_STACK_POINTER_REG,a,ctempposinvalid,0,[]);
        { normally, lea is a better choice than an add }
        list.concat(Taicpu.op_ref_reg(A_LEA,TCGSize2OpSize[OS_ADDR],href,NR_STACK_POINTER_REG));
      end;

    begin
      { MMX needs to call EMMS }
      if assigned(rg[R_MMXREGISTER]) and
         (rg[R_MMXREGISTER].uses_registers) then
        list.concat(Taicpu.op_none(A_EMMS,S_NO));

      { remove stackframe }
      if not(nostackframe) and
         { we do not need an exit stack frame when we never return
           * the final ret is left so the peephole optimizer can easily do call/ret -> jmp or call conversions
           * the entry stack frame must still be generated as usual because the subroutine could still be left by
             an exception and then the unwinding code might need to restore the registers stored by the entry code
         }
         not(po_noreturn in current_procinfo.procdef.procoptions) then
        begin
          if (current_procinfo.framepointer=NR_STACK_POINTER_REG) or
             (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
            begin
              if current_procinfo.final_localsize<>0 then
                increase_sp(current_procinfo.final_localsize);
              if (not paramanager.use_fixed_stack) then
                internal_restore_regs(list,true);
              if (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
                list.concat(Taicpu.op_reg(A_POP,tcgsize2opsize[OS_ADDR],NR_FRAME_POINTER_REG));
              current_asmdata.asmcfi.cfa_def_cfa_offset(list,sizeof(pint));
            end
          else
            begin
              if (not paramanager.use_fixed_stack) then
                internal_restore_regs(list,not (pi_has_stack_allocs in current_procinfo.flags));
              generate_leave(list);
            end;
          list.concat(tai_regalloc.dealloc(current_procinfo.framepointer,nil));
        end;

      if pi_uses_ymm in current_procinfo.flags then
        list.Concat(taicpu.op_none(A_VZEROUPPER));

      { return from proc }
      if po_interrupt in current_procinfo.procdef.procoptions then
        begin
          if assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
             (current_procinfo.procdef.funcretloc[calleeside].location^.loc=LOC_REGISTER) then
            begin
              if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.register)=RS_EAX) then
                list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
              else
                internalerror(2010053001);
            end
          else
            list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EAX));
          list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EBX));
          list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ECX));
          if (current_procinfo.procdef.funcretloc[calleeside].size in [OS_64,OS_S64]) and
             assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
             assigned(current_procinfo.procdef.funcretloc[calleeside].location^.next) and
             (current_procinfo.procdef.funcretloc[calleeside].location^.next^.loc=LOC_REGISTER) then
            begin
              if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.next^.register)=RS_EDX) then
                list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
              else
                internalerror(2010053002);
            end
          else
            list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDX));
          list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ESI));
          list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDI));
          { .... also the segment registers }
          list.concat(Taicpu.Op_reg(A_POP,S_W,NR_DS));
          list.concat(Taicpu.Op_reg(A_POP,S_W,NR_ES));
          list.concat(Taicpu.Op_reg(A_POP,S_W,NR_FS));
          list.concat(Taicpu.Op_reg(A_POP,S_W,NR_GS));
          { this restores the flags }
          if current_procinfo.framepointer<>NR_STACK_POINTER_REG then
            list.concat(tai_regalloc.dealloc(NR_STACK_POINTER_REG,nil));
          list.concat(Taicpu.Op_none(A_IRET,S_NO));
        end
      { Routines with the poclearstack flag set use only a ret }
      else if (current_procinfo.procdef.proccalloption in clearstack_pocalls) and
              (not paramanager.use_fixed_stack) then
        begin
          { complex return values are removed from stack in C code PM }
          { but not on win32 }
          { and not for safecall with hidden exceptions, because the result }
          { which contains the exception is passed in EAX }
          if current_procinfo.framepointer<>NR_STACK_POINTER_REG then
            list.concat(tai_regalloc.dealloc(NR_STACK_POINTER_REG,nil));
          if ((target_info.system <> system_i386_win32) or
              (target_info.abi=abi_old_win32_gnu)) and
             not ((current_procinfo.procdef.proccalloption = pocall_safecall) and
                  (tf_safecall_exceptions in target_info.flags)) and
             paramanager.ret_in_param(current_procinfo.procdef.returndef,
                                      current_procinfo.procdef) then
            list.concat(Taicpu.Op_const(A_RET,S_W,sizeof(aint)))
          else
            list.concat(Taicpu.Op_none(A_RET,S_NO));
        end
      { ... also routines with parasize=0 }
      else if (parasize=0) then
        begin
          if current_procinfo.framepointer<>NR_STACK_POINTER_REG then
            list.concat(tai_regalloc.dealloc(NR_STACK_POINTER_REG,nil));
          list.concat(Taicpu.Op_none(A_RET,S_NO))
        end
      else
        begin
          { parameters are limited to 65535 bytes because ret allows only imm16 }
          if (parasize>65535) then
            CGMessage(cg_e_parasize_too_big);
          if current_procinfo.framepointer<>NR_STACK_POINTER_REG then
            list.concat(tai_regalloc.dealloc(NR_STACK_POINTER_REG,nil));
          list.concat(Taicpu.Op_const(A_RET,S_W,parasize));
        end;
    end;
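
  { Makes a local copy of an open array value parameter: allocates
    (high+1)*elesize bytes on the stack (touching every page on win32 so the
    guard pages can grow the stack), copies the data with REP MOVSB/MOVSW/MOVSD
    and returns the address of the copy in destreg. }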
  procedure tcg386.g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
    var
      power : longint;
      opsize : topsize;
{$ifndef __NOWINPECOFF__}
      again,ok : tasmlabel;
{$endif}
    begin
      { get stack space }
      getcpuregister(list,NR_EDI);
      a_load_loc_reg(list,OS_INT,lenloc,NR_EDI);
      list.concat(Taicpu.op_reg(A_INC,S_L,NR_EDI));
      { Now EDI contains (high+1). }

      { special case handling for elesize=8, 4 and 2:
        set ECX = (high+1) instead of ECX = (high+1)*elesize.
        In the case of elesize=4 and 2, this allows us to avoid the SHR later.
        In the case of elesize=8, we can later use a SHL ECX, 1 instead of
        SHR ECX, 2 which is one byte shorter. }
      if (elesize=8) or (elesize=4) or (elesize=2) then
        begin
          { Now EDI contains (high+1). Copy it to ECX for later use. }
          getcpuregister(list,NR_ECX);
          list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
        end;
      { EDI := EDI * elesize }
      if (elesize<>1) then
        begin
          if ispowerof2(elesize, power) then
            list.concat(Taicpu.op_const_reg(A_SHL,S_L,power,NR_EDI))
          else
            list.concat(Taicpu.op_const_reg(A_IMUL,S_L,elesize,NR_EDI));
        end;
      if (elesize<>8) and (elesize<>4) and (elesize<>2) then
        begin
          { Now EDI contains (high+1)*elesize. Copy it to ECX for later use. }
          getcpuregister(list,NR_ECX);
          list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
        end;
{$ifndef __NOWINPECOFF__}
      { windows guards only a few pages for stack growing, }
      { so we have to access every page first              }
      if target_info.system=system_i386_win32 then
        begin
          current_asmdata.getjumplabel(again);
          current_asmdata.getjumplabel(ok);
          a_label(list,again);
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
          list.concat(Taicpu.op_const_reg(A_CMP,S_L,winstackpagesize,NR_EDI));
          a_jmp_cond(list,OC_B,ok);
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
          list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize-4,NR_ESP));
          list.concat(Taicpu.op_reg(A_PUSH,S_L,NR_EDI));
          list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize,NR_EDI));
          a_jmp_always(list,again);
          a_label(list,ok);
        end;
{$endif __NOWINPECOFF__}
      { If we were probing pages, EDI=(size mod pagesize) and ESP is decremented
        by (size div pagesize)*pagesize, otherwise EDI=size.
        Either way, subtracting EDI from ESP will set ESP to desired final value. }
      list.concat(Taicpu.op_reg_reg(A_SUB,S_L,NR_EDI,NR_ESP));
      { align stack on 4 bytes }
      list.concat(Taicpu.op_const_reg(A_AND,S_L,aint($fffffff4),NR_ESP));
      { load destination, don't use a_load_reg_reg, that will add a move instruction
        that can confuse the reg allocator }
      list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,NR_EDI));

      { Allocate ESI and load it with source }
      getcpuregister(list,NR_ESI);
      a_loadaddr_ref_reg(list,ref,NR_ESI);

      { calculate size }
      opsize:=S_B;
      if elesize=8 then
        begin
          opsize:=S_L;
          { ECX is number of qwords, convert to dwords }
          list.concat(Taicpu.op_const_reg(A_SHL,S_L,1,NR_ECX))
        end
      else if elesize=4 then
        begin
          opsize:=S_L;
          { ECX is already number of dwords, so no need to SHL/SHR }
        end
      else if elesize=2 then
        begin
          opsize:=S_W;
          { ECX is already number of words, so no need to SHL/SHR }
        end
      else
        if (elesize and 3)=0 then
          begin
            opsize:=S_L;
            { ECX is number of bytes, convert to dwords }
            list.concat(Taicpu.op_const_reg(A_SHR,S_L,2,NR_ECX))
          end
        else
          if (elesize and 1)=0 then
            begin
              opsize:=S_W;
              { ECX is number of bytes, convert to words }
              list.concat(Taicpu.op_const_reg(A_SHR,S_L,1,NR_ECX))
            end;

      if ts_cld in current_settings.targetswitches then
        list.concat(Taicpu.op_none(A_CLD,S_NO));
      list.concat(Taicpu.op_none(A_REP,S_NO));
      case opsize of
        S_B : list.concat(Taicpu.Op_none(A_MOVSB,S_NO));
        S_W : list.concat(Taicpu.Op_none(A_MOVSW,S_NO));
        S_L : list.concat(Taicpu.Op_none(A_MOVSD,S_NO));
        else
          internalerror(2019050901);
      end;
      ungetcpuregister(list,NR_EDI);
      ungetcpuregister(list,NR_ECX);
      ungetcpuregister(list,NR_ESI);

      { patch the new address, but don't use a_load_reg_reg, that will add a move instruction
        that can confuse the reg allocator }
      list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,destreg));
      include(current_procinfo.flags,pi_has_stack_allocs);
    end;

  procedure tcg386.g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);
    begin
      { Nothing to release }
    end;
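
  { Loads the GOT pointer into the PIC register at procedure entry when the
    target uses a GOT and the current procedure needs it; Darwin targets use a
    call/pop sequence instead of the fpc_geteipasebx/fpc_geteipasecx helpers. }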
  procedure tcg386.g_maybe_got_init(list: TAsmList);
    var
      i: longint;
      tmpreg: TRegister;
    begin
      { allocate PIC register }
      if (tf_pic_uses_got in target_info.flags) and
         (pi_needs_got in current_procinfo.flags) then
        begin
          if not (target_info.system in [system_i386_darwin,system_i386_iphonesim]) then
            begin
              { Use ECX as a temp register by default }
              if current_procinfo.got = NR_EBX then
                tmpreg:=NR_EBX
              else
                tmpreg:=NR_ECX;
              { Allocate registers used for parameters to make sure they are
                never allocated during this PIC init code }
              for i:=0 to current_procinfo.procdef.paras.Count - 1 do
                with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
                  if Loc in [LOC_REGISTER, LOC_CREGISTER] then begin
                    a_reg_alloc(list, register);
                    { If ECX is used for a parameter, use EBX as temp }
                    if getsupreg(register) = RS_ECX then
                      tmpreg:=NR_EBX;
                  end;

              if tmpreg = NR_EBX then
                begin
                  { Mark EBX as used in the proc }
                  include(rg[R_INTREGISTER].used_in_proc,RS_EBX);
                  current_module.requires_ebx_pic_helper:=true;
                  a_call_name_static(list,'fpc_geteipasebx');
                end
              else
                begin
                  current_module.requires_ecx_pic_helper:=true;
                  a_call_name_static(list,'fpc_geteipasecx');
                end;

              list.concat(taicpu.op_sym_ofs_reg(A_ADD,S_L,current_asmdata.RefAsmSymbol('_GLOBAL_OFFSET_TABLE_',AT_DATA),0,tmpreg));
              list.concat(taicpu.op_reg_reg(A_MOV,S_L,tmpreg,current_procinfo.got));

              { Deallocate parameter registers }
              for i:=0 to current_procinfo.procdef.paras.Count - 1 do
                with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
                  if Loc in [LOC_REGISTER, LOC_CREGISTER] then
                    a_reg_dealloc(list, register);
            end
          else
            begin
              { call/pop is faster than call/ret/mov on Core Solo and later
                according to Apple's benchmarking -- and all Intel Macs
                have at least a Core Solo (furthermore, the i386 up to the
                Pentium 1 don't have a return stack buffer) }
              a_call_name_static(list,current_procinfo.CurrGOTLabel.name);
              a_label(list,current_procinfo.CurrGotLabel);
              list.concat(taicpu.op_reg(A_POP,S_L,current_procinfo.got))
            end;
        end;
    end;

{ ************* 64bit operations ************ }
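
  { Maps a 64-bit operation onto the pair of 32-bit instructions applied to
    the low and high dwords (ADD/ADC, SUB/SBB, or the same logical instruction
    twice). }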
  procedure tcg64f386.get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
    begin
      case op of
        OP_ADD :
          begin
            op1:=A_ADD;
            op2:=A_ADC;
          end;
        OP_SUB :
          begin
            op1:=A_SUB;
            op2:=A_SBB;
          end;
        OP_XOR :
          begin
            op1:=A_XOR;
            op2:=A_XOR;
          end;
        OP_OR :
          begin
            op1:=A_OR;
            op2:=A_OR;
          end;
        OP_AND :
          begin
            op1:=A_AND;
            op2:=A_AND;
          end;
        else
          internalerror(2002032408);
      end;
    end;

  procedure tcg64f386.a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);
    var
      op1,op2 : TAsmOp;
      tempref : treference;
    begin
      if not(op in [OP_NEG,OP_NOT]) then
        begin
          get_64bit_ops(op,op1,op2);
          tempref:=ref;
          tcgx86(cg).make_simple_ref(list,tempref);
          if op in [OP_ADD,OP_SUB] then
            cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
          list.concat(taicpu.op_ref_reg(op1,S_L,tempref,reg.reglo));
          inc(tempref.offset,4);
          list.concat(taicpu.op_ref_reg(op2,S_L,tempref,reg.reghi));
          if op in [OP_ADD,OP_SUB] then
            cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        end
      else
        begin
          a_load64_ref_reg(list,ref,reg);
          a_op64_reg_reg(list,op,size,reg,reg);
        end;
    end;

  procedure tcg64f386.a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);
    var
      op1,op2 : TAsmOp;
      tempref : treference;
      tmpreg: TRegister;
      l1, l2: TAsmLabel;
    begin
      case op of
        OP_NOT,OP_NEG:
          inherited;
        OP_SHR,OP_SHL,OP_SAR:
          begin
            { load the right operand (the shift count) into a register }
            cg.getcpuregister(list,NR_ECX);
            cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,NR_ECX);
            tempref:=ref;
            tcgx86(cg).make_simple_ref(list,tempref);
            { the shift instructions only work with counts up to 31,
              so we have to do some tricks here }
            current_asmdata.getjumplabel(l1);
            current_asmdata.getjumplabel(l2);
            cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
            list.Concat(taicpu.op_const_reg(A_TEST,S_B,32,NR_CL));
            cg.a_jmp_flags(list,F_E,l1);
            cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
            tmpreg:=cg.getintregister(list,OS_32);
            case op of
              OP_SHL:
                begin
                  cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                  list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
                  inc(tempref.offset,4);
                  cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                  dec(tempref.offset,4);
                  cg.a_load_const_ref(list,OS_32,0,tempref);
                  cg.a_jmp_always(list,l2);
                  cg.a_label(list,l1);
                  cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                  inc(tempref.offset,4);
                  list.Concat(taicpu.op_reg_reg_ref(A_SHLD,S_L,NR_CL,tmpreg,tempref));
                  dec(tempref.offset,4);
                  if cs_opt_size in current_settings.optimizerswitches then
                    list.concat(taicpu.op_reg_ref(A_SHL,S_L,NR_CL,tempref))
                  else
                    begin
                      list.concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
                      cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                    end;
                end;
              OP_SHR:
                begin
                  inc(tempref.offset,4);
                  cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                  list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
                  dec(tempref.offset,4);
                  cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                  inc(tempref.offset,4);
                  cg.a_load_const_ref(list,OS_32,0,tempref);
                  cg.a_jmp_always(list,l2);
                  cg.a_label(list,l1);
                  cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                  dec(tempref.offset,4);
                  list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
                  inc(tempref.offset,4);
                  if cs_opt_size in current_settings.optimizerswitches then
                    list.concat(taicpu.op_reg_ref(A_SHR,S_L,NR_CL,tempref))
                  else
                    begin
                      list.concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
                      cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                    end;
                end;
              OP_SAR:
                begin
                  inc(tempref.offset,4);
                  cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                  list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
                  dec(tempref.offset,4);
                  cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                  inc(tempref.offset,4);
                  list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
                  cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                  cg.a_jmp_always(list,l2);
                  cg.a_label(list,l1);
                  cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                  dec(tempref.offset,4);
                  list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
                  inc(tempref.offset,4);
                  if cs_opt_size in current_settings.optimizerswitches then
                    list.concat(taicpu.op_reg_ref(A_SAR,S_L,NR_CL,tempref))
                  else
                    begin
                      list.concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
                      cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                    end;
                end;
              else
                internalerror(2017041801);
            end;
            cg.a_label(list,l2);
            cg.ungetcpuregister(list,NR_ECX);
            exit;
          end;
        else
          begin
            get_64bit_ops(op,op1,op2);
            tempref:=ref;
            tcgx86(cg).make_simple_ref(list,tempref);
            if op in [OP_ADD,OP_SUB] then
              cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
            list.concat(taicpu.op_reg_ref(op1,S_L,reg.reglo,tempref));
            inc(tempref.offset,4);
            list.concat(taicpu.op_reg_ref(op2,S_L,reg.reghi,tempref));
            if op in [OP_ADD,OP_SUB] then
              cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
          end;
      end;
    end;

  procedure tcg64f386.a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);
    var
      op1,op2 : TAsmOp;
      l1, l2: TAsmLabel;
    begin
      case op of
        OP_NEG :
          begin
            if (regsrc.reglo<>regdst.reglo) then
              a_load64_reg_reg(list,regsrc,regdst);
            list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
            cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
            list.concat(taicpu.op_reg(A_NEG,S_L,regdst.reglo));
            list.concat(taicpu.op_const_reg(A_SBB,S_L,-1,regdst.reghi));
            cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
            exit;
          end;
        OP_NOT :
          begin
            if (regsrc.reglo<>regdst.reglo) then
              a_load64_reg_reg(list,regsrc,regdst);
            list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
            list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reglo));
            exit;
          end;
        OP_SHR,OP_SHL,OP_SAR:
          begin
            { load the right operand (the shift count) into a register }
            cg.getcpuregister(list,NR_ECX);
            cg.a_load_reg_reg(list,OS_32,OS_32,regsrc.reglo,NR_ECX);
            { the shift instructions only work with counts up to 31,
              so we have to do some tricks here }
            current_asmdata.getjumplabel(l1);
            current_asmdata.getjumplabel(l2);
            cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
            list.Concat(taicpu.op_const_reg(A_TEST,S_B,32,NR_CL));
            cg.a_jmp_flags(list,F_E,l1);
            cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
            case op of
              OP_SHL:
                begin
                  list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
                  cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reglo,regdst.reghi);
                  list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reglo,regdst.reglo));
                  cg.a_jmp_always(list,l2);
                  cg.a_label(list,l1);
                  list.Concat(taicpu.op_reg_reg_reg(A_SHLD,S_L,NR_CL,regdst.reglo,regdst.reghi));
                  list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
                end;
              OP_SHR:
                begin
                  list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
                  cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
                  list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reghi,regdst.reghi));
                  cg.a_jmp_always(list,l2);
                  cg.a_label(list,l1);
                  list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
                  list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
                end;
              OP_SAR:
                begin
                  cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
                  list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reglo));
                  list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,regdst.reghi));
                  cg.a_jmp_always(list,l2);
                  cg.a_label(list,l1);
                  list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
                  list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reghi));
                end;
              else
                internalerror(2017041802);
            end;
            cg.a_label(list,l2);
            cg.ungetcpuregister(list,NR_ECX);
            exit;
          end;
        else
          ;
      end;
      get_64bit_ops(op,op1,op2);
      if op in [OP_ADD,OP_SUB] then
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
      list.concat(taicpu.op_reg_reg(op1,S_L,regsrc.reglo,regdst.reglo));
      list.concat(taicpu.op_reg_reg(op2,S_L,regsrc.reghi,regdst.reghi));
      if op in [OP_ADD,OP_SUB] then
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
    end;
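
  { 64-bit operations with a constant operand work on the low and high dwords
    separately; constant shift counts of 32 and above are reduced to a dword
    move plus a shift by (count and 31), smaller counts use SHLD/SHRD. }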
  procedure tcg64f386.a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);
    var
      op1,op2 : TAsmOp;
    begin
      case op of
        OP_AND,OP_OR,OP_XOR:
          begin
            cg.a_op_const_reg(list,op,OS_32,tcgint(lo(value)),reg.reglo);
            cg.a_op_const_reg(list,op,OS_32,tcgint(hi(value)),reg.reghi);
          end;
        OP_ADD, OP_SUB:
          begin
            // can't use a_op_const_reg because this may use dec/inc
            get_64bit_ops(op,op1,op2);
            cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
            list.concat(taicpu.op_const_reg(op1,S_L,aint(lo(value)),reg.reglo));
            list.concat(taicpu.op_const_reg(op2,S_L,aint(hi(value)),reg.reghi));
            cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
          end;
        OP_SHR,OP_SHL,OP_SAR:
          begin
            value:=value and 63;
            if value<>0 then
              begin
                if (value=1) and (op=OP_SHL) and
                   (current_settings.optimizecputype<=cpu_486) and
                   not (cs_opt_size in current_settings.optimizerswitches) then
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    list.concat(taicpu.op_reg_reg(A_ADD,S_L,reg.reglo,reg.reglo));
                    list.concat(taicpu.op_reg_reg(A_ADC,S_L,reg.reghi,reg.reghi));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end
                else if (value=1) and (cs_opt_size in current_settings.optimizerswitches) then
                  case op of
                    OP_SHR:
                      begin
                        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                        list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
                        list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
                        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                      end;
                    OP_SHL:
                      begin
                        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                        list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
                        list.concat(taicpu.op_const_reg(A_RCL,S_L,value,reg.reghi));
                        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                      end;
                    OP_SAR:
                      begin
                        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                        list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
                        list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
                        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                      end;
                    else
                      internalerror(2019050902);
                  end
                else if value>31 then
                  case op of
                    OP_SAR:
                      begin
                        cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
                        list.concat(taicpu.op_const_reg(A_SAR,S_L,31,reg.reghi));
                        if (value and 31)<>0 then
                          list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,reg.reglo));
                      end;
                    OP_SHR:
                      begin
                        cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
                        list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reghi,reg.reghi));
                        if (value and 31)<>0 then
                          list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,reg.reglo));
                      end;
                    OP_SHL:
                      begin
                        cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,reg.reghi);
                        list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reglo,reg.reglo));
                        if (value and 31)<>0 then
                          list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,reg.reghi));
                      end;
                    else
                      internalerror(2017041201);
                  end
                else
                  case op of
                    OP_SAR:
                      begin
                        list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
                        list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
                      end;
                    OP_SHR:
                      begin
                        list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
                        list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
                      end;
                    OP_SHL:
                      begin
                        list.concat(taicpu.op_const_reg_reg(A_SHLD,S_L,value,reg.reglo,reg.reghi));
                        list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
                      end;
                    else
                      internalerror(2017041202);
                  end;
              end;
          end;
        else
          internalerror(200204021);
      end;
    end;

  procedure tcg64f386.a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);
    var
      op1,op2 : TAsmOp;
      tempref : treference;
      tmpreg: TRegister;
    begin
      tempref:=ref;
      tcgx86(cg).make_simple_ref(list,tempref);
      case op of
        OP_AND,OP_OR,OP_XOR:
          begin
            cg.a_op_const_ref(list,op,OS_32,aint(lo(value)),tempref);
            inc(tempref.offset,4);
            cg.a_op_const_ref(list,op,OS_32,aint(hi(value)),tempref);
          end;
        OP_ADD, OP_SUB:
          begin
            get_64bit_ops(op,op1,op2);
            // can't use a_op_const_ref because this may use dec/inc
            cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
            list.concat(taicpu.op_const_ref(op1,S_L,aint(lo(value)),tempref));
            inc(tempref.offset,4);
            list.concat(taicpu.op_const_ref(op2,S_L,aint(hi(value)),tempref));
            cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
          end;
        OP_SHR,OP_SHL,OP_SAR:
          begin
            value:=value and 63;
            if value<>0 then
              begin
                if value=1 then
                  case op of
                    OP_SHR:
                      begin
                        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                        inc(tempref.offset,4);
                        list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref));
                        dec(tempref.offset,4);
                        list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
                        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                      end;
                    OP_SHL:
                      begin
                        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                        list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref));
                        inc(tempref.offset,4);
                        list.concat(taicpu.op_const_ref(A_RCL,S_L,value,tempref));
                        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                      end;
                    OP_SAR:
                      begin
                        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                        inc(tempref.offset,4);
                        list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
                        dec(tempref.offset,4);
                        list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
                        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                      end;
                    else
                      internalerror(2019050903);
                  end
                else if value>31 then
                  case op of
                    OP_SHR,OP_SAR:
                      begin
                        tmpreg:=cg.getintregister(list,OS_32);
                        inc(tempref.offset,4);
                        cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                        if (value and 31)<>0 then
                          if op=OP_SHR then
                            list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,tmpreg))
                          else
                            list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,tmpreg));
                        dec(tempref.offset,4);
                        cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                        inc(tempref.offset,4);
                        if op=OP_SHR then
                          cg.a_load_const_ref(list,OS_32,0,tempref)
                        else
                          begin
                            list.concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
                            cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                          end;
                      end;
                    OP_SHL:
                      begin
                        tmpreg:=cg.getintregister(list,OS_32);
                        cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                        if (value and 31)<>0 then
                          list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,tmpreg));
                        inc(tempref.offset,4);
                        cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                        dec(tempref.offset,4);
                        cg.a_load_const_ref(list,OS_32,0,tempref);
                      end;
                    else
                      internalerror(2017041803);
                  end
                else
                  case op of
                    OP_SHR,OP_SAR:
                      begin
                        tmpreg:=cg.getintregister(list,OS_32);
                        inc(tempref.offset,4);
                        cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                        dec(tempref.offset,4);
                        list.concat(taicpu.op_const_reg_ref(A_SHRD,S_L,value,tmpreg,tempref));
                        inc(tempref.offset,4);
                        if cs_opt_size in current_settings.optimizerswitches then
                          begin
                            if op=OP_SHR then
                              list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref))
                            else
                              list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
                          end
                        else
                          begin
                            if op=OP_SHR then
                              list.concat(taicpu.op_const_reg(A_SHR,S_L,value,tmpreg))
                            else
                              list.concat(taicpu.op_const_reg(A_SAR,S_L,value,tmpreg));
                            cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                          end;
                      end;
                    OP_SHL:
                      begin
                        tmpreg:=cg.getintregister(list,OS_32);
                        cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                        inc(tempref.offset,4);
                        list.concat(taicpu.op_const_reg_ref(A_SHLD,S_L,value,tmpreg,tempref));
                        dec(tempref.offset,4);
                        if cs_opt_size in current_settings.optimizerswitches then
                          list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref))
                        else
                          begin
                            list.concat(taicpu.op_const_reg(A_SHL,S_L,value,tmpreg));
                            cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                          end;
                      end;
                    else
                      internalerror(2017041203);
                  end;
              end;
          end;
        else
          internalerror(200204022);
      end;
    end;

  procedure tcg64f386.a_op64_ref(list: TAsmList; op: TOpCG; size: tcgsize; const ref: treference);
    var
      tempref : treference;
    begin
      case op of
        OP_NOT:
          begin
            tempref:=ref;
            tcgx86(cg).make_simple_ref(list,tempref);
            list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
            inc(tempref.offset,4);
            list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
          end;
        OP_NEG:
          begin
            tempref:=ref;
            tcgx86(cg).make_simple_ref(list,tempref);
            inc(tempref.offset,4);
            list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
            cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
            dec(tempref.offset,4);
            list.concat(taicpu.op_ref(A_NEG,S_L,tempref));
            inc(tempref.offset,4);
            list.concat(taicpu.op_const_ref(A_SBB,S_L,-1,tempref));
            cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
          end;
        else
          internalerror(2020050708);
      end;
    end;

  procedure create_codegen;
    begin
      cg := tcg386.create;
      cg64 := tcg64f386.create;
    end;

end.