cgcpu.pas 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156
  1. {
  2. Copyright (c) 1998-2002 by Florian Klaempfl
  3. This unit implements the code generator for the i386
  4. This program is free software; you can redistribute it and/or modify
  5. it under the terms of the GNU General Public License as published by
  6. the Free Software Foundation; either version 2 of the License, or
  7. (at your option) any later version.
  8. This program is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. GNU General Public License for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with this program; if not, write to the Free Software
  14. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  15. ****************************************************************************
  16. }
  17. unit cgcpu;
  18. {$i fpcdefs.inc}
  19. interface
  20. uses
  21. globtype,
  22. cgbase,cgobj,cg64f32,cgx86,
  23. aasmbase,aasmtai,aasmdata,aasmcpu,
  24. cpubase,parabase,cgutils,
  25. symconst,symdef,symsym
  26. ;
  27. type
{ i386 code generator: specializes the generic x86 code generator, mainly
  to pass parameters with PUSH instead of MOV and to emit the i386-specific
  procedure epilogue, open-array copying and PIC/GOT setup. }
  28. tcg386 = class(tcgx86)
  29. procedure init_register_allocators;override;
  30. { passing parameter using push instead of mov }
  31. procedure a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);override;
  32. procedure a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);override;
  33. procedure a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);override;
  34. procedure a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);override;
  35. procedure g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);override;
  36. procedure g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
  37. procedure g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);
  38. procedure g_maybe_got_init(list: TAsmList); override;
  39. end;
{ 64-bit integer operations implemented on 32-bit register pairs
  (low/high dword), using ADD/ADC, SUB/SBB, SHLD/SHRD etc. }
  40. tcg64f386 = class(tcg64f32)
  41. procedure a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);override;
  42. procedure a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);override;
  43. procedure a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);override;
  44. procedure a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);override;
  45. procedure a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);override;
  46. private
  47. { maps a TOpCG to the instruction pair for the low/high dwords }
  48. procedure get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
  49. end;
  49. procedure create_codegen;
  50. implementation
  51. uses
  52. globals,verbose,systems,cutils,
  53. paramgr,procinfo,fmodule,
  54. rgcpu,rgx86,cpuinfo;
  55. function use_push(const cgpara:tcgpara):boolean;
  56. begin
  57. result:=(not paramanager.use_fixed_stack) and
  58. assigned(cgpara.location) and
  59. (cgpara.location^.loc=LOC_REFERENCE) and
  60. (cgpara.location^.reference.index=NR_STACK_POINTER_REG);
  61. end;
{ Creates the register allocators for the i386 backend.  When -OoUSEEBP
  (cs_useebp) is active and the current procedure does not use EBP as its
  frame pointer, EBP is handed to the allocator as an ordinary integer
  register; otherwise EBP is excluded from allocation. }
  62. procedure tcg386.init_register_allocators;
  63. begin
  64. inherited init_register_allocators;
  65. if (cs_useebp in current_settings.optimizerswitches) and assigned(current_procinfo) and (current_procinfo.framepointer<>NR_EBP) then
  66. rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI,RS_EBP],first_int_imreg,[])
  67. else
  68. rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI],first_int_imreg,[RS_EBP]);
{ MMX and SSE use the same physical register names but separate allocators }
  69. rg[R_MMXREGISTER]:=trgcpu.create(R_MMXREGISTER,R_SUBNONE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
  70. rg[R_MMREGISTER]:=trgcpu.create(R_MMREGISTER,R_SUBWHOLE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
  71. rgfpu:=Trgx86fpu.create;
  72. end;
{ Passes the register r as a parameter.  If the parameter sits directly on
  the stack (see use_push) a single PUSH is emitted, widening the pushed
  size up to the parameter alignment so the stack stays aligned; otherwise
  the generic implementation (store to the final location) is used. }
  73. procedure tcg386.a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);
  74. var
  75. pushsize : tcgsize;
  76. begin
  77. check_register_size(size,r);
  78. if use_push(cgpara) then
  79. begin
  80. cgpara.check_simple_location;
{ push at least cgpara.alignment bytes; a smaller value would misalign
  the stack for the following parameters }
  81. if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
  82. pushsize:=cgpara.location^.size
  83. else
  84. pushsize:=int_cgsize(cgpara.alignment);
  85. list.concat(taicpu.op_reg(A_PUSH,tcgsize2opsize[pushsize],makeregsize(list,r,pushsize)));
  86. end
  87. else
  88. inherited a_load_reg_cgpara(list,size,r,cgpara);
  89. end;
{ Passes the constant a as a parameter.  Same strategy as
  a_load_reg_cgpara: PUSH an immediate when the parameter location is on
  the stack, falling back to the generic implementation otherwise. }
  90. procedure tcg386.a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);
  91. var
  92. pushsize : tcgsize;
  93. begin
  94. if use_push(cgpara) then
  95. begin
  96. cgpara.check_simple_location;
{ widen the push up to the parameter alignment, as for registers }
  97. if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
  98. pushsize:=cgpara.location^.size
  99. else
  100. pushsize:=int_cgsize(cgpara.alignment);
  101. list.concat(taicpu.op_const(A_PUSH,tcgsize2opsize[pushsize],a));
  102. end
  103. else
  104. inherited a_load_const_cgpara(list,size,a,cgpara);
  105. end;
{ Passes the value at reference r as a parameter.  When the parameter is
  passed on the stack this pushes the data directly: records/open values
  (size OS_NO) are copied via a stack allocation + block copy, OS_F64 is
  pushed as two dwords (high dword first), and everything else goes through
  the recursive pushdata helper, which walks the location chain and pushes
  the pieces in reverse order. }
  106. procedure tcg386.a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);
{ Pushes the parameter piece described by paraloc, recursing first so the
  locations end up on the stack in reverse (push) order. }
  107. procedure pushdata(paraloc:pcgparalocation;ofs:tcgint);
  108. var
  109. pushsize : tcgsize;
  110. opsize : topsize;
  111. tmpreg : tregister;
  112. href : treference;
  113. begin
  114. if not assigned(paraloc) then
  115. exit;
  116. if (paraloc^.loc<>LOC_REFERENCE) or
  117. (paraloc^.reference.index<>NR_STACK_POINTER_REG) or
  118. (tcgsize2size[paraloc^.size]>sizeof(aint)) then
  119. internalerror(200501162);
  120. { Pushes are needed in reverse order, add the size of the
  121. current location to the offset where to load from. This
  122. prevents wrong calculations for the last location when
  123. the size is not a power of 2 }
  124. if assigned(paraloc^.next) then
  125. pushdata(paraloc^.next,ofs+tcgsize2size[paraloc^.size]);
  126. { Push the data starting at ofs }
  127. href:=r;
  128. inc(href.offset,ofs);
  129. if tcgsize2size[paraloc^.size]>cgpara.alignment then
  130. pushsize:=paraloc^.size
  131. else
  132. pushsize:=int_cgsize(cgpara.alignment);
  133. opsize:=TCgsize2opsize[pushsize];
  134. { for go32v2 we obtain OS_F32,
  135. but pushs is not valid, we need pushl }
  136. if opsize=S_FS then
  137. opsize:=S_L;
{ a piece smaller than the alignment must be widened through a register;
  a memory push of the exact size would push too few bytes }
  138. if tcgsize2size[paraloc^.size]<cgpara.alignment then
  139. begin
  140. tmpreg:=getintregister(list,pushsize);
  141. a_load_ref_reg(list,paraloc^.size,pushsize,href,tmpreg);
  142. list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
  143. end
  144. else
  145. begin
  146. make_simple_ref(list,href);
  147. list.concat(taicpu.op_ref(A_PUSH,opsize,href));
  148. end;
  149. end;
  150. var
  151. len : tcgint;
  152. href : treference;
  153. begin
  154. { cgpara.size=OS_NO requires a copy on the stack }
  155. if use_push(cgpara) then
  156. begin
  157. { Record copy? }
  158. if (cgpara.size=OS_NO) or (size=OS_NO) then
  159. begin
  160. cgpara.check_simple_location;
  161. len:=align(cgpara.intsize,cgpara.alignment);
  162. g_stackpointer_alloc(list,len);
  163. reference_reset_base(href,NR_STACK_POINTER_REG,0,ctempposinvalid,4,[]);
  164. g_concatcopy(list,r,href,len);
  165. end
  166. else
  167. begin
  168. if tcgsize2size[cgpara.size]<>tcgsize2size[size] then
  169. internalerror(200501161);
{ doubles are pushed as two dwords, high dword first so the value ends
  up little-endian in memory }
  170. if (cgpara.size=OS_F64) then
  171. begin
  172. href:=r;
  173. make_simple_ref(list,href);
  174. inc(href.offset,4);
  175. list.concat(taicpu.op_ref(A_PUSH,S_L,href));
  176. dec(href.offset,4);
  177. list.concat(taicpu.op_ref(A_PUSH,S_L,href));
  178. end
  179. else
  180. { We need to push the data in reverse order,
  181. therefore we use a recursive algorithm }
  182. pushdata(cgpara.location,0);
  183. end
  184. end
  185. else
  186. begin
  187. href:=r;
  188. make_simple_ref(list,href);
  189. inherited a_load_ref_cgpara(list,size,href,cgpara);
  190. end;
  191. end;
{ Passes the ADDRESS of reference r as a parameter.  When pushed on the
  stack, several simple address shapes are recognized and pushed directly
  (plain symbol+offset, bare index or base register, plain constant);
  PIC/darwin addresses that need indirection are first materialized in a
  register.  Anything more complex falls back to LEA into a register. }
  192. procedure tcg386.a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);
  193. var
  194. tmpreg : tregister;
  195. opsize : topsize;
  196. tmpref,dirref : treference;
  197. begin
  198. dirref:=r;
  199. { this could probably be done in a more optimized way, but for now this
  200. is sufficient }
  201. make_direct_ref(list,dirref);
  202. with dirref do
  203. begin
  204. if use_push(cgpara) then
  205. begin
  206. cgpara.check_simple_location;
  207. opsize:=tcgsize2opsize[OS_ADDR];
  208. if (segment=NR_NO) and (base=NR_NO) and (index=NR_NO) then
  209. begin
  210. if assigned(symbol) then
  211. begin
{ on darwin, external/weak symbols (and all symbols when PIC) must be
  resolved through the indirect symbol pointer, so compute the address
  in a register first }
  212. if (target_info.system in [system_i386_darwin,system_i386_iphonesim]) and
  213. ((dirref.symbol.bind in [AB_EXTERNAL,AB_WEAK_EXTERNAL]) or
  214. (cs_create_pic in current_settings.moduleswitches)) then
  215. begin
  216. tmpreg:=getaddressregister(list);
  217. a_loadaddr_ref_reg(list,dirref,tmpreg);
  218. list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
  219. end
  220. else if cs_create_pic in current_settings.moduleswitches then
  221. begin
  222. if offset<>0 then
  223. begin
  224. tmpreg:=getaddressregister(list);
  225. a_loadaddr_ref_reg(list,dirref,tmpreg);
  226. list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
  227. end
  228. else
  229. begin
{ offset 0: push the GOT slot contents directly }
  230. reference_reset_symbol(tmpref,dirref.symbol,0,sizeof(pint),[]);
  231. tmpref.refaddr:=addr_pic;
  232. tmpref.base:=current_procinfo.got;
  233. include(current_procinfo.flags,pi_needs_got);
  234. list.concat(taicpu.op_ref(A_PUSH,S_L,tmpref));
  235. end
  236. end
  237. else
  238. list.concat(Taicpu.Op_sym_ofs(A_PUSH,opsize,symbol,offset));
  239. end
  240. else
  241. list.concat(Taicpu.Op_const(A_PUSH,opsize,offset));
  242. end
{ NOTE(review): scalefactor=0 means "no scaling" here (scalefactor 1 is
  not treated as equivalent) -- confirm against treference conventions }
  243. else if (segment=NR_NO) and (base=NR_NO) and (index<>NR_NO) and
  244. (offset=0) and (scalefactor=0) and (symbol=nil) then
  245. list.concat(Taicpu.Op_reg(A_PUSH,opsize,index))
  246. else if (segment=NR_NO) and (base<>NR_NO) and (index=NR_NO) and
  247. (offset=0) and (symbol=nil) then
  248. list.concat(Taicpu.Op_reg(A_PUSH,opsize,base))
  249. else
  250. begin
  251. tmpreg:=getaddressregister(list);
  252. a_loadaddr_ref_reg(list,dirref,tmpreg);
  253. list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
  254. end;
  255. end
  256. else
  257. inherited a_loadaddr_ref_cgpara(list,dirref,cgpara);
  258. end;
  259. end;
{ Emits the procedure epilogue: EMMS when MMX registers were used, stack
  frame teardown (LEA-based SP restore or LEAVE), the full register/segment
  restore + IRET sequence for interrupt handlers, and the appropriate
  RET/RET imm16 depending on calling convention and parasize. }
  260. procedure tcg386.g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);
{ adds a to ESP via LEA (does not clobber flags, unlike ADD) }
  261. procedure increase_sp(a : tcgint);
  262. var
  263. href : treference;
  264. begin
  265. reference_reset_base(href,NR_STACK_POINTER_REG,a,ctempposinvalid,0,[]);
  266. { normally, lea is a better choice than an add }
  267. list.concat(Taicpu.op_ref_reg(A_LEA,TCGSize2OpSize[OS_ADDR],href,NR_STACK_POINTER_REG));
  268. end;
  269. begin
  270. { MMX needs to call EMMS }
  271. if assigned(rg[R_MMXREGISTER]) and
  272. (rg[R_MMXREGISTER].uses_registers) then
  273. list.concat(Taicpu.op_none(A_EMMS,S_NO));
  274. { remove stackframe }
  275. if not nostackframe then
  276. begin
  277. if (current_procinfo.framepointer=NR_STACK_POINTER_REG) or
  278. (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
  279. begin
  280. if current_procinfo.final_localsize<>0 then
  281. increase_sp(current_procinfo.final_localsize);
  282. if (not paramanager.use_fixed_stack) then
  283. internal_restore_regs(list,true);
{ exception filters run with the parent frame pointer; restore it }
  284. if (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
  285. list.concat(Taicpu.op_reg(A_POP,tcgsize2opsize[OS_ADDR],NR_FRAME_POINTER_REG));
  286. current_asmdata.asmcfi.cfa_def_cfa_offset(list,sizeof(pint));
  287. end
  288. else
  289. begin
  290. if (not paramanager.use_fixed_stack) then
  291. internal_restore_regs(list,not (pi_has_stack_allocs in current_procinfo.flags));
  292. generate_leave(list);
  293. end;
  294. list.concat(tai_regalloc.dealloc(current_procinfo.framepointer,nil));
  295. end;
  296. { return from proc }
{ interrupt handlers must restore every register they may have touched;
  registers holding a function result are skipped (the slot is discarded
  with ADD ESP,4 instead of a POP) }
  297. if po_interrupt in current_procinfo.procdef.procoptions then
  298. begin
  299. if assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
  300. (current_procinfo.procdef.funcretloc[calleeside].location^.loc=LOC_REGISTER) then
  301. begin
  302. if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.register)=RS_EAX) then
  303. list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
  304. else
  305. internalerror(2010053001);
  306. end
  307. else
  308. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EAX));
  309. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EBX));
  310. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ECX));
{ same for EDX when it carries the high dword of a 64-bit result }
  311. if (current_procinfo.procdef.funcretloc[calleeside].size in [OS_64,OS_S64]) and
  312. assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
  313. assigned(current_procinfo.procdef.funcretloc[calleeside].location^.next) and
  314. (current_procinfo.procdef.funcretloc[calleeside].location^.next^.loc=LOC_REGISTER) then
  315. begin
  316. if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.next^.register)=RS_EDX) then
  317. list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
  318. else
  319. internalerror(2010053002);
  320. end
  321. else
  322. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDX));
  323. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ESI));
  324. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDI));
  325. { .... also the segment registers }
  326. list.concat(Taicpu.Op_reg(A_POP,S_W,NR_DS));
  327. list.concat(Taicpu.Op_reg(A_POP,S_W,NR_ES));
  328. list.concat(Taicpu.Op_reg(A_POP,S_W,NR_FS));
  329. list.concat(Taicpu.Op_reg(A_POP,S_W,NR_GS));
  330. { this restores the flags }
  331. list.concat(Taicpu.Op_none(A_IRET,S_NO));
  332. end
  333. { Routines with the poclearstack flag set use only a ret }
  334. else if (current_procinfo.procdef.proccalloption in clearstack_pocalls) and
  335. (not paramanager.use_fixed_stack) then
  336. begin
  337. { complex return values are removed from stack in C code PM }
  338. { but not on win32 }
  339. { and not for safecall with hidden exceptions, because the result }
  340. { which contains the exception is passed in EAX }
  341. if ((target_info.system <> system_i386_win32) or
  342. (target_info.abi=abi_old_win32_gnu)) and
  343. not ((current_procinfo.procdef.proccalloption = pocall_safecall) and
  344. (tf_safecall_exceptions in target_info.flags)) and
  345. paramanager.ret_in_param(current_procinfo.procdef.returndef,
  346. current_procinfo.procdef) then
  347. list.concat(Taicpu.Op_const(A_RET,S_W,sizeof(aint)))
  348. else
  349. list.concat(Taicpu.Op_none(A_RET,S_NO));
  350. end
  351. { ... also routines with parasize=0 }
  352. else if (parasize=0) then
  353. list.concat(Taicpu.Op_none(A_RET,S_NO))
  354. else
  355. begin
  356. { parameters are limited to 65535 bytes because ret allows only imm16 }
  357. if (parasize>65535) then
  358. CGMessage(cg_e_parasize_too_big);
  359. list.concat(Taicpu.Op_const(A_RET,S_W,parasize));
  360. end;
  361. end;
{ Copies an open-array value parameter onto the stack.  Computes the byte
  size from lenloc (the 'high' value) and elesize, probes stack pages on
  win32, allocates the space by subtracting from ESP, then block-copies the
  data with REP MOVSB/MOVSW/MOVSD and stores the new address in destreg.
  Clobbers EDI (byte count / destination), ECX (REP count) and ESI (source). }
  362. procedure tcg386.g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
  363. var
  364. power : longint;
  365. opsize : topsize;
  366. {$ifndef __NOWINPECOFF__}
  367. again,ok : tasmlabel;
  368. {$endif}
  369. begin
  370. { get stack space }
  371. getcpuregister(list,NR_EDI);
  372. a_load_loc_reg(list,OS_INT,lenloc,NR_EDI);
  373. list.concat(Taicpu.op_reg(A_INC,S_L,NR_EDI));
  374. { Now EDI contains (high+1). }
  375. { special case handling for elesize=8, 4 and 2:
  376. set ECX = (high+1) instead of ECX = (high+1)*elesize.
  377. In the case of elesize=4 and 2, this allows us to avoid the SHR later.
  378. In the case of elesize=8, we can later use a SHL ECX, 1 instead of
  379. SHR ECX, 2 which is one byte shorter. }
  380. if (elesize=8) or (elesize=4) or (elesize=2) then
  381. begin
  382. { Now EDI contains (high+1). Copy it to ECX for later use. }
  383. getcpuregister(list,NR_ECX);
  384. list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
  385. end;
  386. { EDI := EDI * elesize }
  387. if (elesize<>1) then
  388. begin
  389. if ispowerof2(elesize, power) then
  390. list.concat(Taicpu.op_const_reg(A_SHL,S_L,power,NR_EDI))
  391. else
  392. list.concat(Taicpu.op_const_reg(A_IMUL,S_L,elesize,NR_EDI));
  393. end;
  394. if (elesize<>8) and (elesize<>4) and (elesize<>2) then
  395. begin
  396. { Now EDI contains (high+1)*elesize. Copy it to ECX for later use. }
  397. getcpuregister(list,NR_ECX);
  398. list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
  399. end;
  400. {$ifndef __NOWINPECOFF__}
  401. { windows guards only a few pages for stack growing, }
  402. { so we have to access every page first }
  403. if target_info.system=system_i386_win32 then
  404. begin
  405. current_asmdata.getjumplabel(again);
  406. current_asmdata.getjumplabel(ok);
  407. a_label(list,again);
  408. list.concat(Taicpu.op_const_reg(A_CMP,S_L,winstackpagesize,NR_EDI));
  409. a_jmp_cond(list,OC_B,ok);
{ touch one page: reserve a page minus the 4 bytes the PUSH itself uses }
  410. list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize-4,NR_ESP));
  411. list.concat(Taicpu.op_reg(A_PUSH,S_L,NR_EDI));
  412. list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize,NR_EDI));
  413. a_jmp_always(list,again);
  414. a_label(list,ok);
  415. end;
  416. {$endif __NOWINPECOFF__}
  417. { If we were probing pages, EDI=(size mod pagesize) and ESP is decremented
  418. by (size div pagesize)*pagesize, otherwise EDI=size.
  419. Either way, subtracting EDI from ESP will set ESP to desired final value. }
  420. list.concat(Taicpu.op_reg_reg(A_SUB,S_L,NR_EDI,NR_ESP));
  421. { align stack on 4 bytes }
{ NOTE(review): mask $fffffff4 also clears bit 3 (can drop ESP by up to 8
  extra bytes); a pure 4-byte alignment would be $fffffffc -- confirm this
  is intentional before changing }
  422. list.concat(Taicpu.op_const_reg(A_AND,S_L,aint($fffffff4),NR_ESP));
  423. { load destination, don't use a_load_reg_reg, that will add a move instruction
  424. that can confuse the reg allocator }
  425. list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,NR_EDI));
  426. { Allocate ESI and load it with source }
  427. getcpuregister(list,NR_ESI);
  428. a_loadaddr_ref_reg(list,ref,NR_ESI);
  429. { calculate size }
  430. opsize:=S_B;
  431. if elesize=8 then
  432. begin
  433. opsize:=S_L;
  434. { ECX is number of qwords, convert to dwords }
  435. list.concat(Taicpu.op_const_reg(A_SHL,S_L,1,NR_ECX))
  436. end
  437. else if elesize=4 then
  438. begin
  439. opsize:=S_L;
  440. { ECX is already number of dwords, so no need to SHL/SHR }
  441. end
  442. else if elesize=2 then
  443. begin
  444. opsize:=S_W;
  445. { ECX is already number of words, so no need to SHL/SHR }
  446. end
  447. else
  448. if (elesize and 3)=0 then
  449. begin
  450. opsize:=S_L;
  451. { ECX is number of bytes, convert to dwords }
  452. list.concat(Taicpu.op_const_reg(A_SHR,S_L,2,NR_ECX))
  453. end
  454. else
  455. if (elesize and 1)=0 then
  456. begin
  457. opsize:=S_W;
  458. { ECX is number of bytes, convert to words }
  459. list.concat(Taicpu.op_const_reg(A_SHR,S_L,1,NR_ECX))
  460. end;
  461. if ts_cld in current_settings.targetswitches then
  462. list.concat(Taicpu.op_none(A_CLD,S_NO));
  463. list.concat(Taicpu.op_none(A_REP,S_NO));
  464. case opsize of
  465. S_B : list.concat(Taicpu.Op_none(A_MOVSB,S_NO));
  466. S_W : list.concat(Taicpu.Op_none(A_MOVSW,S_NO));
  467. S_L : list.concat(Taicpu.Op_none(A_MOVSD,S_NO));
  468. else
  469. internalerror(2019050901);
  470. end;
  471. ungetcpuregister(list,NR_EDI);
  472. ungetcpuregister(list,NR_ECX);
  473. ungetcpuregister(list,NR_ESI);
  474. { patch the new address, but don't use a_load_reg_reg, that will add a move instruction
  475. that can confuse the reg allocator }
  476. list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,destreg));
  477. include(current_procinfo.flags,pi_has_stack_allocs);
  478. end;
  479. procedure tcg386.g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);
  480. begin
  481. { Nothing to release }
  482. end;
{ Loads the GOT pointer at procedure entry when PIC code needs it.  On
  non-darwin targets it calls an fpc_geteipasebx/fpc_geteipasecx helper
  (choosing a temp register that does not clash with register parameters)
  and adds the _GLOBAL_OFFSET_TABLE_ displacement; on darwin it uses the
  call/pop idiom to obtain EIP directly. }
  483. procedure tcg386.g_maybe_got_init(list: TAsmList);
  484. var
  485. i: longint;
  486. tmpreg: TRegister;
  487. begin
  488. { allocate PIC register }
  489. if (tf_pic_uses_got in target_info.flags) and
  490. (pi_needs_got in current_procinfo.flags) then
  491. begin
  492. if not (target_info.system in [system_i386_darwin,system_i386_iphonesim]) then
  493. begin
  494. { Use ECX as a temp register by default }
  495. if current_procinfo.got = NR_EBX then
  496. tmpreg:=NR_EBX
  497. else
  498. tmpreg:=NR_ECX;
  499. { Allocate registers used for parameters to make sure they
  500. never allocated during this PIC init code }
  501. for i:=0 to current_procinfo.procdef.paras.Count - 1 do
  502. with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
  503. if Loc in [LOC_REGISTER, LOC_CREGISTER] then begin
  504. a_reg_alloc(list, register);
  505. { If ECX is used for a parameter, use EBX as temp }
  506. if getsupreg(register) = RS_ECX then
  507. tmpreg:=NR_EBX;
  508. end;
  509. if tmpreg = NR_EBX then
  510. begin
  511. { Mark EBX as used in the proc }
  512. include(rg[R_INTREGISTER].used_in_proc,RS_EBX);
  513. current_module.requires_ebx_pic_helper:=true;
  514. a_call_name_static(list,'fpc_geteipasebx');
  515. end
  516. else
  517. begin
  518. current_module.requires_ecx_pic_helper:=true;
  519. a_call_name_static(list,'fpc_geteipasecx');
  520. end;
{ tmpreg now holds EIP of the helper's return point; add the GOT
  displacement and move the result into the designated GOT register }
  521. list.concat(taicpu.op_sym_ofs_reg(A_ADD,S_L,current_asmdata.RefAsmSymbol('_GLOBAL_OFFSET_TABLE_',AT_DATA),0,tmpreg));
  522. list.concat(taicpu.op_reg_reg(A_MOV,S_L,tmpreg,current_procinfo.got));
  523. { Deallocate parameter registers }
  524. for i:=0 to current_procinfo.procdef.paras.Count - 1 do
  525. with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
  526. if Loc in [LOC_REGISTER, LOC_CREGISTER] then
  527. a_reg_dealloc(list, register);
  528. end
  529. else
  530. begin
  531. { call/pop is faster than call/ret/mov on Core Solo and later
  532. according to Apple's benchmarking -- and all Intel Macs
  533. have at least a Core Solo (furthermore, the i386 - Pentium 1
  534. don't have a return stack buffer) }
  535. a_call_name_static(list,current_procinfo.CurrGOTLabel.name);
  536. a_label(list,current_procinfo.CurrGotLabel);
  537. list.concat(taicpu.op_reg(A_POP,S_L,current_procinfo.got))
  538. end;
  539. end;
  540. end;
  541. { ************* 64bit operations ************ }
  542. procedure tcg64f386.get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
  543. begin
  544. case op of
  545. OP_ADD :
  546. begin
  547. op1:=A_ADD;
  548. op2:=A_ADC;
  549. end;
  550. OP_SUB :
  551. begin
  552. op1:=A_SUB;
  553. op2:=A_SBB;
  554. end;
  555. OP_XOR :
  556. begin
  557. op1:=A_XOR;
  558. op2:=A_XOR;
  559. end;
  560. OP_OR :
  561. begin
  562. op1:=A_OR;
  563. op2:=A_OR;
  564. end;
  565. OP_AND :
  566. begin
  567. op1:=A_AND;
  568. op2:=A_AND;
  569. end;
  570. else
  571. internalerror(200203241);
  572. end;
  573. end;
{ Applies op between a 64-bit memory operand and a register pair
  (reg := reg op [ref]).  Binary ops are done dword-wise with the pair
  from get_64bit_ops; the flags register is marked live across the two
  instructions for ADD/SUB so the carry is preserved.  Unary NEG/NOT are
  handled by loading the value and delegating to a_op64_reg_reg. }
  574. procedure tcg64f386.a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);
  575. var
  576. op1,op2 : TAsmOp;
  577. tempref : treference;
  578. begin
  579. if not(op in [OP_NEG,OP_NOT]) then
  580. begin
  581. get_64bit_ops(op,op1,op2);
  582. tempref:=ref;
  583. tcgx86(cg).make_simple_ref(list,tempref);
  584. if op in [OP_ADD,OP_SUB] then
  585. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  586. list.concat(taicpu.op_ref_reg(op1,S_L,tempref,reg.reglo));
{ high dword sits 4 bytes above the low dword }
  587. inc(tempref.offset,4);
  588. list.concat(taicpu.op_ref_reg(op2,S_L,tempref,reg.reghi));
  589. if op in [OP_ADD,OP_SUB] then
  590. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  591. end
  592. else
  593. begin
  594. a_load64_ref_reg(list,ref,reg);
  595. a_op64_reg_reg(list,op,size,reg,reg);
  596. end;
  597. end;
{ Applies op between a register pair and a 64-bit memory operand
  ([ref] := [ref] op reg).  NOT/NEG are done in place dword-wise; shifts
  (SHL/SHR/SAR) load the count into CL and branch on bit 5 of the count,
  because the i386 shift instructions only honour counts modulo 32:
  counts >= 32 move one dword across and fill the other, counts < 32 use
  SHLD/SHRD to carry bits between the two dwords.  Remaining binary ops
  use the instruction pair from get_64bit_ops. }
  598. procedure tcg64f386.a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);
  599. var
  600. op1,op2 : TAsmOp;
  601. tempref : treference;
  602. tmpreg: TRegister;
  603. l1, l2: TAsmLabel;
  604. begin
  605. case op of
  606. OP_NOT:
  607. begin
  608. tempref:=ref;
  609. tcgx86(cg).make_simple_ref(list,tempref);
  610. list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
  611. inc(tempref.offset,4);
  612. list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
  613. end;
  614. OP_NEG:
  615. begin
{ 64-bit negate: NOT the high dword, NEG the low dword, then SBB -1
  from the high dword to complete the two's complement }
  616. tempref:=ref;
  617. tcgx86(cg).make_simple_ref(list,tempref);
  618. inc(tempref.offset,4);
  619. list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
  620. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  621. dec(tempref.offset,4);
  622. list.concat(taicpu.op_ref(A_NEG,S_L,tempref));
  623. inc(tempref.offset,4);
  624. list.concat(taicpu.op_const_ref(A_SBB,S_L,-1,tempref));
  625. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  626. end;
  627. OP_SHR,OP_SHL,OP_SAR:
  628. begin
  629. { load right operators in a register }
  630. cg.getcpuregister(list,NR_ECX);
  631. cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,NR_ECX);
  632. tempref:=ref;
  633. tcgx86(cg).make_simple_ref(list,tempref);
  634. { the damned shift instructions work only til a count of 32 }
  635. { so we've to do some tricks here }
  636. current_asmdata.getjumplabel(l1);
  637. current_asmdata.getjumplabel(l2);
{ l1 is taken when bit 5 of the count is clear, i.e. count < 32 }
  638. list.Concat(taicpu.op_const_reg(A_TEST,S_B,32,NR_CL));
  639. cg.a_jmp_flags(list,F_E,l1);
  640. tmpreg:=cg.getintregister(list,OS_32);
  641. case op of
  642. OP_SHL:
  643. begin
{ count >= 32: high := low shl (count-32), low := 0 }
  644. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  645. list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
  646. inc(tempref.offset,4);
  647. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  648. dec(tempref.offset,4);
  649. cg.a_load_const_ref(list,OS_32,0,tempref);
  650. cg.a_jmp_always(list,l2);
  651. cg.a_label(list,l1);
{ count < 32: SHLD shifts low-dword bits into the high dword }
  652. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  653. inc(tempref.offset,4);
  654. list.Concat(taicpu.op_reg_reg_ref(A_SHLD,S_L,NR_CL,tmpreg,tempref));
  655. dec(tempref.offset,4);
  656. if cs_opt_size in current_settings.optimizerswitches then
  657. list.concat(taicpu.op_reg_ref(A_SHL,S_L,NR_CL,tempref))
  658. else
  659. begin
  660. list.concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
  661. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  662. end;
  663. end;
  664. OP_SHR:
  665. begin
{ count >= 32: low := high shr (count-32), high := 0 }
  666. inc(tempref.offset,4);
  667. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  668. list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
  669. dec(tempref.offset,4);
  670. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  671. inc(tempref.offset,4);
  672. cg.a_load_const_ref(list,OS_32,0,tempref);
  673. cg.a_jmp_always(list,l2);
  674. cg.a_label(list,l1);
{ count < 32: SHRD shifts high-dword bits into the low dword }
  675. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  676. dec(tempref.offset,4);
  677. list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
  678. inc(tempref.offset,4);
  679. if cs_opt_size in current_settings.optimizerswitches then
  680. list.concat(taicpu.op_reg_ref(A_SHR,S_L,NR_CL,tempref))
  681. else
  682. begin
  683. list.concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
  684. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  685. end;
  686. end;
  687. OP_SAR:
  688. begin
{ count >= 32: low := high sar (count-32), high := sign extension }
  689. inc(tempref.offset,4);
  690. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  691. list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
  692. dec(tempref.offset,4);
  693. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  694. inc(tempref.offset,4);
  695. list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
  696. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  697. cg.a_jmp_always(list,l2);
  698. cg.a_label(list,l1);
{ count < 32: SHRD on the low dword, arithmetic shift on the high }
  699. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  700. dec(tempref.offset,4);
  701. list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
  702. inc(tempref.offset,4);
  703. if cs_opt_size in current_settings.optimizerswitches then
  704. list.concat(taicpu.op_reg_ref(A_SAR,S_L,NR_CL,tempref))
  705. else
  706. begin
  707. list.concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
  708. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  709. end;
  710. end;
  711. else
  712. internalerror(2017041801);
  713. end;
  714. cg.a_label(list,l2);
  715. cg.ungetcpuregister(list,NR_ECX);
  716. exit;
  717. end;
  718. else
  719. begin
  720. get_64bit_ops(op,op1,op2);
  721. tempref:=ref;
  722. tcgx86(cg).make_simple_ref(list,tempref);
  723. if op in [OP_ADD,OP_SUB] then
  724. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  725. list.concat(taicpu.op_reg_ref(op1,S_L,reg.reglo,tempref));
  726. inc(tempref.offset,4);
  727. list.concat(taicpu.op_reg_ref(op2,S_L,reg.reghi,tempref));
  728. if op in [OP_ADD,OP_SUB] then
  729. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  730. end;
  731. end;
  732. end;
{ Emits i386 code for a 64-bit operation on register pairs:
  regdst := regdst <op> regsrc (for OP_NEG/OP_NOT the value comes from
  regsrc and the result is produced in regdst). }
procedure tcg64f386.a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);
  var
    op1,op2 : TAsmOp;
    l1, l2: TAsmLabel;
  begin
    case op of
      OP_NEG :
        begin
          if (regsrc.reglo<>regdst.reglo) then
            a_load64_reg_reg(list,regsrc,regdst);
          { 64-bit two's complement: NOT the high dword, NEG the low
            dword, then SBB -1 folds the borrow produced by NEG into the
            high dword, completing NOT+1 over the full 64 bits }
          list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
          { flags must survive from NEG to SBB }
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
          list.concat(taicpu.op_reg(A_NEG,S_L,regdst.reglo));
          list.concat(taicpu.op_const_reg(A_SBB,S_L,-1,regdst.reghi));
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
          exit;
        end;
      OP_NOT :
        begin
          if (regsrc.reglo<>regdst.reglo) then
            a_load64_reg_reg(list,regsrc,regdst);
          { bitwise complement: the two halves are independent }
          list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
          list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reglo));
          exit;
        end;
      OP_SHR,OP_SHL,OP_SAR:
        begin
          { load right operators in a register }
          cg.getcpuregister(list,NR_ECX);
          cg.a_load_reg_reg(list,OS_32,OS_32,regsrc.reglo,NR_ECX);
          { the damned shift instructions work only til a count of 32 }
          { so we've to do some tricks here }
          current_asmdata.getjumplabel(l1);
          current_asmdata.getjumplabel(l2);
          { TEST 32,CL checks bit 5 of the count: zero (F_E) means the
            count is 0..31 and the SHLD/SHRD path at l1 applies; the
            fall-through handles counts 32..63 }
          list.Concat(taicpu.op_const_reg(A_TEST,S_B,32,NR_CL));
          cg.a_jmp_flags(list,F_E,l1);
          case op of
            OP_SHL:
              begin
                { count>=32: the low dword, shifted by count-32 (the CPU
                  masks CL to 5 bits), becomes the high dword; the low
                  dword becomes 0 }
                list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
                cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reglo,regdst.reghi);
                list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reglo,regdst.reglo));
                cg.a_jmp_always(list,l2);
                cg.a_label(list,l1);
                { count<32: SHLD shifts bits from the low dword into the
                  high dword, then the low dword is shifted on its own }
                list.Concat(taicpu.op_reg_reg_reg(A_SHLD,S_L,NR_CL,regdst.reglo,regdst.reghi));
                list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
              end;
            OP_SHR:
              begin
                { count>=32: the high dword, shifted by count-32, becomes
                  the low dword; the high dword becomes 0 }
                list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
                cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
                list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reghi,regdst.reghi));
                cg.a_jmp_always(list,l2);
                cg.a_label(list,l1);
                { count<32: SHRD shifts bits from the high dword into the
                  low dword, then the high dword is shifted on its own }
                list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
                list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
              end;
            OP_SAR:
              begin
                { count>=32: the high dword, arithmetically shifted by
                  count-32, becomes the low dword; SAR 31 fills the high
                  dword with copies of the sign bit }
                cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
                list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reglo));
                list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,regdst.reghi));
                cg.a_jmp_always(list,l2);
                cg.a_label(list,l1);
                { count<32: SHRD into the low dword; the arithmetic shift
                  of the high dword preserves the sign }
                list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
                list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reghi));
              end;
            else
              internalerror(2017041801);
          end;
          cg.a_label(list,l2);
          cg.ungetcpuregister(list,NR_ECX);
          exit;
        end;
      else
        ;
    end;
    { remaining ops: apply the 32-bit instruction pair returned by
      get_64bit_ops (defined elsewhere) to the low and high dwords; for
      ADD/SUB the flags are kept allocated between the two instructions
      so the low-half carry/borrow can propagate into the high half }
    get_64bit_ops(op,op1,op2);
    if op in [OP_ADD,OP_SUB] then
      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
    list.concat(taicpu.op_reg_reg(op1,S_L,regsrc.reglo,regdst.reglo));
    list.concat(taicpu.op_reg_reg(op2,S_L,regsrc.reghi,regdst.reghi));
    if op in [OP_ADD,OP_SUB] then
      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  end;
{ Emits i386 code for reg64 := reg64 <op> constant (64-bit immediate). }
procedure tcg64f386.a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);
  var
    op1,op2 : TAsmOp;
  begin
    case op of
      OP_AND,OP_OR,OP_XOR:
        begin
          { bitwise ops carry nothing between the dwords: operate on each
            half independently }
          cg.a_op_const_reg(list,op,OS_32,tcgint(lo(value)),reg.reglo);
          cg.a_op_const_reg(list,op,OS_32,tcgint(hi(value)),reg.reghi);
        end;
      OP_ADD, OP_SUB:
        begin
          // can't use a_op_const_ref because this may use dec/inc
          get_64bit_ops(op,op1,op2);
          { keep the flags allocated across both instructions so the
            carry/borrow of the low half propagates into the high half }
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
          list.concat(taicpu.op_const_reg(op1,S_L,aint(lo(value)),reg.reglo));
          list.concat(taicpu.op_const_reg(op2,S_L,aint(hi(value)),reg.reghi));
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        end;
      OP_SHR,OP_SHL,OP_SAR:
        begin
          { a 64-bit shift count is only meaningful modulo 64 }
          value:=value and 63;
          if value<>0 then
            begin
              { shl by 1 when tuning for a 486 or older: ADD/ADC doubles
                the 64-bit value with carry propagation (presumably
                cheaper than SHLD on those CPUs, hence the
                optimizecputype guard) }
              if (value=1) and (op=OP_SHL) and
                (current_settings.optimizecputype<=cpu_486) and
                not (cs_opt_size in current_settings.optimizerswitches) then
                begin
                  cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                  list.concat(taicpu.op_reg_reg(A_ADD,S_L,reg.reglo,reg.reglo));
                  list.concat(taicpu.op_reg_reg(A_ADC,S_L,reg.reghi,reg.reghi));
                  cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                end
              { shift by 1 while optimizing for size: shift one half and
                rotate the shifted-out bit through the carry flag into
                the other half (RCL/RCR) }
              else if (value=1) and (cs_opt_size in current_settings.optimizerswitches) then
                case op of
                  OP_SHR:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
                      list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  OP_SHL:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
                      list.concat(taicpu.op_const_reg(A_RCL,S_L,value,reg.reghi));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  OP_SAR:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
                      list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  else
                    internalerror(2019050902);
                end
              { counts 32..63: the result half is taken entirely from the
                other dword, shifted by (value and 31); the vacated half
                is zeroed (logical shifts) or sign-filled (SAR 31) }
              else if value>31 then
                case op of
                  OP_SAR:
                    begin
                      cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
                      list.concat(taicpu.op_const_reg(A_SAR,S_L,31,reg.reghi));
                      if (value and 31)<>0 then
                        list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,reg.reglo));
                    end;
                  OP_SHR:
                    begin
                      cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
                      list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reghi,reg.reghi));
                      if (value and 31)<>0 then
                        list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,reg.reglo));
                    end;
                  OP_SHL:
                    begin
                      cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,reg.reghi);
                      list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reglo,reg.reglo));
                      if (value and 31)<>0 then
                        list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,reg.reghi));
                    end;
                  else
                    internalerror(2017041201);
                end
              { counts 1..31 (general case): the double-precision shift
                moves bits between the halves, then the half the bits
                came from is shifted on its own }
              else
                case op of
                  OP_SAR:
                    begin
                      list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
                      list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
                    end;
                  OP_SHR:
                    begin
                      list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
                      list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
                    end;
                  OP_SHL:
                    begin
                      list.concat(taicpu.op_const_reg_reg(A_SHLD,S_L,value,reg.reglo,reg.reghi));
                      list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
                    end;
                  else
                    internalerror(2017041201);
                end;
            end;
        end;
      else
        internalerror(200204021);
    end;
  end;
{ Emits i386 code for a 64-bit op with a constant on a memory operand:
  [ref] := [ref] <op> value. The low dword lives at ref and the high
  dword at ref+4 (i386 is little endian). }
procedure tcg64f386.a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);
  var
    op1,op2 : TAsmOp;
    tempref : treference;
    tmpreg: TRegister;
  begin
    { work on a copy which the x86 backend may rewrite into a directly
      addressable form }
    tempref:=ref;
    tcgx86(cg).make_simple_ref(list,tempref);
    case op of
      OP_AND,OP_OR,OP_XOR:
        begin
          { no carry between halves: low dword first, then high dword }
          cg.a_op_const_ref(list,op,OS_32,aint(lo(value)),tempref);
          inc(tempref.offset,4);
          cg.a_op_const_ref(list,op,OS_32,aint(hi(value)),tempref);
        end;
      OP_ADD, OP_SUB:
        begin
          get_64bit_ops(op,op1,op2);
          // can't use a_op_const_ref because this may use dec/inc
          { flags stay allocated so the low-half carry/borrow reaches the
            high-half instruction }
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
          list.concat(taicpu.op_const_ref(op1,S_L,aint(lo(value)),tempref));
          inc(tempref.offset,4);
          list.concat(taicpu.op_const_ref(op2,S_L,aint(hi(value)),tempref));
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        end;
      OP_SHR,OP_SHL,OP_SAR:
        begin
          { only the low 6 bits of the count matter for a 64-bit shift }
          value:=value and 63;
          if value<>0 then
            begin
              { shift by 1: shift one half in memory and rotate the
                shifted-out bit through the carry flag into the other
                half (RCL/RCR), never needing a scratch register }
              if value=1 then
                case op of
                  OP_SHR:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      inc(tempref.offset,4);
                      list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref));
                      dec(tempref.offset,4);
                      list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  OP_SHL:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref));
                      inc(tempref.offset,4);
                      list.concat(taicpu.op_const_ref(A_RCL,S_L,value,tempref));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  OP_SAR:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      inc(tempref.offset,4);
                      list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
                      dec(tempref.offset,4);
                      list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  else
                    internalerror(2019050901);
                end
              { counts 32..63: load the source dword into a scratch
                register, shift it by (value and 31), store it as the
                result dword, then zero/sign-fill the other dword }
              else if value>31 then
                case op of
                  OP_SHR,OP_SAR:
                    begin
                      tmpreg:=cg.getintregister(list,OS_32);
                      inc(tempref.offset,4);
                      cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                      if (value and 31)<>0 then
                        if op=OP_SHR then
                          list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,tmpreg))
                        else
                          list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,tmpreg));
                      dec(tempref.offset,4);
                      cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                      inc(tempref.offset,4);
                      { high dword: zero for SHR, sign-bit copies for SAR }
                      if op=OP_SHR then
                        cg.a_load_const_ref(list,OS_32,0,tempref)
                      else
                        begin
                          list.concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
                          cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                        end;
                    end;
                  OP_SHL:
                    begin
                      tmpreg:=cg.getintregister(list,OS_32);
                      cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                      if (value and 31)<>0 then
                        list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,tmpreg));
                      inc(tempref.offset,4);
                      cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                      dec(tempref.offset,4);
                      cg.a_load_const_ref(list,OS_32,0,tempref);
                    end;
                  else
                    internalerror(2017041801);
                end
              { counts 2..31: SHRD/SHLD with the source dword held in a
                scratch register shifts bits across the halves in memory;
                the remaining half is then shifted either directly in
                memory (when optimizing for size) or in the register
                followed by a store }
              else
                case op of
                  OP_SHR,OP_SAR:
                    begin
                      tmpreg:=cg.getintregister(list,OS_32);
                      inc(tempref.offset,4);
                      cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                      dec(tempref.offset,4);
                      list.concat(taicpu.op_const_reg_ref(A_SHRD,S_L,value,tmpreg,tempref));
                      inc(tempref.offset,4);
                      if cs_opt_size in current_settings.optimizerswitches then
                        begin
                          if op=OP_SHR then
                            list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref))
                          else
                            list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
                        end
                      else
                        begin
                          if op=OP_SHR then
                            list.concat(taicpu.op_const_reg(A_SHR,S_L,value,tmpreg))
                          else
                            list.concat(taicpu.op_const_reg(A_SAR,S_L,value,tmpreg));
                          cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                        end;
                    end;
                  OP_SHL:
                    begin
                      tmpreg:=cg.getintregister(list,OS_32);
                      cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                      inc(tempref.offset,4);
                      list.concat(taicpu.op_const_reg_ref(A_SHLD,S_L,value,tmpreg,tempref));
                      dec(tempref.offset,4);
                      if cs_opt_size in current_settings.optimizerswitches then
                        list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref))
                      else
                        begin
                          list.concat(taicpu.op_const_reg(A_SHL,S_L,value,tmpreg));
                          cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                        end;
                    end;
                  else
                    internalerror(2017041201);
                end;
            end;
        end;
      else
        internalerror(200204022);
    end;
  end;
  1077. procedure create_codegen;
  1078. begin
  1079. cg := tcg386.create;
  1080. cg64 := tcg64f386.create;
  1081. end;
  1082. end.