{
    Copyright (c) 1998-2002 by Florian Klaempfl

    This unit implements the code generator for the i386

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit cgcpu;

{$i fpcdefs.inc}

interface

uses
  globtype,
  cgbase,cgobj,cg64f32,cgx86,
  aasmbase,aasmtai,aasmdata,aasmcpu,
  cpubase,parabase,cgutils,
  symconst,symdef,symsym
  ;

type
  tcg386 = class(tcgx86)
    procedure init_register_allocators;override;
    { passing parameters using push instead of mov }
    procedure a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);override;
    procedure a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);override;
    procedure a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);override;
    procedure a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);override;
    procedure g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);override;
    procedure g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
    procedure g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);
    procedure g_maybe_got_init(list: TAsmList); override;
  end;

  tcg64f386 = class(tcg64f32)
    procedure a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);override;
    procedure a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);override;
    procedure a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);override;
    procedure a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);override;
    procedure a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);override;
  private
    procedure get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
  end;

procedure create_codegen;

implementation

uses
  globals,verbose,systems,cutils,
  paramgr,procinfo,fmodule,
  rgcpu,rgx86,cpuinfo;
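
{ Note: use_push decides whether a parameter value can simply be PUSHed onto
  the stack instead of being stored with a MOV. This is presumably only safe
  when no fixed stack frame is used and the parameter location is a
  stack-pointer-relative reference, which is what the checks below test for. }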
function use_push(const cgpara:tcgpara):boolean;
  begin
    result:=(not paramanager.use_fixed_stack) and
            assigned(cgpara.location) and
            (cgpara.location^.loc=LOC_REFERENCE) and
            (cgpara.location^.reference.index=NR_STACK_POINTER_REG);
  end;
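
{ Note: when the "use EBP" optimizer switch is active and the current procedure
  does not use EBP as its frame pointer, EBP is handed to the register
  allocator as an ordinary allocatable register; otherwise it stays reserved. }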
procedure tcg386.init_register_allocators;
  begin
    inherited init_register_allocators;
    if (cs_useebp in current_settings.optimizerswitches) and assigned(current_procinfo) and (current_procinfo.framepointer<>NR_EBP) then
      rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI,RS_EBP],first_int_imreg,[])
    else
      rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI],first_int_imreg,[RS_EBP]);
    rg[R_MMXREGISTER]:=trgcpu.create(R_MMXREGISTER,R_SUBNONE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
    rg[R_MMREGISTER]:=trgcpu.create(R_MMREGISTER,R_SUBWHOLE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
    rgfpu:=Trgx86fpu.create;
  end;

procedure tcg386.a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);
  var
    pushsize : tcgsize;
  begin
    check_register_size(size,r);
    if use_push(cgpara) then
      begin
        cgpara.check_simple_location;
        if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
          pushsize:=cgpara.location^.size
        else
          pushsize:=int_cgsize(cgpara.alignment);
        list.concat(taicpu.op_reg(A_PUSH,tcgsize2opsize[pushsize],makeregsize(list,r,pushsize)));
      end
    else
      inherited a_load_reg_cgpara(list,size,r,cgpara);
  end;

procedure tcg386.a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);
  var
    pushsize : tcgsize;
  begin
    if use_push(cgpara) then
      begin
        cgpara.check_simple_location;
        if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
          pushsize:=cgpara.location^.size
        else
          pushsize:=int_cgsize(cgpara.alignment);
        list.concat(taicpu.op_const(A_PUSH,tcgsize2opsize[pushsize],a));
      end
    else
      inherited a_load_const_cgpara(list,size,a,cgpara);
  end;

procedure tcg386.a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);

  procedure pushdata(paraloc:pcgparalocation;ofs:tcgint);
    var
      pushsize : tcgsize;
      opsize : topsize;
      tmpreg : tregister;
      href : treference;
    begin
      if not assigned(paraloc) then
        exit;
      if (paraloc^.loc<>LOC_REFERENCE) or
         (paraloc^.reference.index<>NR_STACK_POINTER_REG) or
         (tcgsize2size[paraloc^.size]>sizeof(aint)) then
        internalerror(200501162);
      { Pushes are needed in reverse order, add the size of the
        current location to the offset where to load from. This
        prevents wrong calculations for the last location when
        the size is not a power of 2 }
      if assigned(paraloc^.next) then
        pushdata(paraloc^.next,ofs+tcgsize2size[paraloc^.size]);
      { Push the data starting at ofs }
      href:=r;
      inc(href.offset,ofs);
      if tcgsize2size[paraloc^.size]>cgpara.alignment then
        pushsize:=paraloc^.size
      else
        pushsize:=int_cgsize(cgpara.alignment);
      opsize:=TCgsize2opsize[pushsize];
      { for go32v2 we obtain OS_F32,
        but pushs is not valid, we need pushl }
      if opsize=S_FS then
        opsize:=S_L;
      if tcgsize2size[paraloc^.size]<cgpara.alignment then
        begin
          tmpreg:=getintregister(list,pushsize);
          a_load_ref_reg(list,paraloc^.size,pushsize,href,tmpreg);
          list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
        end
      else
        begin
          make_simple_ref(list,href);
          list.concat(taicpu.op_ref(A_PUSH,opsize,href));
        end;
    end;

  var
    len : tcgint;
    href : treference;
  begin
    { cgpara.size=OS_NO requires a copy on the stack }
    if use_push(cgpara) then
      begin
        { Record copy? }
        if (cgpara.size=OS_NO) or (size=OS_NO) then
          begin
            cgpara.check_simple_location;
            len:=align(cgpara.intsize,cgpara.alignment);
            g_stackpointer_alloc(list,len);
            reference_reset_base(href,NR_STACK_POINTER_REG,0,ctempposinvalid,4,[]);
            g_concatcopy(list,r,href,len);
          end
        else
          begin
            if tcgsize2size[cgpara.size]<>tcgsize2size[size] then
              internalerror(200501161);
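            { Note: for an OS_F64 value the two dwords are pushed high half
              first, so that after both pushes the low dword ends up at the
              lower address, matching the little-endian layout in memory. }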
            if (cgpara.size=OS_F64) then
              begin
                href:=r;
                make_simple_ref(list,href);
                inc(href.offset,4);
                list.concat(taicpu.op_ref(A_PUSH,S_L,href));
                dec(href.offset,4);
                list.concat(taicpu.op_ref(A_PUSH,S_L,href));
              end
            else
              { We need to push the data in reverse order,
                therefore we use a recursive algorithm }
              pushdata(cgpara.location,0);
          end
      end
    else
      begin
        href:=r;
        make_simple_ref(list,href);
        inherited a_load_ref_cgpara(list,size,href,cgpara);
      end;
  end;

procedure tcg386.a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);
  var
    tmpreg : tregister;
    opsize : topsize;
    tmpref,dirref : treference;
  begin
    dirref:=r;
    { this could probably be done in a more optimized way, but for now this
      is sufficient }
    make_direct_ref(list,dirref);
    with dirref do
      begin
        if use_push(cgpara) then
          begin
            cgpara.check_simple_location;
            opsize:=tcgsize2opsize[OS_ADDR];
            if (segment=NR_NO) and (base=NR_NO) and (index=NR_NO) then
              begin
                if assigned(symbol) then
                  begin
                    if (target_info.system in [system_i386_darwin,system_i386_iphonesim]) and
                       ((dirref.symbol.bind in [AB_EXTERNAL,AB_WEAK_EXTERNAL]) or
                        (cs_create_pic in current_settings.moduleswitches)) then
                      begin
                        tmpreg:=getaddressregister(list);
                        a_loadaddr_ref_reg(list,dirref,tmpreg);
                        list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
                      end
                    else if cs_create_pic in current_settings.moduleswitches then
                      begin
                        if offset<>0 then
                          begin
                            tmpreg:=getaddressregister(list);
                            a_loadaddr_ref_reg(list,dirref,tmpreg);
                            list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
                          end
                        else
                          begin
                            reference_reset_symbol(tmpref,dirref.symbol,0,sizeof(pint),[]);
                            tmpref.refaddr:=addr_pic;
                            tmpref.base:=current_procinfo.got;
                            include(current_procinfo.flags,pi_needs_got);
                            list.concat(taicpu.op_ref(A_PUSH,S_L,tmpref));
                          end
                      end
                    else
                      list.concat(Taicpu.Op_sym_ofs(A_PUSH,opsize,symbol,offset));
                  end
                else
                  list.concat(Taicpu.Op_const(A_PUSH,opsize,offset));
              end
            else if (segment=NR_NO) and (base=NR_NO) and (index<>NR_NO) and
                    (offset=0) and (scalefactor=0) and (symbol=nil) then
              list.concat(Taicpu.Op_reg(A_PUSH,opsize,index))
            else if (segment=NR_NO) and (base<>NR_NO) and (index=NR_NO) and
                    (offset=0) and (symbol=nil) then
              list.concat(Taicpu.Op_reg(A_PUSH,opsize,base))
            else
              begin
                tmpreg:=getaddressregister(list);
                a_loadaddr_ref_reg(list,dirref,tmpreg);
                list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
              end;
          end
        else
          inherited a_loadaddr_ref_cgpara(list,dirref,cgpara);
      end;
  end;

procedure tcg386.g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);

  procedure increase_sp(a : tcgint);
    var
      href : treference;
    begin
      reference_reset_base(href,NR_STACK_POINTER_REG,a,ctempposinvalid,0,[]);
      { normally, lea is a better choice than an add }
      list.concat(Taicpu.op_ref_reg(A_LEA,TCGSize2OpSize[OS_ADDR],href,NR_STACK_POINTER_REG));
    end;

  begin
    { MMX needs to call EMMS }
    if assigned(rg[R_MMXREGISTER]) and
       (rg[R_MMXREGISTER].uses_registers) then
      list.concat(Taicpu.op_none(A_EMMS,S_NO));
    { remove stackframe }
    if not(nostackframe) and
       { we do not need an exit stack frame when we never return
         * the final ret is left so the peephole optimizer can easily do call/ret -> jmp or call conversions
         * the entry stack frame must still be generated normally, because the subroutine could still be
           left by an exception, in which case the unwinding code might need to restore the registers
           stored by the entry code
       }
       not(po_noreturn in current_procinfo.procdef.procoptions) then
      begin
        if (current_procinfo.framepointer=NR_STACK_POINTER_REG) or
           (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
          begin
            if current_procinfo.final_localsize<>0 then
              increase_sp(current_procinfo.final_localsize);
            if (not paramanager.use_fixed_stack) then
              internal_restore_regs(list,true);
            if (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
              list.concat(Taicpu.op_reg(A_POP,tcgsize2opsize[OS_ADDR],NR_FRAME_POINTER_REG));
            current_asmdata.asmcfi.cfa_def_cfa_offset(list,sizeof(pint));
          end
        else
          begin
            if (not paramanager.use_fixed_stack) then
              internal_restore_regs(list,not (pi_has_stack_allocs in current_procinfo.flags));
            generate_leave(list);
          end;
        list.concat(tai_regalloc.dealloc(current_procinfo.framepointer,nil));
      end;
    { return from proc }
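    { Note: for interrupt handlers the pops below presumably undo, in reverse
      order, the pushes done by the interrupt entry code. When a register
      (EAX, or EDX for the high half of 64-bit results) carries the function
      result, it is not popped; the saved copy is discarded with ADD ESP,4
      instead so the result value survives the epilogue. }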
    if po_interrupt in current_procinfo.procdef.procoptions then
      begin
        if assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
           (current_procinfo.procdef.funcretloc[calleeside].location^.loc=LOC_REGISTER) then
          begin
            if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.register)=RS_EAX) then
              list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
            else
              internalerror(2010053001);
          end
        else
          list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EAX));
        list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EBX));
        list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ECX));
        if (current_procinfo.procdef.funcretloc[calleeside].size in [OS_64,OS_S64]) and
           assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
           assigned(current_procinfo.procdef.funcretloc[calleeside].location^.next) and
           (current_procinfo.procdef.funcretloc[calleeside].location^.next^.loc=LOC_REGISTER) then
          begin
            if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.next^.register)=RS_EDX) then
              list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
            else
              internalerror(2010053002);
          end
        else
          list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDX));
        list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ESI));
        list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDI));
        { .... also the segment registers }
        list.concat(Taicpu.Op_reg(A_POP,S_W,NR_DS));
        list.concat(Taicpu.Op_reg(A_POP,S_W,NR_ES));
        list.concat(Taicpu.Op_reg(A_POP,S_W,NR_FS));
        list.concat(Taicpu.Op_reg(A_POP,S_W,NR_GS));
        { this restores the flags }
        list.concat(Taicpu.Op_none(A_IRET,S_NO));
      end
    { Routines with the poclearstack flag set use only a ret }
    else if (current_procinfo.procdef.proccalloption in clearstack_pocalls) and
            (not paramanager.use_fixed_stack) then
      begin
        { complex return values are removed from stack in C code PM }
        { but not on win32 }
        { and not for safecall with hidden exceptions, because the result }
        { which contains the exception is passed in EAX }
        if ((target_info.system <> system_i386_win32) or
            (target_info.abi=abi_old_win32_gnu)) and
           not ((current_procinfo.procdef.proccalloption = pocall_safecall) and
                (tf_safecall_exceptions in target_info.flags)) and
           paramanager.ret_in_param(current_procinfo.procdef.returndef,
                                    current_procinfo.procdef) then
          list.concat(Taicpu.Op_const(A_RET,S_W,sizeof(aint)))
        else
          list.concat(Taicpu.Op_none(A_RET,S_NO));
      end
    { ... also routines with parasize=0 }
    else if (parasize=0) then
      list.concat(Taicpu.Op_none(A_RET,S_NO))
    else
      begin
        { parameters are limited to 65535 bytes because ret allows only imm16 }
        if (parasize>65535) then
          CGMessage(cg_e_parasize_too_big);
        list.concat(Taicpu.Op_const(A_RET,S_W,parasize));
      end;
  end;
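
{ Note: this routine makes a local copy of an open-array value parameter on the
  stack. Roughly: the element count (high+1) is scaled to a byte size in EDI,
  stack space is reserved (with page-by-page probing on win32 so every guard
  page gets touched), the data is copied with REP MOVSB/MOVSW/MOVSD, and
  destreg finally receives the address of the copy. }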
procedure tcg386.g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
  var
    power : longint;
    opsize : topsize;
{$ifndef __NOWINPECOFF__}
    again,ok : tasmlabel;
{$endif}
  begin
    { get stack space }
    getcpuregister(list,NR_EDI);
    a_load_loc_reg(list,OS_INT,lenloc,NR_EDI);
    list.concat(Taicpu.op_reg(A_INC,S_L,NR_EDI));
    { Now EDI contains (high+1). }
    { special case handling for elesize=8, 4 and 2:
      set ECX = (high+1) instead of ECX = (high+1)*elesize.
      In the case of elesize=4 and 2, this allows us to avoid the SHR later.
      In the case of elesize=8, we can later use a SHL ECX, 1 instead of
      SHR ECX, 2 which is one byte shorter. }
    if (elesize=8) or (elesize=4) or (elesize=2) then
      begin
        { Now EDI contains (high+1). Copy it to ECX for later use. }
        getcpuregister(list,NR_ECX);
        list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
      end;
    { EDI := EDI * elesize }
    if (elesize<>1) then
      begin
        if ispowerof2(elesize, power) then
          list.concat(Taicpu.op_const_reg(A_SHL,S_L,power,NR_EDI))
        else
          list.concat(Taicpu.op_const_reg(A_IMUL,S_L,elesize,NR_EDI));
      end;
    if (elesize<>8) and (elesize<>4) and (elesize<>2) then
      begin
        { Now EDI contains (high+1)*elesize. Copy it to ECX for later use. }
        getcpuregister(list,NR_ECX);
        list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
      end;
{$ifndef __NOWINPECOFF__}
    { windows guards only a few pages for stack growing, }
    { so we have to access every page first }
    if target_info.system=system_i386_win32 then
      begin
        current_asmdata.getjumplabel(again);
        current_asmdata.getjumplabel(ok);
        a_label(list,again);
        list.concat(Taicpu.op_const_reg(A_CMP,S_L,winstackpagesize,NR_EDI));
        a_jmp_cond(list,OC_B,ok);
        list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize-4,NR_ESP));
        list.concat(Taicpu.op_reg(A_PUSH,S_L,NR_EDI));
        list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize,NR_EDI));
        a_jmp_always(list,again);
        a_label(list,ok);
      end;
{$endif __NOWINPECOFF__}
    { If we were probing pages, EDI=(size mod pagesize) and ESP is decremented
      by (size div pagesize)*pagesize, otherwise EDI=size.
      Either way, subtracting EDI from ESP will set ESP to desired final value. }
    list.concat(Taicpu.op_reg_reg(A_SUB,S_L,NR_EDI,NR_ESP));
    { align stack on 4 bytes }
    list.concat(Taicpu.op_const_reg(A_AND,S_L,aint($fffffff4),NR_ESP));
    { load destination, don't use a_load_reg_reg, that will add a move instruction
      that can confuse the reg allocator }
    list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,NR_EDI));
    { Allocate ESI and load it with source }
    getcpuregister(list,NR_ESI);
    a_loadaddr_ref_reg(list,ref,NR_ESI);
    { calculate size }
    opsize:=S_B;
    if elesize=8 then
      begin
        opsize:=S_L;
        { ECX is number of qwords, convert to dwords }
        list.concat(Taicpu.op_const_reg(A_SHL,S_L,1,NR_ECX))
      end
    else if elesize=4 then
      begin
        opsize:=S_L;
        { ECX is already number of dwords, so no need to SHL/SHR }
      end
    else if elesize=2 then
      begin
        opsize:=S_W;
        { ECX is already number of words, so no need to SHL/SHR }
      end
    else
      if (elesize and 3)=0 then
        begin
          opsize:=S_L;
          { ECX is number of bytes, convert to dwords }
          list.concat(Taicpu.op_const_reg(A_SHR,S_L,2,NR_ECX))
        end
      else
        if (elesize and 1)=0 then
          begin
            opsize:=S_W;
            { ECX is number of bytes, convert to words }
            list.concat(Taicpu.op_const_reg(A_SHR,S_L,1,NR_ECX))
          end;
    if ts_cld in current_settings.targetswitches then
      list.concat(Taicpu.op_none(A_CLD,S_NO));
    list.concat(Taicpu.op_none(A_REP,S_NO));
    case opsize of
      S_B : list.concat(Taicpu.Op_none(A_MOVSB,S_NO));
      S_W : list.concat(Taicpu.Op_none(A_MOVSW,S_NO));
      S_L : list.concat(Taicpu.Op_none(A_MOVSD,S_NO));
    else
      internalerror(2019050901);
    end;
    ungetcpuregister(list,NR_EDI);
    ungetcpuregister(list,NR_ECX);
    ungetcpuregister(list,NR_ESI);
    { patch the new address, but don't use a_load_reg_reg, that will add a move instruction
      that can confuse the reg allocator }
    list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,destreg));
    include(current_procinfo.flags,pi_has_stack_allocs);
  end;

procedure tcg386.g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);
  begin
    { Nothing to release }
  end;
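
{ Note: two PIC initialization strategies are visible below. On ELF-style
  targets the GOT register is set up by calling one of the fpc_geteipasebx /
  fpc_geteipasecx helpers and adding the _GLOBAL_OFFSET_TABLE_ offset; on
  Darwin a local call/pop sequence loads the current instruction address
  instead. }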
procedure tcg386.g_maybe_got_init(list: TAsmList);
  var
    i: longint;
    tmpreg: TRegister;
  begin
    { allocate PIC register }
    if (tf_pic_uses_got in target_info.flags) and
       (pi_needs_got in current_procinfo.flags) then
      begin
        if not (target_info.system in [system_i386_darwin,system_i386_iphonesim]) then
          begin
            { Use ECX as a temp register by default }
            if current_procinfo.got = NR_EBX then
              tmpreg:=NR_EBX
            else
              tmpreg:=NR_ECX;
            { Allocate the registers used for parameters to make sure they
              are never allocated during this PIC init code }
            for i:=0 to current_procinfo.procdef.paras.Count - 1 do
              with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
                if Loc in [LOC_REGISTER, LOC_CREGISTER] then begin
                  a_reg_alloc(list, register);
                  { If ECX is used for a parameter, use EBX as temp }
                  if getsupreg(register) = RS_ECX then
                    tmpreg:=NR_EBX;
                end;
            if tmpreg = NR_EBX then
              begin
                { Mark EBX as used in the proc }
                include(rg[R_INTREGISTER].used_in_proc,RS_EBX);
                current_module.requires_ebx_pic_helper:=true;
                a_call_name_static(list,'fpc_geteipasebx');
              end
            else
              begin
                current_module.requires_ecx_pic_helper:=true;
                a_call_name_static(list,'fpc_geteipasecx');
              end;
            list.concat(taicpu.op_sym_ofs_reg(A_ADD,S_L,current_asmdata.RefAsmSymbol('_GLOBAL_OFFSET_TABLE_',AT_DATA),0,tmpreg));
            list.concat(taicpu.op_reg_reg(A_MOV,S_L,tmpreg,current_procinfo.got));
            { Deallocate parameter registers }
            for i:=0 to current_procinfo.procdef.paras.Count - 1 do
              with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
                if Loc in [LOC_REGISTER, LOC_CREGISTER] then
                  a_reg_dealloc(list, register);
          end
        else
          begin
            { call/pop is faster than call/ret/mov on Core Solo and later
              according to Apple's benchmarking -- and all Intel Macs
              have at least a Core Solo (furthermore, the i386 - Pentium 1
              don't have a return stack buffer) }
            a_call_name_static(list,current_procinfo.CurrGOTLabel.name);
            a_label(list,current_procinfo.CurrGotLabel);
            list.concat(taicpu.op_reg(A_POP,S_L,current_procinfo.got))
          end;
      end;
  end;

{ ************* 64bit operations ************ }
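
{ Note: every 64-bit operation is composed of one instruction on the low dword
  (op1) and one on the high dword (op2). For additive operations the high half
  has to consume the carry/borrow of the low half, hence the ADD/ADC and
  SUB/SBB pairs; the bitwise operations simply use the same opcode twice. }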
procedure tcg64f386.get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
  begin
    case op of
      OP_ADD :
        begin
          op1:=A_ADD;
          op2:=A_ADC;
        end;
      OP_SUB :
        begin
          op1:=A_SUB;
          op2:=A_SBB;
        end;
      OP_XOR :
        begin
          op1:=A_XOR;
          op2:=A_XOR;
        end;
      OP_OR :
        begin
          op1:=A_OR;
          op2:=A_OR;
        end;
      OP_AND :
        begin
          op1:=A_AND;
          op2:=A_AND;
        end;
      else
        internalerror(200203241);
    end;
  end;

procedure tcg64f386.a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);
  var
    op1,op2 : TAsmOp;
    tempref : treference;
  begin
    if not(op in [OP_NEG,OP_NOT]) then
      begin
        get_64bit_ops(op,op1,op2);
        tempref:=ref;
        tcgx86(cg).make_simple_ref(list,tempref);
        if op in [OP_ADD,OP_SUB] then
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.concat(taicpu.op_ref_reg(op1,S_L,tempref,reg.reglo));
        inc(tempref.offset,4);
        list.concat(taicpu.op_ref_reg(op2,S_L,tempref,reg.reghi));
        if op in [OP_ADD,OP_SUB] then
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
      end
    else
      begin
        a_load64_ref_reg(list,ref,reg);
        a_op64_reg_reg(list,op,size,reg,reg);
      end;
  end;

procedure tcg64f386.a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);
  var
    op1,op2 : TAsmOp;
    tempref : treference;
    tmpreg: TRegister;
    l1, l2: TAsmLabel;
  begin
    case op of
      OP_NOT:
        begin
          tempref:=ref;
          tcgx86(cg).make_simple_ref(list,tempref);
          list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
          inc(tempref.offset,4);
          list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
        end;
      OP_NEG:
        begin
          tempref:=ref;
          tcgx86(cg).make_simple_ref(list,tempref);
          inc(tempref.offset,4);
          list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
          dec(tempref.offset,4);
          list.concat(taicpu.op_ref(A_NEG,S_L,tempref));
          inc(tempref.offset,4);
          list.concat(taicpu.op_const_ref(A_SBB,S_L,-1,tempref));
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        end;
      OP_SHR,OP_SHL,OP_SAR:
        begin
          { load the right operand (the shift count) into a register }
          cg.getcpuregister(list,NR_ECX);
          cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,NR_ECX);
          tempref:=ref;
          tcgx86(cg).make_simple_ref(list,tempref);
          { the damned shift instructions only handle counts up to 31,
            so we have to do some tricks here }
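          { Note: since the hardware masks shift counts to 5 bits, counts of
            32..63 need a separate path. The TEST of bit 5 of CL below selects
            between the "count >= 32" fall-through code (one dword is shifted
            or filled, the other cleared or sign-extended) and the SHLD/SHRD
            code at l1 for counts below 32. }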
          current_asmdata.getjumplabel(l1);
          current_asmdata.getjumplabel(l2);
          list.Concat(taicpu.op_const_reg(A_TEST,S_B,32,NR_CL));
          cg.a_jmp_flags(list,F_E,l1);
          tmpreg:=cg.getintregister(list,OS_32);
          case op of
            OP_SHL:
              begin
                cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
                inc(tempref.offset,4);
                cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                dec(tempref.offset,4);
                cg.a_load_const_ref(list,OS_32,0,tempref);
                cg.a_jmp_always(list,l2);
                cg.a_label(list,l1);
                cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                inc(tempref.offset,4);
                list.Concat(taicpu.op_reg_reg_ref(A_SHLD,S_L,NR_CL,tmpreg,tempref));
                dec(tempref.offset,4);
                if cs_opt_size in current_settings.optimizerswitches then
                  list.concat(taicpu.op_reg_ref(A_SHL,S_L,NR_CL,tempref))
                else
                  begin
                    list.concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
                    cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                  end;
              end;
            OP_SHR:
              begin
                inc(tempref.offset,4);
                cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
                dec(tempref.offset,4);
                cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                inc(tempref.offset,4);
                cg.a_load_const_ref(list,OS_32,0,tempref);
                cg.a_jmp_always(list,l2);
                cg.a_label(list,l1);
                cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                dec(tempref.offset,4);
                list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
                inc(tempref.offset,4);
                if cs_opt_size in current_settings.optimizerswitches then
                  list.concat(taicpu.op_reg_ref(A_SHR,S_L,NR_CL,tempref))
                else
                  begin
                    list.concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
                    cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                  end;
              end;
            OP_SAR:
              begin
                inc(tempref.offset,4);
                cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
                dec(tempref.offset,4);
                cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                inc(tempref.offset,4);
                list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
                cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                cg.a_jmp_always(list,l2);
                cg.a_label(list,l1);
                cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                dec(tempref.offset,4);
                list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
                inc(tempref.offset,4);
                if cs_opt_size in current_settings.optimizerswitches then
                  list.concat(taicpu.op_reg_ref(A_SAR,S_L,NR_CL,tempref))
                else
                  begin
                    list.concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
                    cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                  end;
              end;
            else
              internalerror(2017041801);
          end;
          cg.a_label(list,l2);
          cg.ungetcpuregister(list,NR_ECX);
          exit;
        end;
      else
        begin
          get_64bit_ops(op,op1,op2);
          tempref:=ref;
          tcgx86(cg).make_simple_ref(list,tempref);
          if op in [OP_ADD,OP_SUB] then
            cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
          list.concat(taicpu.op_reg_ref(op1,S_L,reg.reglo,tempref));
          inc(tempref.offset,4);
          list.concat(taicpu.op_reg_ref(op2,S_L,reg.reghi,tempref));
          if op in [OP_ADD,OP_SUB] then
            cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        end;
    end;
  end;

procedure tcg64f386.a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);
  var
    op1,op2 : TAsmOp;
    l1, l2: TAsmLabel;
  begin
    case op of
      OP_NEG :
        begin
          if (regsrc.reglo<>regdst.reglo) then
            a_load64_reg_reg(list,regsrc,regdst);
          list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
          list.concat(taicpu.op_reg(A_NEG,S_L,regdst.reglo));
          list.concat(taicpu.op_const_reg(A_SBB,S_L,-1,regdst.reghi));
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
          exit;
        end;
      OP_NOT :
        begin
          if (regsrc.reglo<>regdst.reglo) then
            a_load64_reg_reg(list,regsrc,regdst);
          list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
          list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reglo));
          exit;
        end;
      OP_SHR,OP_SHL,OP_SAR:
        begin
          { load the right operand (the shift count) into a register }
          cg.getcpuregister(list,NR_ECX);
          cg.a_load_reg_reg(list,OS_32,OS_32,regsrc.reglo,NR_ECX);
          { the damned shift instructions only handle counts up to 31,
            so we have to do some tricks here }
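          { Note: same trick as in a_op64_reg_ref above: bit 5 of CL decides
            whether the count is >= 32 (fall-through path, one register shifted
            and the other cleared or sign-filled) or < 32 (SHLD/SHRD path at l1). }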
          current_asmdata.getjumplabel(l1);
          current_asmdata.getjumplabel(l2);
          list.Concat(taicpu.op_const_reg(A_TEST,S_B,32,NR_CL));
          cg.a_jmp_flags(list,F_E,l1);
          case op of
            OP_SHL:
              begin
                list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
                cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reglo,regdst.reghi);
                list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reglo,regdst.reglo));
                cg.a_jmp_always(list,l2);
                cg.a_label(list,l1);
                list.Concat(taicpu.op_reg_reg_reg(A_SHLD,S_L,NR_CL,regdst.reglo,regdst.reghi));
                list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
              end;
            OP_SHR:
              begin
                list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
                cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
                list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reghi,regdst.reghi));
                cg.a_jmp_always(list,l2);
                cg.a_label(list,l1);
                list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
                list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
              end;
            OP_SAR:
              begin
                cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
                list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reglo));
                list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,regdst.reghi));
                cg.a_jmp_always(list,l2);
                cg.a_label(list,l1);
                list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
                list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reghi));
              end;
            else
              internalerror(2017041801);
          end;
          cg.a_label(list,l2);
          cg.ungetcpuregister(list,NR_ECX);
          exit;
        end;
      else
        ;
    end;
    get_64bit_ops(op,op1,op2);
    if op in [OP_ADD,OP_SUB] then
      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
    list.concat(taicpu.op_reg_reg(op1,S_L,regsrc.reglo,regdst.reglo));
    list.concat(taicpu.op_reg_reg(op2,S_L,regsrc.reghi,regdst.reghi));
    if op in [OP_ADD,OP_SUB] then
      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  end;

procedure tcg64f386.a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);
  var
    op1,op2 : TAsmOp;
  begin
    case op of
      OP_AND,OP_OR,OP_XOR:
        begin
          cg.a_op_const_reg(list,op,OS_32,tcgint(lo(value)),reg.reglo);
          cg.a_op_const_reg(list,op,OS_32,tcgint(hi(value)),reg.reghi);
        end;
      OP_ADD, OP_SUB:
        begin
          // can't use a_op_const_ref because this may use dec/inc
          get_64bit_ops(op,op1,op2);
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
          list.concat(taicpu.op_const_reg(op1,S_L,aint(lo(value)),reg.reglo));
          list.concat(taicpu.op_const_reg(op2,S_L,aint(hi(value)),reg.reghi));
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        end;
      OP_SHR,OP_SHL,OP_SAR:
        begin
          value:=value and 63;
          if value<>0 then
            begin
              if (value=1) and (op=OP_SHL) and
                 (current_settings.optimizecputype<=cpu_486) and
                 not (cs_opt_size in current_settings.optimizerswitches) then
                begin
                  cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                  list.concat(taicpu.op_reg_reg(A_ADD,S_L,reg.reglo,reg.reglo));
                  list.concat(taicpu.op_reg_reg(A_ADC,S_L,reg.reghi,reg.reghi));
                  cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                end
              else if (value=1) and (cs_opt_size in current_settings.optimizerswitches) then
                case op of
                  OP_SHR:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
                      list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  OP_SHL:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
                      list.concat(taicpu.op_const_reg(A_RCL,S_L,value,reg.reghi));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  OP_SAR:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
                      list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  else
                    internalerror(2019050902);
                end
              else if value>31 then
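                { constant counts of 32..63: roughly, the low (or high) dword
                  is moved into the other half, which is then shifted by the
                  remaining (value and 31) bits, while the vacated half is
                  zeroed or, for SAR, filled with the sign. }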
                case op of
                  OP_SAR:
                    begin
                      cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
                      list.concat(taicpu.op_const_reg(A_SAR,S_L,31,reg.reghi));
                      if (value and 31)<>0 then
                        list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,reg.reglo));
                    end;
                  OP_SHR:
                    begin
                      cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
                      list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reghi,reg.reghi));
                      if (value and 31)<>0 then
                        list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,reg.reglo));
                    end;
                  OP_SHL:
                    begin
                      cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,reg.reghi);
                      list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reglo,reg.reglo));
                      if (value and 31)<>0 then
                        list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,reg.reghi));
                    end;
                  else
                    internalerror(2017041201);
                end
              else
                case op of
                  OP_SAR:
                    begin
                      list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
                      list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
                    end;
                  OP_SHR:
                    begin
                      list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
                      list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
                    end;
                  OP_SHL:
                    begin
                      list.concat(taicpu.op_const_reg_reg(A_SHLD,S_L,value,reg.reglo,reg.reghi));
                      list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
                    end;
                  else
                    internalerror(2017041201);
                end;
            end;
        end;
      else
        internalerror(200204021);
    end;
  end;

procedure tcg64f386.a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);
  var
    op1,op2 : TAsmOp;
    tempref : treference;
    tmpreg: TRegister;
  begin
    tempref:=ref;
    tcgx86(cg).make_simple_ref(list,tempref);
    case op of
      OP_AND,OP_OR,OP_XOR:
        begin
          cg.a_op_const_ref(list,op,OS_32,aint(lo(value)),tempref);
          inc(tempref.offset,4);
          cg.a_op_const_ref(list,op,OS_32,aint(hi(value)),tempref);
        end;
      OP_ADD, OP_SUB:
        begin
          get_64bit_ops(op,op1,op2);
          // can't use a_op_const_ref because this may use dec/inc
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
          list.concat(taicpu.op_const_ref(op1,S_L,aint(lo(value)),tempref));
          inc(tempref.offset,4);
          list.concat(taicpu.op_const_ref(op2,S_L,aint(hi(value)),tempref));
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        end;
      OP_SHR,OP_SHL,OP_SAR:
        begin
          value:=value and 63;
          if value<>0 then
            begin
              if value=1 then
                case op of
                  OP_SHR:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      inc(tempref.offset,4);
                      list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref));
                      dec(tempref.offset,4);
                      list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  OP_SHL:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref));
                      inc(tempref.offset,4);
                      list.concat(taicpu.op_const_ref(A_RCL,S_L,value,tempref));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  OP_SAR:
                    begin
                      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                      inc(tempref.offset,4);
                      list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
                      dec(tempref.offset,4);
                      list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
                      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                    end;
                  else
                    internalerror(2019050901);
                end
              else if value>31 then
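                { constant counts of 32..63 on a memory operand: roughly the
                  same idea as in a_op64_const_reg above, but done through a
                  scratch register: one dword of the operand is loaded, shifted
                  by (value and 31) and stored into the other half, and the
                  vacated half is zeroed or sign-filled. }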
                case op of
                  OP_SHR,OP_SAR:
                    begin
                      tmpreg:=cg.getintregister(list,OS_32);
                      inc(tempref.offset,4);
                      cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                      if (value and 31)<>0 then
                        if op=OP_SHR then
                          list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,tmpreg))
                        else
                          list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,tmpreg));
                      dec(tempref.offset,4);
                      cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                      inc(tempref.offset,4);
                      if op=OP_SHR then
                        cg.a_load_const_ref(list,OS_32,0,tempref)
                      else
                        begin
                          list.concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
                          cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                        end;
                    end;
                  OP_SHL:
                    begin
                      tmpreg:=cg.getintregister(list,OS_32);
                      cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                      if (value and 31)<>0 then
                        list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,tmpreg));
                      inc(tempref.offset,4);
                      cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                      dec(tempref.offset,4);
                      cg.a_load_const_ref(list,OS_32,0,tempref);
                    end;
                  else
                    internalerror(2017041801);
                end
              else
                case op of
                  OP_SHR,OP_SAR:
                    begin
                      tmpreg:=cg.getintregister(list,OS_32);
                      inc(tempref.offset,4);
                      cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                      dec(tempref.offset,4);
                      list.concat(taicpu.op_const_reg_ref(A_SHRD,S_L,value,tmpreg,tempref));
                      inc(tempref.offset,4);
                      if cs_opt_size in current_settings.optimizerswitches then
                        begin
                          if op=OP_SHR then
                            list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref))
                          else
                            list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
                        end
                      else
                        begin
                          if op=OP_SHR then
                            list.concat(taicpu.op_const_reg(A_SHR,S_L,value,tmpreg))
                          else
                            list.concat(taicpu.op_const_reg(A_SAR,S_L,value,tmpreg));
                          cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                        end;
                    end;
                  OP_SHL:
                    begin
                      tmpreg:=cg.getintregister(list,OS_32);
                      cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                      inc(tempref.offset,4);
                      list.concat(taicpu.op_const_reg_ref(A_SHLD,S_L,value,tmpreg,tempref));
                      dec(tempref.offset,4);
                      if cs_opt_size in current_settings.optimizerswitches then
                        list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref))
                      else
                        begin
                          list.concat(taicpu.op_const_reg(A_SHL,S_L,value,tmpreg));
                          cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                        end;
                    end;
                  else
                    internalerror(2017041201);
                end;
            end;
        end;
      else
        internalerror(200204022);
    end;
  end;

procedure create_codegen;
  begin
    cg := tcg386.create;
    cg64 := tcg64f386.create;
  end;

end.