{
    Copyright (c) 1998-2002 by Florian Klaempfl

    This unit implements the code generator for the i386

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit cgcpu;

{$i fpcdefs.inc}

interface

uses
  globtype,
  cgbase,cgobj,cg64f32,cgx86,
  aasmbase,aasmtai,aasmdata,aasmcpu,
  cpubase,parabase,cgutils,
  symconst,symdef,symsym
  ;

type
  tcg386 = class(tcgx86)
    procedure init_register_allocators;override;
    { passing parameter using push instead of mov }
    procedure a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);override;
    procedure a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);override;
    procedure a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);override;
    procedure a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);override;
    procedure g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);override;
    procedure g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
    procedure g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);
    procedure g_maybe_got_init(list: TAsmList); override;
  end;

  tcg64f386 = class(tcg64f32)
    procedure a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);override;
    procedure a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);override;
    procedure a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);override;
    procedure a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);override;
    procedure a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);override;
  private
    procedure get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
  end;

procedure create_codegen;

implementation

uses
  globals,verbose,systems,cutils,
  paramgr,procinfo,fmodule,
  rgcpu,rgx86,cpuinfo;

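{ returns true when a parameter can be passed with PUSH instructions, i.e.
  when no fixed stack is used and the parameter location is a stack
  reference addressed through ESP }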
function use_push(const cgpara:tcgpara):boolean;
begin
  result:=(not paramanager.use_fixed_stack) and
          assigned(cgpara.location) and
          (cgpara.location^.loc=LOC_REFERENCE) and
          (cgpara.location^.reference.index=NR_STACK_POINTER_REG);
end;

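{ when the cs_useebp optimizer switch is active and the current procedure
  does not use EBP as its frame pointer, EBP is handed to the register
  allocator as an ordinary integer register }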
procedure tcg386.init_register_allocators;
begin
  inherited init_register_allocators;
  if (cs_useebp in current_settings.optimizerswitches) and assigned(current_procinfo) and (current_procinfo.framepointer<>NR_EBP) then
    rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI,RS_EBP],first_int_imreg,[])
  else
    rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI],first_int_imreg,[RS_EBP]);
  rg[R_MMXREGISTER]:=trgcpu.create(R_MMXREGISTER,R_SUBNONE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
  rg[R_MMREGISTER]:=trgcpu.create(R_MMREGISTER,R_SUBWHOLE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
  rgfpu:=Trgx86fpu.create;
end;

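{ the a_load_*_cgpara overrides below push parameters directly with PUSH
  whenever use_push allows it; if the value is smaller than the stack slot
  alignment, a full slot is pushed so that ESP stays aligned }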
procedure tcg386.a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);
var
  pushsize : tcgsize;
begin
  check_register_size(size,r);
  if use_push(cgpara) then
    begin
      cgpara.check_simple_location;
      if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
        pushsize:=cgpara.location^.size
      else
        pushsize:=int_cgsize(cgpara.alignment);
      list.concat(taicpu.op_reg(A_PUSH,tcgsize2opsize[pushsize],makeregsize(list,r,pushsize)));
    end
  else
    inherited a_load_reg_cgpara(list,size,r,cgpara);
end;

procedure tcg386.a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);
var
  pushsize : tcgsize;
begin
  if use_push(cgpara) then
    begin
      cgpara.check_simple_location;
      if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
        pushsize:=cgpara.location^.size
      else
        pushsize:=int_cgsize(cgpara.alignment);
      list.concat(taicpu.op_const(A_PUSH,tcgsize2opsize[pushsize],a));
    end
  else
    inherited a_load_const_cgpara(list,size,a,cgpara);
end;

procedure tcg386.a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);

  procedure pushdata(paraloc:pcgparalocation;ofs:tcgint);
  var
    pushsize : tcgsize;
    opsize : topsize;
    tmpreg : tregister;
    href : treference;
  begin
    if not assigned(paraloc) then
      exit;
    if (paraloc^.loc<>LOC_REFERENCE) or
       (paraloc^.reference.index<>NR_STACK_POINTER_REG) or
       (tcgsize2size[paraloc^.size]>sizeof(aint)) then
      internalerror(200501162);
    { Pushes are needed in reverse order; add the size of the
      current location to the offset to load from. This
      prevents wrong calculations for the last location when
      the size is not a power of 2 }
    if assigned(paraloc^.next) then
      pushdata(paraloc^.next,ofs+tcgsize2size[paraloc^.size]);
    { Push the data starting at ofs }
    href:=r;
    inc(href.offset,ofs);
    if tcgsize2size[paraloc^.size]>cgpara.alignment then
      pushsize:=paraloc^.size
    else
      pushsize:=int_cgsize(cgpara.alignment);
    opsize:=TCgsize2opsize[pushsize];
    { for go32v2 we obtain OS_F32,
      but pushs is not valid, we need pushl }
    if opsize=S_FS then
      opsize:=S_L;
    if tcgsize2size[paraloc^.size]<cgpara.alignment then
      begin
        tmpreg:=getintregister(list,pushsize);
        a_load_ref_reg(list,paraloc^.size,pushsize,href,tmpreg);
        list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
      end
    else
      begin
        make_simple_ref(list,href);
        list.concat(taicpu.op_ref(A_PUSH,opsize,href));
      end;
  end;

var
  len : tcgint;
  href : treference;
begin
  { cgpara.size=OS_NO requires a copy on the stack }
  if use_push(cgpara) then
    begin
      { Record copy? }
      if (cgpara.size=OS_NO) or (size=OS_NO) then
        begin
          cgpara.check_simple_location;
          len:=align(cgpara.intsize,cgpara.alignment);
          g_stackpointer_alloc(list,len);
          reference_reset_base(href,NR_STACK_POINTER_REG,0,4,[]);
          g_concatcopy(list,r,href,len);
        end
      else
        begin
          if tcgsize2size[cgpara.size]<>tcgsize2size[size] then
            internalerror(200501161);
          if (cgpara.size=OS_F64) then
            begin
              href:=r;
              make_simple_ref(list,href);
              inc(href.offset,4);
              list.concat(taicpu.op_ref(A_PUSH,S_L,href));
              dec(href.offset,4);
              list.concat(taicpu.op_ref(A_PUSH,S_L,href));
            end
          else
            { We need to push the data in reverse order,
              therefore we use a recursive algorithm }
            pushdata(cgpara.location,0);
        end
    end
  else
    begin
      href:=r;
      make_simple_ref(list,href);
      inherited a_load_ref_cgpara(list,size,href,cgpara);
    end;
end;

procedure tcg386.a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);
var
  tmpreg : tregister;
  opsize : topsize;
  tmpref,dirref : treference;
begin
  dirref:=r;
  { this could probably be done in a more optimized way, but for now this
    is sufficient }
  make_direct_ref(list,dirref);
  with dirref do
    begin
      if use_push(cgpara) then
        begin
          cgpara.check_simple_location;
          opsize:=tcgsize2opsize[OS_ADDR];
          if (segment=NR_NO) and (base=NR_NO) and (index=NR_NO) then
            begin
              if assigned(symbol) then
                begin
                  if (target_info.system in [system_i386_darwin,system_i386_iphonesim]) and
                     ((dirref.symbol.bind in [AB_EXTERNAL,AB_WEAK_EXTERNAL]) or
                      (cs_create_pic in current_settings.moduleswitches)) then
                    begin
                      tmpreg:=getaddressregister(list);
                      a_loadaddr_ref_reg(list,dirref,tmpreg);
                      list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
                    end
                  else if cs_create_pic in current_settings.moduleswitches then
                    begin
                      if offset<>0 then
                        begin
                          tmpreg:=getaddressregister(list);
                          a_loadaddr_ref_reg(list,dirref,tmpreg);
                          list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
                        end
                      else
                        begin
                          reference_reset_symbol(tmpref,dirref.symbol,0,sizeof(pint),[]);
                          tmpref.refaddr:=addr_pic;
                          tmpref.base:=current_procinfo.got;
{$ifdef EXTDEBUG}
                          if not (pi_needs_got in current_procinfo.flags) then
                            Comment(V_warning,'pi_needs_got not included');
{$endif EXTDEBUG}
                          include(current_procinfo.flags,pi_needs_got);
                          list.concat(taicpu.op_ref(A_PUSH,S_L,tmpref));
                        end
                    end
                  else
                    list.concat(Taicpu.Op_sym_ofs(A_PUSH,opsize,symbol,offset));
                end
              else
                list.concat(Taicpu.Op_const(A_PUSH,opsize,offset));
            end
          else if (segment=NR_NO) and (base=NR_NO) and (index<>NR_NO) and
                  (offset=0) and (scalefactor=0) and (symbol=nil) then
            list.concat(Taicpu.Op_reg(A_PUSH,opsize,index))
          else if (segment=NR_NO) and (base<>NR_NO) and (index=NR_NO) and
                  (offset=0) and (symbol=nil) then
            list.concat(Taicpu.Op_reg(A_PUSH,opsize,base))
          else
            begin
              tmpreg:=getaddressregister(list);
              a_loadaddr_ref_reg(list,dirref,tmpreg);
              list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
            end;
        end
      else
        inherited a_loadaddr_ref_cgpara(list,dirref,cgpara);
    end;
end;

procedure tcg386.g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);

  procedure increase_sp(a : tcgint);
  var
    href : treference;
  begin
    reference_reset_base(href,NR_STACK_POINTER_REG,a,0,[]);
    { normally, lea is a better choice than an add }
    list.concat(Taicpu.op_ref_reg(A_LEA,TCGSize2OpSize[OS_ADDR],href,NR_STACK_POINTER_REG));
  end;

begin
  { MMX needs to call EMMS }
  if assigned(rg[R_MMXREGISTER]) and
     (rg[R_MMXREGISTER].uses_registers) then
    list.concat(Taicpu.op_none(A_EMMS,S_NO));
  { remove stackframe }
  if not nostackframe then
    begin
      if (current_procinfo.framepointer=NR_STACK_POINTER_REG) or
         (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
        begin
          if current_procinfo.final_localsize<>0 then
            increase_sp(current_procinfo.final_localsize);
          if (not paramanager.use_fixed_stack) then
            internal_restore_regs(list,true);
          if (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
            list.concat(Taicpu.op_reg(A_POP,tcgsize2opsize[OS_ADDR],NR_FRAME_POINTER_REG));
        end
      else
        begin
          if (not paramanager.use_fixed_stack) then
            internal_restore_regs(list,not (pi_has_stack_allocs in current_procinfo.flags));
          generate_leave(list);
        end;
      list.concat(tai_regalloc.dealloc(current_procinfo.framepointer,nil));
    end;
  { return from proc }
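  { interrupt handlers restore the general purpose and segment registers
    saved by the prologue and return with IRET; registers that carry the
    function result are skipped by adjusting ESP instead of popping them }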
  if (po_interrupt in current_procinfo.procdef.procoptions) and
     { this messes up stack alignment }
     (target_info.stackalign=4) then
    begin
      if assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
         (current_procinfo.procdef.funcretloc[calleeside].location^.loc=LOC_REGISTER) then
        begin
          if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.register)=RS_EAX) then
            list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
          else
            internalerror(2010053001);
        end
      else
        list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EAX));
      list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EBX));
      list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ECX));
      if (current_procinfo.procdef.funcretloc[calleeside].size in [OS_64,OS_S64]) and
         assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
         assigned(current_procinfo.procdef.funcretloc[calleeside].location^.next) and
         (current_procinfo.procdef.funcretloc[calleeside].location^.next^.loc=LOC_REGISTER) then
        begin
          if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.next^.register)=RS_EDX) then
            list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
          else
            internalerror(2010053002);
        end
      else
        list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDX));
      list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ESI));
      list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDI));
      { .... also the segment registers }
      list.concat(Taicpu.Op_reg(A_POP,S_W,NR_DS));
      list.concat(Taicpu.Op_reg(A_POP,S_W,NR_ES));
      list.concat(Taicpu.Op_reg(A_POP,S_W,NR_FS));
      list.concat(Taicpu.Op_reg(A_POP,S_W,NR_GS));
      { this restores the flags }
      list.concat(Taicpu.Op_none(A_IRET,S_NO));
    end
  { Routines with the poclearstack flag set use only a ret }
  else if (current_procinfo.procdef.proccalloption in clearstack_pocalls) and
          (not paramanager.use_fixed_stack) then
    begin
      { complex return values are removed from stack in C code PM }
      { but not on win32 }
      { and not for safecall with hidden exceptions, because the result }
      { which contains the exception is passed in EAX }
      if ((target_info.system <> system_i386_win32) or
          (target_info.abi=abi_old_win32_gnu)) and
         not ((current_procinfo.procdef.proccalloption = pocall_safecall) and
              (tf_safecall_exceptions in target_info.flags)) and
         paramanager.ret_in_param(current_procinfo.procdef.returndef,
                                  current_procinfo.procdef) then
        list.concat(Taicpu.Op_const(A_RET,S_W,sizeof(aint)))
      else
        list.concat(Taicpu.Op_none(A_RET,S_NO));
    end
  { ... also routines with parasize=0 }
  else if (parasize=0) then
    list.concat(Taicpu.Op_none(A_RET,S_NO))
  else
    begin
      { parameters are limited to 65535 bytes because ret allows only imm16 }
      if (parasize>65535) then
        CGMessage(cg_e_parasize_too_big);
      list.concat(Taicpu.Op_const(A_RET,S_W,parasize));
    end;
end;

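{ g_copyvaluepara_openarray makes a local copy of an open array value
  parameter: EDI is set to (high+1)*elesize, that many bytes are allocated
  on the stack (touching every page on win32), and the data is copied with
  REP MOVSB/MOVSW/MOVSD }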
procedure tcg386.g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
var
  power : longint;
  opsize : topsize;
{$ifndef __NOWINPECOFF__}
  again,ok : tasmlabel;
{$endif}
begin
  { get stack space }
  getcpuregister(list,NR_EDI);
  a_load_loc_reg(list,OS_INT,lenloc,NR_EDI);
  list.concat(Taicpu.op_reg(A_INC,S_L,NR_EDI));
  { Now EDI contains (high+1). }
  { special case handling for elesize=8, 4 and 2:
    set ECX = (high+1) instead of ECX = (high+1)*elesize.
    In the case of elesize=4 and 2, this allows us to avoid the SHR later.
    In the case of elesize=8, we can later use a SHL ECX, 1 instead of
    SHR ECX, 2 which is one byte shorter. }
  if (elesize=8) or (elesize=4) or (elesize=2) then
    begin
      { Now EDI contains (high+1). Copy it to ECX for later use. }
      getcpuregister(list,NR_ECX);
      list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
    end;
  { EDI := EDI * elesize }
  if (elesize<>1) then
    begin
      if ispowerof2(elesize, power) then
        list.concat(Taicpu.op_const_reg(A_SHL,S_L,power,NR_EDI))
      else
        list.concat(Taicpu.op_const_reg(A_IMUL,S_L,elesize,NR_EDI));
    end;
  if (elesize<>8) and (elesize<>4) and (elesize<>2) then
    begin
      { Now EDI contains (high+1)*elesize. Copy it to ECX for later use. }
      getcpuregister(list,NR_ECX);
      list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
    end;
{$ifndef __NOWINPECOFF__}
  { windows guards only a few pages for stack growing, }
  { so we have to access every page first }
  if target_info.system=system_i386_win32 then
    begin
      current_asmdata.getjumplabel(again);
      current_asmdata.getjumplabel(ok);
      a_label(list,again);
      list.concat(Taicpu.op_const_reg(A_CMP,S_L,winstackpagesize,NR_EDI));
      a_jmp_cond(list,OC_B,ok);
      list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize-4,NR_ESP));
      list.concat(Taicpu.op_reg(A_PUSH,S_L,NR_EDI));
      list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize,NR_EDI));
      a_jmp_always(list,again);
      a_label(list,ok);
    end;
{$endif __NOWINPECOFF__}
  { If we were probing pages, EDI=(size mod pagesize) and ESP is decremented
    by (size div pagesize)*pagesize, otherwise EDI=size.
    Either way, subtracting EDI from ESP will set ESP to desired final value. }
  list.concat(Taicpu.op_reg_reg(A_SUB,S_L,NR_EDI,NR_ESP));
  { align stack on 4 bytes }
  list.concat(Taicpu.op_const_reg(A_AND,S_L,aint($fffffff4),NR_ESP));
  { load destination, don't use a_load_reg_reg, that will add a move instruction
    that can confuse the reg allocator }
  list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,NR_EDI));
  { Allocate ESI and load it with source }
  getcpuregister(list,NR_ESI);
  a_loadaddr_ref_reg(list,ref,NR_ESI);
  { calculate size }
  opsize:=S_B;
  if elesize=8 then
    begin
      opsize:=S_L;
      { ECX is number of qwords, convert to dwords }
      list.concat(Taicpu.op_const_reg(A_SHL,S_L,1,NR_ECX))
    end
  else if elesize=4 then
    begin
      opsize:=S_L;
      { ECX is already number of dwords, so no need to SHL/SHR }
    end
  else if elesize=2 then
    begin
      opsize:=S_W;
      { ECX is already number of words, so no need to SHL/SHR }
    end
  else
    if (elesize and 3)=0 then
      begin
        opsize:=S_L;
        { ECX is number of bytes, convert to dwords }
        list.concat(Taicpu.op_const_reg(A_SHR,S_L,2,NR_ECX))
      end
    else
      if (elesize and 1)=0 then
        begin
          opsize:=S_W;
          { ECX is number of bytes, convert to words }
          list.concat(Taicpu.op_const_reg(A_SHR,S_L,1,NR_ECX))
        end;
  if ts_cld in current_settings.targetswitches then
    list.concat(Taicpu.op_none(A_CLD,S_NO));
  list.concat(Taicpu.op_none(A_REP,S_NO));
  case opsize of
    S_B : list.concat(Taicpu.Op_none(A_MOVSB,S_NO));
    S_W : list.concat(Taicpu.Op_none(A_MOVSW,S_NO));
    S_L : list.concat(Taicpu.Op_none(A_MOVSD,S_NO));
  end;
  ungetcpuregister(list,NR_EDI);
  ungetcpuregister(list,NR_ECX);
  ungetcpuregister(list,NR_ESI);
  { patch the new address, but don't use a_load_reg_reg, that will add a move instruction
    that can confuse the reg allocator }
  list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,destreg));
  include(current_procinfo.flags,pi_has_stack_allocs);
end;

procedure tcg386.g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);
begin
  { Nothing to release }
end;

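{ g_maybe_got_init loads the GOT pointer for position independent code: on
  most targets it calls an fpc_geteipas* helper and adds the offset of
  _GLOBAL_OFFSET_TABLE_, while on Darwin it obtains EIP with a call to a
  local label followed by a pop }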
procedure tcg386.g_maybe_got_init(list: TAsmList);
var
  i: longint;
  tmpreg: TRegister;
begin
  { allocate PIC register }
  if (cs_create_pic in current_settings.moduleswitches) and
     (tf_pic_uses_got in target_info.flags) and
     (pi_needs_got in current_procinfo.flags) then
    begin
      if not (target_info.system in [system_i386_darwin,system_i386_iphonesim]) then
        begin
          { Use ECX as a temp register by default }
          tmpreg:=NR_ECX;
          { Allocate registers used for parameters to make sure they are
            never allocated during this PIC init code }
          for i:=0 to current_procinfo.procdef.paras.Count - 1 do
            with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
              if Loc in [LOC_REGISTER, LOC_CREGISTER] then begin
                a_reg_alloc(list, register);
                { If ECX is used for a parameter, use EBX as temp }
                if getsupreg(register) = RS_ECX then
                  tmpreg:=NR_EBX;
              end;
          if tmpreg = NR_EBX then
            begin
              { Mark EBX as used in the proc }
              include(rg[R_INTREGISTER].used_in_proc,RS_EBX);
              current_module.requires_ebx_pic_helper:=true;
              a_call_name_static(list,'fpc_geteipasebx');
            end
          else
            begin
              current_module.requires_ecx_pic_helper:=true;
              a_call_name_static(list,'fpc_geteipasecx');
            end;
          list.concat(taicpu.op_sym_ofs_reg(A_ADD,S_L,current_asmdata.RefAsmSymbol('_GLOBAL_OFFSET_TABLE_',AT_DATA),0,tmpreg));
          list.concat(taicpu.op_reg_reg(A_MOV,S_L,tmpreg,current_procinfo.got));
          { Deallocate parameter registers }
          for i:=0 to current_procinfo.procdef.paras.Count - 1 do
            with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
              if Loc in [LOC_REGISTER, LOC_CREGISTER] then
                a_reg_dealloc(list, register);
        end
      else
        begin
          { call/pop is faster than call/ret/mov on Core Solo and later
            according to Apple's benchmarking -- and all Intel Macs
            have at least a Core Solo (furthermore, the i386 - Pentium 1
            don't have a return stack buffer) }
          a_call_name_static(list,current_procinfo.CurrGOTLabel.name);
          a_label(list,current_procinfo.CurrGotLabel);
          list.concat(taicpu.op_reg(A_POP,S_L,current_procinfo.got))
        end;
    end;
end;

{ ************* 64bit operations ************ }

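{ get_64bit_ops maps a 64-bit operation onto the instruction pair used for
  the low and the high dword; for addition and subtraction the second
  instruction (ADC/SBB) consumes the carry produced by the first one }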
procedure tcg64f386.get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
begin
  case op of
    OP_ADD :
      begin
        op1:=A_ADD;
        op2:=A_ADC;
      end;
    OP_SUB :
      begin
        op1:=A_SUB;
        op2:=A_SBB;
      end;
    OP_XOR :
      begin
        op1:=A_XOR;
        op2:=A_XOR;
      end;
    OP_OR :
      begin
        op1:=A_OR;
        op2:=A_OR;
      end;
    OP_AND :
      begin
        op1:=A_AND;
        op2:=A_AND;
      end;
    else
      internalerror(200203241);
  end;
end;

procedure tcg64f386.a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);
var
  op1,op2 : TAsmOp;
  tempref : treference;
begin
  if not(op in [OP_NEG,OP_NOT]) then
    begin
      get_64bit_ops(op,op1,op2);
      tempref:=ref;
      tcgx86(cg).make_simple_ref(list,tempref);
      if op in [OP_ADD,OP_SUB] then
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
      list.concat(taicpu.op_ref_reg(op1,S_L,tempref,reg.reglo));
      inc(tempref.offset,4);
      list.concat(taicpu.op_ref_reg(op2,S_L,tempref,reg.reghi));
      if op in [OP_ADD,OP_SUB] then
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
    end
  else
    begin
      a_load64_ref_reg(list,ref,reg);
      a_op64_reg_reg(list,op,size,reg,reg);
    end;
end;

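{ a_op64_reg_ref applies a 64-bit operation with a register source to a
  memory destination: NOT and NEG are performed in place, shifts load the
  count into ECX and combine the halves with SHLD/SHRD, and the remaining
  operations use the instruction pair returned by get_64bit_ops }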
procedure tcg64f386.a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);
var
  op1,op2 : TAsmOp;
  tempref : treference;
  tmpreg: TRegister;
  l1, l2: TAsmLabel;
begin
  case op of
    OP_NOT:
      begin
        tempref:=ref;
        tcgx86(cg).make_simple_ref(list,tempref);
        list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
        inc(tempref.offset,4);
        list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
      end;
    OP_NEG:
      begin
        tempref:=ref;
        tcgx86(cg).make_simple_ref(list,tempref);
        inc(tempref.offset,4);
        list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        dec(tempref.offset,4);
        list.concat(taicpu.op_ref(A_NEG,S_L,tempref));
        inc(tempref.offset,4);
        list.concat(taicpu.op_const_ref(A_SBB,S_L,-1,tempref));
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
      end;
    OP_SHR,OP_SHL,OP_SAR:
      begin
        { load the right operand (the shift count) into a register }
        cg.getcpuregister(list,NR_ECX);
        cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,NR_ECX);
        tempref:=ref;
        tcgx86(cg).make_simple_ref(list,tempref);
        { the shift instructions only handle counts below 32, }
        { so we have to do some tricks here }
        current_asmdata.getjumplabel(l1);
        current_asmdata.getjumplabel(l2);
        list.Concat(taicpu.op_const_reg(A_TEST,S_L,32,NR_ECX));
        cg.a_jmp_flags(list,F_E,l1);
        tmpreg:=cg.getintregister(list,OS_32);
        case op of
          OP_SHL:
            begin
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
              inc(tempref.offset,4);
              cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
              dec(tempref.offset,4);
              cg.a_load_const_ref(list,OS_32,0,tempref);
              cg.a_jmp_always(list,l2);
              cg.a_label(list,l1);
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              inc(tempref.offset,4);
              list.Concat(taicpu.op_reg_reg_ref(A_SHLD,S_L,NR_CL,tmpreg,tempref));
              dec(tempref.offset,4);
              if cs_opt_size in current_settings.optimizerswitches then
                list.concat(taicpu.op_reg_ref(A_SHL,S_L,NR_CL,tempref))
              else
                begin
                  list.concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
                  cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                end;
            end;
          OP_SHR:
            begin
              inc(tempref.offset,4);
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
              dec(tempref.offset,4);
              cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
              inc(tempref.offset,4);
              cg.a_load_const_ref(list,OS_32,0,tempref);
              cg.a_jmp_always(list,l2);
              cg.a_label(list,l1);
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              dec(tempref.offset,4);
              list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
              inc(tempref.offset,4);
              if cs_opt_size in current_settings.optimizerswitches then
                list.concat(taicpu.op_reg_ref(A_SHR,S_L,NR_CL,tempref))
              else
                begin
                  list.concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
                  cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                end;
            end;
          OP_SAR:
            begin
              inc(tempref.offset,4);
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
              dec(tempref.offset,4);
              cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
              inc(tempref.offset,4);
              list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
              cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
              cg.a_jmp_always(list,l2);
              cg.a_label(list,l1);
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              dec(tempref.offset,4);
              list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
              inc(tempref.offset,4);
              if cs_opt_size in current_settings.optimizerswitches then
                list.concat(taicpu.op_reg_ref(A_SAR,S_L,NR_CL,tempref))
              else
                begin
                  list.concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
                  cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                end;
            end;
          else
            internalerror(2017041801);
        end;
        cg.a_label(list,l2);
        cg.ungetcpuregister(list,NR_ECX);
        exit;
      end;
    else
      begin
        get_64bit_ops(op,op1,op2);
        tempref:=ref;
        tcgx86(cg).make_simple_ref(list,tempref);
        if op in [OP_ADD,OP_SUB] then
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.concat(taicpu.op_reg_ref(op1,S_L,reg.reglo,tempref));
        inc(tempref.offset,4);
        list.concat(taicpu.op_reg_ref(op2,S_L,reg.reghi,tempref));
        if op in [OP_ADD,OP_SUB] then
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
      end;
  end;
end;

procedure tcg64f386.a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);
var
  op1,op2 : TAsmOp;
  l1, l2: TAsmLabel;
begin
  case op of
    OP_NEG :
      begin
        if (regsrc.reglo<>regdst.reglo) then
          a_load64_reg_reg(list,regsrc,regdst);
        list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.concat(taicpu.op_reg(A_NEG,S_L,regdst.reglo));
        list.concat(taicpu.op_const_reg(A_SBB,S_L,-1,regdst.reghi));
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        exit;
      end;
    OP_NOT :
      begin
        if (regsrc.reglo<>regdst.reglo) then
          a_load64_reg_reg(list,regsrc,regdst);
        list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
        list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reglo));
        exit;
      end;
    OP_SHR,OP_SHL,OP_SAR:
      begin
        { load the right operand (the shift count) into a register }
        cg.getcpuregister(list,NR_ECX);
        cg.a_load_reg_reg(list,OS_32,OS_32,regsrc.reglo,NR_ECX);
        { the shift instructions only handle counts below 32, }
        { so we have to do some tricks here }
        current_asmdata.getjumplabel(l1);
        current_asmdata.getjumplabel(l2);
        list.Concat(taicpu.op_const_reg(A_TEST,S_L,32,NR_ECX));
        cg.a_jmp_flags(list,F_E,l1);
        case op of
          OP_SHL:
            begin
              list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
              cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reglo,regdst.reghi);
              list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reglo,regdst.reglo));
              cg.a_jmp_always(list,l2);
              cg.a_label(list,l1);
              list.Concat(taicpu.op_reg_reg_reg(A_SHLD,S_L,NR_CL,regdst.reglo,regdst.reghi));
              list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
            end;
          OP_SHR:
            begin
              list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
              cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
              list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reghi,regdst.reghi));
              cg.a_jmp_always(list,l2);
              cg.a_label(list,l1);
              list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
              list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
            end;
          OP_SAR:
            begin
              cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
              list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reglo));
              list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,regdst.reghi));
              cg.a_jmp_always(list,l2);
              cg.a_label(list,l1);
              list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
              list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reghi));
            end;
          else
            internalerror(2017041801);
        end;
        cg.a_label(list,l2);
        cg.ungetcpuregister(list,NR_ECX);
        exit;
      end;
  end;
  get_64bit_ops(op,op1,op2);
  if op in [OP_ADD,OP_SUB] then
    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  list.concat(taicpu.op_reg_reg(op1,S_L,regsrc.reglo,regdst.reglo));
  list.concat(taicpu.op_reg_reg(op2,S_L,regsrc.reghi,regdst.reghi));
  if op in [OP_ADD,OP_SUB] then
    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
end;

procedure tcg64f386.a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);
var
  op1,op2 : TAsmOp;
begin
  case op of
    OP_AND,OP_OR,OP_XOR:
      begin
        cg.a_op_const_reg(list,op,OS_32,tcgint(lo(value)),reg.reglo);
        cg.a_op_const_reg(list,op,OS_32,tcgint(hi(value)),reg.reghi);
      end;
    OP_ADD, OP_SUB:
      begin
        // can't use a_op_const_reg here, because it may emit inc/dec, which do not set the carry flag
        get_64bit_ops(op,op1,op2);
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.concat(taicpu.op_const_reg(op1,S_L,aint(lo(value)),reg.reglo));
        list.concat(taicpu.op_const_reg(op2,S_L,aint(hi(value)),reg.reghi));
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
      end;
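    { constant shift counts: a count of 1 can use add/adc or rcl/rcr, counts
      above 31 move one dword into the other and shift the remainder,
      anything else is done with shld/shrd }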
    OP_SHR,OP_SHL,OP_SAR:
      begin
        value:=value and 63;
        if value<>0 then
          begin
            if (value=1) and (op=OP_SHL) and
               (current_settings.optimizecputype<=cpu_486) and
               not (cs_opt_size in current_settings.optimizerswitches) then
              begin
                cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                list.concat(taicpu.op_reg_reg(A_ADD,S_L,reg.reglo,reg.reglo));
                list.concat(taicpu.op_reg_reg(A_ADC,S_L,reg.reghi,reg.reghi));
                cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
              end
            else if (value=1) and (cs_opt_size in current_settings.optimizerswitches) then
              case op of
                OP_SHR:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
                    list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
                OP_SHL:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
                    list.concat(taicpu.op_const_reg(A_RCL,S_L,value,reg.reghi));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
                OP_SAR:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
                    list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
              end
            else if value>31 then
              case op of
                OP_SAR:
                  begin
                    cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
                    list.concat(taicpu.op_const_reg(A_SAR,S_L,31,reg.reghi));
                    if (value and 31)<>0 then
                      list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,reg.reglo));
                  end;
                OP_SHR:
                  begin
                    cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
                    list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reghi,reg.reghi));
                    if (value and 31)<>0 then
                      list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,reg.reglo));
                  end;
                OP_SHL:
                  begin
                    cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,reg.reghi);
                    list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reglo,reg.reglo));
                    if (value and 31)<>0 then
                      list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,reg.reghi));
                  end;
                else
                  internalerror(2017041201);
              end
            else
              case op of
                OP_SAR:
                  begin
                    list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
                    list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
                  end;
                OP_SHR:
                  begin
                    list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
                    list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
                  end;
                OP_SHL:
                  begin
                    list.concat(taicpu.op_const_reg_reg(A_SHLD,S_L,value,reg.reglo,reg.reghi));
                    list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
                  end;
                else
                  internalerror(2017041201);
              end;
          end;
      end;
    else
      internalerror(200204021);
  end;
end;

procedure tcg64f386.a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);
var
  op1,op2 : TAsmOp;
  tempref : treference;
  tmpreg: TRegister;
begin
  tempref:=ref;
  tcgx86(cg).make_simple_ref(list,tempref);
  case op of
    OP_AND,OP_OR,OP_XOR:
      begin
        cg.a_op_const_ref(list,op,OS_32,tcgint(lo(value)),tempref);
        inc(tempref.offset,4);
        cg.a_op_const_ref(list,op,OS_32,tcgint(hi(value)),tempref);
      end;
    OP_ADD, OP_SUB:
      begin
        get_64bit_ops(op,op1,op2);
        // can't use a_op_const_ref, because it may emit inc/dec, which do not set the carry flag
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.concat(taicpu.op_const_ref(op1,S_L,aint(lo(value)),tempref));
        inc(tempref.offset,4);
        list.concat(taicpu.op_const_ref(op2,S_L,aint(hi(value)),tempref));
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
      end;
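    { the same three constant shift count cases as in a_op64_const_reg,
      but applied directly to the memory operand }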
    OP_SHR,OP_SHL,OP_SAR:
      begin
        value:=value and 63;
        if value<>0 then
          begin
            if value=1 then
              case op of
                OP_SHR:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    inc(tempref.offset,4);
                    list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref));
                    dec(tempref.offset,4);
                    list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
                OP_SHL:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref));
                    inc(tempref.offset,4);
                    list.concat(taicpu.op_const_ref(A_RCL,S_L,value,tempref));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
                OP_SAR:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    inc(tempref.offset,4);
                    list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
                    dec(tempref.offset,4);
                    list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
              end
            else if value>31 then
              case op of
                OP_SHR,OP_SAR:
                  begin
                    tmpreg:=cg.getintregister(list,OS_32);
                    inc(tempref.offset,4);
                    cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                    if (value and 31)<>0 then
                      if op=OP_SHR then
                        list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,tmpreg))
                      else
                        list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,tmpreg));
                    dec(tempref.offset,4);
                    cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                    inc(tempref.offset,4);
                    if op=OP_SHR then
                      cg.a_load_const_ref(list,OS_32,0,tempref)
                    else
                      begin
                        list.concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
                        cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                      end;
                  end;
                OP_SHL:
                  begin
                    tmpreg:=cg.getintregister(list,OS_32);
                    cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                    if (value and 31)<>0 then
                      list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,tmpreg));
                    inc(tempref.offset,4);
                    cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                    dec(tempref.offset,4);
                    cg.a_load_const_ref(list,OS_32,0,tempref);
                  end;
                else
                  internalerror(2017041801);
              end
            else
              case op of
                OP_SHR,OP_SAR:
                  begin
                    tmpreg:=cg.getintregister(list,OS_32);
                    inc(tempref.offset,4);
                    cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                    dec(tempref.offset,4);
                    list.concat(taicpu.op_const_reg_ref(A_SHRD,S_L,value,tmpreg,tempref));
                    inc(tempref.offset,4);
                    if cs_opt_size in current_settings.optimizerswitches then
                      begin
                        if op=OP_SHR then
                          list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref))
                        else
                          list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
                      end
                    else
                      begin
                        if op=OP_SHR then
                          list.concat(taicpu.op_const_reg(A_SHR,S_L,value,tmpreg))
                        else
                          list.concat(taicpu.op_const_reg(A_SAR,S_L,value,tmpreg));
                        cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                      end;
                  end;
                OP_SHL:
                  begin
                    tmpreg:=cg.getintregister(list,OS_32);
                    cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                    inc(tempref.offset,4);
                    list.concat(taicpu.op_const_reg_ref(A_SHLD,S_L,value,tmpreg,tempref));
                    dec(tempref.offset,4);
                    if cs_opt_size in current_settings.optimizerswitches then
                      list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref))
                    else
                      begin
                        list.concat(taicpu.op_const_reg(A_SHL,S_L,value,tmpreg));
                        cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                      end;
                  end;
                else
                  internalerror(2017041201);
              end;
          end;
      end;
    else
      internalerror(200204022);
  end;
end;

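{ create_codegen instantiates the i386 code generator and the 64-bit helper
  code generator used by the rest of the compiler }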
procedure create_codegen;
begin
  cg := tcg386.create;
  cg64 := tcg64f386.create;
end;

end.
  1077. end.