{
    Copyright (c) 1998-2002 by Florian Klaempfl

    This unit implements the code generator for the i386

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit cgcpu;

{$i fpcdefs.inc}

interface

uses
  globtype,
  cgbase,cgobj,cg64f32,cgx86,
  aasmbase,aasmtai,aasmdata,aasmcpu,
  cpubase,parabase,cgutils,
  symconst,symdef,symsym;

type
  tcg386 = class(tcgx86)
    procedure init_register_allocators;override;

    { passing parameter using push instead of mov }
    procedure a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);override;
    procedure a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);override;
    procedure a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);override;
    procedure a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);override;

    procedure g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);override;

    procedure g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
    procedure g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);

    procedure g_maybe_got_init(list: TAsmList); override;
  end;

  tcg64f386 = class(tcg64f32)
    procedure a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);override;
    procedure a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);override;
    procedure a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);override;
    procedure a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);override;
    procedure a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);override;
    procedure a_op64_ref(list : TAsmList;op:TOpCG;size : tcgsize;const ref: treference);override;
  private
    procedure get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
  end;

procedure create_codegen;

implementation

uses
  globals,verbose,systems,cutils,
  paramgr,procinfo,fmodule,
  rgcpu,rgx86,cpuinfo;

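{ Returns true if the parameter can be passed with PUSH instructions: this
  requires that no fixed stack layout is in use and that the parameter
  location is a reference relative to the stack pointer. }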
function use_push(const cgpara:tcgpara):boolean;
begin
  result:=(not paramanager.use_fixed_stack) and
          assigned(cgpara.location) and
          (cgpara.location^.loc=LOC_REFERENCE) and
          (cgpara.location^.reference.index=NR_STACK_POINTER_REG);
end;

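{ EBP is only handed to the register allocator when -OoUSEEBP is enabled and
  the current routine does not use it as frame pointer; otherwise it stays
  reserved. }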
procedure tcg386.init_register_allocators;
begin
  inherited init_register_allocators;
  if (cs_useebp in current_settings.optimizerswitches) and assigned(current_procinfo) and (current_procinfo.framepointer<>NR_EBP) then
    rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI,RS_EBP],first_int_imreg,[])
  else
    rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI],first_int_imreg,[RS_EBP]);
  rg[R_MMXREGISTER]:=trgcpu.create(R_MMXREGISTER,R_SUBNONE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
  rg[R_MMREGISTER]:=trgcpu.create(R_MMREGISTER,R_SUBWHOLE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
  rgfpu:=Trgx86fpu.create;
end;

procedure tcg386.a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);
var
  pushsize : tcgsize;
begin
  check_register_size(size,r);
  if use_push(cgpara) then
    begin
      cgpara.check_simple_location;
      if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
        pushsize:=cgpara.location^.size
      else
        pushsize:=int_cgsize(cgpara.alignment);
      list.concat(taicpu.op_reg(A_PUSH,tcgsize2opsize[pushsize],makeregsize(list,r,pushsize)));
    end
  else
    inherited a_load_reg_cgpara(list,size,r,cgpara);
end;


procedure tcg386.a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);
var
  pushsize : tcgsize;
begin
  if use_push(cgpara) then
    begin
      cgpara.check_simple_location;
      if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
        pushsize:=cgpara.location^.size
      else
        pushsize:=int_cgsize(cgpara.alignment);
      list.concat(taicpu.op_const(A_PUSH,tcgsize2opsize[pushsize],a));
    end
  else
    inherited a_load_const_cgpara(list,size,a,cgpara);
end;


procedure tcg386.a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);
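  { Pushes the location chain of a parameter that lives in memory. The
    locations are pushed in reverse order (recursion first), so the first
    location ends up at the lowest stack address. }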
  procedure pushdata(paraloc:pcgparalocation;ofs:tcgint);
  var
    pushsize : tcgsize;
    opsize : topsize;
    tmpreg : tregister;
    href : treference;
  begin
    if not assigned(paraloc) then
      exit;
    if (paraloc^.loc<>LOC_REFERENCE) or
       (paraloc^.reference.index<>NR_STACK_POINTER_REG) or
       (tcgsize2size[paraloc^.size]>sizeof(aint)) then
      internalerror(200501162);
    { Pushes are needed in reverse order, add the size of the
      current location to the offset where to load from. This
      prevents wrong calculations for the last location when
      the size is not a power of 2 }
    if assigned(paraloc^.next) then
      pushdata(paraloc^.next,ofs+tcgsize2size[paraloc^.size]);
    { Push the data starting at ofs }
    href:=r;
    inc(href.offset,ofs);
    if tcgsize2size[paraloc^.size]>cgpara.alignment then
      pushsize:=paraloc^.size
    else
      pushsize:=int_cgsize(cgpara.alignment);
    opsize:=TCgsize2opsize[pushsize];
    { for go32v2 we obtain OS_F32,
      but pushs is not valid, we need pushl }
    if opsize=S_FS then
      opsize:=S_L;
    if tcgsize2size[paraloc^.size]<cgpara.alignment then
      begin
        tmpreg:=getintregister(list,pushsize);
        a_load_ref_reg(list,paraloc^.size,pushsize,href,tmpreg);
        list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
      end
    else
      begin
        make_simple_ref(list,href);
        list.concat(taicpu.op_ref(A_PUSH,opsize,href));
      end;
  end;

var
  len : tcgint;
  href : treference;
begin
  { cgpara.size=OS_NO requires a copy on the stack }
  if use_push(cgpara) then
    begin
      { Record copy? }
      if (cgpara.size=OS_NO) or (size=OS_NO) then
        begin
          cgpara.check_simple_location;
          len:=align(cgpara.intsize,cgpara.alignment);
          g_stackpointer_alloc(list,len);
          reference_reset_base(href,NR_STACK_POINTER_REG,0,ctempposinvalid,4,[]);
          g_concatcopy(list,r,href,len);
        end
      else
        begin
          if tcgsize2size[cgpara.size]<>tcgsize2size[size] then
            internalerror(200501161);
          if (cgpara.size=OS_F64) then
            begin
              href:=r;
              make_simple_ref(list,href);
              inc(href.offset,4);
              list.concat(taicpu.op_ref(A_PUSH,S_L,href));
              dec(href.offset,4);
              list.concat(taicpu.op_ref(A_PUSH,S_L,href));
            end
          else
            { We need to push the data in reverse order,
              therefore we use a recursive algorithm }
            pushdata(cgpara.location,0);
        end
    end
  else
    begin
      href:=r;
      make_simple_ref(list,href);
      inherited a_load_ref_cgpara(list,size,href,cgpara);
    end;
end;

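{ Pushes the address of a reference as a parameter. A plain symbol, a lone
  base register or a lone index register can be pushed directly; PIC and
  Darwin-style indirect symbols are loaded into a temporary register first. }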
procedure tcg386.a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);
var
  tmpreg : tregister;
  opsize : topsize;
  tmpref,dirref : treference;
begin
  dirref:=r;
  { this could probably be done in a more optimized way, but for now this
    is sufficient }
  make_direct_ref(list,dirref);
  with dirref do
    begin
      if use_push(cgpara) then
        begin
          cgpara.check_simple_location;
          opsize:=tcgsize2opsize[OS_ADDR];
          if (segment=NR_NO) and (base=NR_NO) and (index=NR_NO) then
            begin
              if assigned(symbol) then
                begin
                  if (target_info.system in [system_i386_darwin,system_i386_iphonesim]) and
                     ((dirref.symbol.bind in [AB_EXTERNAL,AB_WEAK_EXTERNAL]) or
                      (cs_create_pic in current_settings.moduleswitches)) then
                    begin
                      tmpreg:=getaddressregister(list);
                      a_loadaddr_ref_reg(list,dirref,tmpreg);
                      list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
                    end
                  else if cs_create_pic in current_settings.moduleswitches then
                    begin
                      if offset<>0 then
                        begin
                          tmpreg:=getaddressregister(list);
                          a_loadaddr_ref_reg(list,dirref,tmpreg);
                          list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
                        end
                      else
                        begin
                          reference_reset_symbol(tmpref,dirref.symbol,0,sizeof(pint),[]);
                          tmpref.refaddr:=addr_pic;
                          tmpref.base:=current_procinfo.got;
                          include(current_procinfo.flags,pi_needs_got);
                          list.concat(taicpu.op_ref(A_PUSH,S_L,tmpref));
                        end
                    end
                  else
                    list.concat(Taicpu.Op_sym_ofs(A_PUSH,opsize,symbol,offset));
                end
              else
                list.concat(Taicpu.Op_const(A_PUSH,opsize,offset));
            end
          else if (segment=NR_NO) and (base=NR_NO) and (index<>NR_NO) and
                  (offset=0) and (scalefactor=0) and (symbol=nil) then
            list.concat(Taicpu.Op_reg(A_PUSH,opsize,index))
          else if (segment=NR_NO) and (base<>NR_NO) and (index=NR_NO) and
                  (offset=0) and (symbol=nil) then
            list.concat(Taicpu.Op_reg(A_PUSH,opsize,base))
          else
            begin
              tmpreg:=getaddressregister(list);
              a_loadaddr_ref_reg(list,dirref,tmpreg);
              list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
            end;
        end
      else
        inherited a_loadaddr_ref_cgpara(list,dirref,cgpara);
    end;
end;


procedure tcg386.g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);
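  { Releases local stack space by adding a to ESP; LEA is used instead of ADD
    so the flags are left untouched. }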
  procedure increase_sp(a : tcgint);
  var
    href : treference;
  begin
    reference_reset_base(href,NR_STACK_POINTER_REG,a,ctempposinvalid,0,[]);
    { normally, lea is a better choice than an add }
    list.concat(Taicpu.op_ref_reg(A_LEA,TCGSize2OpSize[OS_ADDR],href,NR_STACK_POINTER_REG));
  end;

begin
  { MMX needs to call EMMS }
  if assigned(rg[R_MMXREGISTER]) and
     (rg[R_MMXREGISTER].uses_registers) then
    list.concat(Taicpu.op_none(A_EMMS,S_NO));

  { remove stackframe }
  if not(nostackframe) and
     { we do not need an exit stack frame when we never return
       * the final ret is left so the peephole optimizer can easily do call/ret -> jmp or call conversions
       * the entry stack frame must still be generated normally because the subroutine could still be left by
         an exception and then the unwinding code might need to restore the registers stored by the entry code
     }
     not(po_noreturn in current_procinfo.procdef.procoptions) then
    begin
      if (current_procinfo.framepointer=NR_STACK_POINTER_REG) or
         (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
        begin
          if current_procinfo.final_localsize<>0 then
            increase_sp(current_procinfo.final_localsize);
          if (not paramanager.use_fixed_stack) then
            internal_restore_regs(list,true);
          if (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
            list.concat(Taicpu.op_reg(A_POP,tcgsize2opsize[OS_ADDR],NR_FRAME_POINTER_REG));
          current_asmdata.asmcfi.cfa_def_cfa_offset(list,sizeof(pint));
        end
      else
        begin
          if (not paramanager.use_fixed_stack) then
            internal_restore_regs(list,not (pi_has_stack_allocs in current_procinfo.flags));
          generate_leave(list);
        end;
      list.concat(tai_regalloc.dealloc(current_procinfo.framepointer,nil));
    end;

  if pi_uses_ymm in current_procinfo.flags then
    list.Concat(taicpu.op_none(A_VZEROUPPER));

  { return from proc }
  if po_interrupt in current_procinfo.procdef.procoptions then
    begin
      if assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
         (current_procinfo.procdef.funcretloc[calleeside].location^.loc=LOC_REGISTER) then
        begin
          if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.register)=RS_EAX) then
            list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
          else
            internalerror(2010053001);
        end
      else
        list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EAX));
      list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EBX));
      list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ECX));
      if (current_procinfo.procdef.funcretloc[calleeside].size in [OS_64,OS_S64]) and
         assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
         assigned(current_procinfo.procdef.funcretloc[calleeside].location^.next) and
         (current_procinfo.procdef.funcretloc[calleeside].location^.next^.loc=LOC_REGISTER) then
        begin
          if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.next^.register)=RS_EDX) then
            list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
          else
            internalerror(2010053002);
        end
      else
        list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDX));
      list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ESI));
      list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDI));
      { .... also the segment registers }
      list.concat(Taicpu.Op_reg(A_POP,S_W,NR_DS));
      list.concat(Taicpu.Op_reg(A_POP,S_W,NR_ES));
      list.concat(Taicpu.Op_reg(A_POP,S_W,NR_FS));
      list.concat(Taicpu.Op_reg(A_POP,S_W,NR_GS));
      { this restores the flags }
      if current_procinfo.framepointer<>NR_STACK_POINTER_REG then
        list.concat(tai_regalloc.dealloc(NR_STACK_POINTER_REG,nil));
      list.concat(Taicpu.Op_none(A_IRET,S_NO));
    end
  { Routines with the poclearstack flag set use only a ret }
  else if (current_procinfo.procdef.proccalloption in clearstack_pocalls) and
          (not paramanager.use_fixed_stack) then
    begin
      { complex return values are removed from stack in C code PM }
      { but not on win32 }
      { and not for safecall with hidden exceptions, because the result }
      { which contains the exception is passed in EAX }
      if current_procinfo.framepointer<>NR_STACK_POINTER_REG then
        list.concat(tai_regalloc.dealloc(NR_STACK_POINTER_REG,nil));
      if ((target_info.system <> system_i386_win32) or
          (target_info.abi=abi_old_win32_gnu)) and
         not ((current_procinfo.procdef.proccalloption = pocall_safecall) and
              (tf_safecall_exceptions in target_info.flags)) and
         paramanager.ret_in_param(current_procinfo.procdef.returndef,
                                  current_procinfo.procdef) then
        list.concat(Taicpu.Op_const(A_RET,S_W,sizeof(aint)))
      else
        list.concat(Taicpu.Op_none(A_RET,S_NO));
    end
  { ... also routines with parasize=0 }
  else if (parasize=0) then
    begin
      if current_procinfo.framepointer<>NR_STACK_POINTER_REG then
        list.concat(tai_regalloc.dealloc(NR_STACK_POINTER_REG,nil));
      list.concat(Taicpu.Op_none(A_RET,S_NO))
    end
  else
    begin
      { parameters are limited to 65535 bytes because ret allows only imm16 }
      if (parasize>65535) then
        CGMessage(cg_e_parasize_too_big);
      if current_procinfo.framepointer<>NR_STACK_POINTER_REG then
        list.concat(tai_regalloc.dealloc(NR_STACK_POINTER_REG,nil));
      list.concat(Taicpu.Op_const(A_RET,S_W,parasize));
    end;
end;

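{ Makes a local copy of an open array value parameter: the number of elements
  is taken from lenloc, the needed stack space is allocated (with page-wise
  probing on win32), the data is copied with REP MOVSB/MOVSW/MOVSD and the
  address of the copy is returned in destreg. }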
procedure tcg386.g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
var
  power : longint;
  opsize : topsize;
{$ifndef __NOWINPECOFF__}
  again,ok : tasmlabel;
{$endif}
begin
  { get stack space }
  getcpuregister(list,NR_EDI);
  a_load_loc_reg(list,OS_INT,lenloc,NR_EDI);
  list.concat(Taicpu.op_reg(A_INC,S_L,NR_EDI));
  { Now EDI contains (high+1). }

  { special case handling for elesize=8, 4 and 2:
    set ECX = (high+1) instead of ECX = (high+1)*elesize.
    In the case of elesize=4 and 2, this allows us to avoid the SHR later.
    In the case of elesize=8, we can later use a SHL ECX, 1 instead of
    SHR ECX, 2 which is one byte shorter. }
  if (elesize=8) or (elesize=4) or (elesize=2) then
    begin
      { Now EDI contains (high+1). Copy it to ECX for later use. }
      getcpuregister(list,NR_ECX);
      list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
    end;
  { EDI := EDI * elesize }
  if (elesize<>1) then
    begin
      if ispowerof2(elesize, power) then
        list.concat(Taicpu.op_const_reg(A_SHL,S_L,power,NR_EDI))
      else
        list.concat(Taicpu.op_const_reg(A_IMUL,S_L,elesize,NR_EDI));
    end;
  if (elesize<>8) and (elesize<>4) and (elesize<>2) then
    begin
      { Now EDI contains (high+1)*elesize. Copy it to ECX for later use. }
      getcpuregister(list,NR_ECX);
      list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
    end;
{$ifndef __NOWINPECOFF__}
  { windows guards only a few pages for stack growing, }
  { so we have to access every page first }
  if target_info.system=system_i386_win32 then
    begin
      current_asmdata.getjumplabel(again);
      current_asmdata.getjumplabel(ok);
      a_label(list,again);
      cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
      list.concat(Taicpu.op_const_reg(A_CMP,S_L,winstackpagesize,NR_EDI));
      a_jmp_cond(list,OC_B,ok);
      cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
      list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize-4,NR_ESP));
      list.concat(Taicpu.op_reg(A_PUSH,S_L,NR_EDI));
      list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize,NR_EDI));
      a_jmp_always(list,again);
      a_label(list,ok);
    end;
{$endif __NOWINPECOFF__}
  { If we were probing pages, EDI=(size mod pagesize) and ESP is decremented
    by (size div pagesize)*pagesize, otherwise EDI=size.
    Either way, subtracting EDI from ESP will set ESP to desired final value. }
  list.concat(Taicpu.op_reg_reg(A_SUB,S_L,NR_EDI,NR_ESP));
  { align stack on 4 bytes }
  list.concat(Taicpu.op_const_reg(A_AND,S_L,aint($fffffff4),NR_ESP));
  { load destination, don't use a_load_reg_reg, that will add a move instruction
    that can confuse the reg allocator }
  list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,NR_EDI));

  { Allocate ESI and load it with source }
  getcpuregister(list,NR_ESI);
  a_loadaddr_ref_reg(list,ref,NR_ESI);

  { calculate size }
  opsize:=S_B;
  if elesize=8 then
    begin
      opsize:=S_L;
      { ECX is number of qwords, convert to dwords }
      list.concat(Taicpu.op_const_reg(A_SHL,S_L,1,NR_ECX))
    end
  else if elesize=4 then
    begin
      opsize:=S_L;
      { ECX is already number of dwords, so no need to SHL/SHR }
    end
  else if elesize=2 then
    begin
      opsize:=S_W;
      { ECX is already number of words, so no need to SHL/SHR }
    end
  else
    if (elesize and 3)=0 then
      begin
        opsize:=S_L;
        { ECX is number of bytes, convert to dwords }
        list.concat(Taicpu.op_const_reg(A_SHR,S_L,2,NR_ECX))
      end
    else
      if (elesize and 1)=0 then
        begin
          opsize:=S_W;
          { ECX is number of bytes, convert to words }
          list.concat(Taicpu.op_const_reg(A_SHR,S_L,1,NR_ECX))
        end;

  if ts_cld in current_settings.targetswitches then
    list.concat(Taicpu.op_none(A_CLD,S_NO));
  list.concat(Taicpu.op_none(A_REP,S_NO));
  case opsize of
    S_B : list.concat(Taicpu.Op_none(A_MOVSB,S_NO));
    S_W : list.concat(Taicpu.Op_none(A_MOVSW,S_NO));
    S_L : list.concat(Taicpu.Op_none(A_MOVSD,S_NO));
    else
      internalerror(2019050901);
  end;
  ungetcpuregister(list,NR_EDI);
  ungetcpuregister(list,NR_ECX);
  ungetcpuregister(list,NR_ESI);

  { patch the new address, but don't use a_load_reg_reg, that will add a move instruction
    that can confuse the reg allocator }
  list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,destreg));
  include(current_procinfo.flags,pi_has_stack_allocs);
end;


procedure tcg386.g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);
begin
  { Nothing to release }
end;

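{ Loads the GOT pointer for PIC code when the routine needs it. On ELF-style
  targets this goes through the fpc_geteipasebx/fpc_geteipasecx helpers plus
  an add of _GLOBAL_OFFSET_TABLE_; on Darwin a call/pop sequence is used. }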
procedure tcg386.g_maybe_got_init(list: TAsmList);
var
  i: longint;
  tmpreg: TRegister;
begin
  { allocate PIC register }
  if (tf_pic_uses_got in target_info.flags) and
     (pi_needs_got in current_procinfo.flags) then
    begin
      if not (target_info.system in [system_i386_darwin,system_i386_iphonesim]) then
        begin
          { Use ECX as a temp register by default }
          if current_procinfo.got = NR_EBX then
            tmpreg:=NR_EBX
          else
            tmpreg:=NR_ECX;
          { Allocate registers used for parameters to make sure they are
            never allocated during this PIC init code }
          for i:=0 to current_procinfo.procdef.paras.Count - 1 do
            with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
              if Loc in [LOC_REGISTER, LOC_CREGISTER] then
                begin
                  a_reg_alloc(list, register);
                  { If ECX is used for a parameter, use EBX as temp }
                  if getsupreg(register) = RS_ECX then
                    tmpreg:=NR_EBX;
                end;
          if tmpreg = NR_EBX then
            begin
              { Mark EBX as used in the proc }
              include(rg[R_INTREGISTER].used_in_proc,RS_EBX);
              current_module.requires_ebx_pic_helper:=true;
              a_call_name_static(list,'fpc_geteipasebx');
            end
          else
            begin
              current_module.requires_ecx_pic_helper:=true;
              a_call_name_static(list,'fpc_geteipasecx');
            end;
          list.concat(taicpu.op_sym_ofs_reg(A_ADD,S_L,current_asmdata.RefAsmSymbol('_GLOBAL_OFFSET_TABLE_',AT_DATA),0,tmpreg));
          list.concat(taicpu.op_reg_reg(A_MOV,S_L,tmpreg,current_procinfo.got));
          { Deallocate parameter registers }
          for i:=0 to current_procinfo.procdef.paras.Count - 1 do
            with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
              if Loc in [LOC_REGISTER, LOC_CREGISTER] then
                a_reg_dealloc(list, register);
        end
      else
        begin
          { call/pop is faster than call/ret/mov on Core Solo and later
            according to Apple's benchmarking -- and all Intel Macs
            have at least a Core Solo (furthermore, the i386 - Pentium 1
            don't have a return stack buffer) }
          a_call_name_static(list,current_procinfo.CurrGOTLabel.name);
          a_label(list,current_procinfo.CurrGotLabel);
          list.concat(taicpu.op_reg(A_POP,S_L,current_procinfo.got))
        end;
    end;
end;


{ ************* 64bit operations ************ }

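{ Maps a 64-bit operation onto the instruction pair used for the low and the
  high dword: ADD/ADC and SUB/SBB propagate the carry, the logical operations
  simply use the same instruction twice. }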
procedure tcg64f386.get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
begin
  case op of
    OP_ADD :
      begin
        op1:=A_ADD;
        op2:=A_ADC;
      end;
    OP_SUB :
      begin
        op1:=A_SUB;
        op2:=A_SBB;
      end;
    OP_XOR :
      begin
        op1:=A_XOR;
        op2:=A_XOR;
      end;
    OP_OR :
      begin
        op1:=A_OR;
        op2:=A_OR;
      end;
    OP_AND :
      begin
        op1:=A_AND;
        op2:=A_AND;
      end;
    else
      internalerror(2002032408);
  end;
end;

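{ Applies op with a 64-bit memory operand as source and a register pair as
  destination; the high dword lives at offset+4. NEG/NOT load the value first
  and delegate to a_op64_reg_reg. }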
procedure tcg64f386.a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);
var
  op1,op2 : TAsmOp;
  tempref : treference;
begin
  if not(op in [OP_NEG,OP_NOT]) then
    begin
      get_64bit_ops(op,op1,op2);
      tempref:=ref;
      tcgx86(cg).make_simple_ref(list,tempref);
      if op in [OP_ADD,OP_SUB] then
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
      list.concat(taicpu.op_ref_reg(op1,S_L,tempref,reg.reglo));
      inc(tempref.offset,4);
      list.concat(taicpu.op_ref_reg(op2,S_L,tempref,reg.reghi));
      if op in [OP_ADD,OP_SUB] then
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
    end
  else
    begin
      a_load64_ref_reg(list,ref,reg);
      a_op64_reg_reg(list,op,size,reg,reg);
    end;
end;

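{ Applies op with a register pair as source and a 64-bit memory operand as
  destination. Variable shifts load the count into CL and dispatch on
  TEST CL,32: counts below 32 use SHLD/SHRD, larger counts move one dword
  into the other and clear (or, for SAR, sign-extend) the remaining dword. }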
procedure tcg64f386.a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);
var
  op1,op2 : TAsmOp;
  tempref : treference;
  tmpreg: TRegister;
  l1, l2: TAsmLabel;
begin
  case op of
    OP_NOT,OP_NEG:
      inherited;
    OP_SHR,OP_SHL,OP_SAR:
      begin
        { load the shift count (right operand) into a register }
        cg.getcpuregister(list,NR_ECX);
        cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,NR_ECX);

        tempref:=ref;
        tcgx86(cg).make_simple_ref(list,tempref);

        { the damned shift instructions work only with counts below 32 }
        { so we have to do some tricks here }
        current_asmdata.getjumplabel(l1);
        current_asmdata.getjumplabel(l2);
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.Concat(taicpu.op_const_reg(A_TEST,S_B,32,NR_CL));
        cg.a_jmp_flags(list,F_E,l1);
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        tmpreg:=cg.getintregister(list,OS_32);
        case op of
          OP_SHL:
            begin
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
              inc(tempref.offset,4);
              cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
              dec(tempref.offset,4);
              cg.a_load_const_ref(list,OS_32,0,tempref);
              cg.a_jmp_always(list,l2);

              cg.a_label(list,l1);
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              inc(tempref.offset,4);
              list.Concat(taicpu.op_reg_reg_ref(A_SHLD,S_L,NR_CL,tmpreg,tempref));
              dec(tempref.offset,4);
              if cs_opt_size in current_settings.optimizerswitches then
                list.concat(taicpu.op_reg_ref(A_SHL,S_L,NR_CL,tempref))
              else
                begin
                  list.concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
                  cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                end;
            end;
          OP_SHR:
            begin
              inc(tempref.offset,4);
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
              dec(tempref.offset,4);
              cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
              inc(tempref.offset,4);
              cg.a_load_const_ref(list,OS_32,0,tempref);
              cg.a_jmp_always(list,l2);

              cg.a_label(list,l1);
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              dec(tempref.offset,4);
              list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
              inc(tempref.offset,4);
              if cs_opt_size in current_settings.optimizerswitches then
                list.concat(taicpu.op_reg_ref(A_SHR,S_L,NR_CL,tempref))
              else
                begin
                  list.concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
                  cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                end;
            end;
          OP_SAR:
            begin
              inc(tempref.offset,4);
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
              dec(tempref.offset,4);
              cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
              inc(tempref.offset,4);
              list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
              cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
              cg.a_jmp_always(list,l2);

              cg.a_label(list,l1);
              cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
              dec(tempref.offset,4);
              list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
              inc(tempref.offset,4);
              if cs_opt_size in current_settings.optimizerswitches then
                list.concat(taicpu.op_reg_ref(A_SAR,S_L,NR_CL,tempref))
              else
                begin
                  list.concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
                  cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                end;
            end;
          else
            internalerror(2017041801);
        end;
        cg.a_label(list,l2);
        cg.ungetcpuregister(list,NR_ECX);
        exit;
      end;
    else
      begin
        get_64bit_ops(op,op1,op2);
        tempref:=ref;
        tcgx86(cg).make_simple_ref(list,tempref);
        if op in [OP_ADD,OP_SUB] then
          cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.concat(taicpu.op_reg_ref(op1,S_L,reg.reglo,tempref));
        inc(tempref.offset,4);
        list.concat(taicpu.op_reg_ref(op2,S_L,reg.reghi,tempref));
        if op in [OP_ADD,OP_SUB] then
          cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
      end;
  end;
end;

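{ 64-bit op between two register pairs. NEG is done as NOT of the high dword
  followed by NEG/SBB, and variable shifts use the same TEST CL,32 dispatch
  between a SHLD/SHRD sequence and a plain move-and-shift as above. }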
procedure tcg64f386.a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);
var
  op1,op2 : TAsmOp;
  l1, l2: TAsmLabel;
begin
  case op of
    OP_NEG :
      begin
        if (regsrc.reglo<>regdst.reglo) then
          a_load64_reg_reg(list,regsrc,regdst);
        list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.concat(taicpu.op_reg(A_NEG,S_L,regdst.reglo));
        list.concat(taicpu.op_const_reg(A_SBB,S_L,-1,regdst.reghi));
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        exit;
      end;
    OP_NOT :
      begin
        if (regsrc.reglo<>regdst.reglo) then
          a_load64_reg_reg(list,regsrc,regdst);
        list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
        list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reglo));
        exit;
      end;
    OP_SHR,OP_SHL,OP_SAR:
      begin
        { load the shift count (right operand) into a register }
        cg.getcpuregister(list,NR_ECX);
        cg.a_load_reg_reg(list,OS_32,OS_32,regsrc.reglo,NR_ECX);

        { the damned shift instructions work only with counts below 32 }
        { so we have to do some tricks here }
        current_asmdata.getjumplabel(l1);
        current_asmdata.getjumplabel(l2);
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.Concat(taicpu.op_const_reg(A_TEST,S_B,32,NR_CL));
        cg.a_jmp_flags(list,F_E,l1);
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
        case op of
          OP_SHL:
            begin
              list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
              cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reglo,regdst.reghi);
              list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reglo,regdst.reglo));
              cg.a_jmp_always(list,l2);
              cg.a_label(list,l1);
              list.Concat(taicpu.op_reg_reg_reg(A_SHLD,S_L,NR_CL,regdst.reglo,regdst.reghi));
              list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
            end;
          OP_SHR:
            begin
              list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
              cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
              list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reghi,regdst.reghi));
              cg.a_jmp_always(list,l2);
              cg.a_label(list,l1);
              list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
              list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
            end;
          OP_SAR:
            begin
              cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
              list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reglo));
              list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,regdst.reghi));
              cg.a_jmp_always(list,l2);
              cg.a_label(list,l1);
              list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
              list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reghi));
            end;
          else
            internalerror(2017041802);
        end;
        cg.a_label(list,l2);
        cg.ungetcpuregister(list,NR_ECX);
        exit;
      end;
    else
      ;
  end;
  get_64bit_ops(op,op1,op2);
  if op in [OP_ADD,OP_SUB] then
    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  list.concat(taicpu.op_reg_reg(op1,S_L,regsrc.reglo,regdst.reglo));
  list.concat(taicpu.op_reg_reg(op2,S_L,regsrc.reghi,regdst.reghi));
  if op in [OP_ADD,OP_SUB] then
    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
end;

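{ 64-bit operation with a constant operand. Shift counts are reduced mod 64;
  counts of 32 or more move one dword into the other, smaller counts use
  SHLD/SHRD, and a shift left by 1 becomes ADD/ADC on 386/486-class targets. }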
procedure tcg64f386.a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);
var
  op1,op2 : TAsmOp;
begin
  case op of
    OP_AND,OP_OR,OP_XOR:
      begin
        cg.a_op_const_reg(list,op,OS_32,tcgint(lo(value)),reg.reglo);
        cg.a_op_const_reg(list,op,OS_32,tcgint(hi(value)),reg.reghi);
      end;
    OP_ADD, OP_SUB:
      begin
        // can't use a_op_const_reg because this may use dec/inc
        get_64bit_ops(op,op1,op2);
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.concat(taicpu.op_const_reg(op1,S_L,aint(lo(value)),reg.reglo));
        list.concat(taicpu.op_const_reg(op2,S_L,aint(hi(value)),reg.reghi));
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
      end;
    OP_SHR,OP_SHL,OP_SAR:
      begin
        value:=value and 63;
        if value<>0 then
          begin
            if (value=1) and (op=OP_SHL) and
               (current_settings.optimizecputype<=cpu_486) and
               not (cs_opt_size in current_settings.optimizerswitches) then
              begin
                cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                list.concat(taicpu.op_reg_reg(A_ADD,S_L,reg.reglo,reg.reglo));
                list.concat(taicpu.op_reg_reg(A_ADC,S_L,reg.reghi,reg.reghi));
                cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
              end
            else if (value=1) and (cs_opt_size in current_settings.optimizerswitches) then
              case op of
                OP_SHR:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
                    list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
                OP_SHL:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
                    list.concat(taicpu.op_const_reg(A_RCL,S_L,value,reg.reghi));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
                OP_SAR:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
                    list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
                else
                  internalerror(2019050902);
              end
            else if value>31 then
              case op of
                OP_SAR:
                  begin
                    cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
                    list.concat(taicpu.op_const_reg(A_SAR,S_L,31,reg.reghi));
                    if (value and 31)<>0 then
                      list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,reg.reglo));
                  end;
                OP_SHR:
                  begin
                    cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
                    list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reghi,reg.reghi));
                    if (value and 31)<>0 then
                      list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,reg.reglo));
                  end;
                OP_SHL:
                  begin
                    cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,reg.reghi);
                    list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reglo,reg.reglo));
                    if (value and 31)<>0 then
                      list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,reg.reghi));
                  end;
                else
                  internalerror(2017041201);
              end
            else
              case op of
                OP_SAR:
                  begin
                    list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
                    list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
                  end;
                OP_SHR:
                  begin
                    list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
                    list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
                  end;
                OP_SHL:
                  begin
                    list.concat(taicpu.op_const_reg_reg(A_SHLD,S_L,value,reg.reglo,reg.reghi));
                    list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
                  end;
                else
                  internalerror(2017041202);
              end;
          end;
      end;
    else
      internalerror(200204021);
  end;
end;

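{ Same constant handling as a_op64_const_reg, but the 64-bit operand is in
  memory: the low dword is at the reference itself, the high dword at
  offset+4. }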
procedure tcg64f386.a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);
var
  op1,op2 : TAsmOp;
  tempref : treference;
  tmpreg: TRegister;
begin
  tempref:=ref;
  tcgx86(cg).make_simple_ref(list,tempref);
  case op of
    OP_AND,OP_OR,OP_XOR:
      begin
        cg.a_op_const_ref(list,op,OS_32,aint(lo(value)),tempref);
        inc(tempref.offset,4);
        cg.a_op_const_ref(list,op,OS_32,aint(hi(value)),tempref);
      end;
    OP_ADD, OP_SUB:
      begin
        get_64bit_ops(op,op1,op2);
        // can't use a_op_const_ref because this may use dec/inc
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        list.concat(taicpu.op_const_ref(op1,S_L,aint(lo(value)),tempref));
        inc(tempref.offset,4);
        list.concat(taicpu.op_const_ref(op2,S_L,aint(hi(value)),tempref));
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
      end;
    OP_SHR,OP_SHL,OP_SAR:
      begin
        value:=value and 63;
        if value<>0 then
          begin
            if value=1 then
              case op of
                OP_SHR:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    inc(tempref.offset,4);
                    list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref));
                    dec(tempref.offset,4);
                    list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
                OP_SHL:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref));
                    inc(tempref.offset,4);
                    list.concat(taicpu.op_const_ref(A_RCL,S_L,value,tempref));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
                OP_SAR:
                  begin
                    cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
                    inc(tempref.offset,4);
                    list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
                    dec(tempref.offset,4);
                    list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
                    cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
                  end;
                else
                  internalerror(2019050903);
              end
            else if value>31 then
              case op of
                OP_SHR,OP_SAR:
                  begin
                    tmpreg:=cg.getintregister(list,OS_32);
                    inc(tempref.offset,4);
                    cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                    if (value and 31)<>0 then
                      if op=OP_SHR then
                        list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,tmpreg))
                      else
                        list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,tmpreg));
                    dec(tempref.offset,4);
                    cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                    inc(tempref.offset,4);
                    if op=OP_SHR then
                      cg.a_load_const_ref(list,OS_32,0,tempref)
                    else
                      begin
                        list.concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
                        cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                      end;
                  end;
                OP_SHL:
                  begin
                    tmpreg:=cg.getintregister(list,OS_32);
                    cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                    if (value and 31)<>0 then
                      list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,tmpreg));
                    inc(tempref.offset,4);
                    cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                    dec(tempref.offset,4);
                    cg.a_load_const_ref(list,OS_32,0,tempref);
                  end;
                else
                  internalerror(2017041803);
              end
            else
              case op of
                OP_SHR,OP_SAR:
                  begin
                    tmpreg:=cg.getintregister(list,OS_32);
                    inc(tempref.offset,4);
                    cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                    dec(tempref.offset,4);
                    list.concat(taicpu.op_const_reg_ref(A_SHRD,S_L,value,tmpreg,tempref));
                    inc(tempref.offset,4);
                    if cs_opt_size in current_settings.optimizerswitches then
                      begin
                        if op=OP_SHR then
                          list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref))
                        else
                          list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
                      end
                    else
                      begin
                        if op=OP_SHR then
                          list.concat(taicpu.op_const_reg(A_SHR,S_L,value,tmpreg))
                        else
                          list.concat(taicpu.op_const_reg(A_SAR,S_L,value,tmpreg));
                        cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                      end;
                  end;
                OP_SHL:
                  begin
                    tmpreg:=cg.getintregister(list,OS_32);
                    cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
                    inc(tempref.offset,4);
                    list.concat(taicpu.op_const_reg_ref(A_SHLD,S_L,value,tmpreg,tempref));
                    dec(tempref.offset,4);
                    if cs_opt_size in current_settings.optimizerswitches then
                      list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref))
                    else
                      begin
                        list.concat(taicpu.op_const_reg(A_SHL,S_L,value,tmpreg));
                        cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
                      end;
                  end;
                else
                  internalerror(2017041203);
              end;
          end;
      end;
    else
      internalerror(200204022);
  end;
end;

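{ In-place NOT/NEG of a 64-bit memory operand; NEG negates the low dword and
  completes the two's complement of the high dword with NOT and SBB -1. }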
procedure tcg64f386.a_op64_ref(list: TAsmList; op: TOpCG; size: tcgsize; const ref: treference);
var
  tempref : treference;
begin
  case op of
    OP_NOT:
      begin
        tempref:=ref;
        tcgx86(cg).make_simple_ref(list,tempref);
        list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
        inc(tempref.offset,4);
        list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
      end;
    OP_NEG:
      begin
        tempref:=ref;
        tcgx86(cg).make_simple_ref(list,tempref);
        inc(tempref.offset,4);
        list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
        cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
        dec(tempref.offset,4);
        list.concat(taicpu.op_ref(A_NEG,S_L,tempref));
        inc(tempref.offset,4);
        list.concat(taicpu.op_const_ref(A_SBB,S_L,-1,tempref));
        cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
      end;
    else
      internalerror(2020050708);
  end;
end;


procedure create_codegen;
begin
  cg := tcg386.create;
  cg64 := tcg64f386.create;
end;

end.