cgcpu.pas

  1. {
  2. Copyright (c) 1998-2002 by Florian Klaempfl
  3. This unit implements the code generator for the i386
  4. This program is free software; you can redistribute it and/or modify
  5. it under the terms of the GNU General Public License as published by
  6. the Free Software Foundation; either version 2 of the License, or
  7. (at your option) any later version.
  8. This program is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. GNU General Public License for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with this program; if not, write to the Free Software
  14. Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  15. ****************************************************************************
  16. }
  17. unit cgcpu;
  18. {$i fpcdefs.inc}
  19. interface
  20. uses
  21. globtype,
  22. cgbase,cgobj,cg64f32,cgx86,
  23. aasmbase,aasmtai,aasmdata,aasmcpu,
  24. cpubase,parabase,cgutils,
  25. symconst,symdef,symsym
  26. ;
  27. type
  28. tcg386 = class(tcgx86)
  29. procedure init_register_allocators;override;
  30. { passing parameter using push instead of mov }
  31. procedure a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);override;
  32. procedure a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);override;
  33. procedure a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);override;
  34. procedure a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);override;
  35. procedure g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);override;
  36. procedure g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
  37. procedure g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);
  38. procedure g_maybe_got_init(list: TAsmList); override;
  39. end;
  40. tcg64f386 = class(tcg64f32)
  41. procedure a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);override;
  42. procedure a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);override;
  43. procedure a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);override;
  44. procedure a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);override;
  45. procedure a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);override;
  46. procedure a_op64_ref(list : TAsmList;op:TOpCG;size : tcgsize;const ref: treference);override;
  47. private
  48. procedure get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
  49. end;
  50. procedure create_codegen;
  51. implementation
  52. uses
  53. globals,verbose,systems,cutils,
  54. paramgr,procinfo,fmodule,
  55. rgcpu,rgx86,cpuinfo;
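  { a parameter can be passed with plain PUSH instructions when its location is a
    stack reference addressed via ESP and no fixed stack frame is in use }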
  56. function use_push(const cgpara:tcgpara):boolean;
  57. begin
  58. result:=(not paramanager.use_fixed_stack) and
  59. assigned(cgpara.location) and
  60. (cgpara.location^.loc=LOC_REFERENCE) and
  61. (cgpara.location^.reference.index=NR_STACK_POINTER_REG);
  62. end;
  63. procedure tcg386.init_register_allocators;
  64. begin
  65. inherited init_register_allocators;
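  { when the useebp optimization is enabled and the current procedure does not need
    EBP as its frame pointer, EBP is handed to the register allocator as well }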
  66. if (cs_useebp in current_settings.optimizerswitches) and assigned(current_procinfo) and (current_procinfo.framepointer<>NR_EBP) then
  67. rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI,RS_EBP],first_int_imreg,[])
  68. else
  69. rg[R_INTREGISTER]:=trgcpu.create(R_INTREGISTER,R_SUBWHOLE,[RS_EAX,RS_EDX,RS_ECX,RS_EBX,RS_ESI,RS_EDI],first_int_imreg,[RS_EBP]);
  70. rg[R_MMXREGISTER]:=trgcpu.create(R_MMXREGISTER,R_SUBNONE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
  71. rg[R_MMREGISTER]:=trgcpu.create(R_MMREGISTER,R_SUBWHOLE,[RS_XMM0,RS_XMM1,RS_XMM2,RS_XMM3,RS_XMM4,RS_XMM5,RS_XMM6,RS_XMM7],first_mm_imreg,[]);
  72. rgfpu:=Trgx86fpu.create;
  73. end;
  74. procedure tcg386.a_load_reg_cgpara(list : TAsmList;size : tcgsize;r : tregister;const cgpara : tcgpara);
  75. var
  76. pushsize : tcgsize;
  77. begin
  78. check_register_size(size,r);
  79. if use_push(cgpara) then
  80. begin
  81. cgpara.check_simple_location;
  82. if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
  83. pushsize:=cgpara.location^.size
  84. else
  85. pushsize:=int_cgsize(cgpara.alignment);
  86. list.concat(taicpu.op_reg(A_PUSH,tcgsize2opsize[pushsize],makeregsize(list,r,pushsize)));
  87. end
  88. else
  89. inherited a_load_reg_cgpara(list,size,r,cgpara);
  90. end;
  91. procedure tcg386.a_load_const_cgpara(list : TAsmList;size : tcgsize;a : tcgint;const cgpara : tcgpara);
  92. var
  93. pushsize : tcgsize;
  94. begin
  95. if use_push(cgpara) then
  96. begin
  97. cgpara.check_simple_location;
  98. if tcgsize2size[cgpara.location^.size]>cgpara.alignment then
  99. pushsize:=cgpara.location^.size
  100. else
  101. pushsize:=int_cgsize(cgpara.alignment);
  102. list.concat(taicpu.op_const(A_PUSH,tcgsize2opsize[pushsize],a));
  103. end
  104. else
  105. inherited a_load_const_cgpara(list,size,a,cgpara);
  106. end;
  107. procedure tcg386.a_load_ref_cgpara(list : TAsmList;size : tcgsize;const r : treference;const cgpara : tcgpara);
  108. procedure pushdata(paraloc:pcgparalocation;ofs:tcgint);
  109. var
  110. pushsize : tcgsize;
  111. opsize : topsize;
  112. tmpreg : tregister;
  113. href : treference;
  114. begin
  115. if not assigned(paraloc) then
  116. exit;
  117. if (paraloc^.loc<>LOC_REFERENCE) or
  118. (paraloc^.reference.index<>NR_STACK_POINTER_REG) or
  119. (tcgsize2size[paraloc^.size]>sizeof(aint)) then
  120. internalerror(200501162);
  121. { Pushes are needed in reverse order: add the size of the
  122. current location to the offset to load from. This
  123. prevents wrong calculations for the last location when
  124. the size is not a power of 2 }
  125. if assigned(paraloc^.next) then
  126. pushdata(paraloc^.next,ofs+tcgsize2size[paraloc^.size]);
  127. { Push the data starting at ofs }
  128. href:=r;
  129. inc(href.offset,ofs);
  130. if tcgsize2size[paraloc^.size]>cgpara.alignment then
  131. pushsize:=paraloc^.size
  132. else
  133. pushsize:=int_cgsize(cgpara.alignment);
  134. opsize:=TCgsize2opsize[pushsize];
  135. { for go32v2 we obtain OS_F32,
  136. but pushs is not valid, we need pushl }
  137. if opsize=S_FS then
  138. opsize:=S_L;
  139. if tcgsize2size[paraloc^.size]<cgpara.alignment then
  140. begin
  141. tmpreg:=getintregister(list,pushsize);
  142. a_load_ref_reg(list,paraloc^.size,pushsize,href,tmpreg);
  143. list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
  144. end
  145. else
  146. begin
  147. make_simple_ref(list,href);
  148. list.concat(taicpu.op_ref(A_PUSH,opsize,href));
  149. end;
  150. end;
  151. var
  152. len : tcgint;
  153. href : treference;
  154. begin
  155. { cgpara.size=OS_NO requires a copy on the stack }
  156. if use_push(cgpara) then
  157. begin
  158. { Record copy? }
  159. if (cgpara.size=OS_NO) or (size=OS_NO) then
  160. begin
  161. cgpara.check_simple_location;
  162. len:=align(cgpara.intsize,cgpara.alignment);
  163. g_stackpointer_alloc(list,len);
  164. reference_reset_base(href,NR_STACK_POINTER_REG,0,ctempposinvalid,4,[]);
  165. g_concatcopy(list,r,href,len);
  166. end
  167. else
  168. begin
  169. if tcgsize2size[cgpara.size]<>tcgsize2size[size] then
  170. internalerror(200501161);
  171. if (cgpara.size=OS_F64) then
  172. begin
  173. href:=r;
  174. make_simple_ref(list,href);
  175. inc(href.offset,4);
  176. list.concat(taicpu.op_ref(A_PUSH,S_L,href));
  177. dec(href.offset,4);
  178. list.concat(taicpu.op_ref(A_PUSH,S_L,href));
  179. end
  180. else
  181. { We need to push the data in reverse order,
  182. therefore we use a recursive algorithm }
  183. pushdata(cgpara.location,0);
  184. end
  185. end
  186. else
  187. begin
  188. href:=r;
  189. make_simple_ref(list,href);
  190. inherited a_load_ref_cgpara(list,size,href,cgpara);
  191. end;
  192. end;
  193. procedure tcg386.a_loadaddr_ref_cgpara(list : TAsmList;const r : treference;const cgpara : tcgpara);
  194. var
  195. tmpreg : tregister;
  196. opsize : topsize;
  197. tmpref,dirref : treference;
  198. begin
  199. dirref:=r;
  200. { this could probably be done in a more optimized way, but for now
  201. this is sufficient }
  202. make_direct_ref(list,dirref);
  203. with dirref do
  204. begin
  205. if use_push(cgpara) then
  206. begin
  207. cgpara.check_simple_location;
  208. opsize:=tcgsize2opsize[OS_ADDR];
  209. if (segment=NR_NO) and (base=NR_NO) and (index=NR_NO) then
  210. begin
  211. if assigned(symbol) then
  212. begin
  213. if (target_info.system in [system_i386_darwin,system_i386_iphonesim]) and
  214. ((dirref.symbol.bind in [AB_EXTERNAL,AB_WEAK_EXTERNAL]) or
  215. (cs_create_pic in current_settings.moduleswitches)) then
  216. begin
  217. tmpreg:=getaddressregister(list);
  218. a_loadaddr_ref_reg(list,dirref,tmpreg);
  219. list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
  220. end
  221. else if cs_create_pic in current_settings.moduleswitches then
  222. begin
  223. if offset<>0 then
  224. begin
  225. tmpreg:=getaddressregister(list);
  226. a_loadaddr_ref_reg(list,dirref,tmpreg);
  227. list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
  228. end
  229. else
  230. begin
  231. reference_reset_symbol(tmpref,dirref.symbol,0,sizeof(pint),[]);
  232. tmpref.refaddr:=addr_pic;
  233. tmpref.base:=current_procinfo.got;
  234. {$ifdef EXTDEBUG}
  235. if not (pi_needs_got in current_procinfo.flags) then
  236. Comment(V_warning,'pi_needs_got not included');
  237. {$endif EXTDEBUG}
  238. include(current_procinfo.flags,pi_needs_got);
  239. list.concat(taicpu.op_ref(A_PUSH,S_L,tmpref));
  240. end
  241. end
  242. else
  243. list.concat(Taicpu.Op_sym_ofs(A_PUSH,opsize,symbol,offset));
  244. end
  245. else
  246. list.concat(Taicpu.Op_const(A_PUSH,opsize,offset));
  247. end
  248. else if (segment=NR_NO) and (base=NR_NO) and (index<>NR_NO) and
  249. (offset=0) and (scalefactor=0) and (symbol=nil) then
  250. list.concat(Taicpu.Op_reg(A_PUSH,opsize,index))
  251. else if (segment=NR_NO) and (base<>NR_NO) and (index=NR_NO) and
  252. (offset=0) and (symbol=nil) then
  253. list.concat(Taicpu.Op_reg(A_PUSH,opsize,base))
  254. else
  255. begin
  256. tmpreg:=getaddressregister(list);
  257. a_loadaddr_ref_reg(list,dirref,tmpreg);
  258. list.concat(taicpu.op_reg(A_PUSH,opsize,tmpreg));
  259. end;
  260. end
  261. else
  262. inherited a_loadaddr_ref_cgpara(list,dirref,cgpara);
  263. end;
  264. end;
  265. procedure tcg386.g_proc_exit(list : TAsmList;parasize:longint;nostackframe:boolean);
  266. procedure increase_sp(a : tcgint);
  267. var
  268. href : treference;
  269. begin
  270. reference_reset_base(href,NR_STACK_POINTER_REG,a,ctempposinvalid,0,[]);
  271. { normally, lea is a better choice than an add, since it does not clobber the flags }
  272. list.concat(Taicpu.op_ref_reg(A_LEA,TCGSize2OpSize[OS_ADDR],href,NR_STACK_POINTER_REG));
  273. end;
  274. begin
  275. { MMX needs to call EMMS }
  276. if assigned(rg[R_MMXREGISTER]) and
  277. (rg[R_MMXREGISTER].uses_registers) then
  278. list.concat(Taicpu.op_none(A_EMMS,S_NO));
  279. { remove stackframe }
  280. if not nostackframe then
  281. begin
  282. if (current_procinfo.framepointer=NR_STACK_POINTER_REG) or
  283. (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
  284. begin
  285. if current_procinfo.final_localsize<>0 then
  286. increase_sp(current_procinfo.final_localsize);
  287. if (not paramanager.use_fixed_stack) then
  288. internal_restore_regs(list,true);
  289. if (current_procinfo.procdef.proctypeoption=potype_exceptfilter) then
  290. list.concat(Taicpu.op_reg(A_POP,tcgsize2opsize[OS_ADDR],NR_FRAME_POINTER_REG));
  291. end
  292. else
  293. begin
  294. if (not paramanager.use_fixed_stack) then
  295. internal_restore_regs(list,not (pi_has_stack_allocs in current_procinfo.flags));
  296. generate_leave(list);
  297. end;
  298. list.concat(tai_regalloc.dealloc(current_procinfo.framepointer,nil));
  299. end;
  300. { return from proc }
  301. if po_interrupt in current_procinfo.procdef.procoptions then
  302. begin
  303. if assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
  304. (current_procinfo.procdef.funcretloc[calleeside].location^.loc=LOC_REGISTER) then
  305. begin
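  { the function result is returned in EAX, so discard the saved value from the
    stack instead of popping it over the result }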
  306. if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.register)=RS_EAX) then
  307. list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
  308. else
  309. internalerror(2010053001);
  310. end
  311. else
  312. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EAX));
  313. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EBX));
  314. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ECX));
  315. if (current_procinfo.procdef.funcretloc[calleeside].size in [OS_64,OS_S64]) and
  316. assigned(current_procinfo.procdef.funcretloc[calleeside].location) and
  317. assigned(current_procinfo.procdef.funcretloc[calleeside].location^.next) and
  318. (current_procinfo.procdef.funcretloc[calleeside].location^.next^.loc=LOC_REGISTER) then
  319. begin
  320. if (getsupreg(current_procinfo.procdef.funcretloc[calleeside].location^.next^.register)=RS_EDX) then
  321. list.concat(Taicpu.Op_const_reg(A_ADD,S_L,4,NR_ESP))
  322. else
  323. internalerror(2010053002);
  324. end
  325. else
  326. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDX));
  327. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_ESI));
  328. list.concat(Taicpu.Op_reg(A_POP,S_L,NR_EDI));
  329. { .... also the segment registers }
  330. list.concat(Taicpu.Op_reg(A_POP,S_W,NR_DS));
  331. list.concat(Taicpu.Op_reg(A_POP,S_W,NR_ES));
  332. list.concat(Taicpu.Op_reg(A_POP,S_W,NR_FS));
  333. list.concat(Taicpu.Op_reg(A_POP,S_W,NR_GS));
  334. { this restores the flags }
  335. list.concat(Taicpu.Op_none(A_IRET,S_NO));
  336. end
  337. { Routines with the poclearstack flag set use only a ret }
  338. else if (current_procinfo.procdef.proccalloption in clearstack_pocalls) and
  339. (not paramanager.use_fixed_stack) then
  340. begin
  341. { complex return values are removed from stack in C code PM }
  342. { but not on win32 }
  343. { and not for safecall with hidden exceptions, because the result }
  344. { which contains the exception is passed in EAX }
  345. if ((target_info.system <> system_i386_win32) or
  346. (target_info.abi=abi_old_win32_gnu)) and
  347. not ((current_procinfo.procdef.proccalloption = pocall_safecall) and
  348. (tf_safecall_exceptions in target_info.flags)) and
  349. paramanager.ret_in_param(current_procinfo.procdef.returndef,
  350. current_procinfo.procdef) then
  351. list.concat(Taicpu.Op_const(A_RET,S_W,sizeof(aint)))
  352. else
  353. list.concat(Taicpu.Op_none(A_RET,S_NO));
  354. end
  355. { ... also routines with parasize=0 }
  356. else if (parasize=0) then
  357. list.concat(Taicpu.Op_none(A_RET,S_NO))
  358. else
  359. begin
  360. { parameters are limited to 65535 bytes because ret allows only imm16 }
  361. if (parasize>65535) then
  362. CGMessage(cg_e_parasize_too_big);
  363. list.concat(Taicpu.Op_const(A_RET,S_W,parasize));
  364. end;
  365. end;
  366. procedure tcg386.g_copyvaluepara_openarray(list : TAsmList;const ref:treference;const lenloc:tlocation;elesize:tcgint;destreg:tregister);
  367. var
  368. power : longint;
  369. opsize : topsize;
  370. {$ifndef __NOWINPECOFF__}
  371. again,ok : tasmlabel;
  372. {$endif}
  373. begin
  374. { get stack space }
  375. getcpuregister(list,NR_EDI);
  376. a_load_loc_reg(list,OS_INT,lenloc,NR_EDI);
  377. list.concat(Taicpu.op_reg(A_INC,S_L,NR_EDI));
  378. { Now EDI contains (high+1). }
  379. { special case handling for elesize=8, 4 and 2:
  380. set ECX = (high+1) instead of ECX = (high+1)*elesize.
  381. In the case of elesize=4 and 2, this allows us to avoid the SHR later.
  382. In the case of elesize=8, we can later use a SHL ECX, 1 instead of
  383. SHR ECX, 2 which is one byte shorter. }
  384. if (elesize=8) or (elesize=4) or (elesize=2) then
  385. begin
  386. { Now EDI contains (high+1). Copy it to ECX for later use. }
  387. getcpuregister(list,NR_ECX);
  388. list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
  389. end;
  390. { EDI := EDI * elesize }
  391. if (elesize<>1) then
  392. begin
  393. if ispowerof2(elesize, power) then
  394. list.concat(Taicpu.op_const_reg(A_SHL,S_L,power,NR_EDI))
  395. else
  396. list.concat(Taicpu.op_const_reg(A_IMUL,S_L,elesize,NR_EDI));
  397. end;
  398. if (elesize<>8) and (elesize<>4) and (elesize<>2) then
  399. begin
  400. { Now EDI contains (high+1)*elesize. Copy it to ECX for later use. }
  401. getcpuregister(list,NR_ECX);
  402. list.concat(Taicpu.op_reg_reg(A_MOV,S_L,NR_EDI,NR_ECX));
  403. end;
  404. {$ifndef __NOWINPECOFF__}
  405. { windows guards only a few pages for stack growing, }
  406. { so we have to access every page first }
  407. if target_info.system=system_i386_win32 then
  408. begin
  409. current_asmdata.getjumplabel(again);
  410. current_asmdata.getjumplabel(ok);
  411. a_label(list,again);
  412. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  413. list.concat(Taicpu.op_const_reg(A_CMP,S_L,winstackpagesize,NR_EDI));
  414. a_jmp_cond(list,OC_B,ok);
  415. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  416. list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize-4,NR_ESP));
  417. list.concat(Taicpu.op_reg(A_PUSH,S_L,NR_EDI));
  418. list.concat(Taicpu.op_const_reg(A_SUB,S_L,winstackpagesize,NR_EDI));
  419. a_jmp_always(list,again);
  420. a_label(list,ok);
  421. end;
  422. {$endif __NOWINPECOFF__}
  423. { If we were probing pages, EDI=(size mod pagesize) and ESP is decremented
  424. by (size div pagesize)*pagesize, otherwise EDI=size.
  425. Either way, subtracting EDI from ESP will set ESP to desired final value. }
  426. list.concat(Taicpu.op_reg_reg(A_SUB,S_L,NR_EDI,NR_ESP));
  427. { align stack on 4 bytes }
  428. list.concat(Taicpu.op_const_reg(A_AND,S_L,aint($fffffff4),NR_ESP));
  429. { load destination; don't use a_load_reg_reg, because that would add a move instruction
  430. that can confuse the reg allocator }
  431. list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,NR_EDI));
  432. { Allocate ESI and load it with source }
  433. getcpuregister(list,NR_ESI);
  434. a_loadaddr_ref_reg(list,ref,NR_ESI);
  435. { calculate size }
  436. opsize:=S_B;
  437. if elesize=8 then
  438. begin
  439. opsize:=S_L;
  440. { ECX is number of qwords, convert to dwords }
  441. list.concat(Taicpu.op_const_reg(A_SHL,S_L,1,NR_ECX))
  442. end
  443. else if elesize=4 then
  444. begin
  445. opsize:=S_L;
  446. { ECX is already number of dwords, so no need to SHL/SHR }
  447. end
  448. else if elesize=2 then
  449. begin
  450. opsize:=S_W;
  451. { ECX is already number of words, so no need to SHL/SHR }
  452. end
  453. else
  454. if (elesize and 3)=0 then
  455. begin
  456. opsize:=S_L;
  457. { ECX is number of bytes, convert to dwords }
  458. list.concat(Taicpu.op_const_reg(A_SHR,S_L,2,NR_ECX))
  459. end
  460. else
  461. if (elesize and 1)=0 then
  462. begin
  463. opsize:=S_W;
  464. { ECX is number of bytes, convert to words }
  465. list.concat(Taicpu.op_const_reg(A_SHR,S_L,1,NR_ECX))
  466. end;
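  { emit CLD before the REP MOVS below when the target switch requests it,
    since a cleared direction flag may not be guaranteed on entry }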
  467. if ts_cld in current_settings.targetswitches then
  468. list.concat(Taicpu.op_none(A_CLD,S_NO));
  469. list.concat(Taicpu.op_none(A_REP,S_NO));
  470. case opsize of
  471. S_B : list.concat(Taicpu.Op_none(A_MOVSB,S_NO));
  472. S_W : list.concat(Taicpu.Op_none(A_MOVSW,S_NO));
  473. S_L : list.concat(Taicpu.Op_none(A_MOVSD,S_NO));
  474. end;
  475. ungetcpuregister(list,NR_EDI);
  476. ungetcpuregister(list,NR_ECX);
  477. ungetcpuregister(list,NR_ESI);
  478. { patch the new address, but don't use a_load_reg_reg, because that would add a move instruction
  479. that can confuse the reg allocator }
  480. list.concat(Taicpu.Op_reg_reg(A_MOV,S_L,NR_ESP,destreg));
  481. include(current_procinfo.flags,pi_has_stack_allocs);
  482. end;
  483. procedure tcg386.g_releasevaluepara_openarray(list : TAsmList;const l:tlocation);
  484. begin
  485. { Nothing to release }
  486. end;
  487. procedure tcg386.g_maybe_got_init(list: TAsmList);
  488. var
  489. i: longint;
  490. tmpreg: TRegister;
  491. begin
  492. { allocate PIC register }
  493. if (cs_create_pic in current_settings.moduleswitches) and
  494. (tf_pic_uses_got in target_info.flags) and
  495. (pi_needs_got in current_procinfo.flags) then
  496. begin
  497. if not (target_info.system in [system_i386_darwin,system_i386_iphonesim]) then
  498. begin
  499. { Use ECX as a temp register by default }
  500. tmpreg:=NR_ECX;
  501. { Allocate the registers used for parameters to make sure they are
  502. never allocated during this PIC init code }
  503. for i:=0 to current_procinfo.procdef.paras.Count - 1 do
  504. with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
  505. if Loc in [LOC_REGISTER, LOC_CREGISTER] then begin
  506. a_reg_alloc(list, register);
  507. { If ECX is used for a parameter, use EBX as temp }
  508. if getsupreg(register) = RS_ECX then
  509. tmpreg:=NR_EBX;
  510. end;
  511. if tmpreg = NR_EBX then
  512. begin
  513. { Mark EBX as used in the proc }
  514. include(rg[R_INTREGISTER].used_in_proc,RS_EBX);
  515. current_module.requires_ebx_pic_helper:=true;
  516. a_call_name_static(list,'fpc_geteipasebx');
  517. end
  518. else
  519. begin
  520. current_module.requires_ecx_pic_helper:=true;
  521. a_call_name_static(list,'fpc_geteipasecx');
  522. end;
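  { the helper returns with the address of the following instruction in tmpreg;
    adding the _GLOBAL_OFFSET_TABLE_ symbol (via a GOTPC relocation) yields the GOT address }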
  523. list.concat(taicpu.op_sym_ofs_reg(A_ADD,S_L,current_asmdata.RefAsmSymbol('_GLOBAL_OFFSET_TABLE_',AT_DATA),0,tmpreg));
  524. list.concat(taicpu.op_reg_reg(A_MOV,S_L,tmpreg,current_procinfo.got));
  525. { Deallocate parameter registers }
  526. for i:=0 to current_procinfo.procdef.paras.Count - 1 do
  527. with tparavarsym(current_procinfo.procdef.paras[i]).paraloc[calleeside].Location^ do
  528. if Loc in [LOC_REGISTER, LOC_CREGISTER] then
  529. a_reg_dealloc(list, register);
  530. end
  531. else
  532. begin
  533. { call/pop is faster than call/ret/mov on Core Solo and later
  534. according to Apple's benchmarking -- and all Intel Macs
  535. have at least a Core Solo (furthermore, CPUs from the i386 up to the Pentium 1
  536. don't have a return stack buffer) }
  537. a_call_name_static(list,current_procinfo.CurrGOTLabel.name);
  538. a_label(list,current_procinfo.CurrGotLabel);
  539. list.concat(taicpu.op_reg(A_POP,S_L,current_procinfo.got))
  540. end;
  541. end;
  542. end;
  543. { ************* 64bit operations ************ }
  544. procedure tcg64f386.get_64bit_ops(op:TOpCG;var op1,op2:TAsmOp);
  545. begin
  546. case op of
  547. OP_ADD :
  548. begin
  549. op1:=A_ADD;
  550. op2:=A_ADC;
  551. end;
  552. OP_SUB :
  553. begin
  554. op1:=A_SUB;
  555. op2:=A_SBB;
  556. end;
  557. OP_XOR :
  558. begin
  559. op1:=A_XOR;
  560. op2:=A_XOR;
  561. end;
  562. OP_OR :
  563. begin
  564. op1:=A_OR;
  565. op2:=A_OR;
  566. end;
  567. OP_AND :
  568. begin
  569. op1:=A_AND;
  570. op2:=A_AND;
  571. end;
  572. else
  573. internalerror(200203241);
  574. end;
  575. end;
  576. procedure tcg64f386.a_op64_ref_reg(list : TAsmList;op:TOpCG;size : tcgsize;const ref : treference;reg : tregister64);
  577. var
  578. op1,op2 : TAsmOp;
  579. tempref : treference;
  580. begin
  581. if not(op in [OP_NEG,OP_NOT]) then
  582. begin
  583. get_64bit_ops(op,op1,op2);
  584. tempref:=ref;
  585. tcgx86(cg).make_simple_ref(list,tempref);
  586. if op in [OP_ADD,OP_SUB] then
  587. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  588. list.concat(taicpu.op_ref_reg(op1,S_L,tempref,reg.reglo));
  589. inc(tempref.offset,4);
  590. list.concat(taicpu.op_ref_reg(op2,S_L,tempref,reg.reghi));
  591. if op in [OP_ADD,OP_SUB] then
  592. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  593. end
  594. else
  595. begin
  596. a_load64_ref_reg(list,ref,reg);
  597. a_op64_reg_reg(list,op,size,reg,reg);
  598. end;
  599. end;
  600. procedure tcg64f386.a_op64_reg_ref(list : TAsmList;op:TOpCG;size : tcgsize;reg : tregister64; const ref: treference);
  601. var
  602. op1,op2 : TAsmOp;
  603. tempref : treference;
  604. tmpreg: TRegister;
  605. l1, l2: TAsmLabel;
  606. begin
  607. case op of
  608. OP_NOT,OP_NEG:
  609. inherited;
  610. OP_SHR,OP_SHL,OP_SAR:
  611. begin
  612. { load the shift count (the right operand) into a register }
  613. cg.getcpuregister(list,NR_ECX);
  614. cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,NR_ECX);
  615. tempref:=ref;
  616. tcgx86(cg).make_simple_ref(list,tempref);
  617. { the damned shift instructions only work for counts below 32 (the count is masked to 5 bits) }
  618. { so we have to do some tricks here }
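  { TEST 32,CL checks bit 5 of the count: if it is clear (count < 32) we jump to the
    SHLD/SHRD path at l1, otherwise we fall through and handle counts of 32..63 by
    moving one dword and shifting it by the remaining count }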
  619. current_asmdata.getjumplabel(l1);
  620. current_asmdata.getjumplabel(l2);
  621. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  622. list.Concat(taicpu.op_const_reg(A_TEST,S_B,32,NR_CL));
  623. cg.a_jmp_flags(list,F_E,l1);
  624. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  625. tmpreg:=cg.getintregister(list,OS_32);
  626. case op of
  627. OP_SHL:
  628. begin
  629. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  630. list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
  631. inc(tempref.offset,4);
  632. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  633. dec(tempref.offset,4);
  634. cg.a_load_const_ref(list,OS_32,0,tempref);
  635. cg.a_jmp_always(list,l2);
  636. cg.a_label(list,l1);
  637. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  638. inc(tempref.offset,4);
  639. list.Concat(taicpu.op_reg_reg_ref(A_SHLD,S_L,NR_CL,tmpreg,tempref));
  640. dec(tempref.offset,4);
  641. if cs_opt_size in current_settings.optimizerswitches then
  642. list.concat(taicpu.op_reg_ref(A_SHL,S_L,NR_CL,tempref))
  643. else
  644. begin
  645. list.concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,tmpreg));
  646. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  647. end;
  648. end;
  649. OP_SHR:
  650. begin
  651. inc(tempref.offset,4);
  652. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  653. list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
  654. dec(tempref.offset,4);
  655. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  656. inc(tempref.offset,4);
  657. cg.a_load_const_ref(list,OS_32,0,tempref);
  658. cg.a_jmp_always(list,l2);
  659. cg.a_label(list,l1);
  660. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  661. dec(tempref.offset,4);
  662. list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
  663. inc(tempref.offset,4);
  664. if cs_opt_size in current_settings.optimizerswitches then
  665. list.concat(taicpu.op_reg_ref(A_SHR,S_L,NR_CL,tempref))
  666. else
  667. begin
  668. list.concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,tmpreg));
  669. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  670. end;
  671. end;
  672. OP_SAR:
  673. begin
  674. inc(tempref.offset,4);
  675. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  676. list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
  677. dec(tempref.offset,4);
  678. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  679. inc(tempref.offset,4);
  680. list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
  681. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  682. cg.a_jmp_always(list,l2);
  683. cg.a_label(list,l1);
  684. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  685. dec(tempref.offset,4);
  686. list.Concat(taicpu.op_reg_reg_ref(A_SHRD,S_L,NR_CL,tmpreg,tempref));
  687. inc(tempref.offset,4);
  688. if cs_opt_size in current_settings.optimizerswitches then
  689. list.concat(taicpu.op_reg_ref(A_SAR,S_L,NR_CL,tempref))
  690. else
  691. begin
  692. list.concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,tmpreg));
  693. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  694. end;
  695. end;
  696. else
  697. internalerror(2017041801);
  698. end;
  699. cg.a_label(list,l2);
  700. cg.ungetcpuregister(list,NR_ECX);
  701. exit;
  702. end;
  703. else
  704. begin
  705. get_64bit_ops(op,op1,op2);
  706. tempref:=ref;
  707. tcgx86(cg).make_simple_ref(list,tempref);
  708. if op in [OP_ADD,OP_SUB] then
  709. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  710. list.concat(taicpu.op_reg_ref(op1,S_L,reg.reglo,tempref));
  711. inc(tempref.offset,4);
  712. list.concat(taicpu.op_reg_ref(op2,S_L,reg.reghi,tempref));
  713. if op in [OP_ADD,OP_SUB] then
  714. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  715. end;
  716. end;
  717. end;
  718. procedure tcg64f386.a_op64_reg_reg(list : TAsmList;op:TOpCG;size : tcgsize;regsrc,regdst : tregister64);
  719. var
  720. op1,op2 : TAsmOp;
  721. l1, l2: TAsmLabel;
  722. begin
  723. case op of
  724. OP_NEG :
  725. begin
  726. if (regsrc.reglo<>regdst.reglo) then
  727. a_load64_reg_reg(list,regsrc,regdst);
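  { 64-bit two's complement negation: NOT hi, NEG lo, then SBB hi,-1 adds one to the
    inverted high dword only when the low dword was zero (NEG left the carry clear) }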
  728. list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
  729. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  730. list.concat(taicpu.op_reg(A_NEG,S_L,regdst.reglo));
  731. list.concat(taicpu.op_const_reg(A_SBB,S_L,-1,regdst.reghi));
  732. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  733. exit;
  734. end;
  735. OP_NOT :
  736. begin
  737. if (regsrc.reglo<>regdst.reglo) then
  738. a_load64_reg_reg(list,regsrc,regdst);
  739. list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reghi));
  740. list.concat(taicpu.op_reg(A_NOT,S_L,regdst.reglo));
  741. exit;
  742. end;
  743. OP_SHR,OP_SHL,OP_SAR:
  744. begin
  745. { load the shift count (the right operand) into a register }
  746. cg.getcpuregister(list,NR_ECX);
  747. cg.a_load_reg_reg(list,OS_32,OS_32,regsrc.reglo,NR_ECX);
  748. { the damned shift instructions only work for counts below 32 (the count is masked to 5 bits) }
  749. { so we have to do some tricks here }
  750. current_asmdata.getjumplabel(l1);
  751. current_asmdata.getjumplabel(l2);
  752. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  753. list.Concat(taicpu.op_const_reg(A_TEST,S_B,32,NR_CL));
  754. cg.a_jmp_flags(list,F_E,l1);
  755. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  756. case op of
  757. OP_SHL:
  758. begin
  759. list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
  760. cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reglo,regdst.reghi);
  761. list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reglo,regdst.reglo));
  762. cg.a_jmp_always(list,l2);
  763. cg.a_label(list,l1);
  764. list.Concat(taicpu.op_reg_reg_reg(A_SHLD,S_L,NR_CL,regdst.reglo,regdst.reghi));
  765. list.Concat(taicpu.op_reg_reg(A_SHL,S_L,NR_CL,regdst.reglo));
  766. end;
  767. OP_SHR:
  768. begin
  769. list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
  770. cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
  771. list.Concat(taicpu.op_reg_reg(A_XOR,S_L,regdst.reghi,regdst.reghi));
  772. cg.a_jmp_always(list,l2);
  773. cg.a_label(list,l1);
  774. list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
  775. list.Concat(taicpu.op_reg_reg(A_SHR,S_L,NR_CL,regdst.reghi));
  776. end;
  777. OP_SAR:
  778. begin
  779. cg.a_load_reg_reg(list,OS_32,OS_32,regdst.reghi,regdst.reglo);
  780. list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reglo));
  781. list.Concat(taicpu.op_const_reg(A_SAR,S_L,31,regdst.reghi));
  782. cg.a_jmp_always(list,l2);
  783. cg.a_label(list,l1);
  784. list.Concat(taicpu.op_reg_reg_reg(A_SHRD,S_L,NR_CL,regdst.reghi,regdst.reglo));
  785. list.Concat(taicpu.op_reg_reg(A_SAR,S_L,NR_CL,regdst.reghi));
  786. end;
  787. else
  788. internalerror(2017041801);
  789. end;
  790. cg.a_label(list,l2);
  791. cg.ungetcpuregister(list,NR_ECX);
  792. exit;
  793. end;
  794. end;
  795. get_64bit_ops(op,op1,op2);
  796. if op in [OP_ADD,OP_SUB] then
  797. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  798. list.concat(taicpu.op_reg_reg(op1,S_L,regsrc.reglo,regdst.reglo));
  799. list.concat(taicpu.op_reg_reg(op2,S_L,regsrc.reghi,regdst.reghi));
  800. if op in [OP_ADD,OP_SUB] then
  801. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  802. end;
  803. procedure tcg64f386.a_op64_const_reg(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;reg : tregister64);
  804. var
  805. op1,op2 : TAsmOp;
  806. begin
  807. case op of
  808. OP_AND,OP_OR,OP_XOR:
  809. begin
  810. cg.a_op_const_reg(list,op,OS_32,tcgint(lo(value)),reg.reglo);
  811. cg.a_op_const_reg(list,op,OS_32,tcgint(hi(value)),reg.reghi);
  812. end;
  813. OP_ADD, OP_SUB:
  814. begin
  815. // can't use a_op_const_reg because it may emit inc/dec, which don't set the carry flag needed by adc/sbb
  816. get_64bit_ops(op,op1,op2);
  817. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  818. list.concat(taicpu.op_const_reg(op1,S_L,aint(lo(value)),reg.reglo));
  819. list.concat(taicpu.op_const_reg(op2,S_L,aint(hi(value)),reg.reghi));
  820. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  821. end;
  822. OP_SHR,OP_SHL,OP_SAR:
  823. begin
  824. value:=value and 63;
  825. if value<>0 then
  826. begin
  827. if (value=1) and (op=OP_SHL) and
  828. (current_settings.optimizecputype<=cpu_486) and
  829. not (cs_opt_size in current_settings.optimizerswitches) then
  830. begin
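  { shift left by one: adding the low dword to itself doubles it and ADC
    propagates the carry into the high dword }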
  831. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  832. list.concat(taicpu.op_reg_reg(A_ADD,S_L,reg.reglo,reg.reglo));
  833. list.concat(taicpu.op_reg_reg(A_ADC,S_L,reg.reghi,reg.reghi));
  834. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  835. end
  836. else if (value=1) and (cs_opt_size in current_settings.optimizerswitches) then
  837. case op of
  838. OP_SHR:
  839. begin
  840. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  841. list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
  842. list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
  843. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  844. end;
  845. OP_SHL:
  846. begin
  847. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  848. list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
  849. list.concat(taicpu.op_const_reg(A_RCL,S_L,value,reg.reghi));
  850. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  851. end;
  852. OP_SAR:
  853. begin
  854. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  855. list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
  856. list.concat(taicpu.op_const_reg(A_RCR,S_L,value,reg.reglo));
  857. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  858. end;
  859. end
  860. else if value>31 then
  861. case op of
  862. OP_SAR:
  863. begin
  864. cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
  865. list.concat(taicpu.op_const_reg(A_SAR,S_L,31,reg.reghi));
  866. if (value and 31)<>0 then
  867. list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,reg.reglo));
  868. end;
  869. OP_SHR:
  870. begin
  871. cg.a_load_reg_reg(list,OS_32,OS_32,reg.reghi,reg.reglo);
  872. list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reghi,reg.reghi));
  873. if (value and 31)<>0 then
  874. list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,reg.reglo));
  875. end;
  876. OP_SHL:
  877. begin
  878. cg.a_load_reg_reg(list,OS_32,OS_32,reg.reglo,reg.reghi);
  879. list.concat(taicpu.op_reg_reg(A_XOR,S_L,reg.reglo,reg.reglo));
  880. if (value and 31)<>0 then
  881. list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,reg.reghi));
  882. end;
  883. else
  884. internalerror(2017041201);
  885. end
  886. else
  887. case op of
  888. OP_SAR:
  889. begin
  890. list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
  891. list.concat(taicpu.op_const_reg(A_SAR,S_L,value,reg.reghi));
  892. end;
  893. OP_SHR:
  894. begin
  895. list.concat(taicpu.op_const_reg_reg(A_SHRD,S_L,value,reg.reghi,reg.reglo));
  896. list.concat(taicpu.op_const_reg(A_SHR,S_L,value,reg.reghi));
  897. end;
  898. OP_SHL:
  899. begin
  900. list.concat(taicpu.op_const_reg_reg(A_SHLD,S_L,value,reg.reglo,reg.reghi));
  901. list.concat(taicpu.op_const_reg(A_SHL,S_L,value,reg.reglo));
  902. end;
  903. else
  904. internalerror(2017041201);
  905. end;
  906. end;
  907. end;
  908. else
  909. internalerror(200204021);
  910. end;
  911. end;
  912. procedure tcg64f386.a_op64_const_ref(list : TAsmList;op:TOpCG;size : tcgsize;value : int64;const ref : treference);
  913. var
  914. op1,op2 : TAsmOp;
  915. tempref : treference;
  916. tmpreg: TRegister;
  917. begin
  918. tempref:=ref;
  919. tcgx86(cg).make_simple_ref(list,tempref);
  920. case op of
  921. OP_AND,OP_OR,OP_XOR:
  922. begin
  923. cg.a_op_const_ref(list,op,OS_32,aint(lo(value)),tempref);
  924. inc(tempref.offset,4);
  925. cg.a_op_const_ref(list,op,OS_32,aint(hi(value)),tempref);
  926. end;
  927. OP_ADD, OP_SUB:
  928. begin
  929. get_64bit_ops(op,op1,op2);
  930. // can't use a_op_const_ref because this may use dec/inc
  931. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  932. list.concat(taicpu.op_const_ref(op1,S_L,aint(lo(value)),tempref));
  933. inc(tempref.offset,4);
  934. list.concat(taicpu.op_const_ref(op2,S_L,aint(hi(value)),tempref));
  935. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  936. end;
  937. OP_SHR,OP_SHL,OP_SAR:
  938. begin
  939. value:=value and 63;
  940. if value<>0 then
  941. begin
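  { for a count of one, shift one dword and rotate the carry into the other dword,
    so the bit shifted out is carried across via RCR/RCL }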
  942. if value=1 then
  943. case op of
  944. OP_SHR:
  945. begin
  946. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  947. inc(tempref.offset,4);
  948. list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref));
  949. dec(tempref.offset,4);
  950. list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
  951. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  952. end;
  953. OP_SHL:
  954. begin
  955. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  956. list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref));
  957. inc(tempref.offset,4);
  958. list.concat(taicpu.op_const_ref(A_RCL,S_L,value,tempref));
  959. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  960. end;
  961. OP_SAR:
  962. begin
  963. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  964. inc(tempref.offset,4);
  965. list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
  966. dec(tempref.offset,4);
  967. list.concat(taicpu.op_const_ref(A_RCR,S_L,value,tempref));
  968. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  969. end;
  970. end
  971. else if value>31 then
  972. case op of
  973. OP_SHR,OP_SAR:
  974. begin
  975. tmpreg:=cg.getintregister(list,OS_32);
  976. inc(tempref.offset,4);
  977. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  978. if (value and 31)<>0 then
  979. if op=OP_SHR then
  980. list.concat(taicpu.op_const_reg(A_SHR,S_L,value and 31,tmpreg))
  981. else
  982. list.concat(taicpu.op_const_reg(A_SAR,S_L,value and 31,tmpreg));
  983. dec(tempref.offset,4);
  984. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  985. inc(tempref.offset,4);
  986. if op=OP_SHR then
  987. cg.a_load_const_ref(list,OS_32,0,tempref)
  988. else
  989. begin
  990. list.concat(taicpu.op_const_reg(A_SAR,S_L,31,tmpreg));
  991. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  992. end;
  993. end;
  994. OP_SHL:
  995. begin
  996. tmpreg:=cg.getintregister(list,OS_32);
  997. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  998. if (value and 31)<>0 then
  999. list.concat(taicpu.op_const_reg(A_SHL,S_L,value and 31,tmpreg));
  1000. inc(tempref.offset,4);
  1001. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  1002. dec(tempref.offset,4);
  1003. cg.a_load_const_ref(list,OS_32,0,tempref);
  1004. end;
  1005. else
  1006. internalerror(2017041801);
  1007. end
  1008. else
  1009. case op of
  1010. OP_SHR,OP_SAR:
  1011. begin
  1012. tmpreg:=cg.getintregister(list,OS_32);
  1013. inc(tempref.offset,4);
  1014. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  1015. dec(tempref.offset,4);
  1016. list.concat(taicpu.op_const_reg_ref(A_SHRD,S_L,value,tmpreg,tempref));
  1017. inc(tempref.offset,4);
  1018. if cs_opt_size in current_settings.optimizerswitches then
  1019. begin
  1020. if op=OP_SHR then
  1021. list.concat(taicpu.op_const_ref(A_SHR,S_L,value,tempref))
  1022. else
  1023. list.concat(taicpu.op_const_ref(A_SAR,S_L,value,tempref));
  1024. end
  1025. else
  1026. begin
  1027. if op=OP_SHR then
  1028. list.concat(taicpu.op_const_reg(A_SHR,S_L,value,tmpreg))
  1029. else
  1030. list.concat(taicpu.op_const_reg(A_SAR,S_L,value,tmpreg));
  1031. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  1032. end;
  1033. end;
  1034. OP_SHL:
  1035. begin
  1036. tmpreg:=cg.getintregister(list,OS_32);
  1037. cg.a_load_ref_reg(list,OS_32,OS_32,tempref,tmpreg);
  1038. inc(tempref.offset,4);
  1039. list.concat(taicpu.op_const_reg_ref(A_SHLD,S_L,value,tmpreg,tempref));
  1040. dec(tempref.offset,4);
  1041. if cs_opt_size in current_settings.optimizerswitches then
  1042. list.concat(taicpu.op_const_ref(A_SHL,S_L,value,tempref))
  1043. else
  1044. begin
  1045. list.concat(taicpu.op_const_reg(A_SHL,S_L,value,tmpreg));
  1046. cg.a_load_reg_ref(list,OS_32,OS_32,tmpreg,tempref);
  1047. end;
  1048. end;
  1049. else
  1050. internalerror(2017041201);
  1051. end;
  1052. end;
  1053. end;
  1054. else
  1055. internalerror(200204022);
  1056. end;
  1057. end;
  1058. procedure tcg64f386.a_op64_ref(list: TAsmList; op: TOpCG; size: tcgsize; const ref: treference);
  1059. var
  1060. tempref : treference;
  1061. begin
  1062. case op of
  1063. OP_NOT:
  1064. begin
  1065. tempref:=ref;
  1066. tcgx86(cg).make_simple_ref(list,tempref);
  1067. list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
  1068. inc(tempref.offset,4);
  1069. list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
  1070. end;
  1071. OP_NEG:
  1072. begin
  1073. tempref:=ref;
  1074. tcgx86(cg).make_simple_ref(list,tempref);
  1075. inc(tempref.offset,4);
  1076. list.concat(taicpu.op_ref(A_NOT,S_L,tempref));
  1077. cg.a_reg_alloc(list,NR_DEFAULTFLAGS);
  1078. dec(tempref.offset,4);
  1079. list.concat(taicpu.op_ref(A_NEG,S_L,tempref));
  1080. inc(tempref.offset,4);
  1081. list.concat(taicpu.op_const_ref(A_SBB,S_L,-1,tempref));
  1082. cg.a_reg_dealloc(list,NR_DEFAULTFLAGS);
  1083. end;
  1084. else
  1085. internalerror(2020050708);
  1086. end;
  1087. end;
  1088. procedure create_codegen;
  1089. begin
  1090. cg := tcg386.create;
  1091. cg64 := tcg64f386.create;
  1092. end;
  1093. end.