
{
    Copyright (c) 1998-2002 by Florian Klaempfl

    Generate PowerPC assembler for math nodes

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

 ****************************************************************************
}
unit nppcmat;

{$i fpcdefs.inc}

interface

uses
  node,nmat,ncgmat;

type
  tppcmoddivnode = class(tmoddivnode)
    function pass_1: tnode;override;
    procedure pass_generate_code;override;
  end;

  tppcshlshrnode = class(tshlshrnode)
    procedure pass_generate_code;override;
    { everything will be handled in pass_2 }
    function first_shlshr64bitint: tnode; override;
  end;

  tppcunaryminusnode = class(tunaryminusnode)
    procedure pass_generate_code;override;
  end;

  tppcnotnode = class(tcgnotnode)
    procedure pass_generate_code;override;
  end;

implementation

uses
  globtype,systems,constexp,
  cutils,verbose,globals,
  symconst,symdef,
  aasmbase,aasmcpu,aasmtai,aasmdata,
  defutil,
  cgbase,cgutils,cgobj,hlcgobj,pass_2,
  ncon,procinfo,
  cpubase,
  ncgutil,cgcpu;

{*****************************************************************************
                              TPPCMODDIVNODE
*****************************************************************************}

function tppcmoddivnode.pass_1: tnode;
begin
  result := inherited pass_1;
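  { pass_generate_code below may emit a call to the FPC_DIVBYZERO helper,
    so mark the current procedure as containing a call }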
  if not assigned(result) then
    include(current_procinfo.flags,pi_do_call);
end;


procedure tppcmoddivnode.pass_generate_code;
const
  { indexed by [is_signed, overflow checking enabled] }
  divops: array[boolean, boolean] of tasmop =
    ((A_DIVWU,A_DIVWU_),(A_DIVW,A_DIVWO_));
  zerocond: tasmcond = (dirhint: DH_Plus; simple: true; cond:C_NE; cr: RS_CR1);
var
  power : longint;
  op : tasmop;
  numerator,
  divider,
  resultreg : tregister;
  size : Tcgsize;
  hl : tasmlabel;
  done: boolean;
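  { divide by a constant: 0 is an internal error, 1 and -1 are handled
    directly, powers of two become shifts, and everything else is
    delegated to the generic division-by-constant helper }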
  procedure genOrdConstNodeDiv;
  const
    negops : array[boolean] of tasmop = (A_NEG, A_NEGO);
  begin
    if (tordconstnode(right).value = 0) then begin
      internalerror(2005061701);
    end else if (tordconstnode(right).value = 1) then begin
      cg.a_load_reg_reg(current_asmdata.CurrAsmList, OS_INT, OS_INT, numerator, resultreg);
    end else if (tordconstnode(right).value = int64(-1)) then begin
      // note: only possible in the signed case; may overflow
      current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(negops[cs_check_overflow in current_settings.localswitches], resultreg, numerator));
    end else if (ispowerof2(tordconstnode(right).value, power)) then begin
      if (is_signed(right.resultdef)) then begin
        { From "The PowerPC Compiler Writer's Guide", pg. 52ff }
        cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_SAR, OS_INT, power,
          numerator, resultreg);
        current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_ADDZE, resultreg, resultreg));
      end else begin
        cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_SHR, OS_INT, power, numerator, resultreg)
      end;
    end else begin
      cg.g_div_const_reg_reg(current_asmdata.CurrAsmList,def_cgsize(resultdef),
        tordconstnode(right).value.svalue,numerator,resultreg);
    end;
    done := true;
  end;

  procedure genOrdConstNodeMod;
  var
    modreg, maskreg, tempreg : tregister;
  begin
    if (tordconstnode(right).value = 0) then begin
      internalerror(2005061702);
    end else if (abs(tordconstnode(right).value.svalue) = 1) then begin
      // x mod +/-1 is always zero
      cg.a_load_const_reg(current_asmdata.CurrAsmList, OS_INT, 0, resultreg);
    end else if (ispowerof2(tordconstnode(right).value, power)) then begin
      if (is_signed(right.resultdef)) then begin
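        { branchless signed "mod 2^k": tempreg receives the low k bits of
          the numerator; maskreg ends up holding the upper sign bits, but
          only when the numerator is negative and the remainder is
          non-zero, so or-ing the two gives a remainder with the sign of
          the numerator }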
        tempreg := cg.getintregister(current_asmdata.CurrAsmList, OS_INT);
        maskreg := cg.getintregister(current_asmdata.CurrAsmList, OS_INT);
        modreg := cg.getintregister(current_asmdata.CurrAsmList, OS_INT);
        cg.a_load_const_reg(current_asmdata.CurrAsmList, OS_INT, abs(tordconstnode(right).value.svalue)-1, modreg);
        cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_SAR, OS_INT, 31, numerator, maskreg);
        cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_AND, OS_INT, numerator, modreg, tempreg);
        current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_ANDC, maskreg, maskreg, modreg));
        current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const(A_SUBFIC, modreg, tempreg, 0));
        current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_SUBFE, modreg, modreg, modreg));
        cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_AND, OS_INT, modreg, maskreg, maskreg);
        cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_OR, OS_INT, maskreg, tempreg, resultreg);
      end else begin
        cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_AND, OS_INT, tordconstnode(right).value.svalue-1, numerator, resultreg);
      end;
    end else begin
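      { general constant: x mod c = x - (x div c) * c }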
      genOrdConstNodeDiv();
      cg.a_op_const_reg_reg(current_asmdata.CurrAsmList, OP_MUL, OS_INT, tordconstnode(right).value.svalue, resultreg, resultreg);
      cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList, OP_SUB, OS_INT, resultreg, numerator, resultreg);
    end;
  end;

begin
  secondpass(left);
  secondpass(right);
  location_copy(location,left.location);

  { put numerator in register }
  size:=def_cgsize(left.resultdef);
  hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,
    left.resultdef,left.resultdef,true);
  location_copy(location,left.location);
  numerator := location.register;
  resultreg := location.register;
  if (location.loc = LOC_CREGISTER) then begin
    location.loc := LOC_REGISTER;
    location.register := cg.getintregister(current_asmdata.CurrAsmList,size);
    resultreg := location.register;
  end else if (nodetype = modn) or (right.nodetype = ordconstn) then begin
    // for a modulus op, and for const nodes, the result has to go into a
    // separate register
    resultreg := cg.getintregister(current_asmdata.CurrAsmList,size);
  end;

  done := false;
  if (right.nodetype = ordconstn) then begin
    if (nodetype = divn) then
      genOrdConstNodeDiv
    else
      genOrdConstNodeMod;
    done := true;
  end;

  if (not done) then begin
    { load divider in a register if necessary }
    hlcg.location_force_reg(current_asmdata.CurrAsmList,right.location,
      right.resultdef,right.resultdef,true);
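    { record "divider <> 0" in CR1; the conditional branch after the
      division uses it to skip the FPC_DIVBYZERO call }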
    if (right.nodetype <> ordconstn) then
      current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const(A_CMPWI,NR_CR1,
        right.location.register,0));
    divider := right.location.register;

    { needs overflow checking, (-maxlongint-1) div (-1) overflows! }
    op := divops[is_signed(right.resultdef),
      cs_check_overflow in current_settings.localswitches];
    current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(op,resultreg,numerator,
      divider));
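    { for mod, derive the remainder from the quotient:
      result := numerator - (numerator div divider) * divider }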
    if (nodetype = modn) then
      begin
        current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_MULLW,resultreg,
          divider,resultreg));
        current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_SUB,location.register,
          numerator,resultreg));
        resultreg := location.register;
      end;
  end;

  { set result location }
  location.loc:=LOC_REGISTER;
  location.register:=resultreg;

  if right.nodetype <> ordconstn then
    begin
      current_asmdata.getjumplabel(hl);
      current_asmdata.CurrAsmList.concat(taicpu.op_cond_sym(A_BC,zerocond,hl));
      cg.a_call_name(current_asmdata.CurrAsmList,'FPC_DIVBYZERO',false);
      cg.a_label(current_asmdata.CurrAsmList,hl);
    end;

  { unsigned division/modulo can only overflow in case of division by zero }
  { (but checking this overflow flag is more convoluted than performing a  }
  { simple comparison with 0)                                              }
  if is_signed(right.resultdef) then
    cg.g_overflowcheck(current_asmdata.CurrAsmList,location,resultdef);
end;


{*****************************************************************************
                              TPPCSHLSHRNODE
*****************************************************************************}

function tppcshlshrnode.first_shlshr64bitint: tnode;
begin
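  { nil result: no transformation in pass 1, the 64-bit shift is
    generated inline in pass_generate_code }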
  result := nil;
end;


procedure tppcshlshrnode.pass_generate_code;
var
  resultreg, hregister1, hregister2,
  hreg64hi, hreg64lo : tregister;
  op : topcg;
  asmop1, asmop2: tasmop;
  shiftval: aint;
begin
  secondpass(left);
  secondpass(right);

  if is_64bit(left.resultdef) then
    begin
      hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,
        left.resultdef,left.resultdef,true);
      location_copy(location,left.location);
      hreg64hi := location.register64.reghi;
      hreg64lo := location.register64.reglo;
      if (location.loc = LOC_CREGISTER) then
        begin
          location.loc := LOC_REGISTER;
          location.register64.reghi := cg.getintregister(current_asmdata.CurrAsmList,OS_32);
          location.register64.reglo := cg.getintregister(current_asmdata.CurrAsmList,OS_32);
        end;

      if (right.nodetype = ordconstn) then
        begin
          shiftval := tordconstnode(right).value.svalue;
          shiftval := shiftval and 63;
          {
            I think the statements below would be more correct than the
            masking hack above, but with them we fail tshlshr.pp :/

            if shiftval > 63 then
              begin
                cg.a_load_const_reg(current_asmdata.CurrAsmList,OS_32,0,location.register64.reghi);
                cg.a_load_const_reg(current_asmdata.CurrAsmList,OS_32,0,location.register64.reglo);
              end
            else
          }
          if shiftval = 0 then
            begin
              cg.a_load_reg_reg(current_asmdata.CurrAsmList,OS_32,OS_32,left.location.register64.reghi,location.register64.reghi);
              cg.a_load_reg_reg(current_asmdata.CurrAsmList,OS_32,OS_32,left.location.register64.reglo,location.register64.reglo);
            end
          else if shiftval > 31 then
            begin
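              { shift count 32..63: only one source word contributes to
                the result; the other destination word becomes zero }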
              if nodetype = shln then
                begin
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SHL,OS_32,
                    shiftval and 31,hreg64lo,location.register64.reghi);
                  cg.a_load_const_reg(current_asmdata.CurrAsmList,OS_32,0,location.register64.reglo);
                end
              else
                begin
                  cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,OP_SHR,OS_32,
                    shiftval and 31,hreg64hi,location.register64.reglo);
                  cg.a_load_const_reg(current_asmdata.CurrAsmList,OS_32,0,location.register64.reghi);
                end;
            end
          else
            begin
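              { shift count 1..31: compose each destination word from a
                rotate-and-mask of its own source word (RLWINM) plus the
                bits shifted in from the other word (RLWIMI) }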
              if nodetype = shln then
                begin
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const_const_const(
                    A_RLWINM,location.register64.reghi,hreg64hi,shiftval,
                    0,31-shiftval));
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const_const_const(
                    A_RLWIMI,location.register64.reghi,hreg64lo,shiftval,
                    32-shiftval,31));
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const_const_const(
                    A_RLWINM,location.register64.reglo,hreg64lo,shiftval,
                    0,31-shiftval));
                end
              else
                begin
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const_const_const(
                    A_RLWINM,location.register64.reglo,hreg64lo,32-shiftval,
                    shiftval,31));
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const_const_const(
                    A_RLWIMI,location.register64.reglo,hreg64hi,32-shiftval,
                    0,shiftval-1));
                  current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const_const_const(
                    A_RLWINM,location.register64.reghi,hreg64hi,32-shiftval,
                    shiftval,31));
                end;
            end;
        end
      else
        { no constant shiftcount }
        begin
          hlcg.location_force_reg(current_asmdata.CurrAsmList,right.location,right.resultdef,s32inttype,true);
          hregister1 := right.location.register;

          if nodetype = shln then
            begin
              asmop1 := A_SLW;
              asmop2 := A_SRW;
            end
          else
            begin
              asmop1 := A_SRW;
              asmop2 := A_SLW;
              resultreg := hreg64hi;
              hreg64hi := hreg64lo;
              hreg64lo := resultreg;
              resultreg := location.register64.reghi;
              location.register64.reghi := location.register64.reglo;
              location.register64.reglo := resultreg;
            end;
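          { variable shift count (0..63), using R0 as scratch:
              reghi := (hi shift n) or (lo inverse-shift (32-n)) or (lo shift (n-32))
              reglo := lo shift n
            SLW/SRW yield zero for (6-bit) shift amounts of 32..63, so the
            terms that do not apply for a given n simply drop out; for
            shrn the halves were swapped above, so the same sequence
            shifts in the other direction }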
          cg.getcpuregister(current_asmdata.CurrAsmList,NR_R0);
          current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const(A_SUBFIC,
            NR_R0,hregister1,32));
          current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(asmop1,
            location.register64.reghi,hreg64hi,hregister1));
          current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(asmop2,
            NR_R0,hreg64lo,NR_R0));
          current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_OR,
            location.register64.reghi,location.register64.reghi,NR_R0));
          current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const(A_SUBI,
            NR_R0,hregister1,32));
          current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(asmop1,
            NR_R0,hreg64lo,NR_R0));
          current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(A_OR,
            location.register64.reghi,location.register64.reghi,NR_R0));
          current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_reg(asmop1,
            location.register64.reglo,hreg64lo,hregister1));
          cg.ungetcpuregister(current_asmdata.CurrAsmList,NR_R0);

          if nodetype = shrn then
            begin
              resultreg := location.register64.reghi;
              location.register64.reghi := location.register64.reglo;
              location.register64.reglo := resultreg;
            end;
        end
    end
  else
    begin
      { load left operand in a register }
      hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);
      location_copy(location,left.location);
      resultreg := location.register;
      hregister1 := location.register;
      location.loc := LOC_REGISTER;
      resultreg := cg.getintregister(current_asmdata.CurrAsmList,location.size);
      location.register := resultreg;

      { determine operator }
      if nodetype=shln then
        op:=OP_SHL
      else
        op:=OP_SHR;

      { shifting by a constant directly coded: }
      if (right.nodetype=ordconstn) then
        cg.a_op_const_reg_reg(current_asmdata.CurrAsmList,op,location.size,
          tordconstnode(right).value.svalue and 31,hregister1,resultreg)
      else
        begin
          { load shift count in a register if necessary }
          hlcg.location_force_reg(current_asmdata.CurrAsmList,right.location,right.resultdef,right.resultdef,true);
          hregister2 := right.location.register;
          cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList,op,location.size,hregister2,
            hregister1,resultreg);
        end;
    end;
end;


{*****************************************************************************
                            TPPCUNARYMINUSNODE
*****************************************************************************}

procedure tppcunaryminusnode.pass_generate_code;
var
  src1: tregister;
  op: tasmop;
begin
  src1:=NR_NO;
  secondpass(left);

  if is_64bit(left.resultdef) then
    begin
      hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);
      location_copy(location,left.location);
      if (location.loc = LOC_CREGISTER) then
        begin
          location.register64.reglo := cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
          location.register64.reghi := cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
          location.loc := LOC_REGISTER;
        end;
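      { 64-bit negation as 0 - x: SUBFIC computes 0 - lo and sets the
        carry, SUBFZE computes -hi minus the borrow; the SUBFZEO_ form
        additionally records overflow when overflow checking is enabled }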
      current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg_const(A_SUBFIC,
        location.register64.reglo,left.location.register64.reglo,0));
      if not(cs_check_overflow in current_settings.localswitches) then
        current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_SUBFZE,
          location.register64.reghi,left.location.register64.reghi))
      else
        current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_SUBFZEO_,
          location.register64.reghi,left.location.register64.reghi));
    end
  else
    begin
      if left.location.loc in [LOC_SUBSETREG,LOC_CSUBSETREG,LOC_SUBSETREF,LOC_CSUBSETREF] then
        hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);
      location_copy(location,left.location);
      location.loc:=LOC_REGISTER;

      case left.location.loc of
        LOC_FPUREGISTER, LOC_REGISTER:
          begin
            src1 := left.location.register;
            location.register := src1;
          end;
        LOC_CFPUREGISTER, LOC_CREGISTER:
          begin
            src1 := left.location.register;
            if left.location.loc = LOC_CREGISTER then
              location.register := cg.getintregister(current_asmdata.CurrAsmList,OS_INT)
            else
              location.register := cg.getfpuregister(current_asmdata.CurrAsmList,location.size);
          end;
        LOC_REFERENCE,LOC_CREFERENCE:
          begin
            if (left.resultdef.typ=floatdef) then
              begin
                src1 := cg.getfpuregister(current_asmdata.CurrAsmList,def_cgsize(left.resultdef));
                location.register := src1;
                cg.a_loadfpu_ref_reg(current_asmdata.CurrAsmList,
                  left.location.size,left.location.size,
                  left.location.reference,src1);
              end
            else
              begin
                src1 := cg.getintregister(current_asmdata.CurrAsmList,OS_32);
                location.register := src1;
                cg.a_load_ref_reg(current_asmdata.CurrAsmList,OS_32,OS_32,
                  left.location.reference,src1);
              end;
          end;
        else
          internalerror(2019050913);
      end;

      { choose the appropriate operation }
      if left.resultdef.typ <> floatdef then
        begin
          if not(cs_check_overflow in current_settings.localswitches) then
            op := A_NEG
          else
            op := A_NEGO_;
          location.loc := LOC_REGISTER;
        end
      else
        begin
          op := A_FNEG;
          location.loc := LOC_FPUREGISTER;
        end;

      { emit operation }
      current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(op,location.register,src1));
    end;

  { Here was a problem...            }
  { Operand to be negated always     }
  { seems to be converted to signed  }
  { 32-bit before doing neg!!        }
  { So this is useless...            }
  { that's not true: -2^31 gives an overflow error if it is negated (FK) }
  cg.g_overflowcheck(current_asmdata.CurrAsmList,location,resultdef);
end;


{*****************************************************************************
                               TPPCNOTNODE
*****************************************************************************}

procedure tppcnotnode.pass_generate_code;
var
  tmpreg: tregister;
begin
  secondpass(left);

  if is_boolean(resultdef) then
    begin
      if not handle_locjump then
        begin
          { handle_locjump does call secondpass }
          case left.location.loc of
            LOC_FLAGS :
              begin
                location_copy(location,left.location);
                inverse_flags(location.resflags);
              end;
            LOC_REGISTER, LOC_CREGISTER,
            LOC_REFERENCE, LOC_CREFERENCE,
            LOC_SUBSETREG, LOC_CSUBSETREG,
            LOC_SUBSETREF, LOC_CSUBSETREF:
              begin
                hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);
                tmpreg:=left.location.register;
{$ifndef cpu64bitalu}
                { 64 bit pascal booleans have their truth value stored in
                  the lower 32 bits; with cbools, it can be anywhere }
                if (left.location.size in [OS_64,OS_S64]) and
                   not is_pasbool(left.resultdef) then
                  begin
                    tmpreg:=cg.getintregister(current_asmdata.CurrAsmList,OS_32);
                    cg.a_op_reg_reg_reg(current_asmdata.CurrAsmList,OP_OR,OS_32,left.location.register64.reglo,left.location.register64.reghi,tmpreg);
                  end;
{$endif not cpu64bitalu}
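                { "not b" is true exactly when b (or the or-ed 64-bit
                  halves) is zero, so compare against 0 and return the
                  EQ flag of CR0 }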
                current_asmdata.CurrAsmList.concat(taicpu.op_reg_const(A_CMPWI,tmpreg,0));
                location_reset(location,LOC_FLAGS,OS_NO);
                location.resflags.cr:=RS_CR0;
                location.resflags.flag:=F_EQ;
              end;
            else
              internalerror(2003042401);
          end;
        end;
    end
  else if is_64bitint(left.resultdef) then
    begin
      hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,false);
      location_copy(location,left.location);
      { perform the NOT operation }
      current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_NOT,location.register64.reghi,
        location.register64.reghi));
      current_asmdata.CurrAsmList.concat(taicpu.op_reg_reg(A_NOT,location.register64.reglo,
        location.register64.reglo));
    end
  else
    begin
      hlcg.location_force_reg(current_asmdata.CurrAsmList,left.location,left.resultdef,left.resultdef,true);
      location_copy(location,left.location);
      location.loc := LOC_REGISTER;
      location.register := cg.getintregister(current_asmdata.CurrAsmList,OS_INT);
      { perform the NOT operation }
      cg.a_op_reg_reg(current_asmdata.CurrAsmList,OP_NOT,def_cgsize(resultdef),left.location.register,
        location.register);
    end;
end;
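{ register the PowerPC-specific node classes with the code generator }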
begin
  cmoddivnode:=tppcmoddivnode;
  cshlshrnode:=tppcshlshrnode;
  cunaryminusnode:=tppcunaryminusnode;
  cnotnode:=tppcnotnode;
end.